158 lines
6.6 KiB
Python
158 lines
6.6 KiB
Python
"""生物由来ファイルダウンロード APIRoute"""
|
|
from datetime import datetime
|
|
from typing import Union
|
|
|
|
import pandas as pd
|
|
from fastapi import APIRouter, Depends, HTTPException
|
|
from fastapi.responses import JSONResponse
|
|
from starlette import status
|
|
|
|
from src.depends.auth import verify_session
|
|
from src.depends.services import get_service
|
|
from src.error.exceptions import DBException
|
|
from src.logging.get_logger import get_logger
|
|
from src.model.internal.session import UserSession
|
|
from src.model.request.bio import BioModel
|
|
from src.model.request.bio_download import BioDownloadModel
|
|
from src.services.batch_status_service import BatchStatusService
|
|
from src.services.bio_view_service import BioViewService
|
|
from src.services.session_service import set_session
|
|
from src.system_var import constants, environment
|
|
|
|
logger = get_logger('生物由来参照')
|
|
|
|
router = APIRouter()
|
|
|
|
#########################
|
|
# APIs #
|
|
#########################
|
|
|
|
|
|
@router.post('/download')
|
|
async def download_bio_data(
|
|
search_param: BioModel = Depends(BioModel.as_body),
|
|
download_param: BioDownloadModel = Depends(BioDownloadModel.as_body),
|
|
bio_service: BioViewService = Depends(get_service(BioViewService)),
|
|
batch_status_service: BatchStatusService = Depends(get_service(BatchStatusService)),
|
|
session: Union[UserSession, None] = Depends(verify_session)
|
|
):
|
|
# 通常のビューとはルーティングの扱いを変えるために、個別のルーターで登録する
|
|
logger.info('生物由来データダウンロード開始')
|
|
logger.info(f'ユーザーID: {download_param.user_id}')
|
|
logger.info(f'拡張子: {download_param.ext}')
|
|
# ファイル名に使用するタイムスタンプを初期化しておく
|
|
current_timestamp = datetime.now()
|
|
# 出力ファイル名
|
|
download_file_name = f'Result_{download_param.user_id}_{current_timestamp:%Y%m%d%H%M%S%f}.{download_param.ext}'
|
|
if session is None:
|
|
return {'status': 'session_expired'}
|
|
# バッチ処理中の場合、機能を利用させない
|
|
if batch_status_service.is_batch_processing():
|
|
return {'status': 'batch_processing'}
|
|
# 生物由来データを検索
|
|
# 検索に使用したクエリも取得
|
|
search_result_df, query = _search_bio_data(bio_service, search_param, download_param.user_id)
|
|
# アクセスログを記録
|
|
bio_service.write_access_log(query, search_param, download_param.user_id, current_timestamp, download_file_name)
|
|
|
|
if search_result_df.size < 1:
|
|
# 検索結果が0件の場合、download_urlを返さない
|
|
logger.info('検索結果が0件です')
|
|
return {'status': 'ok', 'download_url': None}
|
|
|
|
# ファイルに打ち出すカラムを抽出
|
|
# TODO: SQLクエリを修正するため、この処理は不要になる
|
|
extract_df = _extract_output_df(search_result_df)
|
|
|
|
# ファイルを書き出し(Excel or CSV)
|
|
local_file_path = _write_bio_data_to_file(bio_service, download_param, extract_df, download_file_name)
|
|
|
|
# ローカルファイルからS3にアップロードし、ダウンロード用URLを取得する
|
|
try:
|
|
bio_service.upload_bio_data_file(local_file_path)
|
|
download_file_url = bio_service.generate_download_file_url(
|
|
local_file_path, download_param.user_id, download_param.ext)
|
|
except Exception as e:
|
|
logger.exception(f'S3 アクセスエラー{e}')
|
|
raise HTTPException(
|
|
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
|
detail={'error': 'aws_error', 'message': e.args}
|
|
)
|
|
|
|
# セッション書き換え
|
|
session.update(
|
|
actions=[
|
|
UserSession.last_access_time.set(UserSession.new_last_access_time()),
|
|
UserSession.record_expiration_time.set(UserSession.new_record_expiration_time()),
|
|
]
|
|
)
|
|
set_session(session)
|
|
|
|
# クッキーも書き換え
|
|
json_response = JSONResponse(content={
|
|
'status': 'ok',
|
|
'download_url': download_file_url
|
|
})
|
|
json_response.set_cookie(
|
|
key='session',
|
|
value=session.session_key,
|
|
max_age=environment.SESSION_EXPIRE_MINUTE * 60, # cookieの有効期限は秒数指定なので、60秒をかける
|
|
secure=True,
|
|
httponly=True
|
|
)
|
|
return json_response
|
|
|
|
|
|
def _search_bio_data(bio_service: BioViewService, search_param: BioModel, user_id: str) -> pd.DataFrame:
|
|
try:
|
|
# 生物由来データを検索
|
|
search_result_df, query = bio_service.search_download_bio_data(search_param)
|
|
except DBException as e:
|
|
raise HTTPException(
|
|
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
|
detail={'error': 'db_error', 'message': e.args}
|
|
)
|
|
|
|
return search_result_df, query
|
|
|
|
|
|
def _extract_output_df(search_result_df: pd.DataFrame) -> pd.DataFrame:
|
|
extract_df = search_result_df[constants.BIO_EXTRACT_COLUMNS]
|
|
|
|
# 値を変換
|
|
# データ種別の正式名を設定
|
|
extract_df.loc[:, 'slip_org_kbn'] = extract_df['slip_org_kbn'].apply(
|
|
lambda key: constants.SLIP_ORG_KBN_FULL_NAME.get(key))
|
|
# データ区分の区分の日本語名を設定
|
|
extract_df.loc[:, 'data_kbn'] = extract_df['data_kbn'].apply(lambda key: constants.DATA_KBN_JP_NAME.get(key))
|
|
# ロット番号エラーフラグの日本語名を設定
|
|
extract_df.loc[:, 'lot_num_err_flg'] = extract_df['lot_num_err_flg'].apply(
|
|
lambda key: constants.LOT_NO_ERR_FLG_JP_NAME.get(key))
|
|
# 訂正前伝票管理番号がセットされているときのみ修正日時、修正者、エラー詳細種別をセット
|
|
extract_df.loc[:, 'ins_dt'] = extract_df['bef_slip_mgt_num'].apply(
|
|
lambda bef_slip_mgt_num: extract_df['ins_dt'] if bef_slip_mgt_num is not None else '')
|
|
extract_df.loc[:, 'ins_usr'] = extract_df['bef_slip_mgt_num'].apply(
|
|
lambda bef_slip_mgt_num: extract_df['ins_usr'] if bef_slip_mgt_num is not None else '')
|
|
|
|
return extract_df
|
|
|
|
|
|
def _write_bio_data_to_file(
|
|
bio_service: BioViewService,
|
|
download_param: BioDownloadModel,
|
|
df: pd.DataFrame,
|
|
download_file_name: str
|
|
) -> str:
|
|
# 種別によって出力を変える
|
|
local_file_path = ''
|
|
if download_param.ext == 'xlsx':
|
|
logger.info('今回はExcelファイルに出力する')
|
|
local_file_path = bio_service.write_excel_file(
|
|
df, download_param.user_id, download_file_name=download_file_name)
|
|
elif download_param.ext == 'csv':
|
|
logger.info('今回はCSVファイルに出力する')
|
|
local_file_path = bio_service.write_csv_file(
|
|
df, download_param.user_id, header=constants.BIO_CSV_HEADER, download_file_name=download_file_name)
|
|
|
|
return local_file_path
|