# Extracted 2024-01-29 16:50:31 +09:00 — 206 lines, 7.9 KiB, Python
"""生物由来照会 APIRoute"""
import datetime
from typing import Optional, Union
import pandas as pd
from fastapi import APIRouter, Depends, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from starlette import status
from src.depends.auth import verify_session
from src.depends.services import get_service
from src.logging.get_logger import get_logger
from src.model.internal.session import UserSession
from src.model.request.bio import BioModel
from src.model.request.bio_download import BioDownloadModel
from src.model.view.bio_disp_model import BioDisplayModel
from src.services.batch_status_service import BatchStatusService
from src.services.bio_view_service import BioViewService
from src.services.session_service import set_session
from src.system_var import constants, environment
logger = get_logger('生物由来参照')
router = APIRouter()
#########################
# APIs #
#########################
@router.post('/search')
def search_bio_data(
    bio_form: Optional[BioModel] = Depends(BioModel.as_form),
    bio_service: BioViewService = Depends(get_service(BioViewService)),
    batch_status_service: BatchStatusService = Depends(get_service(BatchStatusService)),
    session: Union[UserSession, None] = Depends(verify_session)
):
    """Search biologically-derived (生物由来) data and return rows plus the hit count.

    Returns 401 when the session is missing/expired and 403 while a batch
    job is running. On success the session record is refreshed, the search
    condition and count are cached on it, and the session cookie is re-issued.
    """
    if session is None:
        return JSONResponse(content={'status': 'session_expired'}, status_code=status.HTTP_401_UNAUTHORIZED)
    # Do not allow the feature while a batch job is running.
    if batch_status_service.is_batch_processing():
        return JSONResponse(content={'status': 'batch_processing'}, status_code=status.HTTP_403_FORBIDDEN)
    # Fetch the bio data rows and the total hit count.
    bio_sales_lot_data = bio_service.search_bio_data(bio_form)
    bio_sales_lot_count = bio_service.count_bio_data(bio_form, session)

    def custom_encode(obj):
        """Encode a BioDisplayModel, rendering date/datetime fields as strings.

        Works around JSON encoding errors for date-typed values.
        isinstance (not exact-type comparison) also covers datetime
        subclasses; the datetime branch must come first because
        datetime.datetime is itself a subclass of datetime.date.
        """
        encoded_obj = obj.model_dump()
        for key, value in encoded_obj.items():
            if isinstance(value, datetime.datetime):
                encoded_obj[key] = value.strftime("%Y-%m-%d %H:%M:%S")
            elif isinstance(value, datetime.date):
                encoded_obj[key] = value.strftime("%Y-%m-%d")
        return encoded_obj

    data = jsonable_encoder(
        bio_sales_lot_data,
        custom_encoder={
            BioDisplayModel: custom_encode
        }
    )
    # Refresh the session record.
    session.update(
        actions=[
            UserSession.last_access_time.set(UserSession.new_last_access_time()),
            UserSession.record_expiration_time.set(UserSession.new_record_expiration_time()),
            # Cache the search condition and result count on the session.
            UserSession.bio_search_condition.set(bio_form.model_dump()),
            UserSession.bio_search_count.set(bio_sales_lot_count),
        ]
    )
    set_session(session)
    json_response = JSONResponse(content={
        'data': data,
        'count': bio_sales_lot_count
    })
    # Re-issue the session cookie as well.
    json_response.set_cookie(
        key='session',
        value=session.session_key,
        secure=True,
        httponly=True
    )
    return json_response
@router.post('/download')
async def download_bio_data(
    search_param: BioModel = Depends(BioModel.as_body),
    download_param: BioDownloadModel = Depends(BioDownloadModel.as_body),
    bio_service: BioViewService = Depends(get_service(BioViewService)),
    batch_status_service: BatchStatusService = Depends(get_service(BatchStatusService)),
    session: Union[UserSession, None] = Depends(verify_session)
):
    """Export bio-derived search results to Excel/CSV and return a download URL.

    Registered on a dedicated router because routing is handled differently
    from the normal view. Returns 401/403 JSONResponses for expired session /
    running batch (consistent with /search), raises HTTP 500 on S3 failure,
    and returns ``download_url: None`` when the search yields no rows.
    """
    logger.info('生物由来データダウンロード開始')
    logger.info(f'ユーザーID: {download_param.user_id}')
    logger.info(f'拡張子: {download_param.ext}')
    # Timestamp used in both the output file name and the access log.
    current_timestamp = datetime.datetime.now()
    # Output file name
    download_file_name = f'Result_{download_param.user_id}_{current_timestamp:%Y%m%d%H%M%S%f}.{download_param.ext}'
    if session is None:
        # Same status codes as /search so clients handle both endpoints uniformly.
        return JSONResponse(content={'status': 'session_expired'}, status_code=status.HTTP_401_UNAUTHORIZED)
    # Do not allow the feature while a batch job is running.
    if batch_status_service.is_batch_processing():
        return JSONResponse(content={'status': 'batch_processing'}, status_code=status.HTTP_403_FORBIDDEN)
    # Search the bio data; also capture the query used, for the access log.
    search_result_df, query = _search_download_bio_data(bio_service, search_param, download_param)
    # Record the access log (written even when the result is empty).
    bio_service.write_access_log(query, search_param, download_param.user_id, current_timestamp, download_file_name)
    if search_result_df.size < 1:
        # Zero rows: succeed but return no download_url.
        logger.info('検索結果が0件です')
        return {'status': 'ok', 'download_url': None}
    # Write the file (Excel or CSV).
    local_file_path = await _write_bio_data_to_file(bio_service, download_param, search_result_df, download_file_name)
    logger.info('ファイル書き出し完了')
    # Upload the local file to S3 and obtain the download URL.
    try:
        bio_service.upload_bio_data_file(local_file_path)
        download_file_url = bio_service.generate_download_file_url(
            local_file_path, download_param.user_id, download_param.ext)
    except Exception as e:
        logger.exception(f'S3 アクセスエラー{e}')
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={'error': 'aws_error', 'message': e.args}
        )
    # Refresh the session record.
    session.update(
        actions=[
            UserSession.last_access_time.set(UserSession.new_last_access_time()),
            UserSession.record_expiration_time.set(UserSession.new_record_expiration_time()),
        ]
    )
    set_session(session)
    # Re-issue the session cookie as well.
    json_response = JSONResponse(content={
        'status': 'ok',
        'download_url': download_file_url
    })
    json_response.set_cookie(
        key='session',
        value=session.session_key,
        secure=True,
        httponly=True
    )
    return json_response
def _search_download_bio_data(
    bio_service: BioViewService,
    search_param: BioModel,
    download_param: BioDownloadModel
) -> tuple:
    """Run the download search and return ``(search_result_df, query)``.

    NOTE: the original annotation claimed ``pd.DataFrame`` but the function
    has always returned a 2-tuple of the result DataFrame and the query used.
    Excel output is capped at ``environment.BIO_EXCEL_RESULT_MAX_COUNT`` rows.

    Raises:
        HTTPException: 400 for an unsupported extension (previously this
            crashed with an ``UnboundLocalError`` at the return statement),
            500 wrapping any error raised by the search itself.
    """
    if download_param.ext not in ('xlsx', 'csv'):
        # Guard: without this, neither branch below binds the result names.
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={'error': 'invalid_ext', 'message': download_param.ext}
        )
    try:
        # Excel output limits the number of rows; CSV is unlimited.
        if download_param.ext == 'xlsx':
            search_result_df, query = bio_service.search_download_bio_data(
                search_param, limitation=environment.BIO_EXCEL_RESULT_MAX_COUNT)
        else:  # 'csv'
            search_result_df, query = bio_service.search_download_bio_data(search_param)
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={'error': 'db_error', 'message': e.args}
        ) from e
    return search_result_df, query
async def _write_bio_data_to_file(
    bio_service: BioViewService,
    download_param: BioDownloadModel,
    df: pd.DataFrame,
    download_file_name: str
) -> str:
    """Write *df* to a local file in the format selected by the extension.

    Returns the local file path produced by the service, or '' when the
    extension is neither 'xlsx' nor 'csv'.
    """
    ext = download_param.ext
    if ext == 'xlsx':
        logger.info('今回はExcelファイルに出力する')
        return await bio_service.write_excel_file(
            df, download_param.user_id, download_file_name=download_file_name)
    if ext == 'csv':
        logger.info('今回はCSVファイルに出力する')
        return await bio_service.write_csv_file(
            df, download_param.user_id, header=constants.BIO_CSV_HEADER, download_file_name=download_file_name)
    # Unknown extension: preserve the original fall-through result.
    return ''