diff --git a/ecs/jskult-webapp/src/controller/bio_download.py b/ecs/jskult-webapp/src/controller/bio_download.py
index 863c41e4..620ca20c 100644
--- a/ecs/jskult-webapp/src/controller/bio_download.py
+++ b/ecs/jskult-webapp/src/controller/bio_download.py
@@ -2,6 +2,7 @@
 from datetime import datetime
 from typing import Union
 
+import pandas as pd
 from fastapi import APIRouter, Depends, HTTPException
 from fastapi.responses import JSONResponse
 from starlette import status
@@ -9,13 +10,16 @@ from starlette import status
 from src.depends.auth import verify_session
 from src.depends.services import get_service
 from src.error.exceptions import DBException
+from src.logging.get_logger import get_logger
 from src.model.internal.session import UserSession
 from src.model.request.bio import BioModel
 from src.model.request.bio_download import BioDownloadModel
 from src.services.batch_status_service import BatchStatusService
 from src.services.bio_view_service import BioViewService
 from src.services.session_service import set_session
-from src.system_var import constants
+from src.system_var import constants, environment
+
+logger = get_logger('生物由来参照')
 
 router = APIRouter()
 
@@ -33,70 +37,35 @@ async def download_bio_data(
     session: Union[UserSession, None] = Depends(verify_session)
 ):
     # Registered on a dedicated router so routing is handled differently from the regular views
-    # error_log(date("Y/m/d H:i:s") . " [INFO] getBioData start" . "\r\n", 3, "$execLog");
-    # Output the post-refactor parameters instead
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] param:szConditions=" . htmlspecialchars($_POST["szConditions"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] param:pageNum=" . htmlspecialchars($_POST["pageNum"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] szUser=" . htmlspecialchars($_POST["szUser"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] szfilename=" . htmlspecialchars($_POST["szfilename"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] extension=" . htmlspecialchars($_POST["extension"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] sql=" . htmlspecialchars($_POST["sql"], ENT_QUOTES) . "\r\n", 3, "$execLog");
-    # unneeded: error_log(date("Y/m/d H:i:s") . " [INFO] arrayPrepare=" . $_POST["arrayPrepare"] . "\r\n", 3, "$execLog");
+    logger.info('生物由来データダウンロード開始')
+    logger.info(f'ユーザーID: {download_param.user_id}')
+    logger.info(f'拡張子: {download_param.ext}')
 
     # Initialize the timestamp used for the file name
-    now = datetime.now()
+    current_timestamp = datetime.now()
 
     if session is None:
         return {'status': 'session_expired'}
 
     # Do not allow this feature while batch processing is running
     if batch_status_service.is_batch_processing():
         return {'status': 'batch_processing'}
 
-    try:
-        # Search the bio-derived data
-        search_result_df = bio_service.search_download_bio_data(search_param)
-    except DBException as e:
-        # error_log(date("Y/m/d H:i:s") . " [ERROR] " . "\r\n", 3, "$execLog");
-        print('DB Error', e.args)
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail={'error': 'db_error', 'message': e.args}
-        )
+    # Search the bio-derived data
+    search_result_df = _search_bio_data(bio_service, search_param, download_param.user_id)
 
     if search_result_df.size < 1:
         # When the search returns no rows, do not return a download_url
         print('Bio data not found')
         return {'status': 'ok', 'download_url': None}
 
     # Extract the columns to write to the file
-    extract_df = search_result_df[constants.BIO_EXTRACT_COLUMNS]
+    # TODO: this step becomes unnecessary once the SQL query is fixed
+    extract_df = _extract_output_df(search_result_df)
 
-    # Convert values
-    # Set the full name for the data type
-    extract_df.loc[:, 'slip_org_kbn'] = extract_df['slip_org_kbn'].apply(
-        lambda key: constants.SLIP_ORG_KBN_FULL_NAME.get(key))
-    # Set the Japanese name for the data category
-    extract_df.loc[:, 'data_kbn'] = extract_df['data_kbn'].apply(lambda key: constants.DATA_KBN_JP_NAME.get(key))
-    # Set the Japanese name for the lot number error flag
-    extract_df.loc[:, 'lot_no_err_flg'] = extract_df['lot_no_err_flg'].apply(
-        lambda key: constants.LOT_NO_ERR_FLG_JP_NAME.get(key))
-    # Set the modified datetime, modifier, and error detail type only when the pre-correction slip management number is set
-    extract_df.loc[:, 'ins_dt'] = extract_df['bef_slip_mgt_num'].apply(
-        lambda bef_slip_mgt_num: extract_df['ins_dt'] if bef_slip_mgt_num is not None else '')
-    extract_df.loc[:, 'ins_usr'] = extract_df['bef_slip_mgt_num'].apply(
-        lambda bef_slip_mgt_num: extract_df['ins_usr'] if bef_slip_mgt_num is not None else '')
-
-    # Switch the output by type
-    local_file_path = ''
-    if download_param.kind == 'xlsx':
-        # error_log(date("Y/m/d H:i:s") . " [INFO] 今回はExcelファイルに出力する" . "\r\n", 3, "$execLog");
-        local_file_path = bio_service.write_excel_file(extract_df, download_param.user_id, timestamp=now)
-    elif download_param.kind == 'csv':
-        # error_log(date("Y/m/d H:i:s") . " [INFO] 今回はCSVファイルに出力する" . "\r\n", 3, "$execLog");
-        local_file_path = bio_service.write_csv_file(
-            extract_df, download_param.user_id, header=constants.BIO_CSV_HEADER, timestamp=now)
+    # Write out the file (Excel or CSV)
+    local_file_path = _write_bio_data_to_file(bio_service, download_param, extract_df, current_timestamp)
 
     # Upload the local file to S3 and get a download URL
     try:
         bio_service.upload_bio_data_file(local_file_path)
         download_file_url = bio_service.generate_download_file_url(
-            local_file_path, download_param.user_id, download_param.kind)
+            local_file_path, download_param.user_id, download_param.ext)
     except Exception as e:
         print('S3 access error', e.args)
         raise HTTPException(
@@ -121,8 +90,66 @@ async def download_bio_data(
     json_response.set_cookie(
         key='session',
         value=session.session_key,
-        max_age=20*60,
+        max_age=environment.SESSION_EXPIRE_MINUTE * 60,  # max_age is specified in seconds, so multiply the minutes by 60
         secure=True,
         httponly=True
     )
     return json_response
+
+
+def _search_bio_data(bio_service: BioViewService, search_param: BioModel, user_id: str) -> pd.DataFrame:
+    try:
+        # Search the bio-derived data
+        search_result_df = bio_service.search_download_bio_data(search_param)
+        logger.info(f'ユーザーID: {user_id} Value: {search_param}')
+        # TODO: also write the logs to a file
+    except DBException as e:
+        logger.exception(f'DB Error: {e}')
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail={'error': 'db_error', 'message': e.args}
+        )
+
+    return search_result_df
+
+
+def _extract_output_df(search_result_df: pd.DataFrame) -> pd.DataFrame:
+    # Copy so the .loc assignments below operate on a frame of their own,
+    # not on a view of the search result
+    extract_df = search_result_df[constants.BIO_EXTRACT_COLUMNS].copy()
+
+    # Convert values
+    # Set the full name for the data type
+    extract_df.loc[:, 'slip_org_kbn'] = extract_df['slip_org_kbn'].apply(
+        lambda key: constants.SLIP_ORG_KBN_FULL_NAME.get(key))
+    # Set the Japanese name for the data category
+    extract_df.loc[:, 'data_kbn'] = extract_df['data_kbn'].apply(
+        lambda key: constants.DATA_KBN_JP_NAME.get(key))
+    # Set the Japanese name for the lot number error flag
+    extract_df.loc[:, 'lot_num_err_flg'] = extract_df['lot_num_err_flg'].apply(
+        lambda key: constants.LOT_NO_ERR_FLG_JP_NAME.get(key))
+    # Keep the modified datetime and modifier only for rows whose pre-correction
+    # slip management number is set; Series.where keeps the row's own value where
+    # the condition holds and blanks it otherwise (the old row-wise apply returned
+    # the whole column from inside the lambda instead of the row's value)
+    has_bef_slip_mgt_num = extract_df['bef_slip_mgt_num'].notna()
+    extract_df.loc[:, 'ins_dt'] = extract_df['ins_dt'].where(has_bef_slip_mgt_num, '')
+    extract_df.loc[:, 'ins_usr'] = extract_df['ins_usr'].where(has_bef_slip_mgt_num, '')
+
+    return extract_df
+
+
+def _write_bio_data_to_file(
+    bio_service: BioViewService,
+    download_param: BioDownloadModel,
+    df: pd.DataFrame,
+    current_timestamp: datetime
+) -> str:
+    # Switch the output format by extension
+    local_file_path = ''
+    if download_param.ext == 'xlsx':
+        # TODO: also write the logs to a file
+        logger.info('今回はExcelファイルに出力する')
+        local_file_path = bio_service.write_excel_file(df, download_param.user_id, timestamp=current_timestamp)
+    elif download_param.ext == 'csv':
+        # TODO: also write the logs to a file
+        logger.info('今回はCSVファイルに出力する')
+        local_file_path = bio_service.write_csv_file(
+            df, download_param.user_id, header=constants.BIO_CSV_HEADER, timestamp=current_timestamp)
+
+    return local_file_path
diff --git a/ecs/jskult-webapp/src/model/db/bio_sales_view.py b/ecs/jskult-webapp/src/model/db/bio_sales_view.py
index 9fd90ed7..325ccbbc 100644
--- a/ecs/jskult-webapp/src/model/db/bio_sales_view.py
+++ b/ecs/jskult-webapp/src/model/db/bio_sales_view.py
@@ -42,7 +42,7 @@ class BioSalesViewModel(BaseDBModel):
     v_inst_nm: Optional[str]
     v_inst_addr: Optional[str]
     comm_cd: Optional[str]
-    comm_nm: Optional[str]
+    product_name: Optional[str]
     whs_rep_comm_nm: Optional[str]
     whs_rep_nnskfcl_nm: Optional[str]
     whs_rep_nnsk_fcl_addr: Optional[str]
diff --git a/ecs/jskult-webapp/src/model/request/bio_download.py b/ecs/jskult-webapp/src/model/request/bio_download.py
index 51bea2e3..5f9e049b 100644
--- a/ecs/jskult-webapp/src/model/request/bio_download.py
+++ b/ecs/jskult-webapp/src/model/request/bio_download.py
@@ -4,15 +4,15 @@ from pydantic import BaseModel
 
 class BioDownloadModel(BaseModel):
     user_id: str
-    kind: str
+    ext: str
 
     @classmethod
     def as_body(
         cls,
         user_id: str = Body(),
-        kind: str = Body()
+        ext: str = Body()
     ):
         return cls(
             user_id=user_id,
-            kind=kind
+            ext=ext
         )
diff --git a/ecs/jskult-webapp/src/router/session_router.py b/ecs/jskult-webapp/src/router/session_router.py
index e2699784..8188c10f 100644
--- a/ecs/jskult-webapp/src/router/session_router.py
+++ b/ecs/jskult-webapp/src/router/session_router.py
@@ -1,4 +1,3 @@
-import logging
 from typing import Callable
 
 from fastapi import Request, Response
@@ -9,9 +8,10 @@ from starlette import status
 from src.depends.auth import (check_session_expired, get_current_session,
                               verify_session)
 from src.error.exceptions import UnexpectedException
+from src.logging.get_logger import get_logger
 from src.system_var import constants, environment
 
-logger = logging.getLogger('uvicorn')
+logger = get_logger('medaca_router')
 
 
 class MeDaCaRoute(APIRoute):
@@ -36,14 +36,11 @@
         # Define the route handler to return. It must be an async function.
         async def custom_route_handler(request: Request) -> Response:
             try:
-                logger.info('pre routing process')
                 # pre-processing
                 request = await self.pre_process_route(request)
                 # the original routing
-                logger.info('routing process')
                 response = await original_route_handler(request)
                 # post-processing
-                logger.info('post routing process')
                 return await self.post_process_route(request, response)
             except HTTPException as e:
                 raise e
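For reviewers who haven't met this pattern before: `MeDaCaRoute` uses FastAPI's documented custom `APIRoute` hook, in which `get_route_handler` wraps the original handler with pre- and post-processing. A stripped-down sketch of the mechanism (the class and router names here are illustrative, not from this codebase):

```python
from typing import Callable

from fastapi import APIRouter, Request, Response
from fastapi.routing import APIRoute


class WrappedRoute(APIRoute):
    def get_route_handler(self) -> Callable:
        original_route_handler = super().get_route_handler()

        # The returned handler must be an async function, just like
        # custom_route_handler in session_router.py above.
        async def custom_route_handler(request: Request) -> Response:
            # pre-processing (e.g. session checks) would go here
            response = await original_route_handler(request)
            # post-processing (e.g. refreshing the session cookie) would go here
            return response

        return custom_route_handler


# Every route registered on this router goes through the wrapper
router = APIRouter(route_class=WrappedRoute)
```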
diff --git a/ecs/jskult-webapp/src/system_var/constants.py b/ecs/jskult-webapp/src/system_var/constants.py
index 9b022b3d..99bdd872 100644
--- a/ecs/jskult-webapp/src/system_var/constants.py
+++ b/ecs/jskult-webapp/src/system_var/constants.py
@@ -17,25 +17,25 @@ BIO_EXTRACT_COLUMNS = [
     'tran_kbn_name',
     'mkr_cd',
     'rec_comm_cd',
-    'comm_nm',
-    'whs_rep_comm_nm',
+    'product_name',
+    'whs_rep_comm_name',
     'nonyu_fcl_cd',
     'rec_nonyu_fcl_name',
-    'whs_rep_nnskfcl_nm',
+    'whs_rep_nonyu_fcl_name',
     'rec_nonyu_fcl_addr',
-    'whs_rep_nnsk_fcl_addr',
+    'whs_rep_nonyu_fcl_addr',
     'rec_lot_num',
     'amt_fugo',
     'expr_dt',
     'data_kbn',
-    'lot_no_err_flg',
+    'lot_num_err_flg',
     'bef_slip_mgt_num',
     'ins_usr',
     'ins_dt',
     'inst_cd',
     'inst_name_form',
     'address',
-    'tel_no',
+    'tel_num',
     'v_whs_cd',
     'v_whsorg_cd',
     'whs_org_name',
diff --git a/ecs/jskult-webapp/src/templates/bioSearchList.html b/ecs/jskult-webapp/src/templates/bioSearchList.html
index 2c813f12..8dae43e4 100644
--- a/ecs/jskult-webapp/src/templates/bioSearchList.html
+++ b/ecs/jskult-webapp/src/templates/bioSearchList.html
@@ -192,7 +192,7 @@
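Since `kind` was renamed to `ext` across the request model, controller, and download URL generation, a small regression test on the model may be worth adding. A sketch (the test module is hypothetical; only `BioDownloadModel` and its fields come from this diff):

```python
import pytest
from pydantic import ValidationError

from src.model.request.bio_download import BioDownloadModel


def test_bio_download_model_accepts_ext():
    model = BioDownloadModel(user_id='u0001', ext='csv')
    assert model.ext == 'csv'


def test_old_kind_field_no_longer_satisfies_ext():
    # 'kind' does not fill the required 'ext' field any more
    with pytest.raises(ValidationError):
        BioDownloadModel(user_id='u0001', kind='csv')
```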