feat: アクセスログの書き込み処理を追加

This commit is contained in:
shimoda.m@nds-tyo.co.jp 2023-06-01 11:27:27 +09:00
parent bf22d90231
commit 62478a2b93
5 changed files with 119 additions and 50 deletions

View File

@ -42,13 +42,19 @@ async def download_bio_data(
logger.info(f'拡張子: {download_param.ext}')
# ファイル名に使用するタイムスタンプを初期化しておく
current_timestamp = datetime.now()
# 出力ファイル名
download_file_name = f'Result_{download_param.user_id}_{current_timestamp:%Y%m%d%H%M%S%f}.{download_param.ext}'
if session is None:
return {'status': 'session_expired'}
# バッチ処理中の場合、機能を利用させない
if batch_status_service.is_batch_processing():
return {'status': 'batch_processing'}
# 生物由来データを検索
search_result_df = _search_bio_data(bio_service, search_param, download_param.user_id)
# 検索に使用したクエリも取得
search_result_df, query = _search_bio_data(bio_service, search_param, download_param.user_id)
# アクセスログを記録
bio_service.write_access_log(query, search_param, download_param.user_id, current_timestamp, download_file_name)
if search_result_df.size < 1:
# 検索結果が0件の場合、download_urlを返さない
print('Bio data not found')
@ -59,7 +65,7 @@ async def download_bio_data(
extract_df = _extract_output_df(search_result_df)
# ファイルを書き出し(Excel or CSV)
local_file_path = __write_bio_data_to_file(bio_service, download_param, extract_df, current_timestamp)
local_file_path = _write_bio_data_to_file(bio_service, download_param, extract_df, download_file_name)
# ローカルファイルからS3にアップロードし、ダウンロード用URLを取得する
try:
@ -100,8 +106,7 @@ async def download_bio_data(
def _search_bio_data(bio_service: BioViewService, search_param: BioModel, user_id: str) -> pd.DataFrame:
try:
# 生物由来データを検索
search_result_df = bio_service.search_download_bio_data(search_param)
logger.info(f'ユーザーID: {user_id} Value: {search_param}')
search_result_df, query = bio_service.search_download_bio_data(search_param)
# TODO: ファイルにも出力する
except DBException as e:
logger.exception(f'DB Error: {e}')
@ -110,7 +115,7 @@ def _search_bio_data(bio_service: BioViewService, search_param: BioModel, user_i
detail={'error': 'db_error', 'message': e.args}
)
return search_result_df
return search_result_df, query
def _extract_output_df(search_result_df: pd.DataFrame) -> pd.DataFrame:
@ -134,22 +139,21 @@ def _extract_output_df(search_result_df: pd.DataFrame) -> pd.DataFrame:
return extract_df
def __write_bio_data_to_file(
def _write_bio_data_to_file(
bio_service: BioViewService,
download_param: BioDownloadModel,
df: pd.DataFrame,
current_timestamp: datetime
download_file_name: str
) -> str:
# 種別によって出力を変える
local_file_path = ''
if download_param.ext == 'xlsx':
# TODO: ファイルにも出力する
logger.info('今回はExcelファイルに出力する')
local_file_path = bio_service.write_excel_file(df, download_param.user_id, timestamp=current_timestamp)
local_file_path = bio_service.write_excel_file(
df, download_param.user_id, download_file_name=download_file_name)
elif download_param.ext == 'csv':
# TODO: ファイルにも出力する
logger.info('今回はCSVファイルに出力する')
local_file_path = bio_service.write_csv_file(
df, download_param.user_id, header=constants.BIO_CSV_HEADER, timestamp=current_timestamp)
df, download_param.user_id, header=constants.BIO_CSV_HEADER, download_file_name=download_file_name)
return local_file_path

View File

@ -9,18 +9,18 @@ from src.util.string_util import is_not_empty
@sanitize
class BioModel(BaseModel):
wholesaler_code: Optional[str]
wholesaler_sub_code: Optional[str]
wholesaler_name: Optional[str]
org_kbn: Optional[str]
rec_whs_cd: Optional[str]
rec_whs_sub_cd: Optional[str]
whs_name: Optional[str]
slip_org_kbn: Optional[str]
rec_ymd_from: Optional[str]
rec_ymd_to: Optional[str]
rec_lot_num: Optional[str]
data_kbn: Optional[str]
maker_cd: Optional[str]
mkr_cd: Optional[str]
rev_hsdnymd_srk_from: Optional[str]
rev_hsdnymd_srk_to: Optional[str]
ikoFlg: Optional[str]
iko_flg: Optional[str]
@classmethod
def as_form(
@ -121,16 +121,16 @@ class BioModel(BaseModel):
rev_hsdnymd_srk_to = ctrl_rev_hsdnymd_srk_to.replace('/', '')
return cls(
wholesaler_code=wholesaler_code,
wholesaler_sub_code=wholesaler_sub_code,
wholesaler_name=wholesaler_name,
org_kbn=ctrl_org_kbn,
rec_whs_cd=wholesaler_code,
rec_whs_sub_cd=wholesaler_sub_code,
whs_name=wholesaler_name,
slip_org_kbn=ctrl_org_kbn,
rec_ymd_from=rec_ymd_from,
rec_ymd_to=rec_ymd_to,
rec_lot_num=ctrl_rec_lot_num,
data_kbn=ctrl_data_kbn,
maker_cd=ctrl_maker_cd,
mkr_cd=ctrl_maker_cd,
rev_hsdnymd_srk_from=rev_hsdnymd_srk_from,
rev_hsdnymd_srk_to=rev_hsdnymd_srk_to,
ikoFlg=ikoFlg
iko_flg=ikoFlg
)

View File

@ -60,14 +60,14 @@ class BioViewModel(BaseModel):
return ''
form_wholesaler_full_name = \
f'{self.form_data.wholesaler_code}-{self.form_data.wholesaler_sub_code}:{self.form_data.wholesaler_name}'
f'{self.form_data.rec_whs_cd}-{self.form_data.rec_whs_sub_cd}:{self.form_data.whs_name}'
return self._selected_value(form_wholesaler_full_name, selected_wholesaler)
def is_selected_org_kbn(self, selected_org_kbn):
if not self.is_form_submitted():
return ''
return self._selected_value(self.form_data.org_kbn, selected_org_kbn)
return self._selected_value(self.form_data.slip_org_kbn, selected_org_kbn)
def is_input_rec_ymd_from(self):
if not self.is_form_submitted():
@ -97,7 +97,7 @@ class BioViewModel(BaseModel):
if not self.is_form_submitted():
return ''
return self._selected_value(self.form_data.maker_cd, selected_maker_cd)
return self._selected_value(self.form_data.mkr_cd, selected_maker_cd)
def is_input_rev_hsdnymd_srk_from(self):
if not self.is_form_submitted():
@ -115,7 +115,7 @@ class BioViewModel(BaseModel):
if not self.is_form_submitted():
return ''
return 'checked' if self.form_data.ikoFlg else ''
return 'checked' if self.form_data.iko_flg else ''
def disabled_button(self):
return 'disabled' if self.is_data_empty() or self.is_data_overflow_max_length() else ''

View File

@ -65,7 +65,8 @@ class BioSalesViewRepository(BaseRepository):
logger.debug(f'SQL: {query}')
df = self._to_data_frame(query, parameter)
logger.debug(f'count= {len(df.index)}')
return df
# ログ出力のため、クエリも返却
return df, query
except Exception as e:
logger.exception(f"DB Error : Exception={e.args}")
raise e
@ -76,12 +77,12 @@ class BioSalesViewRepository(BaseRepository):
where_clauses: list[SQLCondition] = []
# 卸(コード/サブコード)
if is_not_empty(parameter.wholesaler_code) and is_not_empty(parameter.wholesaler_sub_code):
where_clauses.append(SQLCondition('rec_whs_cd', condition.EQ, 'wholesaler_code'))
where_clauses.append(SQLCondition('rec_whs_sub_cd', condition.EQ, 'wholesaler_sub_code'))
if is_not_empty(parameter.rec_whs_cd) and is_not_empty(parameter.rec_whs_sub_cd):
where_clauses.append(SQLCondition('rec_whs_cd', condition.EQ, 'rec_whs_cd'))
where_clauses.append(SQLCondition('rec_whs_sub_cd', condition.EQ, 'rec_whs_sub_cd'))
# データ種別
if is_not_empty(parameter.org_kbn):
where_clauses.append(SQLCondition('slip_org_kbn', condition.EQ, 'org_kbn'))
if is_not_empty(parameter.slip_org_kbn):
where_clauses.append(SQLCondition('slip_org_kbn', condition.EQ, 'slip_org_kbn'))
# 処理日 開始日
if is_not_empty(parameter.rec_ymd_from):
where_clauses.append(SQLCondition('rec_ymd', condition.GE, 'rec_ymd_from'))
@ -98,8 +99,8 @@ class BioSalesViewRepository(BaseRepository):
if is_not_empty(parameter.data_kbn):
where_clauses.append(SQLCondition('data_kbn', condition.EQ, 'data_kbn'))
# 製品
if is_not_empty(parameter.maker_cd):
where_clauses.append(SQLCondition('mkr_cd', condition.EQ, 'maker_cd'))
if is_not_empty(parameter.mkr_cd):
where_clauses.append(SQLCondition('mkr_cd', condition.EQ, 'mkr_cd'))
# 発伝年月日 開始日
if is_not_empty(parameter.rev_hsdnymd_srk_from):
where_clauses.append(SQLCondition('rev_hsdnymd_srk', condition.GE, 'rev_hsdnymd_srk_from'))
@ -108,12 +109,12 @@ class BioSalesViewRepository(BaseRepository):
where_clauses.append(SQLCondition('rev_hsdnymd_srk', condition.LE, 'rev_hsdnymd_srk_to'))
# 移行フラグ
# チェックが入っていない場合、移行対象(IKO_FLG = '*')を省く
if parameter.ikoFlg is None:
if parameter.iko_flg is None:
where_clauses.append(SQLCondition('iko_flg', condition.IS, 'NULL', literal=True))
# 固定条件
# Viewで返されるロット番号9件をNull以外で抽出
where_clauses.append(SQLCondition('LENGTH(TRIM(rec_lot_num))', condition.GT, '0', literal=True))
where_clauses_str = ' AND '.join([condition.apply() for condition in where_clauses])
# error_log(date("Y/m/d H:i:s") . " [INFO] 条件設定終了:" . $szConditions . "\r\n", 3, "$execLog");
logger.debug(f'条件設定終了:{where_clauses_str}')
return where_clauses_str

View File

@ -1,4 +1,4 @@
import os.path as path
import os
import shutil
from datetime import datetime
@ -6,6 +6,7 @@ import pandas as pd
from src.aws.aws_api_client import AWSAPIClient
from src.aws.s3 import S3Client
from src.logging.get_logger import get_logger
from src.model.internal.session import UserSession
from src.model.request.bio import BioModel
from src.model.view.bio_disp_model import BisDisplayModel
@ -19,6 +20,8 @@ from src.repositories.wholesaler_master_repository import \
from src.services.base_service import BaseService
from src.system_var import constants, environment
logger = get_logger('生物由来参照')
class BioViewService(BaseService):
REPOSITORIES = {
@ -68,13 +71,60 @@ class BioViewService(BaseService):
def search_download_bio_data(self, search_params: BioModel):
# 生物由来データをダウンロードするために、DBから検索した結果をデータフレームに変換
bio_sales_data_frame = self.bio_sales_repository.fetch_as_data_frame(parameter=search_params)
return bio_sales_data_frame
bio_sales_data_frame, query = self.bio_sales_repository.fetch_as_data_frame(parameter=search_params)
return bio_sales_data_frame, query
def write_excel_file(self, data_frame: pd.DataFrame, user_id: str, timestamp: datetime):
def write_access_log(
        self,
        query: str,
        parameters: BioModel,
        user_id: str,
        timestamp: datetime,
        download_file_name: str
):
    """Write an access log (executed SQL + search parameters) to a local
    file and archive it to S3.

    Args:
        query: SQL statement that was executed for the search.
        parameters: search parameters; every non-empty field is logged.
        user_id: ID of the user who requested the download.
        timestamp: timestamp used to build a unique log file name.
        download_file_name: name of the result file, appended to every
            log line so the log can be correlated with the download.
    """
    # Unique per-call file name: BioAccessLog_<user>_<timestamp>.log
    access_log_file_name = f'BioAccessLog_{user_id}_{timestamp:%Y%m%d%H%M%S%f}.log'
    # Local import: the dedicated file logger is an implementation detail of
    # this method (module-wide logging goes through get_logger).
    import logging
    access_logger = logging.getLogger(access_log_file_name)
    level = logging.getLevelName(environment.LOG_LEVEL)
    if not isinstance(level, int):
        # getLevelName returns a string for unknown level names; fall back to INFO
        level = logging.INFO
    access_logger.setLevel(level)
    access_log_file_path = os.path.join(constants.BIO_TEMPORARY_FILE_DIR_PATH, access_log_file_name)
    access_log_handler = logging.FileHandler(access_log_file_path)
    # BUG FIX: attach the formatter to the file handler of the access logger.
    # The original code iterated the module-level logger's handlers, which
    # left the access-log file unformatted and clobbered the module logger.
    access_log_handler.setFormatter(logging.Formatter(
        '[%(levelname)s]\t%(asctime)s\t%(message)s',
        '%Y-%m-%d %H:%M:%S'
    ))
    access_logger.addHandler(access_log_handler)
    try:
        # Log the executed SQL statement
        sql_message = f'ユーザーID: {user_id} SQL: {query}\t{download_file_name}'
        access_logger.info(sql_message)
        # Mirror every line to standard output as well
        logger.info(sql_message)
        # Log each non-empty search parameter on its own line
        for param_key, param_value in parameters.dict().items():
            # `not param_value` also covers None and '' without raising
            # TypeError on unsized values (the original used len()).
            if not param_value:
                continue
            parameter_message = f'ユーザーID: {user_id} Value: {param_key} = {param_value}\t{download_file_name}'
            logger.info(parameter_message)
            access_logger.info(parameter_message)
    finally:
        # BUG FIX: flush and close the file BEFORE uploading; otherwise
        # buffered lines may be missing from the copy stored in S3 and the
        # file handle leaks (per-call logger names are unique, so handlers
        # were never reused anyway).
        access_logger.removeHandler(access_log_handler)
        access_log_handler.close()
    # Upload the completed log file to S3 (the local copy is removed there)
    self.upload_bio_access_log_file(access_log_file_path)
def write_excel_file(self, data_frame: pd.DataFrame, user_id: str, download_file_name: str):
# Excelに書き込み
output_file_path = path.join(constants.BIO_TEMPORARY_FILE_DIR_PATH,
f'Result_{user_id}_{timestamp:%Y%m%d%H%M%S%f}.xlsx')
output_file_path = os.path.join(constants.BIO_TEMPORARY_FILE_DIR_PATH, download_file_name)
# テンプレートファイルをコピーして出力ファイルの枠だけを作る
shutil.copyfile(
@ -92,10 +142,9 @@ class BioViewService(BaseService):
return output_file_path
def write_csv_file(self, data_frame: pd.DataFrame, user_id: str, header: list[str], timestamp: datetime):
def write_csv_file(self, data_frame: pd.DataFrame, user_id: str, header: list[str], download_file_name: str):
# csvに書き込み
output_file_path = path.join(constants.BIO_TEMPORARY_FILE_DIR_PATH,
f'Result_{user_id}_{timestamp:%Y%m%d%H%M%S%f}.csv')
output_file_path = os.path.join(constants.BIO_TEMPORARY_FILE_DIR_PATH, download_file_name)
# 横長のDataFrameとするため、ヘッダーの加工処理
header_data = {}
for df_column, header_column in zip(data_frame.columns, header):
@ -110,13 +159,28 @@ class BioViewService(BaseService):
def upload_bio_data_file(self, local_file_path: str) -> None:
bucket_name = environment.BIO_ACCESS_LOG_BUCKET
# TODO: フォルダを変える
file_key = f'bio/{path.basename(local_file_path)}'
# TODO: ファイルパスにYYYY/MM/DDを加える
file_key = f'data/{os.path.basename(local_file_path)}'
self.s3_client.upload_file(local_file_path, bucket_name, file_key)
# アップロード後、ローカルからは削除する
self.delete_local_file(local_file_path)
def upload_bio_access_log_file(self, local_file_path: str) -> None:
    """Archive an access-log file to the S3 log folder, then remove the local copy."""
    log_bucket = environment.BIO_ACCESS_LOG_BUCKET
    # TODO: add YYYY/MM/DD to the file path
    object_key = 'log/' + os.path.basename(local_file_path)
    self.s3_client.upload_file(local_file_path, log_bucket, object_key)
    # The local file is no longer needed once it is stored in S3.
    self.delete_local_file(local_file_path)
def generate_download_file_url(self, local_file_path: str, user_id: str, kind: str) -> str:
bucket_name = environment.BIO_ACCESS_LOG_BUCKET
# TODO: フォルダを変える
file_key = f'bio/{path.basename(local_file_path)}'
# TODO: ファイルパスにYYYY/MM/DDを加える
file_key = f'data/{os.path.basename(local_file_path)}'
download_filename = f'{user_id}_生物由来卸販売データ.{kind}'
return self.s3_client.generate_presigned_url(bucket_name, file_key, download_filename)
def delete_local_file(self, local_file_path: str):
os.remove(local_file_path)