diff --git a/Seanium_Brain.py b/Seanium_Brain.py
index ebc72b6..5569711 100644
--- a/Seanium_Brain.py
+++ b/Seanium_Brain.py
@@ -1,4 +1,5 @@
 import os
+import time
 
 import streamlit as st
 
@@ -8,8 +9,14 @@ import GPT
 import modules.utilities as util
 import streamlit_toolkit.tools as st_tool
 
+if 'SESSION_TIME' not in st.session_state:
+    st.session_state['SESSION_TIME'] = time.strftime("%Y%m%d-%H%H%S")
+
+SESSION_TIME = st.session_state['SESSION_TIME']
 SESSION_LANG = st.session_state['SESSION_LANGUAGE']
+
 PROMPT_PATH = f'.user/prompt/{SESSION_LANG}'
+CURRENT_LOG_FILE = f'{INFO.LOG_PATH}/log_{SESSION_TIME}.log'
 
 util.remove_oldest_file(INFO.LOG_PATH, 10)
 
diff --git a/batch-programs/create_language_base.bat b/batch-programs/create_language_base.bat
index 517f717..58ac4ca 100644
--- a/batch-programs/create_language_base.bat
+++ b/batch-programs/create_language_base.bat
@@ -7,7 +7,4 @@ call .\venv\Scripts\activate
 echo creating language base...
 call python .\batch-programs\create_language_base.py
 
-cls
-echo language base created!
-
 pause
\ No newline at end of file
diff --git a/batch-programs/create_language_base.py b/batch-programs/create_language_base.py
index a2c1ec8..4d63370 100644
--- a/batch-programs/create_language_base.py
+++ b/batch-programs/create_language_base.py
@@ -1,26 +1,32 @@
 import os
 import subprocess
 import sys
+
 # Add the parent directory to the Python path
 parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
 sys.path.append(parent_dir)
 
 from modules import utilities as util
 
+pages = util.scan_directory(f'pages', exclude=['__init__.py', '__pycache__'])
+tools = util.scan_directory(f'streamlit_toolkit', exclude=['__init__.py', '__pycache__'])
+
+files_to_process = []
-pages = util.scan_directory(f'pages')
-pages_full = []
-for page in pages:
-    page = util.get_file_name(page, extension=True)
-    pages_full.append(f'pages/{page}')
 main_file = util.get_file_name(f'Seanium_brain.py', extension=True)
+files_to_process.append(main_file)
 
-pages_full.append(main_file)
+for page in pages:
+    page = util.get_file_name(page, extension=True)
+    files_to_process.append(f'pages/{page}')
 
+for tool in tools:
+    tool = util.get_file_name(tool, extension=True)
+    files_to_process.append(f'streamlit_toolkit/{tool}')
 
-pages_flatten = ' '.join(pages_full)
+files_flatten = ' '.join(files_to_process)
 
 locals_path = f'.locals'
-languages = util.read_json(f'{locals_path}/languages.json')
+languages = util.read_json(os.path.abspath(f'{locals_path}/languages.json'))
 # create LC_MESSAGES under .local directory if not exist
 for language in languages.values():
     # if language path not exist, create it
@@ -28,4 +34,4 @@ for language in languages.values():
         os.makedirs(f'{locals_path}/{language}/LC_MESSAGES')
 
 # create .pot file
-subprocess.call(f'xgettext {pages_flatten} -o {locals_path}/base.pot', shell=True)
+subprocess.call(f'xgettext {files_flatten} -o {locals_path}/base.pot', shell=True)
diff --git a/modules/INFO.py b/modules/INFO.py
index 2072dcb..b275f9b 100644
--- a/modules/INFO.py
+++ b/modules/INFO.py
@@ -14,8 +14,6 @@ MANIFEST = '.core/manifest.json'
 INIT_LANGUAGE = '.user/language.json'
 
 # activate session
-if 'SESSION_TIME' not in st.session_state:
-    st.session_state['SESSION_TIME'] = time.strftime("%Y%m%d-%H%H%S")
 if 'SESSION_LANGUAGE' not in st.session_state:
     st.session_state['SESSION_LANGUAGE'] = util.read_json_at(INIT_LANGUAGE, 'SESSION_LANGUAGE')
 
@@ -23,9 +21,5 @@ if 'SESSION_LANGUAGE' not in st.session_state:
 if 'FILTER_ROW_COUNT' not in st.session_state:
     st.session_state['FILTER_ROW_COUNT'] = util.read_json_at(BRAIN_MEMO, 'filter_row_count')
 
-SESSION_TIME = st.session_state['SESSION_TIME']
-
-CURRENT_LOG_FILE = f'{LOG_PATH}/log_{SESSION_TIME}.log'
-
 # models
 MODELS_OPTIONS = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
diff --git a/modules/utilities.py b/modules/utilities.py
index 5a9bc6d..1e7c80a 100644
--- a/modules/utilities.py
+++ b/modules/utilities.py
@@ -28,8 +28,21 @@ def remove_oldest_file(directory, max_files):
         os.remove(oldest_file)
 
 
-def scan_directory(directory):
-    files = glob.glob(f'{directory}/*')
+def scan_directory(directory, include_subdir=False, exclude=None):
+    if include_subdir:
+        files = glob.glob(f'{directory}/*', recursive=True)
+    else:
+        files = glob.glob(f'{directory}/*.*')
+    if exclude is not None:
+        filtered_files = []
+        for file in files:
+            excluded = False
+            for exclude_dir in exclude:
+                if exclude_dir in file:
+                    excluded = True
+                    break
+            if not excluded:
+                filtered_files.append(file)
     return files
 
 
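For reference, a standalone sketch of the exclude-style filtering that the new create_language_base.py calls rely on. The helper name list_source_files is illustrative and not part of the repository; unlike the scan_directory hunk above, this sketch returns the filtered list rather than the unfiltered files variable.

# Illustrative sketch only -- not the repository's scan_directory. It shows the
# exclude-substring filtering idea and returns the filtered result.
import glob


def list_source_files(directory, exclude=None):
    # Match files with an extension directly under `directory`, mirroring the
    # non-recursive branch of scan_directory above.
    files = glob.glob(f'{directory}/*.*')
    if exclude:
        # Drop any path containing one of the excluded substrings.
        files = [f for f in files if not any(pattern in f for pattern in exclude)]
    return files


# Usage mirroring the batch script:
# pages = list_source_files('pages', exclude=['__init__.py', '__pycache__'])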
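Also for reference, a minimal sketch of the session-scoped timestamp pattern that this change moves from INFO.py into Seanium_Brain.py. Streamlit reruns the script on every interaction, but st.session_state persists for the browser session, so the timestamp (and the log file name derived from it) is generated only once per session. The sketch uses a conventional %Y%m%d-%H%M%S stamp, whereas the diff writes %H%H%S, and it substitutes a placeholder '.log' directory for INFO.LOG_PATH.

# Minimal sketch, not the project's code: session-scoped timestamp in Streamlit.
import time

import streamlit as st

if 'SESSION_TIME' not in st.session_state:
    # Runs only on the first script run of a browser session; later reruns reuse the value.
    st.session_state['SESSION_TIME'] = time.strftime("%Y%m%d-%H%M%S")

SESSION_TIME = st.session_state['SESSION_TIME']
CURRENT_LOG_FILE = f'.log/log_{SESSION_TIME}.log'  # '.log' is a placeholder for INFO.LOG_PATH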