Merge pull request #6 from sean1832/multilingual_support

Multilingual support
Zeke Zhang 1 year ago committed by GitHub
commit 6c630216ee

@ -0,0 +1,278 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-02-19 03:16+1100\n"
"PO-Revision-Date: 2023-02-18 23:31+1100\n"
"Last-Translator: \n"
"Language-Team: \n"
"Language: en_AU\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Poedit 3.2.2\n"
#: pages/1_Configs.py:32
msgid "💾Save"
msgstr "💾Save"
#: pages/1_Configs.py:35
msgid "✅File saved!"
msgstr "✅File saved!"
#: pages/1_Configs.py:54 pages/1_Configs.py:261
msgid "Select Note Directory"
msgstr "Select Note Directory"
#: pages/1_Configs.py:197 Seanium_brain.py:99
msgid "Settings"
msgstr "Settings"
#: pages/1_Configs.py:198
msgid "Menu"
msgstr "Menu"
#: pages/1_Configs.py:199 pages/1_Configs.py:205 pages/1_Configs.py:206
msgid "📝Prompts"
msgstr "📝Prompts"
#: pages/1_Configs.py:200 pages/1_Configs.py:248 pages/1_Configs.py:249
#: pages/1_Configs.py:301
msgid "💽Brain Memory"
msgstr "💽Brain Memory"
#: pages/1_Configs.py:201 pages/1_Configs.py:310 pages/1_Configs.py:311
msgid "🔑API Keys"
msgstr "🔑API Keys"
#: pages/1_Configs.py:207
msgid "Configuration of prompts."
msgstr "Configuration of prompts."
#: pages/1_Configs.py:221
msgid "Prompt File"
msgstr "Prompt File"
#: pages/1_Configs.py:225
msgid "New Prompt"
msgstr "New Prompt"
#: pages/1_Configs.py:226
msgid "New Prompt Name"
msgstr "New Prompt Name"
#: pages/1_Configs.py:226
#, fuzzy
msgid "new_prompt"
msgstr "New Prompt"
#: pages/1_Configs.py:227
msgid "Create"
msgstr "Create"
#: pages/1_Configs.py:234 Seanium_brain.py:106
msgid "my-info"
msgstr "my-info"
#: pages/1_Configs.py:235 Seanium_brain.py:117 Seanium_brain.py:182
#, fuzzy
msgid "question"
msgstr "Question Model"
#: pages/1_Configs.py:236
msgid "summarize"
msgstr ""
#: pages/1_Configs.py:238
msgid "Delete Prompt"
msgstr "Delete Prompt"
#: pages/1_Configs.py:239
msgid "❌Delete"
msgstr "❌Delete"
#: pages/1_Configs.py:245
msgid "Prompts"
msgstr "Prompts"
#: pages/1_Configs.py:250
msgid "Modify your brain knowledge base."
msgstr "Modify your brain knowledge base."
#: pages/1_Configs.py:255
msgid "🔄Refresh"
msgstr "🔄Refresh"
#: pages/1_Configs.py:257
msgid "📁Select Note Directory"
msgstr "📁Select Note Directory"
#: pages/1_Configs.py:260
msgid "Note Directory"
msgstr "Note Directory"
#: pages/1_Configs.py:266
msgid "Delimiter"
msgstr "Delimiter"
#: pages/1_Configs.py:269
msgid "Append Mode"
msgstr "Append Mode"
#: pages/1_Configs.py:270
msgid "Force Delimiter"
msgstr "Force Delimiter"
#: pages/1_Configs.py:273
msgid "Filter Mode"
msgstr "Filter Mode"
#: pages/1_Configs.py:279
msgid "Add Filter"
msgstr "Add Filter"
#: pages/1_Configs.py:280
msgid "Delete Filter"
msgstr "Delete Filter"
#: pages/1_Configs.py:300
msgid "Raw Memory Inputs"
msgstr "Raw Memory Inputs"
#: pages/1_Configs.py:312
msgid "Configure your OpenAI API keys."
msgstr "Configure your OpenAI API keys."
#: pages/1_Configs.py:313
msgid "API Keys"
msgstr "API Keys"
#: Seanium_brain.py:63
msgid "📥download log"
msgstr "📥download log"
#: Seanium_brain.py:73
msgid "Processing"
msgstr ""
#: Seanium_brain.py:74
#, fuzzy
msgid "Thinking on"
msgstr "Thinking on Answer"
#: Seanium_brain.py:109
msgid "Operations"
msgstr "Operations"
#: Seanium_brain.py:115
msgid "Question Model"
msgstr "Question Model"
#: Seanium_brain.py:124
msgid "Model"
msgstr ""
#: Seanium_brain.py:127
msgid "Temperature"
msgstr "Temperature"
#: Seanium_brain.py:128
msgid "Max Tokens"
msgstr "Max Tokens"
#: Seanium_brain.py:130
msgid "Advanced Options"
msgstr "Advanced Options"
#: Seanium_brain.py:131
msgid "Top_P"
msgstr ""
#: Seanium_brain.py:132
msgid "Frequency penalty"
msgstr "Frequency penalty"
#: Seanium_brain.py:134
msgid "Presence penalty"
msgstr "Presence penalty"
#: Seanium_brain.py:137
msgid "Chunk size"
msgstr "Chunk size"
#: Seanium_brain.py:138
msgid "Answer count"
msgstr "Answer count"
#: Seanium_brain.py:148
msgid "Clear Log"
msgstr "Clear Log"
#: Seanium_brain.py:149
msgid "Log Cleared"
msgstr "Log Cleared"
#: Seanium_brain.py:154
msgid "version"
msgstr ""
#: Seanium_brain.py:155
msgid "author"
msgstr ""
#: Seanium_brain.py:156
msgid "Report bugs"
msgstr ""
#: Seanium_brain.py:157
msgid "Github Repo"
msgstr ""
#: Seanium_brain.py:160
#, fuzzy
msgid "🧠GPT-Brain"
msgstr "🧠Seanium Brain"
#: Seanium_brain.py:161
msgid ""
"This is my personal AI powered brain feeding my own Obsidian notes. Ask "
"anything."
msgstr ""
#: Seanium_brain.py:163
msgid "This is a beta version. Please [🪲report bugs]("
msgstr ""
#: Seanium_brain.py:164
msgid ") if you find any."
msgstr ""
#: Seanium_brain.py:172
msgid "Building Brain..."
msgstr "Building Brain..."
#: Seanium_brain.py:175
msgid "Brain rebuild!"
msgstr "Brain rebuild!"
#: Seanium_brain.py:179
msgid "Thinking on Answer"
msgstr "Thinking on Answer"
#: Seanium_brain.py:184
msgid "💬Answer"
msgstr "💬Answer"
#: Seanium_brain.py:216
msgid "Ask Brain: "
msgstr "Ask Brain: "
#: Seanium_brain.py:219
msgid "📩Send"
msgstr "📩Send"

@ -0,0 +1,4 @@
{
"English": "en_US",
"简体中文": "zh_CN"
}

@ -0,0 +1,278 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-02-19 03:16+1100\n"
"PO-Revision-Date: 2023-02-19 03:16+1100\n"
"Last-Translator: \n"
"Language-Team: \n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"X-Generator: Poedit 3.2.2\n"
#: pages/1_Configs.py:32
msgid "💾Save"
msgstr "💾保存"
#: pages/1_Configs.py:35
msgid "✅File saved!"
msgstr "✅文件以保存!"
#: pages/1_Configs.py:54 pages/1_Configs.py:261
msgid "Select Note Directory"
msgstr "请选择笔记文件目录"
#: pages/1_Configs.py:197 Seanium_brain.py:99
msgid "Settings"
msgstr "设置"
#: pages/1_Configs.py:198
msgid "Menu"
msgstr "菜单"
#: pages/1_Configs.py:199 pages/1_Configs.py:205 pages/1_Configs.py:206
msgid "📝Prompts"
msgstr "📝咒文prompt"
#: pages/1_Configs.py:200 pages/1_Configs.py:248 pages/1_Configs.py:249
#: pages/1_Configs.py:301
msgid "💽Brain Memory"
msgstr "💽脑记忆"
#: pages/1_Configs.py:201 pages/1_Configs.py:310 pages/1_Configs.py:311
msgid "🔑API Keys"
msgstr "🔑API 密匙"
#: pages/1_Configs.py:207
msgid "Configuration of prompts."
msgstr "这里设置咒文prompt"
#: pages/1_Configs.py:221
msgid "Prompt File"
msgstr "咒文文件"
#: pages/1_Configs.py:225
msgid "New Prompt"
msgstr "新建咒文"
#: pages/1_Configs.py:226
msgid "New Prompt Name"
msgstr "新建文件名"
#: pages/1_Configs.py:226
msgid "new_prompt"
msgstr "新建咒文"
#: pages/1_Configs.py:227
msgid "Create"
msgstr "创建"
#: pages/1_Configs.py:234 Seanium_brain.py:106
msgid "my-info"
msgstr "我的背景"
#: pages/1_Configs.py:235 Seanium_brain.py:117 Seanium_brain.py:182
msgid "question"
msgstr "问题"
#: pages/1_Configs.py:236
msgid "summarize"
msgstr "总结"
#: pages/1_Configs.py:238
msgid "Delete Prompt"
msgstr "删除咒文"
#: pages/1_Configs.py:239
msgid "❌Delete"
msgstr "❌删除"
#: pages/1_Configs.py:245
msgid "Prompts"
msgstr "咒文Prompts"
#: pages/1_Configs.py:250
msgid "Modify your brain knowledge base."
msgstr "这里修改大脑知识库"
#: pages/1_Configs.py:255
msgid "🔄Refresh"
msgstr "🔄刷新"
#: pages/1_Configs.py:257
msgid "📁Select Note Directory"
msgstr "📁选择笔记目录"
#: pages/1_Configs.py:260
msgid "Note Directory"
msgstr "笔记目录"
#: pages/1_Configs.py:266
msgid "Delimiter"
msgstr "分界符号"
#: pages/1_Configs.py:269
msgid "Append Mode"
msgstr "附加模式"
#: pages/1_Configs.py:270
msgid "Force Delimiter"
msgstr "强制分界"
#: pages/1_Configs.py:273
msgid "Filter Mode"
msgstr "筛选模式"
#: pages/1_Configs.py:279
msgid "Add Filter"
msgstr "增加筛选"
#: pages/1_Configs.py:280
msgid "Delete Filter"
msgstr "删除筛选"
#: pages/1_Configs.py:300
msgid "Raw Memory Inputs"
msgstr "笔记数据内容"
#: pages/1_Configs.py:312
msgid "Configure your OpenAI API keys."
msgstr "这里设置OpenAI API密匙"
#: pages/1_Configs.py:313
msgid "API Keys"
msgstr "API密匙"
#: Seanium_brain.py:63
msgid "📥download log"
msgstr "📥下载日志"
#: Seanium_brain.py:73
msgid "Processing"
msgstr "正在处理"
#: Seanium_brain.py:74
msgid "Thinking on"
msgstr "正在思考"
#: Seanium_brain.py:109
msgid "Operations"
msgstr "操作"
#: Seanium_brain.py:115
msgid "Question Model"
msgstr "问题模型"
#: Seanium_brain.py:124
msgid "Model"
msgstr "模型"
#: Seanium_brain.py:127
msgid "Temperature"
msgstr "温度Temperature"
#: Seanium_brain.py:128
msgid "Max Tokens"
msgstr "最大令牌数Max Token"
#: Seanium_brain.py:130
msgid "Advanced Options"
msgstr "高级设置"
#: Seanium_brain.py:131
msgid "Top_P"
msgstr ""
#: Seanium_brain.py:132
msgid "Frequency penalty"
msgstr "频率惩罚Frequency penalty"
#: Seanium_brain.py:134
msgid "Presence penalty"
msgstr "存在惩罚Presence penalty"
#: Seanium_brain.py:137
msgid "Chunk size"
msgstr "区块大小Chunk size"
#: Seanium_brain.py:138
msgid "Answer count"
msgstr "回答数量Answer count"
#: Seanium_brain.py:148
msgid "Clear Log"
msgstr "清除日志"
#: Seanium_brain.py:149
msgid "Log Cleared"
msgstr "日志以清除"
#: Seanium_brain.py:154
msgid "version"
msgstr "版本"
#: Seanium_brain.py:155
msgid "author"
msgstr "作者"
#: Seanium_brain.py:156
msgid "Report bugs"
msgstr "报告bug"
#: Seanium_brain.py:157
msgid "Github Repo"
msgstr "Github源代码"
#: Seanium_brain.py:160
msgid "🧠GPT-Brain"
msgstr "🧠GPT-大脑"
#: Seanium_brain.py:161
msgid ""
"This is my personal AI powered brain feeding my own Obsidian notes. Ask "
"anything."
msgstr "这是我的个人AI知识管理库请随意问。"
#: Seanium_brain.py:163
msgid "This is a beta version. Please [🪲report bugs]("
msgstr "该版本为BETA测试版。如果遇到BUG请[🪲在此处报告BUG]("
#: Seanium_brain.py:164
msgid ") if you find any."
msgstr ")。"
#: Seanium_brain.py:172
msgid "Building Brain..."
msgstr "重建大脑内容"
#: Seanium_brain.py:175
msgid "Brain rebuild!"
msgstr "大脑已重建!"
#: Seanium_brain.py:179
msgid "Thinking on Answer"
msgstr "思考答案中"
#: Seanium_brain.py:184
msgid "💬Answer"
msgstr "💬回答"
#: Seanium_brain.py:216
msgid "Ask Brain: "
msgstr "提问大脑:"
#: Seanium_brain.py:219
msgid "📩Send"
msgstr "📩发送"
#~ msgid ""
#~ "This version does not allow for inquiries in languages other than English."
#~ msgstr "该版本暂不支持英语以外的检索。"

@ -24,9 +24,10 @@
### Todo
- [x] ~~Batch script to update library.~~
- [ ] Versioning.
- [x] ~~Versioning.~~
- [ ] Tooltips for parameters.
- [ ] Chinese support.
- [x] ~~Chinese support.~~
- [x] ~~Multilingual search support.~~
- [ ] Provide detailed documentation for users.
- [ ] Automatic update for new version.
- [ ] Release for windows.

@ -24,9 +24,10 @@
### 未来计划
- [x] ~~batch脚本更新库。~~
- [ ] 版本控制。
- [x] ~~版本控制。~~
- [ ] 参数提示。
- [ ] 支持中文UI和搜索。
- [x] ~~支持中文UI~~
- [x] ~~支持多语言检索。~~
- [ ] 提供详细操作指南。
- [ ] 自动更新。
- [ ] 发布windows版本。

@ -2,6 +2,7 @@ import streamlit as st
from modules import utilities as util
from modules import model_data
from modules import language
import brain
import check_update
import time
@ -11,8 +12,9 @@ import os
if 'SESSION_TIME' not in st.session_state:
st.session_state['SESSION_TIME'] = time.strftime("%Y%m%d-%H%M%S")
st.set_page_config(
page_title='Seanium Brain'
page_title='GPT Brain'
)
util.remove_oldest_file('.user/log', 10)
@ -21,12 +23,14 @@ model_options = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text
header = st.container()
body = st.container()
LOG_PATH = '.user/log'
PROMPT_PATH = '.user/prompt'
SESSION_TIME = st.session_state['SESSION_TIME']
SESSION_LANG = st.session_state['SESSION_LANGUAGE']
PROMPT_PATH = f'.user/prompt/{SESSION_LANG}'
CURRENT_LOG_FILE = f'{LOG_PATH}/log_{SESSION_TIME}.log'
BRAIN_MEMO = '.user/brain-memo.json'
MANIFEST = '.core/manifest.json'
def create_log():
if not os.path.exists(CURRENT_LOG_FILE):
util.write_file(f'Session {SESSION_TIME}\n\n', CURRENT_LOG_FILE)
@ -55,7 +59,7 @@ def save_as():
with open(CURRENT_LOG_FILE, 'rb') as f:
content = f.read()
st.download_button(
label="📥download log",
label=_("📥download log"),
data=content,
file_name=f'log_{SESSION_TIME}.txt',
mime='text/plain'
@ -65,8 +69,8 @@ def save_as():
def process_response(query, target_model, prompt_file: str, data: model_data.param):
# check if exclude model is not target model
file_name = util.get_file_name(prompt_file)
print(f'Processing {file_name}...')
with st.spinner(f'Thinking on {file_name}...'):
print(_('Processing') + f" {file_name}...")
with st.spinner(_('Thinking on') + f" {file_name}..."):
results = brain.run(query, target_model, prompt_file,
data.temp,
data.max_tokens,
@ -80,47 +84,57 @@ def process_response(query, target_model, prompt_file: str, data: model_data.par
log(results, delimiter=f'{file_name.upper()}')
def message(msg, condition=None):
if condition is not None:
if condition:
st.warning("⚠️" + msg)
else:
st.warning("⚠️" + msg)
# sidebar
with st.sidebar:
st.title('Settings')
_ = language.set_language()
st.title(_('Settings'))
language.select_language()
prompt_files = util.scan_directory(PROMPT_PATH)
prompt_file_names = [util.get_file_name(file) for file in prompt_files]
prompt_dictionary = dict(zip(prompt_file_names, prompt_files))
# remove 'my-info' from prompt dictionary
prompt_dictionary.pop('my-info')
prompt_dictionary.pop(_('my-info'))
operation_options = list(prompt_dictionary.keys())
operations = st.multiselect('Operations', operation_options, default=util.read_json_at(BRAIN_MEMO, 'operations',
operation_options[0]))
operations = st.multiselect(_('Operations'), operation_options,
default=util.read_json_at(BRAIN_MEMO, f'operations_{SESSION_LANG}', operation_options[0]))
last_question_model = util.read_json_at(BRAIN_MEMO, 'question_model', model_options[0])
# get index of last question model
question_model_index = util.get_index(model_options, last_question_model)
question_model = st.selectbox('Question Model', model_options, index=question_model_index)
question_model = st.selectbox(_('Question Model'), model_options, index=question_model_index)
operations_no_question = [op for op in operations if op != 'question']
operations_no_question = [op for op in operations if op != _('question')]
other_models = []
replace_tokens = []
for operation in operations_no_question:
last_model = util.read_json_at(BRAIN_MEMO, f'{operation}_model', model_options[0])
# get index of last model
model_index = util.get_index(model_options, last_model)
model = st.selectbox(f'{operation} Model', model_options, index=model_index)
model = st.selectbox(f"{operation} " + _('Model'), model_options, index=model_index)
other_models.append(model)
temp = st.slider('Temperature', 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'temp', 0.1))
max_tokens = st.slider('Max Tokens', 850, 4500, value=util.read_json_at(BRAIN_MEMO, 'max_tokens', 1000))
temp = st.slider(_('Temperature'), 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'temp', 0.1))
max_tokens = st.slider(_('Max Tokens'), 850, 4500, value=util.read_json_at(BRAIN_MEMO, 'max_tokens', 1000))
with st.expander(label='Advanced Options'):
top_p = st.slider('Top_P', 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'top_p', 1.0))
freq_panl = st.slider('Frequency penalty', 0.0, 1.0,
with st.expander(label=_('Advanced Options')):
top_p = st.slider(_('Top_P'), 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'top_p', 1.0))
freq_panl = st.slider(_('Frequency penalty'), 0.0, 1.0,
value=util.read_json_at(BRAIN_MEMO, 'frequency_penalty', 0.0))
pres_panl = st.slider('Presence penalty', 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'present_penalty', 0.0))
pres_panl = st.slider(_('Presence penalty'), 0.0, 1.0,
value=util.read_json_at(BRAIN_MEMO, 'present_penalty', 0.0))
chunk_size = st.slider('Chunk Size', 1500, 4500, value=util.read_json_at(BRAIN_MEMO, 'chunk_size', 4000))
chunk_count = st.slider('Answer Count', 1, 5, value=util.read_json_at(BRAIN_MEMO, 'chunk_count', 1))
chunk_size = st.slider(_('Chunk size'), 1500, 4500, value=util.read_json_at(BRAIN_MEMO, 'chunk_size', 4000))
chunk_count = st.slider(_('Answer count'), 1, 5, value=util.read_json_at(BRAIN_MEMO, 'chunk_count', 1))
param = model_data.param(temp=temp,
max_tokens=max_tokens,
@ -130,20 +144,23 @@ with st.sidebar:
chunk_size=chunk_size,
chunk_count=chunk_count)
if st.button('Clear Log', on_click=clear_log):
st.success('Log Cleared')
if st.button(_('Clear Log'), on_click=clear_log):
st.success(_('Log Cleared'))
# info
st.markdown('---')
st.markdown(f"# {util.read_json_at(MANIFEST, 'name')}")
st.markdown(f"version: {util.read_json_at(MANIFEST, 'version')}")
st.markdown(f"author: {util.read_json_at(MANIFEST, 'author')}")
st.markdown(f"[Report bugs]({util.read_json_at(MANIFEST, 'bugs')})")
st.markdown(f"[Github Repo]({util.read_json_at(MANIFEST, 'homepage')})")
st.markdown(_('version') + f": {util.read_json_at(MANIFEST, 'version')}")
st.markdown(_('author') + f": {util.read_json_at(MANIFEST, 'author')}")
st.markdown("[" + _('Report bugs') + "]" + f"({util.read_json_at(MANIFEST, 'bugs')})")
st.markdown("[" + _('Github Repo') + "]" + f"({util.read_json_at(MANIFEST, 'homepage')})")
with header:
st.title('🧠Seanium Brain')
st.text('This is my personal AI powered brain feeding my own Obsidian notes. Ask anything.')
st.title(_('🧠GPT-Brain'))
st.text(_('This is my personal AI powered brain feeding my own Obsidian notes. Ask anything.'))
message(_("This is a beta version. Please [🪲report bugs](") + util.read_json_at(MANIFEST, 'bugs') + _(
") if you find any."))
def execute_brain(q):
@ -151,19 +168,19 @@ def execute_brain(q):
log(f'\n\n\n\n[{str(time.ctime())}] - QUESTION: {q}')
if check_update.isUpdated():
st.success('Building Brain...')
st.success(_('Building Brain...'))
# if brain-info is updated
brain.build(chunk_size)
st.success('Brain rebuild!')
st.success(_('Brain rebuild!'))
time.sleep(2)
# thinking on answer
with st.spinner('Thinking on Answer'):
with st.spinner(_('Thinking on Answer')):
answer = brain.run_answer(q, question_model, temp, max_tokens, top_p, freq_panl, pres_panl,
chunk_count=chunk_count)
if util.contains(operations, 'question'):
if util.contains(operations, _('question')):
# displaying results
st.header('💬Answer')
st.header(_('💬Answer'))
st.info(f'{answer}')
time.sleep(1)
log(answer, delimiter='ANSWER')
@ -183,7 +200,7 @@ def execute_brain(q):
util.update_json(BRAIN_MEMO, key, value)
# write operation to json
util.update_json(BRAIN_MEMO, 'operations', operations)
util.update_json(BRAIN_MEMO, f'operations_{SESSION_LANG}', operations)
# write question model to json
util.update_json(BRAIN_MEMO, 'question_model', question_model)
@ -195,10 +212,10 @@ def execute_brain(q):
# main
with body:
question = st.text_area('Ask Brain: ')
question = st.text_area(_('Ask Brain: '))
col1, col2, col3, col4 = st.columns(4)
with col1:
send = st.button('📩Send')
send = st.button(_('📩Send'))
with col2:
if os.path.exists(CURRENT_LOG_FILE):
save_as()

@ -0,0 +1,13 @@
@echo off
cd..
echo Activating Virtual environment...
call .\venv\Scripts\activate
echo creating language base...
call python .\batch-programs\create_language_base.py
cls
echo language base created!
pause

@ -0,0 +1,31 @@
import os
import subprocess
import sys
# Add the parent directory to the Python path
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
from modules import utilities as util
pages = util.scan_directory(f'pages')
pages_full = []
for page in pages:
page = util.get_file_name(page, extension=True)
pages_full.append(f'pages/{page}')
main_file = util.get_file_name(f'Seanium_brain.py', extension=True)
pages_full.append(main_file)
pages_flatten = ' '.join(pages_full)
locals_path = f'.locals'
languages = util.read_json(f'{locals_path}/languages.json')
# create LC_MESSAGES under .local directory if not exist
for language in languages.values():
# if language path not exist, create it
if not os.path.exists(f'{locals_path}/{language}/LC_MESSAGES'):
os.makedirs(f'{locals_path}/{language}/LC_MESSAGES')
# create .pot file
subprocess.call(f'xgettext {pages_flatten} -o {locals_path}/base.pot', shell=True)
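Note that the script above only extracts a base.pot template, while gettext.translation() at runtime loads compiled base.mo catalogs rather than the .po sources. A minimal compile helper in the same style as the scripts in this PR is sketched below; it is an illustration, not part of this diff, and assumes GNU msgfmt is available on PATH.
import os
import subprocess
import sys
# Add the parent directory to the Python path, mirroring create_language_base.py
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
from modules import utilities as util
locals_path = '.locals'
languages = util.read_json(f'{locals_path}/languages.json')
# Compile each language's base.po into the base.mo that gettext.translation() actually loads.
for language in languages.values():
    po_file = f'{locals_path}/{language}/LC_MESSAGES/base.po'
    mo_file = f'{locals_path}/{language}/LC_MESSAGES/base.mo'
    if os.path.exists(po_file):
        subprocess.call(f'msgfmt {po_file} -o {mo_file}', shell=True)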

@ -0,0 +1,12 @@
@echo off
cd..
echo Activating Virtual environment...
call .\venv\Scripts\activate
echo creating language base...
call python .\batch-programs\create_language_base.py
echo updating .po files...
call python .\batch-programs\update_language.py
pause

@ -0,0 +1,20 @@
import os
import subprocess
import sys
# Add the parent directory to the Python path
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
from modules import utilities as util
locals_path = f'.locals'
languages = util.read_json(f'{locals_path}/languages.json')
try:
# create LC_MESSAGES under .local directory if not exist
for language in languages.values():
subprocess.call(f'msgmerge -U {locals_path}/{language}/LC_MESSAGES/base.po {locals_path}/base.pot', shell=True)
except Exception as e:
print(e)
print('Error: Unable to merge .pot file with .po files')

@ -2,12 +2,18 @@ import openai
import textwrap
from modules import utilities as util
from modules import language
import streamlit as st
from modules import gpt_util as gpt
openai.api_key = util.read_file(r'.user\API-KEYS.txt').strip()
prompt_dir = '.user/prompt'
if 'SESSION_LANGUAGE' not in st.session_state:
st.session_state['SESSION_LANGUAGE'] = util.read_json_at('.user/language.json', 'SESSION_LANGUAGE', 'en_US')
SESSION_LANG = st.session_state['SESSION_LANGUAGE']
prompt_dir = f'.user/prompt/{SESSION_LANG}'
_ = language.set_language()
def build(chunk_size=4000):
@ -25,17 +31,17 @@ def build(chunk_size=4000):
print(info, '\n\n\n')
result.append(info)
util.write_json_file(result, r'.user\brain-data.json')
util.write_json(result, r'.user\brain-data.json')
def run_answer(query, model, temp, max_tokens, top_p, freq_penl, pres_penl, chunk_count):
brain_data = util.read_json_file(r'.user\brain-data.json')
brain_data = util.read_json(r'.user\brain-data.json')
results = gpt.search_chunks(query, brain_data, chunk_count)
answers = []
for result in results:
my_info = util.read_file(f'{prompt_dir}/my-info.txt')
my_info = util.read_file(f'{prompt_dir}/' + _('my-info') + '.txt')
prompt = util.read_file(f'{prompt_dir}/question.txt')
prompt = util.read_file(f'{prompt_dir}/' + _('question') + '.txt')
prompt = prompt.replace('<<INFO>>', result['content'])
prompt = prompt.replace('<<QS>>', query)
prompt = prompt.replace('<<MY-INFO>>', my_info)

@ -0,0 +1,5 @@
为以下文章总结成清晰易读的要点, 注意包含所有的重要信息。
<<DATA>>
要点:

@ -0,0 +1 @@
我的名字是肖恩,我是一名建筑学学生。

@ -0,0 +1,4 @@
根据 <<MY-INFO>> 的前提, 通过以下信息: <<INFO>> , 以一个建筑师的角度回答问题: <<QS>> 并给予答案, 要求答案文笔清晰,能够容易的阅读,并且避免重复的内容。
确保答案包含所有关键的内容。确保答案用中文回答。
答案:

@ -1,17 +1,20 @@
import openai
import numpy as np
# This function compares the similarity between two vectors.
# The higher the dot product, the more alike the two vectors are.
def similarity(v1, v2):
return np.dot(v1, v2)
# return a list of vectors
def embedding(content, engine='text-embedding-ada-002'):
response = openai.Embedding.create(input=content, engine=engine)
vector = response['data'][0]['embedding']
return vector
def search_chunks(text, data, count=1):
vector = embedding(text)
points = []
@ -28,9 +31,10 @@ def search_chunks(text, data, count=1):
return ordered[0:count]
def gpt3(prompt, model, temp, max_tokens, top_p, freq_penl, pres_penl):
response = openai.Completion.create(
model= model,
model=model,
prompt=prompt,
temperature=temp,
max_tokens=max_tokens,
@ -39,4 +43,4 @@ def gpt3(prompt, model, temp, max_tokens, top_p, freq_penl, pres_penl):
presence_penalty=pres_penl
)
text = response['choices'][0]['text'].strip()
return text
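As a quick illustration of the ranking in search_chunks above (not part of this diff): similarity() is a plain dot product, so chunks whose embedding vectors point in roughly the same direction as the query vector score highest and come first in the ordered results. A toy sketch with hand-made 3-dimensional vectors standing in for the 1536-dimensional text-embedding-ada-002 vectors:
import numpy as np
# similarity() in gpt_util.py is np.dot(v1, v2)
query_vec = np.array([0.9, 0.1, 0.0])
chunk_a = np.array([0.8, 0.2, 0.1])   # points roughly the same way as the query
chunk_b = np.array([0.0, 0.1, 0.9])   # points elsewhere
print(np.dot(query_vec, chunk_a))  # ~0.74 -> this chunk would be ranked first
print(np.dot(query_vec, chunk_b))  # ~0.01 -> ranked last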

@ -0,0 +1,28 @@
import gettext
import streamlit as st
import modules.utilities as util
languages = util.read_json('.locals/languages.json')
def select_language():
language_index = util.get_index(list(languages.values()), st.session_state['SESSION_LANGUAGE'])
# Add a language selector widget to the Streamlit app
language = st.sidebar.selectbox('Language', languages.keys(), language_index)
selected_lang = languages[language]
if st.session_state['SESSION_LANGUAGE'] != selected_lang:
st.session_state['SESSION_LANGUAGE'] = selected_lang
util.write_json({'SESSION_LANGUAGE': selected_lang}, '.user/language.json')
st.experimental_rerun()
def set_language():
# set current language
lang_translations = gettext.translation('base', localedir='.locals', languages=[st.session_state['SESSION_LANGUAGE']])
lang_translations.install()
# define _ shortcut for translations
_ = lang_translations.gettext
return _
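Taken together, a page uses these two helpers the same way Seanium_brain.py and pages/1_Configs.py do: call set_language() once to obtain the `_` lookup function, render the selector in the sidebar, and wrap every user-facing string in `_()`. A minimal hypothetical page sketch, assuming st.session_state['SESSION_LANGUAGE'] has already been initialised (brain.py does this on import) and the compiled catalogs exist under .locals:
import streamlit as st
from modules import language
# Install the catalog for the stored SESSION_LANGUAGE and get gettext's _() lookup.
_ = language.set_language()
with st.sidebar:
    # Language selectbox; changing it persists .user/language.json and reruns the app.
    language.select_language()
st.title(_('Settings'))  # resolved from .locals/<lang>/LC_MESSAGES/base.mo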

@ -41,8 +41,11 @@ def scan_directory(directory):
# get file name without extension
def get_file_name(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
def get_file_name(filepath, extension=False):
if extension:
return os.path.basename(filepath)
else:
return os.path.splitext(os.path.basename(filepath))[0]
def create_path_not_exist(path):
@ -106,24 +109,25 @@ def delete_file(filepath):
def create_json_not_exist(filepath, initial_value={}):
if not os.path.exists(filepath):
write_json_file(initial_value, filepath)
write_json(initial_value, filepath)
def write_json_file(content, filepath, mode='w'):
with open(filepath, mode) as file:
def write_json(content, filepath, mode='w', encoding='UTF-8'):
with open(filepath, mode, encoding=encoding) as file:
json.dump(content, file, indent=2)
def read_json_file(filepath):
def read_json(filepath):
try:
with open(filepath, 'r') as file:
with open(filepath, 'r', encoding='UTF-8') as file:
return json.load(file)
except FileNotFoundError:
create_json_not_exist(filepath)
return {}
def read_json_at(filepath, key, default_value=''):
data = read_json_file(filepath)
data = read_json(filepath)
try:
# if key is string, check if it is boolean or numeric
if isinstance(data[key], str):
@ -140,14 +144,14 @@ def read_json_at(filepath, key, default_value=''):
except KeyError:
# if key not found, create key with default value
data[key] = default_value
write_json_file(data, filepath)
write_json(data, filepath)
return data[key]
def update_json(filepath, key, value):
data = read_json_file(filepath)
data = read_json(filepath)
data[key] = value
write_json_file(data, filepath)
write_json(data, filepath)
def contains(list, item):

@ -7,14 +7,18 @@ import os
from modules import utilities as util
import tkinter as tk
from tkinter import filedialog
from modules import language
user_dir = '.user/'
prompt_dir = f'{user_dir}prompt/'
SESSION_LANG = st.session_state['SESSION_LANGUAGE']
prompt_dir = f'{user_dir}prompt/{SESSION_LANG}/'
brain_memo = f'{user_dir}brain-memo.json'
if 'FILTER_ROW_COUNT' not in st.session_state:
st.session_state['FILTER_ROW_COUNT'] = util.read_json_at(brain_memo, 'filter_row_count')
_ = language.set_language()
st.set_page_config(
page_title='Configs'
)
@ -25,10 +29,10 @@ body = st.container()
def save(content, path, page='', json_value: dict = None):
if json_value is None:
json_value = []
save_but = st.button('💾Save')
save_but = st.button(_('💾Save'))
if save_but:
util.write_file(content, path)
st.success(f'✅File saved!')
st.success(_('✅File saved!'))
# write to json file
if page == _('💽Brain Memory'):
util.update_json(brain_memo, 'delimiter', json_value['delimiter'])
@ -47,7 +51,7 @@ def select_directory():
root.withdraw()
# make sure the dialog is on top of the main window
root.attributes('-topmost', True)
directory = filedialog.askdirectory(initialdir=os.getcwd(), title='Select Note Directory', master=root)
directory = filedialog.askdirectory(initialdir=os.getcwd(), title=_('Select Note Directory'), master=root)
return directory
@ -165,9 +169,9 @@ def filter_data(pages: list, add_filter_button, del_filter_button):
if del_filter_button:
st.session_state['FILTER_ROW_COUNT'] -= 1
if st.session_state['FILTER_ROW_COUNT'] >= 1:
for i in range(st.session_state['FILTER_ROW_COUNT']+1):
for i in range(st.session_state['FILTER_ROW_COUNT'] + 1):
try:
init_info = init_filter_infos[i-1]
init_info = init_filter_infos[i - 1]
init_key = init_info['key']
init_logic = init_info['logic']
init_val = init_info['value']
@ -190,122 +194,124 @@ def filter_data(pages: list, add_filter_button, del_filter_button):
def main():
with st.sidebar:
st.title('Settings')
menu = st.radio('Menu', [
'📝Prompts',
'💽Brain Memory',
'🔑API Keys'
st.title(_('Settings'))
menu = st.radio(_('Menu'), [
_('📝Prompts'),
_('💽Brain Memory'),
_('🔑API Keys')
])
with body:
match menu:
case '📝Prompts':
st.title('📝Prompts')
st.text('Configuration of prompts.')
# read selected file
last_sel_file = util.read_json_at(brain_memo, 'selected_prompt')
all_files = os.listdir(prompt_dir)
# sort files based on modification time
all_files.sort(key=lambda x: os.path.getmtime(f'{prompt_dir}{x}'), reverse=True)
# index of last selected file
try:
last_sel_file_index = all_files.index(last_sel_file)
except ValueError:
last_sel_file_index = 0
selected_file = st.selectbox('Prompt File', all_files, last_sel_file_index)
col1, col2 = st.columns(2)
with col1:
if st_toggle.st_toggle_switch('New Prompt', label_after=True):
new_file = st.text_input('New Prompt Name', value='new_prompt')
if st.button('Create'):
util.write_file('', f'{prompt_dir}{new_file}.txt')
# change selected file to the new file
util.update_json(brain_memo, 'selected_prompt', selected_file)
if menu == _('📝Prompts'):
st.title(_('📝Prompts'))
st.text(_('Configuration of prompts.'))
# read selected file
last_sel_file = util.read_json_at(brain_memo, 'selected_prompt')
all_files = os.listdir(prompt_dir)
# sort files based on modification time
all_files.sort(key=lambda x: os.path.getmtime(f'{prompt_dir}{x}'), reverse=True)
# index of last selected file
try:
last_sel_file_index = all_files.index(last_sel_file)
except ValueError:
last_sel_file_index = 0
selected_file = st.selectbox(_('Prompt File'), all_files, last_sel_file_index)
col1, col2 = st.columns(2)
with col1:
if st_toggle.st_toggle_switch(_('New Prompt'), label_after=True):
new_file = st.text_input(_('New Prompt Name'), value=_('new_prompt'))
if st.button(_('Create')):
util.write_file('', f'{prompt_dir}{new_file}.txt')
# change selected file to the new file
util.update_json(brain_memo, 'selected_prompt', selected_file)
# refresh page
st.experimental_rerun()
with col2:
is_core = selected_file == _('my-info') + '.txt' or \
selected_file == _('question') + '.txt' or \
selected_file == _('summarize') + '.txt'
if not is_core:
if st_toggle.st_toggle_switch(_('Delete Prompt'), label_after=True):
if st.button(_('❌Delete')):
util.delete_file(f'{prompt_dir}{selected_file}')
# refresh page
st.experimental_rerun()
with col2:
is_core = selected_file == 'my-info.txt' or \
selected_file == 'question.txt' or \
selected_file == 'summarize.txt'
if not is_core:
if st_toggle.st_toggle_switch('Delete Prompt', label_after=True):
if st.button('❌Delete'):
util.delete_file(f'{prompt_dir}{selected_file}')
# refresh page
st.experimental_rerun()
selected_path = prompt_dir + selected_file
mod_text = st.text_area('Prompts', value=util.read_file(selected_path), height=500)
save(mod_text, selected_path)
case '💽Brain Memory':
st.title('💽Brain Memory')
st.text('Modify your brain knowledge base.')
memory_data = util.read_file(f'{user_dir}input.txt')
col1, col2 = st.columns(2)
with col1:
st.button('🔄Refresh')
with col2:
if st.button('📁Select Note Directory'):
note_dir = select_directory()
util.update_json(brain_memo, 'note_dir', note_dir)
note_dir = st.text_input('Note Directory', value=util.read_json_at(brain_memo, 'note_dir'),
placeholder='Select Note Directory', key='note_dir')
col1, col2, col3, col4 = st.columns([1, 2, 2, 2])
with col1:
delimiter_memo = util.read_json_at(brain_memo, 'delimiter')
delimiter = st.text_input('Delimiter', delimiter_memo, placeholder='e.g. +++')
with col2:
append_mode = st.checkbox('Append Mode', value=util.read_json_at(brain_memo, 'append_mode'))
force_delimiter = st.checkbox('Force Delimiter', value=util.read_json_at(brain_memo, 'force_mode'))
with col3:
advanced_mode = st_toggle.st_toggle_switch('Filter Mode',
label_after=True,
default_value=util.read_json_at(brain_memo,
'advanced_mode', False))
with col4:
if advanced_mode:
add_filter_button = st.button('Add Filter')
del_filter_button = st.button('Delete Filter')
# if note directory is selected
if note_dir != '':
# if advanced mode enabled
if advanced_mode:
note_datas = util.read_files(note_dir, single_string=False)
note_datas, filter_info = filter_data(note_datas, add_filter_button, del_filter_button)
# note_datas, filter_key, filter_logic, filter_val = filter_data(note_datas, True)
modified_data = util.parse_data(note_datas, delimiter, force_delimiter)
else:
modified_data = util.read_files(note_dir, single_string=True, delimiter=delimiter,
force=force_delimiter)
if append_mode:
memory_data += modified_data
else:
memory_data = modified_data
mod_text = st.text_area('Raw Memory Inputs', value=memory_data, height=500)
save(mod_text, f'{user_dir}input.txt', '💽Brain Memory', {
'delimiter': delimiter,
'append_mode': append_mode,
'force_mode': force_delimiter,
'advanced_mode': advanced_mode,
'filter_info': filter_info,
'filter_row_count': len(filter_info),
})
case '🔑API Keys':
st.title('🔑API Keys')
st.text('Configure your OpenAI API keys.')
mod_text = st.text_input('API Keys', value=util.read_file(f'{user_dir}API-KEYS.txt'))
save(mod_text, f'{user_dir}API-KEYS.txt')
selected_path = prompt_dir + selected_file
mod_text = st.text_area(_('Prompts'), value=util.read_file(selected_path), height=500)
save(mod_text, selected_path)
if menu == _('💽Brain Memory'):
st.title(_('💽Brain Memory'))
st.text(_('Modify your brain knowledge base.'))
memory_data = util.read_file(f'{user_dir}input.txt')
col1, col2 = st.columns(2)
with col1:
st.button(_('🔄Refresh'))
with col2:
if st.button(_('📁Select Note Directory')):
note_dir = select_directory()
util.update_json(brain_memo, 'note_dir', note_dir)
note_dir = st.text_input(_('Note Directory'), value=util.read_json_at(brain_memo, 'note_dir'),
placeholder=_('Select Note Directory'), key='note_dir')
col1, col2, col3, col4 = st.columns([1, 2, 2, 2])
with col1:
delimiter_memo = util.read_json_at(brain_memo, 'delimiter')
delimiter = st.text_input(_('Delimiter'), delimiter_memo, placeholder='e.g. +++')
with col2:
append_mode = st.checkbox(_('Append Mode'), value=util.read_json_at(brain_memo, 'append_mode'))
force_delimiter = st.checkbox(_('Force Delimiter'),
value=util.read_json_at(brain_memo, 'force_mode'))
with col3:
advanced_mode = st_toggle.st_toggle_switch(_('Filter Mode'),
label_after=True,
default_value=util.read_json_at(brain_memo,
'advanced_mode', False))
with col4:
if advanced_mode:
add_filter_button = st.button("" + _('Add Filter'))
del_filter_button = st.button("" + _('Delete Filter'))
filter_info = {}
# if note directory is selected
if note_dir != '':
# if advanced mode enabled
if advanced_mode:
note_datas = util.read_files(note_dir, single_string=False)
note_datas, filter_info = filter_data(note_datas, add_filter_button, del_filter_button)
# note_datas, filter_key, filter_logic, filter_val = filter_data(note_datas, True)
modified_data = util.parse_data(note_datas, delimiter, force_delimiter)
else:
modified_data = util.read_files(note_dir, single_string=True, delimiter=delimiter,
force=force_delimiter)
if append_mode:
memory_data += modified_data
else:
memory_data = modified_data
mod_text = st.text_area(_('Raw Memory Inputs'), value=memory_data, height=500)
save(mod_text, f'{user_dir}input.txt', _('💽Brain Memory'), {
'delimiter': delimiter,
'append_mode': append_mode,
'force_mode': force_delimiter,
'advanced_mode': advanced_mode,
'filter_info': filter_info,
'filter_row_count': len(filter_info),
})
if menu == _('🔑API Keys'):
st.title(_('🔑API Keys'))
st.text(_('Configure your OpenAI API keys.'))
mod_text = st.text_input(_('API Keys'), value=util.read_file(f'{user_dir}API-KEYS.txt'))
save(mod_text, f'{user_dir}API-KEYS.txt')
if __name__ == '__main__':

@ -23,9 +23,12 @@ REM if .user\ not exist, create one
if not exist .user\ (md .user\)
REM Create API KEY file
set /p API_KEYS=[Enter your API keys]:
echo %API_KEYS%> .user\API-KEYS.txt
echo API key written to file!
if not exist .user\API-KEYS.txt (
set /p API_KEYS=[Enter your API keys]:
echo %API_KEYS%> .user\API-KEYS.txt
echo API key written to file!
)
REM copy example prompt
if not exist .user\prompt (md .user\prompt)
@ -35,8 +38,11 @@ REM wait 2 tick
ping 127.0.0.1 -n 2 > NUL
REM create input txt file
echo.> .user\input.txt
echo input file created!
if not exist .user\input.txt (
echo.> .user\input.txt
echo input file created!
)
python web_ui/initial_file_creator.py
