some more things

pull/954/head
Commenter123321 9 months ago
parent 7181f2897b
commit 6ba092469a

@ -1,15 +1,15 @@
from __future__ import annotations from __future__ import annotations
from dataclasses import dataclass from dataclasses import dataclass
from .typing import Union from .typing import Union
from .Provider import BaseProvider, RetryProvider from .Provider import BaseProvider, RetryProvider
from .Provider import ( from .Provider import (
ChatgptLogin, ChatgptLogin,
ChatgptAi, ChatgptAi,
ChatBase, ChatBase,
Vercel, Vercel,
DeepAi, DeepAi,
Aivvm, Aivvm,
Bard, Bard,
H2o, H2o,
GptGo, GptGo,
Bing, Bing,
@ -24,237 +24,241 @@ from .Provider import (
ChatgptDuo, ChatgptDuo,
) )
@dataclass(unsafe_hash=True)
class Model:
    """A named model together with the provider(s) best suited to serve it.

    Attributes:
        name: Model identifier as the providers expect it (e.g. 'gpt-3.5-turbo').
        base_provider: Short label for the originating vendor/platform.
        best_provider: Provider class (or RetryProvider chain) used by default;
            None when no working provider is configured.
    """
    name: str
    base_provider: str
    # The field defaults to None, so the annotation must admit None as well
    # (the original Union[...] annotation did not).
    best_provider: Union[type[BaseProvider], RetryProvider, None] = None
# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
    name="",
    base_provider="",
    best_provider=RetryProvider([
        Bing,          # Not fully GPT 3 or 4
        PerplexityAi,  # Adds references to sources
        Wewordle,      # Responds with markdown
        Yqcloud,       # Answers short questions in chinese
        ChatBase,      # Don't want to answer creatively
        ChatgptDuo,    # Include search results
        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo,
        AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
    ]),
)
# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
    'gpt-3.5-turbo',
    'openai',
    RetryProvider([
        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo,
        AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
    ]),
)
gpt_4 = Model(
    'gpt-4',
    'openai',
    RetryProvider([Myshell, AItianhuSpace, Aivvm]),
)
# Bard
palm = Model('palm', 'google', Bard)
# H2o
falcon_7b = Model('h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', 'huggingface', H2o)
falcon_40b = Model('h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', 'huggingface', H2o)
llama_13b = Model('h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', 'huggingface', H2o)
# Vercel
claude_instant_v1 = Model('claude-instant-v1', 'anthropic', Vercel)
claude_v1 = Model('claude-v1', 'anthropic', Vercel)
claude_v2 = Model('claude-v2', 'anthropic', Vercel)
command_light_nightly = Model('command-light-nightly', 'cohere', Vercel)
command_nightly = Model('command-nightly', 'cohere', Vercel)
gpt_neox_20b = Model('EleutherAI/gpt-neox-20b', 'huggingface', Vercel)
oasst_sft_1_pythia_12b = Model('OpenAssistant/oasst-sft-1-pythia-12b', 'huggingface', Vercel)
oasst_sft_4_pythia_12b_epoch_35 = Model('OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'huggingface', Vercel)
santacoder = Model('bigcode/santacoder', 'huggingface', Vercel)
bloom = Model('bigscience/bloom', 'huggingface', Vercel)
flan_t5_xxl = Model('google/flan-t5-xxl', 'huggingface', Vercel)
code_davinci_002 = Model('code-davinci-002', 'openai', Vercel)
gpt_35_turbo_16k = Model('gpt-3.5-turbo-16k', 'openai', Aivvm)
gpt_35_turbo_16k_0613 = Model('gpt-3.5-turbo-16k-0613', 'openai')  # best_provider left at its None default
gpt_35_turbo_0613 = Model('gpt-3.5-turbo-0613', 'openai', RetryProvider([Aivvm, ChatgptLogin]))
gpt_4_0613 = Model('gpt-4-0613', 'openai', Aivvm)
gpt_4_32k = Model('gpt-4-32k', 'openai', Aivvm)
gpt_4_32k_0613 = Model('gpt-4-32k-0613', 'openai', Aivvm)
text_ada_001 = Model('text-ada-001', 'openai', Vercel)
text_babbage_001 = Model('text-babbage-001', 'openai', Vercel)
text_curie_001 = Model('text-curie-001', 'openai', Vercel)
text_davinci_002 = Model('text-davinci-002', 'openai', Vercel)
text_davinci_003 = Model('text-davinci-003', 'openai', Vercel)
llama13b_v2_chat = Model('replicate:a16z-infra/llama13b-v2-chat', 'replicate', Vercel)
llama7b_v2_chat = Model('replicate:a16z-infra/llama7b-v2-chat', 'replicate', Vercel)
class ModelUtils:
    """Lookup table mapping public model-name strings to Model instances.

    Multiple aliases may point at the same Model (see the Bard/palm group).
    """
    convert: dict[str, Model] = {
        # gpt-3.5 / gpt-4
        'gpt-3.5-turbo': gpt_35_turbo,
        # Was missing even though gpt_35_turbo_0613 is defined above.
        'gpt-3.5-turbo-0613': gpt_35_turbo_0613,
        'gpt-3.5-turbo-16k': gpt_35_turbo_16k,
        'gpt-3.5-turbo-16k-0613': gpt_35_turbo_16k_0613,
        'gpt-4': gpt_4,
        'gpt-4-0613': gpt_4_0613,
        'gpt-4-32k': gpt_4_32k,
        'gpt-4-32k-0613': gpt_4_32k_0613,
        # Bard
        'palm2': palm,
        'palm': palm,
        'google': palm,
        'google-bard': palm,
        'google-palm': palm,
        'bard': palm,
        # H2o
        'falcon-40b': falcon_40b,
        'falcon-7b': falcon_7b,
        'llama-13b': llama_13b,
        # Vercel
        'claude-instant-v1': claude_instant_v1,
        'claude-v1': claude_v1,
        'claude-v2': claude_v2,
        'command-nightly': command_nightly,
        'gpt-neox-20b': gpt_neox_20b,
        'santacoder': santacoder,
        'bloom': bloom,
        'flan-t5-xxl': flan_t5_xxl,
        'code-davinci-002': code_davinci_002,
        'text-ada-001': text_ada_001,
        'text-babbage-001': text_babbage_001,
        'text-curie-001': text_curie_001,
        'text-davinci-002': text_davinci_002,
        'text-davinci-003': text_davinci_003,
        'llama13b-v2-chat': llama13b_v2_chat,
        'llama7b-v2-chat': llama7b_v2_chat,
        'oasst-sft-1-pythia-12b': oasst_sft_1_pythia_12b,
        'oasst-sft-4-pythia-12b-epoch-3.5': oasst_sft_4_pythia_12b_epoch_35,
        'command-light-nightly': command_light_nightly,
    }

@ -7,19 +7,20 @@ import g4f, asyncio
# Stream a chat completion and echo each token as it arrives.
print("create:", end=" ", flush=True)
stream = g4f.ChatCompletion.create(
    model=g4f.models.gpt_4_32k_0613,
    provider=g4f.Provider.Aivvm,
    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
    temperature=0.0,
    stream=True,
)
for token in stream:
    print(token, end="", flush=True)
print()
async def run_async():
    """Exercise the non-streaming async client path with a single prompt."""
    result = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_35_turbo_16k_0613,
        provider=g4f.Provider.Aivvm,
        messages=[{"role": "user", "content": "hello!"}],
        temperature=0.0,
    )
    print("create_async:", result)

Loading…
Cancel
Save