some more things

Branch: pull/954/head
Commenter123321 committed 9 months ago
parent 7181f2897b
commit 6ba092469a

@@ -24,12 +24,14 @@ from .Provider import (
    ChatgptDuo,
)

@dataclass(unsafe_hash=True)
class Model:
    name: str
    base_provider: str
    best_provider: Union[type[BaseProvider], RetryProvider] = None

# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
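
The hunk is cut off mid-assignment at the default = Model( line, so the constructor arguments are not shown. As a rough sketch of how an entry built from these three fields typically looks (the name, base_provider and provider values below are assumptions, not lines from this commit):

# Sketch only, as it would sit inside g4f/models.py where Model and the
# provider classes are already in scope; all values are illustrative assumptions.
gpt_4_32k_0613 = Model(
    name='gpt-4-32k-0613',       # string key later listed in ModelUtils.convert
    base_provider='openai',      # upstream service the model belongs to
    best_provider=Aivvm          # a provider class, or a RetryProvider fallback chain
)
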
@@ -219,9 +221,11 @@ class ModelUtils:
        # gpt-3.5 / gpt-4
        'gpt-3.5-turbo': gpt_35_turbo,
        'gpt-3.5-turbo-16k': gpt_35_turbo_16k,
-       'gpt-3.5-turbo-16k-0613': gpt_35_turbo_16k_0613,
        'gpt-4': gpt_4,
        'gpt-4-0613': gpt_4_0613,
+       'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+       'gpt-4-32k': gpt_4_32k,
+       'gpt-4-32k-0613': gpt_4_32k_0613,

        # Bard
        'palm2': palm,
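
The string keys in this table are what ModelUtils.convert resolves when a caller passes the model by name rather than as a Model object. A minimal usage sketch under that assumption:

import g4f

# Model passed as a plain string; g4f maps it back to the Model object through
# ModelUtils.convert. No provider is pinned, so the model's best_provider is used.
response = g4f.ChatCompletion.create(
    model='gpt-4-32k-0613',
    messages=[{"role": "user", "content": "hello!"}],
)
print(response)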

@@ -7,19 +7,20 @@ import g4f, asyncio
print("create:", end=" ", flush=True)
for response in g4f.ChatCompletion.create(
-   model=g4f.models.gpt_4,
-   provider=g4f.Provider.Vercel,
-   messages=[{"role": "user", "content": "hello!"}],
+   model=g4f.models.gpt_4_32k_0613,
+   provider=g4f.Provider.Aivvm,
+   messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
    temperature=0.0,
    stream=True
):
    print(response, end="", flush=True)
print()
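
With stream=True the create call yields the reply in fragments, which the loop above prints as they arrive. The fragments can just as well be collected into one string; a short sketch reusing the same model and provider:

import g4f

# Accumulate the streamed fragments instead of printing them piece by piece.
chunks = []
for chunk in g4f.ChatCompletion.create(
    model=g4f.models.gpt_4_32k_0613,
    provider=g4f.Provider.Aivvm,
    messages=[{"role": "user", "content": "hello!"}],
    stream=True,
):
    chunks.append(chunk)   # each chunk is one piece of the reply text
print("".join(chunks))
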
async def run_async():
    response = await g4f.ChatCompletion.create_async(
-       model=g4f.models.gpt_4_32k_0613,
+       model=g4f.models.gpt_35_turbo_16k_0613,
        provider=g4f.Provider.Aivvm,
        messages=[{"role": "user", "content": "hello!"}],
        temperature=0.0
    )
    print("create_async:", response)
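
The hunk ends before any call site for run_async; presumably the bottom of the same test script drives it with a line like the one below (an assumption, since that part is outside the diff):

# Assumed driver at the end of the script; asyncio is imported at the top of the file.
asyncio.run(run_async())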
