Merge pull request #911 from xtekky/revert-907-fix-type-hint

Revert "Fix type hint"
Tekky authored 11 months ago; committed by GitHub
commit a1d000ea3b

@@ -145,7 +145,7 @@ import g4f
 print(g4f.Provider.Ails.params) # supported args
-# Automatic selection of Provider
+# Automatic selection of provider

 # streamed completion
 response = g4f.ChatCompletion.create(
@@ -166,7 +166,7 @@ response = g4f.ChatCompletion.create(
 print(response)

-# Set with Provider
+# Set with provider
 response = g4f.ChatCompletion.create(
     model="gpt-3.5-turbo",
     provider=g4f.Provider.DeepAi,
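For context, here is a runnable sketch of the call this hunk truncates; the `messages` and `stream` arguments are filled in from the surrounding README example, not quoted from the diff.

```py
import g4f

# explicit provider plus streamed completion, as in the README section
# these hunks edit; the message content is an illustrative stand-in
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.DeepAi,
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
)

for message in response:
    print(message, end="")
```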
@@ -474,8 +474,8 @@ if __name__ == "__main__":
 to add another provider, its very simple:

-1. create a new file in [g4f/Provider](./g4f/Provider) with the name of the Provider
-2. Implement a class that extends [BaseProvider](./g4f/Provider/base_provider.py).
+1. create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
+2. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).

 ```py
 from .base_provider import BaseProvider
@@ -499,7 +499,7 @@ class HogeService(BaseProvider):
 3. Here, you can adjust the settings, for example if the website does support streaming, set `working` to `True`...
 4. Write code to request the provider in `create_completion` and `yield` the response, _even if_ its a one-time response, do not hesitate to look at other providers for inspiration
-5. Add the Provider Name in [g4f/provider/\_\_init__.py](./g4f/Provider/__init__.py)
+5. Add the Provider Name in [g4f/provider/\_\_init__.py](./g4f/provider/__init__.py)

 ```py
 from .base_provider import BaseProvider
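For orientation, a minimal sketch of what steps 2 to 4 produce, modeled on the `HogeService` class this hunk's header references; the URL and attribute values are illustrative assumptions, not code from the repository.

```py
from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class HogeService(BaseProvider):
    url             = "https://hoge.example"  # hypothetical endpoint
    working         = True                    # step 3: set once verified
    supports_stream = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        # step 4: request the provider and yield the response,
        # even if it arrives as a single chunk
        yield ""
```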

@@ -70,4 +70,4 @@ class AItianhu(AsyncGeneratorProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -85,7 +85,7 @@ class Ails(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

 def _hash(json_data: dict[str, str]) -> SHA256:

@@ -75,4 +75,4 @@ class Aivvm(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -88,4 +88,4 @@ class Bard(AsyncProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -59,4 +59,4 @@ class ChatBase(AsyncGeneratorProvider):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -64,4 +64,4 @@ class ChatgptLogin(AsyncProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -60,4 +60,4 @@ class CodeLinkAva(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -74,4 +74,4 @@ class DfeHub(BaseProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -108,4 +108,4 @@ class EasyChat(BaseProvider):
             ("active_server", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -78,4 +78,4 @@ class Equing(BaseProvider):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -83,4 +83,4 @@ class FastGpt(ABC):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -66,7 +66,7 @@ class GetGpt(BaseProvider):
             ('max_tokens', 'int'),
         ]
         param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.Provider.{cls.__name__} supports: ({param})'
+        return f'g4f.provider.{cls.__name__} supports: ({param})'

 def _encrypt(e: str):

@@ -98,4 +98,4 @@ class H2o(AsyncGeneratorProvider):
             ("return_full_text", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -103,4 +103,4 @@ class HuggingChat(AsyncGeneratorProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -88,4 +88,4 @@ class Liaobots(AsyncGeneratorProvider):
             ("auth", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -61,4 +61,4 @@ class Lockchat(BaseProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -99,4 +99,4 @@ class OpenAssistant(AsyncGeneratorProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -91,4 +91,4 @@ class OpenaiChat(AsyncProvider):
             ("cookies", "dict[str, str]")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -69,4 +69,4 @@ class Raycast(BaseProvider):
             ("auth", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -94,4 +94,4 @@ class Theb(BaseProvider):
             ("top_p", "int")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -64,4 +64,4 @@ class V50(BaseProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -63,4 +63,4 @@ class Vitalentum(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -60,4 +60,4 @@ class Wuguokai(BaseProvider):
             ("stream", "bool")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -73,4 +73,4 @@ class Ylokh(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -34,7 +34,7 @@ class BaseProvider(ABC):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

 _cookies = {}

@@ -6,7 +6,6 @@ import random
 logging = False

 class ChatCompletion:
     @staticmethod
     def create(
@@ -22,8 +21,9 @@ class ChatCompletion:
         except KeyError:
             raise Exception(f'The model: {model} does not exist')

         if not provider:
-            if isinstance(model.best_provider, list):
+            if isinstance(model.best_provider, tuple):
                 provider = random.choice(model.best_provider)
             else:
                 provider = model.best_provider
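This is the one behavioral change in the revert: the new models file (added below) stores multiple candidate providers as a tuple, so the isinstance check must test for `tuple` rather than `list`. A standalone sketch, with string stand-ins for the provider classes:

```py
import random

best_provider = ("Vercel", "DeepAi", "Ylokh")  # stand-ins for classes

if isinstance(best_provider, tuple):
    provider = random.choice(best_provider)  # choice accepts any sequence
else:
    provider = best_provider                 # a single provider class
print(provider)
```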

@@ -0,0 +1,212 @@
from __future__ import annotations
from dataclasses import dataclass
from .Provider import BaseProvider, Bard, H2o, Vercel
from .Provider import Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin, CodeLinkAva
from .Provider import DeepAi, Vercel, Vitalentum, Ylokh, You, Yqcloud
from .typing import Union
@dataclass
class Model:
name: str
base_provider: str
best_provider: Union[type[BaseProvider], tuple[type[BaseProvider]]] = None
# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
name="",
base_provider="huggingface"
)
# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = (
Vercel, Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin,
CodeLinkAva, DeepAi, Vitalentum, Ylokh, You, Yqcloud
)
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
)
# Bard
palm = Model(
name = 'palm',
base_provider = 'google',
best_provider = Bard)
# H2o
falcon_7b = Model(
name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
base_provider = 'huggingface',
best_provider = H2o)
falcon_40b = Model(
name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
base_provider = 'huggingface',
best_provider = H2o)
llama_13b = Model(
name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
base_provider = 'huggingface',
best_provider = H2o)
# Vercel
claude_instant_v1 = Model(
name = 'anthropic:claude-instant-v1',
base_provider = 'anthropic',
best_provider = Vercel)
claude_v1 = Model(
name = 'anthropic:claude-v1',
base_provider = 'anthropic',
best_provider = Vercel)
claude_v2 = Model(
name = 'anthropic:claude-v2',
base_provider = 'anthropic',
best_provider = Vercel)
command_light_nightly = Model(
name = 'cohere:command-light-nightly',
base_provider = 'cohere',
best_provider = Vercel)
command_nightly = Model(
name = 'cohere:command-nightly',
base_provider = 'cohere',
best_provider = Vercel)
gpt_neox_20b = Model(
name = 'huggingface:EleutherAI/gpt-neox-20b',
base_provider = 'huggingface',
best_provider = Vercel)
oasst_sft_1_pythia_12b = Model(
name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
base_provider = 'huggingface',
best_provider = Vercel)
oasst_sft_4_pythia_12b_epoch_35 = Model(
name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
base_provider = 'huggingface',
best_provider = Vercel)
santacoder = Model(
name = 'huggingface:bigcode/santacoder',
base_provider = 'huggingface',
best_provider = Vercel)
bloom = Model(
name = 'huggingface:bigscience/bloom',
base_provider = 'huggingface',
best_provider = Vercel)
flan_t5_xxl = Model(
name = 'huggingface:google/flan-t5-xxl',
base_provider = 'huggingface',
best_provider = Vercel)
code_davinci_002 = Model(
name = 'openai:code-davinci-002',
base_provider = 'openai',
best_provider = Vercel)
gpt_35_turbo_16k = Model(
name = 'openai:gpt-3.5-turbo-16k',
base_provider = 'openai',
best_provider = Vercel)
gpt_35_turbo_16k_0613 = Model(
name = 'openai:gpt-3.5-turbo-16k-0613',
base_provider = 'openai')
gpt_4_0613 = Model(
name = 'openai:gpt-4-0613',
base_provider = 'openai',
best_provider = Vercel)
text_ada_001 = Model(
name = 'openai:text-ada-001',
base_provider = 'openai',
best_provider = Vercel)
text_babbage_001 = Model(
name = 'openai:text-babbage-001',
base_provider = 'openai',
best_provider = Vercel)
text_curie_001 = Model(
name = 'openai:text-curie-001',
base_provider = 'openai',
best_provider = Vercel)
text_davinci_002 = Model(
name = 'openai:text-davinci-002',
base_provider = 'openai',
best_provider = Vercel)
text_davinci_003 = Model(
name = 'openai:text-davinci-003',
base_provider = 'openai',
best_provider = Vercel)
llama13b_v2_chat = Model(
name = 'replicate:a16z-infra/llama13b-v2-chat',
base_provider = 'replicate',
best_provider = Vercel)
llama7b_v2_chat = Model(
name = 'replicate:a16z-infra/llama7b-v2-chat',
base_provider = 'replicate',
best_provider = Vercel)
class ModelUtils:
convert: dict[str, Model] = {
# GPT-3.5 / GPT-4
'gpt-3.5-turbo' : gpt_35_turbo,
'gpt-4' : gpt_4,
# Bard
'palm2' : palm,
'palm' : palm,
'google' : palm,
'google-bard' : palm,
'google-palm' : palm,
'bard' : palm,
# H2o
'falcon-40b' : falcon_40b,
'falcon-7b' : falcon_7b,
'llama-13b' : llama_13b,
# Vercel
'claude-instant-v1' : claude_instant_v1,
'claude-v1' : claude_v1,
'claude-v2' : claude_v2,
'command-nightly' : command_nightly,
'gpt-neox-20b' : gpt_neox_20b,
'santacoder' : santacoder,
'bloom' : bloom,
'flan-t5-xxl' : flan_t5_xxl,
'code-davinci-002' : code_davinci_002,
'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
'gpt-4-0613' : gpt_4_0613,
'text-ada-001' : text_ada_001,
'text-babbage-001' : text_babbage_001,
'text-curie-001' : text_curie_001,
'text-davinci-002' : text_davinci_002,
'text-davinci-003' : text_davinci_003,
'llama13b-v2-chat' : llama13b_v2_chat,
'llama7b-v2-chat' : llama7b_v2_chat,
'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b,
'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
'command-light-nightly' : command_light_nightly,
'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
}
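A hedged usage sketch of the registry this new file introduces, assuming it lands as `g4f/models.py` (which the relative imports suggest): the string a caller passes to `ChatCompletion.create` resolves through `ModelUtils.convert` to a `Model`, whose `best_provider` feeds the selection logic shown earlier.

```py
from g4f import models  # assumed module path

model = models.ModelUtils.convert['claude-v2']
print(model.name)           # anthropic:claude-v2
print(model.base_provider)  # anthropic
print(model.best_provider)  # the Vercel provider class
```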

@@ -12,10 +12,9 @@ CreateResult = Generator[str, None, None]
 __all__ = [
     'Any',
     'AsyncGenerator',
-    'CreateResult',
     'Generator',
-    'SHA256',
     'Tuple',
     'TypedDict',
-    'Union',
+    'SHA256',
+    'CreateResult',
 ]

@@ -3,13 +3,13 @@ from pathlib import Path
 def main():
     content = create_content()
-    with open("g4f/Provider/__init__.py", "w", encoding="utf-8") as f:
+    with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
         f.write(content)

 def create_content():
     path = Path()
-    paths = path.glob("g4f/Provider/*.py")
+    paths = path.glob("g4f/provider/*.py")
     paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
     classnames = [p.stem for p in paths]
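For reference, a standalone illustration of the glob-and-stem step this tool hunk points back at the lowercase `g4f/provider` path; the printed names are examples, not an actual directory listing.

```py
from pathlib import Path

paths = Path().glob("g4f/provider/*.py")
paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
classnames = [p.stem for p in paths]
print(classnames)  # e.g. ['AItianhu', 'Ails', 'Bard', ...]
```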

@@ -81,7 +81,7 @@ def print_providers():
         netloc = urlparse(_provider.url).netloc
         website = f"[{netloc}]({_provider.url})"

-        provider_name = f"g4f.Provider.{_provider.__name__}"
+        provider_name = f"g4f.provider.{_provider.__name__}"

         has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else ""
         has_gpt_4 = "✔️" if _provider.supports_gpt_4 else ""
@@ -128,7 +128,7 @@ def print_models():
         name = re.split(r":|/", model.name)[-1]
         base_provider = base_provider_names[model.base_provider]
-        provider_name = f"g4f.Provider.{model.best_provider.__name__}"
+        provider_name = f"g4f.provider.{model.best_provider.__name__}"
         provider_url = provider_urls[model.best_provider.__name__]
         netloc = urlparse(provider_url).netloc
         website = f"[{netloc}]({provider_url})"
