Merge pull request #1029 from hlohaus/die

Some small fixes
Tekky 2023-10-10 15:30:05 +01:00 committed by GitHub
commit e06012ae06
11 changed files with 55 additions and 57 deletions

README.md

@@ -158,7 +158,6 @@ docker compose down
```py
import g4f
print(g4f.Provider.Ails.params) # supported args
# Automatic selection of provider
@@ -166,7 +165,7 @@ print(g4f.Provider.Ails.params) # supported args
# streamed completion
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": "Hello world"}],
+    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
@@ -176,22 +175,10 @@ for message in response:
# normal response
response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_4,
-    messages=[{"role": "user", "content": "hi"}],
+    messages=[{"role": "user", "content": "Hello"}],
)  # alternative model setting
print(response)
-
-# Set with provider
-response = g4f.ChatCompletion.create(
-    model="gpt-3.5-turbo",
-    provider=g4f.Provider.DeepAi,
-    messages=[{"role": "user", "content": "Hello world"}],
-    stream=True,
-)
-
-for message in response:
-    print(message)
```
##### Completion
```py
@@ -215,6 +202,7 @@ print(response)
##### Providers:
```py
import g4f
from g4f.Provider import (
    AItianhu,
    Acytoo,
@@ -237,8 +225,17 @@ from g4f.Provider import (
    You,
    Yqcloud,
)
-# Usage:
-response = g4f.ChatCompletion.create(..., provider=ProviderName)
+# Set with provider
+response = g4f.ChatCompletion.create(
+    model="gpt-3.5-turbo",
+    provider=g4f.Provider.Aichat,
+    messages=[{"role": "user", "content": "Hello"}],
+    stream=True,
+)
+
+for message in response:
+    print(message)
```
##### Cookies Required:
@@ -250,6 +247,7 @@ When running the g4f package locally, the package automatically retrieves cookie
```py
import g4f
from g4f.Provider import (
    Bard,
    Bing,
@@ -257,6 +255,7 @@ from g4f.Provider import (
    OpenAssistant,
    OpenaiChat,
)
# Usage:
response = g4f.ChatCompletion.create(
    model=g4f.models.default,
@@ -319,6 +318,7 @@ response = await g4f.ChatCompletion.create(
    proxy="http://host:port",
    # or socks5://user:pass@host:port
)
print(f"Result:", response)
```
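The cookies hunk above is truncated before a complete call is shown. As a rough sketch of what explicit cookie passing could look like, reusing the `get_cookies` helper from `g4f/Provider/helper.py` (changed later in this diff) and assuming providers accept a `cookies` keyword argument (an assumption, not confirmed by this diff):

```py
import g4f
from g4f.Provider import Bing
from g4f.Provider.helper import get_cookies

# get_cookies() reads the browser's cookies for a domain via browser_cookie3
# (see the helper.py hunk below); the domain here is only an example.
cookies = get_cookies(".bing.com")

# Assumption: the provider forwards a `cookies` keyword to its requests.
response = g4f.ChatCompletion.create(
    model=g4f.models.default,
    provider=Bing,
    messages=[{"role": "user", "content": "Hello"}],
    cookies=cookies,
)
print(response)
```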
@@ -532,18 +532,18 @@ if __name__ == "__main__":
## Contribute
-####Create Provider with AI Tool
+#### Create Provider with AI Tool
Call the "create_provider" script in your terminal:
```bash
-$ python etc/tool/create_provider.py
+python etc/tool/create_provider.py
```
1. Enter your name for the new provider.
2. Copy & paste a cURL command from your browser's developer tools.
3. Let the AI create the provider for you.
4. Customize the provider according to your needs.
-####Create Provider
+#### Create Provider
0. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
1. Create a new file in [g4f/provider](./g4f/provider) with the name of the Provider (a minimal skeleton is sketched below)
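The remaining numbered steps are cut off by the diff view. For orientation, a new provider file generally mirrors the `AsyncGeneratorProvider` pattern visible in the provider hunks below; this is a sketch only, with `MyProvider`, the URL, and the JSON payload as placeholder assumptions:

```py
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt


class MyProvider(AsyncGeneratorProvider):
    url = "https://example.com"  # placeholder; not a real service
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}/api/chat",  # placeholder endpoint
                proxy=proxy,
                json={"prompt": format_prompt(messages)},
            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    yield chunk.decode()
```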

etc/tool/create_provider.py

@@ -86,15 +86,16 @@ Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`.
"""
print("Create code...")
response = []
for chunk in g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_long,
    messages=[{"role": "user", "content": prompt}],
    timeout=300,
-    stream=True
+    stream=True,
):
-    response.append(chunk)
    print(chunk, end="", flush=True)
+    response.append(chunk)
print()
response = "".join(response)

g4f/Provider/Acytoo.py

@@ -23,7 +23,7 @@ class Acytoo(AsyncGeneratorProvider):
            headers=_create_header()
        ) as session:
            async with session.post(
-                cls.url + '/api/completions',
+                f'{cls.url}/api/completions',
                proxy=proxy,
                json=_create_payload(messages, **kwargs)
            ) as response:
@@ -40,7 +40,7 @@ def _create_header():
    }

-def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs):
+def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
    return {
        'key'   : '',
        'model' : 'gpt-3.5-turbo',

g4f/Provider/GptGo.py

@@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession
import json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -16,10 +16,10 @@ class GptGo(AsyncGeneratorProvider):
    async def create_async_generator(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        proxy: str = None,
        **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept" : "*/*",

g4f/Provider/H2o.py

@@ -5,7 +5,7 @@ import uuid
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -18,10 +18,10 @@ class H2o(AsyncGeneratorProvider):
    async def create_async_generator(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        proxy: str = None,
        **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
        model = model if model else cls.model
        headers = {"Referer": cls.url + "/"}

g4f/Provider/Myshell.py

@@ -6,7 +6,7 @@ from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -27,11 +27,11 @@ class Myshell(AsyncGeneratorProvider):
    async def create_async_generator(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        proxy: str = None,
        timeout: int = 90,
        **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
        if not model:
            bot_id = models["samantha"]
        elif model in models:

g4f/Provider/Phind.py

@@ -3,7 +3,7 @@ from __future__ import annotations
import random
from datetime import datetime
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -17,11 +17,11 @@ class Phind(AsyncGeneratorProvider):
    async def create_async_generator(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        user_id = ''.join(random.choice(chars) for _ in range(24))
        data = {

g4f/Provider/base_provider.py

@@ -5,7 +5,7 @@ from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import AsyncGenerator, CreateResult
+from ..typing import CreateResult, AsyncResult, Messages

class BaseProvider(ABC):
@@ -20,7 +20,7 @@ class BaseProvider(ABC):
    @abstractmethod
    def create_completion(
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        stream: bool,
        **kwargs
    ) -> CreateResult:
@@ -30,7 +30,7 @@ class BaseProvider(ABC):
    async def create_async(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        *,
        loop: AbstractEventLoop = None,
        executor: ThreadPoolExecutor = None,
@@ -69,7 +69,7 @@ class AsyncProvider(BaseProvider):
    def create_completion(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        stream: bool = False,
        **kwargs
    ) -> CreateResult:
@@ -81,7 +81,7 @@ class AsyncProvider(BaseProvider):
    @abstractmethod
    async def create_async(
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        **kwargs
    ) -> str:
        raise NotImplementedError()
@@ -94,7 +94,7 @@ class AsyncGeneratorProvider(AsyncProvider):
    def create_completion(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        stream: bool = True,
        **kwargs
    ) -> CreateResult:
@@ -116,7 +116,7 @@ class AsyncGeneratorProvider(AsyncProvider):
    async def create_async(
        cls,
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        **kwargs
    ) -> str:
        return "".join([
@@ -132,7 +132,7 @@ class AsyncGeneratorProvider(AsyncProvider):
    @abstractmethod
    def create_async_generator(
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
        raise NotImplementedError()
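Nearly every hunk in this PR replaces the inline `list[dict[str, str]]` annotation with the `Messages` alias and the `AsyncGenerator` return type with `AsyncResult`. `g4f/typing.py` itself is not part of the diff shown here; the aliases presumably reduce to the old annotations, along the lines of:

```py
# Hedged sketch: g4f/typing.py is not shown in this diff. Based on how the
# aliases are used above, they presumably reduce to the old inline types.
from typing import AsyncGenerator, Dict, Generator, List

Messages = List[Dict[str, str]]            # replaces list[dict[str, str]]
CreateResult = Generator[str, None, None]  # sync streaming result (assumed)
AsyncResult = AsyncGenerator[str, None]    # replaces AsyncGenerator
```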

g4f/Provider/deprecated/AiService.py

@@ -2,7 +2,7 @@ from __future__ import annotations
import requests
-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider
@@ -14,7 +14,7 @@ class AiService(BaseProvider):
    @staticmethod
    def create_completion(
        model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:

g4f/Provider/helper.py

@@ -4,7 +4,7 @@ import asyncio
import sys
from asyncio import AbstractEventLoop
from os import path
-from typing import Dict, List
+from ..typing import Dict, List, Messages
import browser_cookie3
# Change event loop policy on windows
@@ -53,7 +53,7 @@ def get_cookies(cookie_domain: str) -> Dict[str, str]:
    return _cookies[cookie_domain]

-def format_prompt(messages: List[Dict[str, str]], add_special_tokens=False) -> str:
+def format_prompt(messages: Messages, add_special_tokens=False) -> str:
    if add_special_tokens or len(messages) > 1:
        formatted = "\n".join(
            [

g4f/Provider/retry_provider.py

@@ -2,7 +2,7 @@ from __future__ import annotations
import random
from typing import List, Type, Dict
-from ..typing import CreateResult
+from ..typing import CreateResult, Messages
from .base_provider import BaseProvider, AsyncProvider
from ..debug import logging
@@ -10,10 +10,7 @@ from ..debug import logging
class RetryProvider(AsyncProvider):
    __name__: str = "RetryProvider"
    working: bool = True
-    needs_auth: bool = False
    supports_stream: bool = True
-    supports_gpt_35_turbo: bool = False
-    supports_gpt_4: bool = False

    def __init__(
        self,
@@ -27,7 +24,7 @@ class RetryProvider(AsyncProvider):
    def create_completion(
        self,
        model: str,
-        messages: List[Dict[str, str]],
+        messages: Messages,
        stream: bool = False,
        **kwargs
    ) -> CreateResult:
@@ -54,17 +51,17 @@ class RetryProvider(AsyncProvider):
                if logging:
                    print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
                if started:
-                    break
+                    raise e

        self.raise_exceptions()
    async def create_async(
        self,
        model: str,
-        messages: List[Dict[str, str]],
+        messages: Messages,
        **kwargs
    ) -> str:
-        providers = [provider for provider in self.providers]
+        providers = self.providers
        if self.shuffle:
            random.shuffle(providers)
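Two behavioral consequences of this last hunk are worth noting: once output has `started` streaming, a provider failure is now re-raised instead of silently breaking out, and `providers = self.providers` means `random.shuffle` now reorders the instance's own provider list in place rather than a copy. A condensed sketch of the resulting retry loop (`create_completion_sketch` is a hypothetical standalone version; exception bookkeeping and logging are omitted):

```py
import random

# Condensed sketch of RetryProvider.create_completion after this change;
# names and error handling are simplified for illustration.
def create_completion_sketch(providers, shuffle, model, messages, **kwargs):
    if shuffle:
        random.shuffle(providers)  # note: reorders the caller's list in place
    for provider in providers:
        started = False
        try:
            for token in provider.create_completion(model, messages, stream=True, **kwargs):
                yield token
                started = True
            return  # one provider succeeded; stop retrying
        except Exception as e:
            if started:
                # Tokens were already yielded: switching providers now would
                # corrupt the reply, so re-raise instead of trying the next one.
                raise e
    raise RuntimeError("all providers failed")  # stands in for self.raise_exceptions()
```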