pull/839/head
Bagus Indrayana 11 months ago
commit c9d2f06090

@@ -1,10 +1,10 @@
![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)

-By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you may comply with the GNU GPL license this Repository uses.
+By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, reuploads made by other users, or anything else related to gpt4free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this repository uses.

-This (quite censored) New Version of gpt4free, was just released, it may contain bugs, open an issue or contribute a PR when encountering one, some features were disabled.
+This (quite censored) new version of gpt4free was just released, so it may contain bugs. Please open an issue or contribute a PR when you encounter one.

-Docker is for now not available but I would be happy if someone contributes a PR. The g4f GUI will be uploaded soon enough.
+P.S.: Docker is not available for now, but I would be happy if someone contributed a PR. The g4f GUI will be uploaded soon.

### New
- pypi package:
@@ -83,7 +83,7 @@ import g4f
import g4f

-print(g4f.provider.Ails.params)  # supported args
+print(g4f.Provider.Ails.params)  # supported args

# Automatic selection of provider
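Note on the hunk above: each provider exposes a `params` property (several implementations appear later in this diff) that renders its supported keyword arguments as a plain string, so the `print` is purely informational. A minimal sketch of the kind of output to expect; the exact argument list varies per provider:

```py
import g4f

# Sketch: `params` is a human-readable summary string, e.g.
# "g4f.provider.Ails supports: (model: str, messages: list[dict[str, str]], stream: bool)"
print(g4f.Provider.Ails.params)
```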
@@ -109,7 +109,7 @@ print(response)
# Set with provider
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
-    provider=g4f.provider.DeepAi,
+    provider=g4f.Provider.DeepAi,
    messages=[{"role": "user", "content": "Hello world"}],
    stream=True,
)
@@ -120,7 +120,7 @@ for message in response:
providers:
```py
-from g4f.provider import (
+from g4f.Provider import (
    Acytoo,
    Aichat,
    Ails,
@@ -185,61 +185,61 @@ if __name__ == "__main__":
| Website | Provider | gpt-3.5 | gpt-4 | Streaming | Status | Auth |
| ------ | ------- | ------- | ----- | --------- | ------ | ---- |
-| [www.aitianhu.com](https://www.aitianhu.com/api/chat-process) | g4f.provider.AItianhu | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [www.aitianhu.com](https://www.aitianhu.com/api/chat-process) | g4f.Provider.AItianhu | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.acytoo.com](https://chat.acytoo.com/api/completions) | g4f.provider.Acytoo | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.acytoo.com](https://chat.acytoo.com/api/completions) | g4f.Provider.Acytoo | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [aiservice.vercel.app](https://aiservice.vercel.app/api/chat/answer) | g4f.provider.AiService | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [aiservice.vercel.app](https://aiservice.vercel.app/api/chat/answer) | g4f.Provider.AiService | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat-gpt.org](https://chat-gpt.org/chat) | g4f.provider.Aichat | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat-gpt.org](https://chat-gpt.org/chat) | g4f.Provider.Aichat | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [ai.ls](https://ai.ls) | g4f.provider.Ails | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [ai.ls](https://ai.ls) | g4f.Provider.Ails | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [bard.google.com](https://bard.google.com) | g4f.provider.Bard | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [bard.google.com](https://bard.google.com) | g4f.Provider.Bard | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [bing.com](https://bing.com/chat) | g4f.provider.Bing | ❌ | ✔️ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [bing.com](https://bing.com/chat) | g4f.Provider.Bing | ❌ | ✔️ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | g4f.provider.ChatgptAi | ❌ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | g4f.Provider.ChatgptAi | ❌ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatgptlogin.ac](https://chatgptlogin.ac) | g4f.provider.ChatgptLogin | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptlogin.ac](https://chatgptlogin.ac) | g4f.Provider.ChatgptLogin | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [deepai.org](https://deepai.org) | g4f.provider.DeepAi | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [deepai.org](https://deepai.org) | g4f.Provider.DeepAi | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.dfehub.com](https://chat.dfehub.com/api/chat) | g4f.provider.DfeHub | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.dfehub.com](https://chat.dfehub.com/api/chat) | g4f.Provider.DfeHub | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [free.easychat.work](https://free.easychat.work) | g4f.provider.EasyChat | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [free.easychat.work](https://free.easychat.work) | g4f.Provider.EasyChat | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [forefront.com](https://forefront.com) | g4f.provider.Forefront | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [forefront.com](https://forefront.com) | g4f.Provider.Forefront | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.getgpt.world](https://chat.getgpt.world/) | g4f.provider.GetGpt | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.getgpt.world](https://chat.getgpt.world/) | g4f.Provider.GetGpt | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | g4f.provider.H2o | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | g4f.Provider.H2o | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [liaobots.com](https://liaobots.com) | g4f.provider.Liaobots | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
+| [liaobots.com](https://liaobots.com) | g4f.Provider.Liaobots | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
-| [supertest.lockchat.app](http://supertest.lockchat.app) | g4f.provider.Lockchat | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [supertest.lockchat.app](http://supertest.lockchat.app) | g4f.Provider.Lockchat | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [opchatgpts.net](https://opchatgpts.net) | g4f.provider.Opchatgpts | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [opchatgpts.net](https://opchatgpts.net) | g4f.Provider.Opchatgpts | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [backend.raycast.com](https://backend.raycast.com/api/v1/ai/chat_completions) | g4f.provider.Raycast | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [backend.raycast.com](https://backend.raycast.com/api/v1/ai/chat_completions) | g4f.Provider.Raycast | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [theb.ai](https://theb.ai) | g4f.provider.Theb | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [theb.ai](https://theb.ai) | g4f.Provider.Theb | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [play.vercel.ai](https://play.vercel.ai) | g4f.provider.Vercel | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [play.vercel.ai](https://play.vercel.ai) | g4f.Provider.Vercel | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [wewordle.org](https://wewordle.org/gptapi/v1/android/turbo) | g4f.provider.Wewordle | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [wewordle.org](https://wewordle.org/gptapi/v1/android/turbo) | g4f.Provider.Wewordle | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [you.com](https://you.com) | g4f.provider.You | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [you.com](https://you.com) | g4f.Provider.You | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | g4f.provider.Yqcloud | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | g4f.Provider.Yqcloud | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |

### Other Models
| Model | Base Provider | Provider | Website |
| --------------------------------------- | ------------- | ------------------- | ------------------------------------------- |
-| palm | Google | g4f.provider.Bard | [bard.google.com](https://bard.google.com/) |
+| palm | Google | g4f.Provider.Bard | [bard.google.com](https://bard.google.com/) |
-| h2ogpt-gm-oasst1-en-2048-falcon-7b-v3 | Huggingface | g4f.provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
+| h2ogpt-gm-oasst1-en-2048-falcon-7b-v3 | Huggingface | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
-| h2ogpt-gm-oasst1-en-2048-falcon-40b-v1 | Huggingface | g4f.provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
+| h2ogpt-gm-oasst1-en-2048-falcon-40b-v1 | Huggingface | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
-| h2ogpt-gm-oasst1-en-2048-open-llama-13b | Huggingface | g4f.provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
+| h2ogpt-gm-oasst1-en-2048-open-llama-13b | Huggingface | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) |
-| claude-instant-v1 | Anthropic | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| claude-instant-v1 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| claude-v1 | Anthropic | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| claude-v1 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| claude-v2 | Anthropic | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| claude-v2 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| command-light-nightly | Cohere | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| command-light-nightly | Cohere | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| command-nightly | Cohere | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| command-nightly | Cohere | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| gpt-neox-20b | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| gpt-neox-20b | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| oasst-sft-1-pythia-12b | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| oasst-sft-1-pythia-12b | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| oasst-sft-4-pythia-12b-epoch-3.5 | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| oasst-sft-4-pythia-12b-epoch-3.5 | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| santacoder | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| santacoder | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| bloom | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| bloom | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| flan-t5-xxl | Huggingface | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| flan-t5-xxl | Huggingface | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| code-davinci-002 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| code-davinci-002 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| gpt-3.5-turbo-16k | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| gpt-3.5-turbo-16k | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| gpt-3.5-turbo-16k-0613 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| gpt-3.5-turbo-16k-0613 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| gpt-4-0613 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| gpt-4-0613 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| text-ada-001 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| text-ada-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| text-babbage-001 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| text-babbage-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| text-curie-001 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| text-curie-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| text-davinci-002 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| text-davinci-002 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| text-davinci-003 | OpenAI | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| text-davinci-003 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| llama13b-v2-chat | Replicate | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| llama13b-v2-chat | Replicate | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
-| llama7b-v2-chat | Replicate | g4f.provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |
+| llama7b-v2-chat | Replicate | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) |

## Related gpt4free projects
@@ -342,8 +342,8 @@ __all__ = [
```py
import g4f

-response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.provider.PROVIDERNAME,
-                                     messages=[{"role": "user", "content": "test"}], stream=g4f.provider.PROVIDERNAME.supports_stream)
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.PROVIDERNAME,
+                                     messages=[{"role": "user", "content": "test"}], stream=g4f.Provider.PROVIDERNAME.supports_stream)

for message in response:
    print(message, flush=True, end='')
@@ -351,7 +351,7 @@ for message in response:
## ChatGPT clone

-> Currently implementing new features and trying to scale it, please be patient it may be unstable
+> We are currently implementing new features and trying to scale it, so please be patient as it may be unstable.
> https://chat.g4f.ai/chat
> This site was developed by me and includes **gpt-4/3.5**, **internet access** and **gpt-jailbreaks** like DAN
> Run locally here: https://github.com/xtekky/chatgpt-clone
@@ -384,4 +384,4 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
<a href="https://github.com/xtekky/gpt4free/stargazers">
  <img width="500" alt="Star History Chart" src="https://api.star-history.com/svg?repos=xtekky/gpt4free&type=Date">
</a>

@@ -19,11 +19,12 @@ class Acytoo(BaseProvider):
        **kwargs: Any,
    ) -> CreateResult:
        headers = _create_header()
-        payload = _create_payload(messages)
+        payload = _create_payload(messages, kwargs.get('temperature', 0.5))

        url = "https://chat.acytoo.com/api/completions"
        response = requests.post(url=url, headers=headers, json=payload)
        response.raise_for_status()
+        response.encoding = "utf-8"
        yield response.text
@@ -34,7 +35,7 @@ def _create_header():
    }

-def _create_payload(messages: list[dict[str, str]]):
+def _create_payload(messages: list[dict[str, str]], temperature):
    payload_messages = [
        message | {"createdAt": int(time.time()) * 1000} for message in messages
    ]
@@ -42,6 +43,6 @@ def _create_payload(messages: list[dict[str, str]]):
        "key": "",
        "model": "gpt-3.5-turbo",
        "messages": payload_messages,
-        "temperature": 1,
+        "temperature": temperature,
        "password": "",
    }
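With this change, Acytoo reads `temperature` from the keyword arguments (defaulting to 0.5) instead of hard-coding 1. A minimal sketch of how a caller would pass it through the public API, assuming `ChatCompletion.create` forwards unknown kwargs to the provider as elsewhere in this repo:

```py
import g4f

# Sketch: `temperature` travels through **kwargs into Acytoo's payload;
# omit it to get the new default of 0.5.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.Acytoo,
    messages=[{"role": "user", "content": "Hello"}],
    temperature=0.7,
)
print(response)
```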

@@ -40,9 +40,9 @@ class Aichat(BaseProvider):
        json_data = {
            "message": base,
-            "temperature": 1,
+            "temperature": kwargs.get('temperature', 0.5),
            "presence_penalty": 0,
-            "top_p": 1,
+            "top_p": kwargs.get('top_p', 1),
            "frequency_penalty": 0,
        }
@@ -52,4 +52,6 @@ class Aichat(BaseProvider):
            json=json_data,
        )
        response.raise_for_status()
+        if not response.json()['response']:
+            raise Exception("Error Response: " + str(response.json()))
        yield response.json()["message"]

@@ -3,99 +3,97 @@ import random
import re

import browser_cookie3
-import requests
+from aiohttp import ClientSession
+import asyncio

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Bard(BaseProvider):
    url = "https://bard.google.com"
    needs_auth = True
    working = True

-    @staticmethod
+    @classmethod
    def create_completion(
+        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
+        proxy: str = None,
+        cookies: dict = {},
        **kwargs: Any,
    ) -> CreateResult:
-        psid = {
-            cookie.name: cookie.value
-            for cookie in browser_cookie3.chrome(domain_name=".google.com")
-        }["__Secure-1PSID"]
+        yield asyncio.run(cls.create_async(model, messages, proxy, cookies))
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        cookies: dict = {},
+        **kwargs: Any,
+    ) -> str:
+        if not cookies:
+            for cookie in browser_cookie3.load(domain_name='.google.com'):
+                cookies[cookie.name] = cookie.value

        formatted = "\n".join(
            ["%s: %s" % (message["role"], message["content"]) for message in messages]
        )
        prompt = f"{formatted}\nAssistant:"

-        proxy = kwargs.get("proxy", False)
-        if proxy == False:
-            print(
-                "warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work"
-            )
-
-        snlm0e = None
-        conversation_id = None
-        response_id = None
-        choice_id = None
-
-        client = requests.Session()
-        client.proxies = (
-            {"http": f"http://{proxy}", "https": f"http://{proxy}"} if proxy else {}
-        )
-
-        client.headers = {
-            "authority": "bard.google.com",
-            "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
-            "origin": "https://bard.google.com",
-            "referer": "https://bard.google.com/",
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
-            "x-same-domain": "1",
-            "cookie": f"__Secure-1PSID={psid}",
-        }
-
-        if snlm0e is not None:
-            result = re.search(
-                r"SNlM0e\":\"(.*?)\"", client.get("https://bard.google.com/").text
-            )
-            if result is not None:
-                snlm0e = result.group(1)
-
-        params = {
-            "bl": "boq_assistant-bard-web-server_20230326.21_p0",
-            "_reqid": random.randint(1111, 9999),
-            "rt": "c",
-        }
-
-        data = {
-            "at": snlm0e,
-            "f.req": json.dumps(
-                [
-                    None,
-                    json.dumps(
-                        [[prompt], None, [conversation_id, response_id, choice_id]]
-                    ),
-                ]
-            ),
-        }
-
-        intents = ".".join(["assistant", "lamda", "BardFrontendService"])
-
-        response = client.post(
-            f"https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate",
-            data=data,
-            params=params,
-        )
-        response.raise_for_status()
-
-        chat_data = json.loads(response.content.splitlines()[3])[0][2]
-        if chat_data:
-            json_chat_data = json.loads(chat_data)
-
-            yield json_chat_data[0][0]
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+
+        headers = {
+            'authority': 'bard.google.com',
+            'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+            'origin': 'https://bard.google.com',
+            'referer': 'https://bard.google.com/',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+            'x-same-domain': '1',
+        }
+
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.get(cls.url, proxy=proxy) as response:
+                text = await response.text()
+
+            match = re.search(r'SNlM0e\":\"(.*?)\"', text)
+            if match:
+                snlm0e = match.group(1)
+
+            params = {
+                'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+                '_reqid': random.randint(1111, 9999),
+                'rt': 'c'
+            }
+
+            data = {
+                'at': snlm0e,
+                'f.req': json.dumps([None, json.dumps([[prompt]])])
+            }
+
+            intents = '.'.join([
+                'assistant',
+                'lamda',
+                'BardFrontendService'
+            ])
+
+            async with session.post(
+                f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
+                data=data,
+                params=params,
+                proxy=proxy
+            ) as response:
+                text = await response.text()
+                # the answer is nested inside the third line of the streamed payload
+                chat_data = json.loads(text.splitlines()[3])[0][2]
+                return json.loads(chat_data)[4][0][1][0]

    @classmethod
    @property
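The rewritten Bard provider wraps an aiohttp coroutine behind the synchronous `create_completion` entry point, loads Google cookies from the local browser when none are supplied, and normalizes bare `host:port` proxies. A hedged usage sketch (the cookie value and proxy below are placeholders, not working credentials):

```py
import g4f

# Sketch: cookies default to whatever browser_cookie3 finds for .google.com;
# pass them explicitly on headless machines. Bard returns a single final answer.
response = g4f.ChatCompletion.create(
    model="palm",
    provider=g4f.Provider.Bard,
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
    proxy="127.0.0.1:8080",                       # optional; "http://" is prepended if missing
    cookies={"__Secure-1PSID": "<your cookie>"},  # placeholder value
)
print(response)
```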

@@ -9,8 +9,9 @@ from .base_provider import BaseProvider

class ChatgptLogin(BaseProvider):
-    url = "https://chatgptlogin.ac"
+    url = "https://opchatgpts.net"
    supports_gpt_35_turbo = True
+    working = True

    @staticmethod
    def create_completion(
@ -24,15 +25,15 @@ class ChatgptLogin(BaseProvider):
"accept": "*/*", "accept": "*/*",
"accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"content-type": "application/json", "content-type": "application/json",
"origin": "https://chatgptlogin.ac", "origin": "https://opchatgpts.net",
"referer": "https://chatgptlogin.ac/use-chatgpt-free/", "referer": "https://opchatgpts.net/chatgpt-free-use/",
"sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
"sec-ch-ua-mobile": "?0", "sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"', "sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty", "sec-fetch-dest": "empty",
"sec-fetch-mode": "cors", "sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin", "sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"x-wp-nonce": _get_nonce(), "x-wp-nonce": _get_nonce(),
} }
@ -59,7 +60,7 @@ class ChatgptLogin(BaseProvider):
} }
response = requests.post( response = requests.post(
"https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat", "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
headers=headers, headers=headers,
json=json_data, json=json_data,
) )
@ -81,9 +82,9 @@ class ChatgptLogin(BaseProvider):
def _get_nonce() -> str: def _get_nonce() -> str:
res = requests.get( res = requests.get(
"https://chatgptlogin.ac/use-chatgpt-free/", "https://opchatgpts.net/chatgpt-free-use/",
headers={ headers={
"Referer": "https://chatgptlogin.ac/use-chatgpt-free/", "Referer": "https://opchatgpts.net/chatgpt-free-use/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}, },
) )

@@ -10,6 +10,7 @@ class EasyChat(BaseProvider):
    url = "https://free.easychat.work"
    supports_stream = True
    supports_gpt_35_turbo = True
+    working = True

    @staticmethod
    def create_completion(
@@ -25,6 +26,7 @@ class EasyChat(BaseProvider):
            "https://chat2.fastgpt.me",
            "https://chat3.fastgpt.me",
            "https://chat4.fastgpt.me",
+            "https://gxos1h1ddt.fastgpt.me"
        ]
        server = active_servers[kwargs.get("active_server", 0)]
        headers = {
@@ -34,9 +36,17 @@ class EasyChat(BaseProvider):
            "content-type": "application/json",
            "origin": f"{server}",
            "referer": f"{server}/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-            "x-requested-with": "XMLHttpRequest",
+            'plugins': '0',
+            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'usesearch': 'false',
+            'x-requested-with': 'XMLHttpRequest'
        }

        json_data = {
@@ -57,14 +67,25 @@ class EasyChat(BaseProvider):
            f"{server}/api/openai/v1/chat/completions",
            headers=headers,
            json=json_data,
+            stream=stream,
        )
-
-        response.raise_for_status()
-        print(response.text)
-        for chunk in response.iter_lines():
-            if b"content" in chunk:
-                data = json.loads(chunk.decode().split("data: ")[1])
-                yield data["choices"][0]["delta"]["content"]
+        if response.status_code == 200:
+            if stream == False:
+                json_data = response.json()
+                if "choices" in json_data:
+                    yield json_data["choices"][0]["message"]["content"]
+                else:
+                    raise Exception("No response from server")
+            else:
+                for chunk in response.iter_lines():
+                    if b"content" in chunk:
+                        split_data = chunk.decode().split("data: ")
+                        if len(split_data) > 1:
+                            yield json.loads(split_data[1])["choices"][0]["delta"]["content"]
+        else:
+            raise Exception(f"Error {response.status_code} from server")

    @classmethod
    @property
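EasyChat now honors `stream` end to end: non-streaming responses are read from `choices[0].message.content`, while streaming ones are parsed from the `data:` lines. The mirror server is still chosen by index through the `active_server` kwarg; a brief sketch (index 5 matches the updated test script later in this diff):

```py
import g4f

# Sketch: active_server indexes into EasyChat's mirror list
# (the new gxos1h1ddt.fastgpt.me mirror extends that list).
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.EasyChat,
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
    active_server=5,
)
for chunk in response:
    print(chunk, end="")
```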

@@ -53,6 +53,9 @@ class Equing(ABC):
        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

+        if not stream:
+            yield response.json()["choices"][0]["message"]["content"]
+            return

        for line in response.iter_content(chunk_size=1024):
            if line:

@@ -75,6 +75,8 @@ class H2o(BaseProvider):
            headers=headers,
            json=data,
        )
+        response.raise_for_status()
+        response.encoding = "utf-8"
        generated_text = response.text.replace("\n", "").split("data:")
        generated_text = json.loads(generated_text[-1])

@@ -1,50 +0,0 @@
-import json
-import os
-
-import requests
-
-from g4f.typing import get_type_hints
-
-url = "https://backend.raycast.com/api/v1/ai/chat_completions"
-model = ['gpt-3.5-turbo', 'gpt-4']
-supports_stream = True
-needs_auth = True
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    auth = kwargs.get('auth')
-    headers = {
-        'Accept': 'application/json',
-        'Accept-Language': 'en-US,en;q=0.9',
-        'Authorization': f'Bearer {auth}',
-        'Content-Type': 'application/json',
-        'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
-    }
-    parsed_messages = []
-    for message in messages:
-        parsed_messages.append({
-            'author': message['role'],
-            'content': {'text': message['content']}
-        })
-    data = {
-        "debug": False,
-        "locale": "en-CN",
-        "messages": parsed_messages,
-        "model": model,
-        "provider": "openai",
-        "source": "ai_chat",
-        "system_instruction": "markdown",
-        "temperature": 0.5
-    }
-    response = requests.post(url, headers=headers, json=data, stream=True)
-    for token in response.iter_lines():
-        if b'data: ' not in token:
-            continue
-        completion_chunk = json.loads(token.decode().replace('data: ', ''))
-        token = completion_chunk['text']
-        if token != None:
-            yield token
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

@@ -1,18 +1,17 @@
import json

import requests

-from ..Provider.base_provider import BaseProvider
from ..typing import Any, CreateResult
+from .base_provider import BaseProvider


class Raycast(BaseProvider):
-    url = "https://backend.raycast.com/api/v1/ai/chat_completions"
-    working = True
+    url = "https://raycast.com"
+    # model = ['gpt-3.5-turbo', 'gpt-4']
+    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
-    supports_stream = True
    needs_auth = True
+    working = True

    @staticmethod
    def create_completion(
@@ -21,20 +20,20 @@ class Raycast(BaseProvider):
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
-        auth = kwargs.get("auth")
+        auth = kwargs.get('auth')
        headers = {
-            "Accept": "application/json",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": f"Bearer {auth}",
-            "Content-Type": "application/json",
-            "User-Agent": "Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0",
+            'Accept': 'application/json',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Authorization': f'Bearer {auth}',
+            'Content-Type': 'application/json',
+            'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
        }
-        parsed_messages: list[dict[str, Any]] = []
+        parsed_messages = []
        for message in messages:
-            parsed_messages.append(
-                {"author": message["role"], "content": {"text": message["content"]}}
-            )
+            parsed_messages.append({
+                'author': message['role'],
+                'content': {'text': message['content']}
+            })
        data = {
            "debug": False,
            "locale": "en-CN",
@@ -43,14 +42,28 @@ class Raycast(BaseProvider):
            "provider": "openai",
            "source": "ai_chat",
            "system_instruction": "markdown",
-            "temperature": 0.5,
+            "temperature": 0.5
        }
-        url = "https://backend.raycast.com/api/v1/ai/chat_completions"
-        response = requests.post(url, headers=headers, json=data, stream=True)
+        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
        for token in response.iter_lines():
-            if b"data: " not in token:
+            if b'data: ' not in token:
                continue
-            completion_chunk = json.loads(token.decode().replace("data: ", ""))
-            token = completion_chunk["text"]
+            completion_chunk = json.loads(token.decode().replace('data: ', ''))
+            token = completion_chunk['text']
            if token != None:
                yield token
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("top_p", "int"),
+            ("auth", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -1,16 +1,15 @@
-import json
-
-from curl_cffi import requests
+import json, random, requests
+# from curl_cffi import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Theb(BaseProvider):
    url = "https://theb.ai"
-    working = False
+    working = True
    supports_stream = True
    supports_gpt_35_turbo = True
+    needs_auth = True

    @staticmethod
    def create_completion(
@@ -19,21 +18,79 @@ class Theb(BaseProvider):
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
-        prompt = messages[-1]["content"]
+        conversation = ''
+        for message in messages:
+            conversation += '%s: %s\n' % (message['role'], message['content'])
+        conversation += 'assistant: '
+
+        auth = kwargs.get("auth", {
+            "bearer_token": "free",
+            "org_id": "theb",
+        })
+        bearer_token = auth["bearer_token"]
+        org_id = auth["org_id"]

        headers = {
-            "accept": "application/json, text/plain, */*",
-            "content-type": "application/json",
+            'authority': 'beta.theb.ai',
+            'accept': 'text/event-stream',
+            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+            'authorization': 'Bearer ' + bearer_token,
+            'content-type': 'application/json',
+            'origin': 'https://beta.theb.ai',
+            'referer': 'https://beta.theb.ai/home',
+            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
        }

-        json_data: dict[str, Any] = {"prompt": prompt, "options": {}}
+        # random request id appended to the conversation URL
+        req_rand = random.randint(100000000, 9999999999)
+
+        json_data: dict[str, Any] = {
+            "text": conversation,
+            "category": "04f58f64a4aa4191a957b47290fee864",
+            "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
+            "model_params": {
+                "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
+                "temperature": kwargs.get("temperature", 1),
+                "top_p": kwargs.get("top_p", 1),
+                "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                "presence_penalty": kwargs.get("presence_penalty", 0),
+                "long_term_memory": "auto"
+            }
+        }

        response = requests.post(
-            "https://chatbot.theb.ai/api/chat-process",
+            "https://beta.theb.ai/api/conversation?org_id=" + org_id + "&req_rand=" + str(req_rand),
            headers=headers,
            json=json_data,
-            impersonate="chrome110",
+            stream=True,
        )
        response.raise_for_status()
-
-        line = response.text.splitlines()[-1]
-        text = json.loads(line)["text"]
-        yield text
+        content = ""
+        next_content = ""
+        for chunk in response.iter_lines():
+            if b"content" in chunk:
+                next_content = content
+                data = json.loads(chunk.decode().split("data: ")[1])
+                content = data["content"]
+                # the API streams the full message so far; yield only the new part
+                yield data["content"].replace(next_content, "")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("auth", "dict[str, str]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int")
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -0,0 +1,62 @@
+import uuid, requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class V50(BaseProvider):
+    url = 'https://p5.v50.ltd'
+    supports_gpt_35_turbo = True
+    supports_stream = False
+    needs_auth = False
+    working = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool,
+        **kwargs: Any,
+    ) -> CreateResult:
+        conversation = ''
+        for message in messages:
+            conversation += '%s: %s\n' % (message['role'], message['content'])
+        conversation += 'assistant: '
+
+        payload = {
+            "prompt": conversation,
+            "options": {},
+            "systemMessage": ".",
+            "temperature": kwargs.get("temperature", 0.4),
+            "top_p": kwargs.get("top_p", 0.4),
+            "model": model,
+            "user": str(uuid.uuid4())
+        }
+        headers = {
+            'authority': 'p5.v50.ltd',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+            'content-type': 'application/json',
+            'origin': 'https://p5.v50.ltd',
+            'referer': 'https://p5.v50.ltd/',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
+        }
+        response = requests.post(
+            "https://p5.v50.ltd/api/chat-process",
+            json=payload,
+            headers=headers,
+            proxies=kwargs['proxy'] if 'proxy' in kwargs else {},
+        )
+        yield response.text
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("top_p", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -22,6 +22,7 @@ class Yqcloud(BaseProvider):
        url = "https://api.aichatos.cloud/api/generateStream"
        response = requests.post(url=url, headers=headers, json=payload)
        response.raise_for_status()
+        response.encoding = 'utf-8'
        yield response.text

@@ -25,6 +25,7 @@ from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
+from .V50 import V50
from .Wuguokai import Wuguokai

__all__ = [
@@ -56,4 +57,5 @@ __all__ = [
    "Equing",
    "FastGpt",
-    "Wuguokai"
+    "Wuguokai",
+    "V50",
]

@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f:
    required = f.read().splitlines()

-VERSION = '0.0.2.1'
+VERSION = '0.0.2.3'
DESCRIPTION = (
    "The official gpt4free repository | various collection of powerful language models"
)
@@ -27,6 +27,11 @@ setup(
    long_description=long_description,
    packages=find_packages(),
    install_requires=required,
+    url='https://github.com/xtekky/gpt4free',  # link to the GitHub repository
+    project_urls={
+        'Source Code': 'https://github.com/xtekky/gpt4free',
+        'Bug Tracker': 'https://github.com/xtekky/gpt4free/issues',  # link to the issue tracker
+    },
    keywords=[
        "python",
        "chatbot",

@@ -8,7 +8,7 @@ import g4f
stream = False
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
-    provider=g4f.provider.Ails,
+    provider=g4f.Provider.Ails,
    messages=[{"role": "user", "content": "hello"}],
    stream=stream,
    active_server=5,
)

@@ -24,4 +24,4 @@ def main():
if __name__ == "__main__":
    main()