diff --git a/Docker/Dockerfile b/Docker/Dockerfile
deleted file mode 100644
index a7bb7d0a..00000000
--- a/Docker/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM python:3.10-slim
-
-RUN apt-get update && apt-get install -y git
-
-RUN git clone https://github.com/xtekky/gpt4free.git
-WORKDIR /gpt4free
-RUN pip install --no-cache-dir -r requirements.txt
-RUN cp gui/streamlit_app.py .
-
-EXPOSE 8501
-
-CMD ["streamlit", "run", "streamlit_app.py"]
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..5ed902fb
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.10
+
+RUN apt-get update && apt-get install -y git
+
+RUN mkdir -p /usr/src/gpt4free
+WORKDIR /usr/src/gpt4free
+
+# RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/
+# RUN pip config set global.trusted-host mirrors.aliyun.com
+
+COPY requirements.txt /usr/src/gpt4free/
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . /usr/src/gpt4free
+RUN cp gui/streamlit_app.py .
+
+EXPOSE 8501
+
+CMD ["streamlit", "run", "streamlit_app.py"]
diff --git a/README.md b/README.md
index 2f65b242..9f42aa80 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,6 @@ Till the long bitter end, will this boy live to fight.
_____________________________
-# GPT4free - use ChatGPT, for free!!
##### You may join our discord server for updates and support ; )
- [Discord Link](https://discord.gg/gpt4free)
@@ -92,10 +91,10 @@ Please note the following:
| **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - |
| **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - |
| **Usage Examples** | | | |
-| `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](openai_rev/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
-| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | ||
-| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
-| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
+| `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
+| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | ||
+| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
+| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| **Try it Out** | | | |
| Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
| replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - |
@@ -127,10 +126,10 @@ Please note the following:
## Best sites
#### gpt-4
-- [`/forefront`](./forefront/README.md)
+- [`/forefront`](gpt4free/forefront/README.md)
#### gpt-3.5
-- [`/you`](./you/README.md)
+- [`/you`](gpt4free/you/README.md)
## Install
Download or clone this GitHub repo
diff --git a/cocalc/__init__.py b/cocalc/__init__.py
deleted file mode 100644
index 5a32274c..00000000
--- a/cocalc/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import requests
-
-class Completion:
- @staticmethod
- def create(prompt:str, cookieInput:str) -> str:
- # Initialize a session with custom headers
- session = Completion._initialize_session(cookieInput)
-
- # Set the data that will be submitted
- payload = Completion._create_payload(prompt, ("ASSUME I HAVE FULL ACCESS TO COCALC. "))
-
- # Submit the request and return the results
- return Completion._submit_request(session, payload)
-
- @classmethod
- def _initialize_session(cls, conversationCookie) -> requests.Session:
- """Initialize a session with custom headers for the request."""
-
- session = requests.Session()
- headers = {
- 'Accept': '*/*',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Origin': 'https://cocalc.com',
- 'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
- 'Cookie': conversationCookie,
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
- }
- session.headers.update(headers)
-
- return session
-
- @classmethod
- def _create_payload(
- cls,
- prompt: str,
- system_prompt: str
- ) -> dict:
-
- return {
- "input": prompt,
- "system": system_prompt,
- "tag": "next:index"
- }
-
- @classmethod
- def _submit_request(
- cls,
- session: requests.Session,
- payload: dict
- ) -> str:
-
- response = session.post(
- "https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
- return {
- "response":response["output"],
- "success":response["success"]
- }
\ No newline at end of file
diff --git a/forefront/README.md b/forefront/README.md
deleted file mode 100644
index a2be8187..00000000
--- a/forefront/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-### Example: `forefront` (use like openai pypi package)
-
-```python
-import forefront
-
-# create an account
-token = forefront.Account.create(logging=False)
-print(token)
-
-# get a response
-for response in forefront.StreamingCompletion.create(token = token,
- prompt = 'hello world', model='gpt-4'):
-
- print(response.completion.choices[0].text, end = '')
-print("")
-```
diff --git a/forefront/__init__.py b/forefront/__init__.py
deleted file mode 100644
index 25d05448..00000000
--- a/forefront/__init__.py
+++ /dev/null
@@ -1,154 +0,0 @@
-from json import loads
-from re import match
-from time import time, sleep
-from uuid import uuid4
-
-from requests import post
-from tls_client import Session
-
-from forefront.mail import Mail
-from forefront.typing import ForeFrontResponse
-
-
-class Account:
- @staticmethod
- def create(proxy=None, logging=False):
-
- proxies = {
- 'http': 'http://' + proxy,
- 'https': 'http://' + proxy} if proxy else False
-
- start = time()
-
- mail = Mail(proxies)
- mail_token = None
- mail_adress = mail.get_mail()
-
- # print(mail_adress)
-
- client = Session(client_identifier='chrome110')
- client.proxies = proxies
- client.headers = {
- "origin": "https://accounts.forefront.ai",
- "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
- }
-
- response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
- data={
- "email_address": mail_adress
- }
- )
- try:
- trace_token = response.json()['response']['id']
- if logging: print(trace_token)
- except KeyError:
- return 'Failed to create account!'
-
- response = client.post(
- f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
- data={
- "strategy": "email_code",
- }
- )
-
- if logging: print(response.text)
-
- if not 'sign_up_attempt' in response.text:
- return 'Failed to create account!'
-
- while True:
- sleep(1)
- for _ in mail.fetch_inbox():
- if logging: print(mail.get_message_content(_["id"]))
- mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)
-
- if mail_token:
- break
-
- if logging: print(mail_token)
-
- response = client.post(
- f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
- data={
- 'code': mail_token,
- 'strategy': 'email_code'
- })
-
- if logging: print(response.json())
-
- token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
-
- with open('accounts.txt', 'a') as f:
- f.write(f'{mail_adress}:{token}\n')
-
- if logging: print(time() - start)
-
- return token
-
-
-class StreamingCompletion:
- @staticmethod
- def create(
- token=None,
- chatId=None,
- prompt='',
- actionType='new',
- defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
- model='gpt-4') -> ForeFrontResponse:
-
- if not token: raise Exception('Token is required!')
- if not chatId: chatId = str(uuid4())
-
- headers = {
- 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': 'Bearer ' + token,
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://chat.forefront.ai',
- 'pragma': 'no-cache',
- 'referer': 'https://chat.forefront.ai/',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'text': prompt,
- 'action': actionType,
- 'parentId': chatId,
- 'workspaceId': chatId,
- 'messagePersona': defaultPersona,
- 'model': model
- }
-
- for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
- headers=headers, json=json_data, stream=True).iter_lines():
-
- if b'finish_reason":null' in chunk:
- data = loads(chunk.decode('utf-8').split('data: ')[1])
- token = data['choices'][0]['delta'].get('content')
-
- if token != None:
- yield ForeFrontResponse({
- 'id': chatId,
- 'object': 'text_completion',
- 'created': int(time()),
- 'model': model,
- 'choices': [{
- 'text': token,
- 'index': 0,
- 'logprobs': None,
- 'finish_reason': 'stop'
- }],
- 'usage': {
- 'prompt_tokens': len(prompt),
- 'completion_tokens': len(token),
- 'total_tokens': len(prompt) + len(token)
- }
- })
diff --git a/forefront/typing.py b/forefront/typing.py
deleted file mode 100644
index a11ac49f..00000000
--- a/forefront/typing.py
+++ /dev/null
@@ -1,36 +0,0 @@
-class ForeFrontResponse:
- class Completion:
- class Choices:
- def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
- def __repr__(self) -> str:
- return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
-
- def __init__(self, choices: dict) -> None:
- self.choices = [self.Choices(choice) for choice in choices]
-
- class Usage:
- def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_tokens']
- self.completion_tokens = usage_dict['completion_tokens']
- self.total_tokens = usage_dict['total_tokens']
-
- def __repr__(self):
- return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
- def __init__(self, response_dict: dict) -> None:
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
-
- def json(self) -> dict:
- return self.response_dict
diff --git a/gpt4free/README.md b/gpt4free/README.md
new file mode 100644
index 00000000..23f81787
--- /dev/null
+++ b/gpt4free/README.md
@@ -0,0 +1,116 @@
+# gpt4free package
+
+### What is it?
+
+gpt4free is a Python package that provides access to a variety of language-model APIs
+
+### Main Features
+
+- It's free to use
+- Easy access
+
+### Installation:
+
+```bash
+pip install gpt4free
+```
+
+#### Usage:
+
+```python
+import gpt4free
+import gpt4free
+from gpt4free import Provider, quora, forefront
+
+# usage You
+response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
+print(response)
+
+# usage Poe
+token = quora.Account.create(logging=False)
+response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
+print(response)
+
+# usage forefront
+token = forefront.Account.create(logging=False)
+response = gpt4free.Completion.create(
+ Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
+)
+print(response)
+print(f'END')
+
+# usage theb
+response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
+print(response)
+
+# usage cocalc
+response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
+print(response)
+
+```
+
+### Invocation Arguments
+
+`gpt4free.Completion.create()` method has two required arguments
+
+1. Provider: This is an enum representing the different providers
+2. prompt: This is the user input
+
+#### Keyword Arguments
+
+Some of the keyword arguments are optional, while others are required.
+
+- You:
+ - `safe_search`: boolean - default value is `False`
+ - `include_links`: boolean - default value is `False`
+ - `detailed`: boolean - default value is `False`
+- Quora:
+ - `token`: str - this needs to be provided by the user
+ - `model`: str - default value is `gpt-4`.
+
+ (Available models: `['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']`)
+- ForeFront:
+  - `token`: str - this needs to be provided by the user
+
+- Theb:
+ (no keyword arguments required)
+- CoCalc:
+  - `cookie_input`: str - this needs to be provided by the user
+
+#### Token generation of quora
+```python
+from gpt4free import quora
+
+token = quora.Account.create(logging=False)
+```
+
+### Token generation of ForeFront
+```python
+from gpt4free import forefront
+
+token = forefront.Account.create(logging=False)
+```
+
+## Copyright:
+
+This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
+
+### Copyright Notice:
+
+```
+xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry.
+Copyright (C) 2023 xtekky
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+```
diff --git a/gpt4free/__init__.py b/gpt4free/__init__.py
new file mode 100644
index 00000000..5336c825
--- /dev/null
+++ b/gpt4free/__init__.py
@@ -0,0 +1,64 @@
+from enum import Enum
+
+from gpt4free import cocalc
+from gpt4free import forefront
+from gpt4free import quora
+from gpt4free import theb
+from gpt4free import you
+
+
+class Provider(Enum):
+ """An enum representing different providers."""
+
+ You = 'you'
+ Poe = 'poe'
+ ForeFront = 'fore_front'
+ Theb = 'theb'
+ CoCalc = 'cocalc'
+
+
+class Completion:
+ """This class will be used for invoking the given provider"""
+
+ @staticmethod
+ def create(provider: Provider, prompt: str, **kwargs) -> str:
+ """
+ Invokes the given provider with given prompt and addition arguments and returns the string response
+
+ :param provider: an enum representing the provider to use while invoking
+ :param prompt: input provided by the user
+ :param kwargs: Additional keyword arguments to pass to the provider while invoking
+ :return: A string representing the response from the provider
+ """
+ if provider == Provider.Poe:
+ return Completion.__poe_service(prompt, **kwargs)
+ elif provider == Provider.You:
+ return Completion.__you_service(prompt, **kwargs)
+ elif provider == Provider.ForeFront:
+ return Completion.__fore_front_service(prompt, **kwargs)
+ elif provider == Provider.Theb:
+ return Completion.__theb_service(prompt, **kwargs)
+ elif provider == Provider.CoCalc:
+ return Completion.__cocalc_service(prompt, **kwargs)
+ else:
+ raise Exception('Provider not exist, Please try again')
+
+ @staticmethod
+ def __you_service(prompt: str, **kwargs) -> str:
+ return you.Completion.create(prompt, **kwargs).text
+
+ @staticmethod
+ def __poe_service(prompt: str, **kwargs) -> str:
+ return quora.Completion.create(prompt=prompt, **kwargs).text
+
+ @staticmethod
+ def __fore_front_service(prompt: str, **kwargs) -> str:
+ return forefront.Completion.create(prompt=prompt, **kwargs).text
+
+ @staticmethod
+ def __theb_service(prompt: str, **kwargs):
+ return ''.join(theb.Completion.create(prompt=prompt))
+
+ @staticmethod
+ def __cocalc_service(prompt: str, **kwargs):
+ return cocalc.Completion.create(prompt, cookie_input=kwargs.get('cookie_input', '')).text
diff --git a/gpt4free/cocalc/__init__.py b/gpt4free/cocalc/__init__.py
new file mode 100644
index 00000000..372f29a4
--- /dev/null
+++ b/gpt4free/cocalc/__init__.py
@@ -0,0 +1,47 @@
+import requests
+from fake_useragent import UserAgent
+from pydantic import BaseModel
+
+
+class CoCalcResponse(BaseModel):
+ text: str
+ status: bool
+
+
+class Completion:
+ @staticmethod
+ def create(prompt: str, cookie_input: str) -> CoCalcResponse:
+ # Initialize a session with custom headers
+ session = Completion._initialize_session(cookie_input)
+
+ # Set the data that will be submitted
+ payload = Completion._create_payload(prompt, 'ASSUME I HAVE FULL ACCESS TO COCALC. ')
+
+ # Submit the request and return the results
+ return Completion._submit_request(session, payload)
+
+ @classmethod
+ def _initialize_session(cls, conversation_cookie) -> requests.Session:
+ """Initialize a session with custom headers for the request."""
+
+ session = requests.Session()
+ headers = {
+ 'Accept': '*/*',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Origin': 'https://cocalc.com',
+ 'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
+ 'Cookie': conversation_cookie,
+ 'User-Agent': UserAgent().random,
+ }
+ session.headers.update(headers)
+
+ return session
+
+ @staticmethod
+ def _create_payload(prompt: str, system_prompt: str) -> dict:
+ return {'input': prompt, 'system': system_prompt, 'tag': 'next:index'}
+
+ @staticmethod
+ def _submit_request(session: requests.Session, payload: dict) -> CoCalcResponse:
+ response = session.post('https://cocalc.com/api/v2/openai/chatgpt', json=payload).json()
+ return CoCalcResponse(text=response['output'], status=response['success'])
diff --git a/cocalc/readme.md b/gpt4free/cocalc/readme.md
similarity index 77%
rename from cocalc/readme.md
rename to gpt4free/cocalc/readme.md
index 04095339..f0911155 100644
--- a/cocalc/readme.md
+++ b/gpt4free/cocalc/readme.md
@@ -1,11 +1,10 @@
### Example: `cocalc`
-
```python
# import library
-import cocalc
+from gpt4free import cocalc
-cocalc.Completion.create(prompt="How are you!", cookieInput="cookieinput") ## Tutorial
+cocalc.Completion.create(prompt="How are you!", cookie_input="cookieinput") ## Tutorial
```
### How to grab cookie input
diff --git a/gpt4free/forefront/README.md b/gpt4free/forefront/README.md
new file mode 100644
index 00000000..3d0aac4d
--- /dev/null
+++ b/gpt4free/forefront/README.md
@@ -0,0 +1,16 @@
+### Example: `forefront` (use like openai pypi package)
+
+```python
+
+from gpt4free import forefront
+
+# create an account
+token = forefront.Account.create(logging=False)
+print(token)
+
+# get a response
+for response in forefront.StreamingCompletion.create(token=token,
+ prompt='hello world', model='gpt-4'):
+ print(response.completion.choices[0].text, end='')
+print("")
+```
diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py
new file mode 100644
index 00000000..f0ca1a15
--- /dev/null
+++ b/gpt4free/forefront/__init__.py
@@ -0,0 +1,192 @@
+from json import loads
+from re import match
+from time import time, sleep
+from typing import Generator, Optional
+from uuid import uuid4
+
+from fake_useragent import UserAgent
+from requests import post
+from tls_client import Session
+
+from .mail import Mail
+from .typing import ForeFrontResponse
+
+
+class Account:
+ @staticmethod
+ def create(proxy: Optional[str] = None, logging: bool = False):
+ proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
+
+ start = time()
+
+ mail_client = Mail(proxies)
+ mail_token = None
+ mail_address = mail_client.get_mail()
+
+ # print(mail_address)
+
+ client = Session(client_identifier='chrome110')
+ client.proxies = proxies
+ client.headers = {
+ 'origin': 'https://accounts.forefront.ai',
+ 'user-agent': UserAgent().random,
+ }
+
+ response = client.post(
+ 'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
+ data={'email_address': mail_address},
+ )
+
+ try:
+ trace_token = response.json()['response']['id']
+ if logging:
+ print(trace_token)
+ except KeyError:
+ return 'Failed to create account!'
+
+ response = client.post(
+ f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6',
+ data={
+ 'strategy': 'email_code',
+ },
+ )
+
+ if logging:
+ print(response.text)
+
+ if 'sign_up_attempt' not in response.text:
+ return 'Failed to create account!'
+
+ while True:
+ sleep(1)
+ for _ in mail_client.fetch_inbox():
+ if logging:
+ print(mail_client.get_message_content(_['id']))
+ mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)
+
+ if mail_token:
+ break
+
+ if logging:
+ print(mail_token)
+
+ response = client.post(
+ f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
+ data={'code': mail_token, 'strategy': 'email_code'},
+ )
+
+ if logging:
+ print(response.json())
+
+ token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
+
+ with open('accounts.txt', 'a') as f:
+ f.write(f'{mail_address}:{token}\n')
+
+ if logging:
+ print(time() - start)
+
+ return token
+
+
+class StreamingCompletion:
+ @staticmethod
+ def create(
+ token=None,
+ chat_id=None,
+ prompt='',
+ action_type='new',
+ default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
+ model='gpt-4',
+ ) -> Generator[ForeFrontResponse, None, None]:
+ if not token:
+ raise Exception('Token is required!')
+ if not chat_id:
+ chat_id = str(uuid4())
+
+ headers = {
+ 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': 'Bearer ' + token,
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.forefront.ai',
+ 'pragma': 'no-cache',
+ 'referer': 'https://chat.forefront.ai/',
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': UserAgent().random,
+ }
+
+ json_data = {
+ 'text': prompt,
+ 'action': action_type,
+ 'parentId': chat_id,
+ 'workspaceId': chat_id,
+ 'messagePersona': default_persona,
+ 'model': model,
+ }
+
+ for chunk in post(
+ 'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
+ headers=headers,
+ json=json_data,
+ stream=True,
+ ).iter_lines():
+ if b'finish_reason":null' in chunk:
+ data = loads(chunk.decode('utf-8').split('data: ')[1])
+ token = data['choices'][0]['delta'].get('content')
+
+ if token is not None:
+ yield ForeFrontResponse(
+ **{
+ 'id': chat_id,
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'text': token,
+ 'model': model,
+ 'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
+ 'usage': {
+ 'prompt_tokens': len(prompt),
+ 'completion_tokens': len(token),
+ 'total_tokens': len(prompt) + len(token),
+ },
+ }
+ )
+
+
+class Completion:
+ @staticmethod
+ def create(
+ token=None,
+ chat_id=None,
+ prompt='',
+ action_type='new',
+ default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
+ model='gpt-4',
+ ) -> ForeFrontResponse:
+ text = ''
+ final_response = None
+ for response in StreamingCompletion.create(
+ token=token,
+ chat_id=chat_id,
+ prompt=prompt,
+ action_type=action_type,
+ default_persona=default_persona,
+ model=model,
+ ):
+ if response:
+ final_response = response
+ text += response.text
+
+ if final_response:
+ final_response.text = text
+ else:
+ raise Exception('Unable to get the response, Please try again')
+
+ return final_response
diff --git a/forefront/mail.py b/gpt4free/forefront/mail.py
similarity index 85%
rename from forefront/mail.py
rename to gpt4free/forefront/mail.py
index 41c2a647..2c00051c 100644
--- a/forefront/mail.py
+++ b/gpt4free/forefront/mail.py
@@ -23,21 +23,17 @@ class Mail:
"sec-fetch-dest": "empty",
"referer": "https://mail.tm/",
"accept-encoding": "gzip, deflate, br",
- "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
+ "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
}
def get_mail(self) -> str:
token = ''.join(choices(ascii_letters, k=14)).lower()
- init = self.client.post("https://api.mail.tm/accounts", json={
- "address": f"{token}@bugfoo.com",
- "password": token
- })
+ init = self.client.post(
+ "https://api.mail.tm/accounts", json={"address": f"{token}@bugfoo.com", "password": token}
+ )
if init.status_code == 201:
- resp = self.client.post("https://api.mail.tm/token", json={
- **init.json(),
- "password": token
- })
+ resp = self.client.post("https://api.mail.tm/token", json={**init.json(), "password": token})
self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']
diff --git a/gpt4free/forefront/typing.py b/gpt4free/forefront/typing.py
new file mode 100644
index 00000000..23e90903
--- /dev/null
+++ b/gpt4free/forefront/typing.py
@@ -0,0 +1,26 @@
+from typing import Any, List
+
+from pydantic import BaseModel
+
+
+class Choice(BaseModel):
+ text: str
+ index: int
+ logprobs: Any
+ finish_reason: str
+
+
+class Usage(BaseModel):
+ prompt_tokens: int
+ completion_tokens: int
+ total_tokens: int
+
+
+class ForeFrontResponse(BaseModel):
+ id: str
+ object: str
+ created: int
+ model: str
+ choices: List[Choice]
+ usage: Usage
+ text: str
diff --git a/quora/README.md b/gpt4free/quora/README.md
similarity index 79%
rename from quora/README.md
rename to gpt4free/quora/README.md
index 24679277..9c652c59 100644
--- a/quora/README.md
+++ b/gpt4free/quora/README.md
@@ -21,26 +21,25 @@ models = {
```python
# import quora (poe) package
-import quora
+from gpt4free import quora
# create account
# make sure to set enable_bot_creation to True
-token = quora.Account.create(logging = True, enable_bot_creation=True)
+token = quora.Account.create(logging=True, enable_bot_creation=True)
model = quora.Model.create(
- token = token,
- model = 'gpt-3.5-turbo', # or claude-instant-v1.0
- system_prompt = 'you are ChatGPT a large language model ...'
+ token=token,
+ model='gpt-3.5-turbo', # or claude-instant-v1.0
+ system_prompt='you are ChatGPT a large language model ...'
)
-print(model.name) # gptx....
+print(model.name) # gptx....
# streaming response
for response in quora.StreamingCompletion.create(
- custom_model = model.name,
- prompt ='hello world',
- token = token):
-
+ custom_model=model.name,
+ prompt='hello world',
+ token=token):
print(response.completion.choices[0].text)
```
@@ -56,7 +55,7 @@ print(response.completion.choices[0].text)
### Update Use This For Poe
```python
-from quora import Poe
+from gpt4free.quora import Poe
# available models: ['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']
diff --git a/quora/__init__.py b/gpt4free/quora/__init__.py
similarity index 82%
rename from quora/__init__.py
rename to gpt4free/quora/__init__.py
index cd5ec8f9..f548ff41 100644
--- a/quora/__init__.py
+++ b/gpt4free/quora/__init__.py
@@ -6,11 +6,12 @@ from pathlib import Path
from random import choice, choices, randint
from re import search, findall
from string import ascii_letters, digits
-from typing import Optional, Union
+from typing import Optional, Union, List, Any, Generator
from urllib.parse import unquote
import selenium.webdriver.support.expected_conditions as EC
from fake_useragent import UserAgent
+from pydantic import BaseModel
from pypasser import reCaptchaV3
from requests import Session
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
@@ -18,8 +19,8 @@ from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from tls_client import Session as TLS
-from quora.api import Client as PoeClient
-from quora.mail import Emailnator
+from .api import Client as PoeClient
+from .mail import Emailnator
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
@@ -67,42 +68,27 @@ def extract_formkey(html):
return formkey
-class PoeResponse:
- class Completion:
- class Choices:
- def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
+class Choice(BaseModel):
+ text: str
+ index: int
+ logprobs: Any
+ finish_reason: str
- def __repr__(self) -> str:
- return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
- def __init__(self, choices: dict) -> None:
- self.choices = [self.Choices(choice) for choice in choices]
+class Usage(BaseModel):
+ prompt_tokens: int
+ completion_tokens: int
+ total_tokens: int
- class Usage:
- def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_tokens']
- self.completion_tokens = usage_dict['completion_tokens']
- self.total_tokens = usage_dict['total_tokens']
- def __repr__(self):
- return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
- def __init__(self, response_dict: dict) -> None:
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
-
- def json(self) -> dict:
- return self.response_dict
+class PoeResponse(BaseModel):
+ id: int
+ object: str
+ created: int
+ model: str
+ choices: List[Choice]
+ usage: Usage
+ text: str
class ModelResponse:
@@ -116,18 +102,12 @@ class ModelResponse:
class Model:
@staticmethod
def create(
- token: str,
- model: str = 'gpt-3.5-turbo', # claude-instant
- system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
- description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
- handle: str = None,
+ token: str,
+ model: str = 'gpt-3.5-turbo', # claude-instant
+ system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
+ description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
+ handle: str = None,
) -> ModelResponse:
- models = {
- 'gpt-3.5-turbo': 'chinchilla',
- 'claude-instant-v1.0': 'a2',
- 'gpt-4': 'beaver',
- }
-
if not handle:
handle = f'gptx{randint(1111111, 9999999)}'
@@ -162,7 +142,7 @@ class Model:
obj={
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
'variables': {
- 'model': models[model],
+ 'model': MODELS[model],
'handle': handle,
'prompt': system_prompt,
'isPromptPublic': True,
@@ -202,9 +182,9 @@ class Model:
class Account:
@staticmethod
def create(
- proxy: Optional[str] = None,
- logging: bool = False,
- enable_bot_creation: bool = False,
+ proxy: Optional[str] = None,
+ logging: bool = False,
+ enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@@ -309,22 +289,23 @@ class Account:
class StreamingCompletion:
@staticmethod
def create(
- model: str = 'gpt-4',
- custom_model: bool = None,
- prompt: str = 'hello world',
- token: str = '',
- ):
+ model: str = 'gpt-4',
+ custom_model: bool = None,
+ prompt: str = 'hello world',
+ token: str = '',
+ ) -> Generator[PoeResponse, None, None]:
_model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token)
for chunk in client.send_message(_model, prompt):
yield PoeResponse(
- {
+ **{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
+ 'text': chunk['text_new'],
'choices': [
{
'text': chunk['text_new'],
@@ -343,33 +324,28 @@ class StreamingCompletion:
class Completion:
+ @staticmethod
def create(
- model: str = 'gpt-4',
- custom_model: str = None,
- prompt: str = 'hello world',
- token: str = '',
- ):
- models = {
- 'sage': 'capybara',
- 'gpt-4': 'beaver',
- 'claude-v1.2': 'a2_2',
- 'claude-instant-v1.0': 'a2',
- 'gpt-3.5-turbo': 'chinchilla',
- }
-
- _model = models[model] if not custom_model else custom_model
+ model: str = 'gpt-4',
+ custom_model: str = None,
+ prompt: str = 'hello world',
+ token: str = '',
+ ) -> PoeResponse:
+ _model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token)
- for chunk in client.send_message(_model, prompt):
- pass
+ chunk = None
+ for response in client.send_message(_model, prompt):
+ chunk = response
return PoeResponse(
- {
+ **{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
+ 'text': chunk['text'],
'choices': [
{
'text': chunk['text'],
@@ -389,22 +365,22 @@ class Completion:
class Poe:
def __init__(
- self,
- model: str = 'ChatGPT',
- driver: str = 'firefox',
- download_driver: bool = False,
- driver_path: Optional[str] = None,
- cookie_path: str = './quora/cookie.json',
+ self,
+ model: str = 'ChatGPT',
+ driver: str = 'firefox',
+ download_driver: bool = False,
+ driver_path: Optional[str] = None,
+ cookie_path: str = './quora/cookie.json',
):
# validating the model
if model and model not in MODELS:
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
self.model = MODELS[model]
self.cookie_path = cookie_path
- self.cookie = self.__load_cookie(driver, download_driver, driver_path=driver_path)
+ self.cookie = self.__load_cookie(driver, driver_path=driver_path)
self.client = PoeClient(self.cookie)
- def __load_cookie(self, driver: str, download_driver: bool, driver_path: Optional[str] = None) -> str:
+ def __load_cookie(self, driver: str, driver_path: Optional[str] = None) -> str:
if (cookie_file := Path(self.cookie_path)).exists():
with cookie_file.open() as fp:
cookie = json.load(fp)
@@ -451,8 +427,8 @@ class Poe:
driver.close()
return cookie
- @classmethod
- def __resolve_driver(cls, driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
+ @staticmethod
+ def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
options.add_argument('-headless')
@@ -473,12 +449,12 @@ class Poe:
return response
def create_bot(
- self,
- name: str,
- /,
- prompt: str = '',
- base_model: str = 'ChatGPT',
- description: str = '',
+ self,
+ name: str,
+ /,
+ prompt: str = '',
+ base_model: str = 'ChatGPT',
+ description: str = '',
) -> None:
if base_model not in MODELS:
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
diff --git a/quora/api.py b/gpt4free/quora/api.py
similarity index 94%
rename from quora/api.py
rename to gpt4free/quora/api.py
index 697f6663..897215a8 100644
--- a/quora/api.py
+++ b/gpt4free/quora/api.py
@@ -225,7 +225,7 @@ class Client:
r = request_with_retries(self.session.post, self.gql_url, data=payload, headers=headers)
data = r.json()
- if data["data"] == None:
+ if data["data"] is None:
logger.warn(f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i + 1}/20)')
time.sleep(2)
continue
@@ -316,7 +316,7 @@ class Client:
return
# indicate that the response id is tied to the human message id
- elif key != "pending" and value == None and message["state"] != "complete":
+ elif key != "pending" and value is None and message["state"] != "complete":
self.active_messages[key] = message["messageId"]
self.message_queues[key].put(message)
return
@@ -384,7 +384,7 @@ class Client:
continue
# update info about response
- message["text_new"] = message["text"][len(last_text):]
+ message["text_new"] = message["text"][len(last_text) :]
last_text = message["text"]
message_id = message["messageId"]
@@ -402,7 +402,7 @@ class Client:
logger.info(f"Downloading {count} messages from {chatbot}")
messages = []
- if cursor == None:
+ if cursor is None:
chat_data = self.get_bot(self.bot_names[chatbot])
if not chat_data["messagesConnection"]["edges"]:
return []
@@ -456,21 +456,21 @@ class Client:
logger.info(f"No more messages left to delete.")
def create_bot(
- self,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_bot=False,
- api_url=None,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
- private=False,
+ self,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_bot=False,
+ api_url=None,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
+ private=False,
):
result = self.send_query(
"PoeBotCreateMutation",
@@ -499,21 +499,21 @@ class Client:
return data
def edit_bot(
- self,
- bot_id,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_url=None,
- private=False,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
+ self,
+ bot_id,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_url=None,
+ private=False,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
):
result = self.send_query(
"PoeBotEditMutation",
diff --git a/quora/cookies.txt b/gpt4free/quora/cookies.txt
similarity index 100%
rename from quora/cookies.txt
rename to gpt4free/quora/cookies.txt
diff --git a/quora/graphql/AddHumanMessageMutation.graphql b/gpt4free/quora/graphql/AddHumanMessageMutation.graphql
similarity index 100%
rename from quora/graphql/AddHumanMessageMutation.graphql
rename to gpt4free/quora/graphql/AddHumanMessageMutation.graphql
diff --git a/quora/graphql/AddMessageBreakMutation.graphql b/gpt4free/quora/graphql/AddMessageBreakMutation.graphql
similarity index 100%
rename from quora/graphql/AddMessageBreakMutation.graphql
rename to gpt4free/quora/graphql/AddMessageBreakMutation.graphql
diff --git a/quora/graphql/AutoSubscriptionMutation.graphql b/gpt4free/quora/graphql/AutoSubscriptionMutation.graphql
similarity index 100%
rename from quora/graphql/AutoSubscriptionMutation.graphql
rename to gpt4free/quora/graphql/AutoSubscriptionMutation.graphql
diff --git a/quora/graphql/BioFragment.graphql b/gpt4free/quora/graphql/BioFragment.graphql
similarity index 100%
rename from quora/graphql/BioFragment.graphql
rename to gpt4free/quora/graphql/BioFragment.graphql
diff --git a/quora/graphql/ChatAddedSubscription.graphql b/gpt4free/quora/graphql/ChatAddedSubscription.graphql
similarity index 100%
rename from quora/graphql/ChatAddedSubscription.graphql
rename to gpt4free/quora/graphql/ChatAddedSubscription.graphql
diff --git a/quora/graphql/ChatFragment.graphql b/gpt4free/quora/graphql/ChatFragment.graphql
similarity index 100%
rename from quora/graphql/ChatFragment.graphql
rename to gpt4free/quora/graphql/ChatFragment.graphql
diff --git a/quora/graphql/ChatListPaginationQuery.graphql b/gpt4free/quora/graphql/ChatListPaginationQuery.graphql
similarity index 100%
rename from quora/graphql/ChatListPaginationQuery.graphql
rename to gpt4free/quora/graphql/ChatListPaginationQuery.graphql
diff --git a/quora/graphql/ChatPaginationQuery.graphql b/gpt4free/quora/graphql/ChatPaginationQuery.graphql
similarity index 100%
rename from quora/graphql/ChatPaginationQuery.graphql
rename to gpt4free/quora/graphql/ChatPaginationQuery.graphql
diff --git a/quora/graphql/ChatViewQuery.graphql b/gpt4free/quora/graphql/ChatViewQuery.graphql
similarity index 100%
rename from quora/graphql/ChatViewQuery.graphql
rename to gpt4free/quora/graphql/ChatViewQuery.graphql
diff --git a/quora/graphql/DeleteHumanMessagesMutation.graphql b/gpt4free/quora/graphql/DeleteHumanMessagesMutation.graphql
similarity index 100%
rename from quora/graphql/DeleteHumanMessagesMutation.graphql
rename to gpt4free/quora/graphql/DeleteHumanMessagesMutation.graphql
diff --git a/quora/graphql/DeleteMessageMutation.graphql b/gpt4free/quora/graphql/DeleteMessageMutation.graphql
similarity index 100%
rename from quora/graphql/DeleteMessageMutation.graphql
rename to gpt4free/quora/graphql/DeleteMessageMutation.graphql
diff --git a/quora/graphql/HandleFragment.graphql b/gpt4free/quora/graphql/HandleFragment.graphql
similarity index 100%
rename from quora/graphql/HandleFragment.graphql
rename to gpt4free/quora/graphql/HandleFragment.graphql
diff --git a/quora/graphql/LoginWithVerificationCodeMutation.graphql b/gpt4free/quora/graphql/LoginWithVerificationCodeMutation.graphql
similarity index 100%
rename from quora/graphql/LoginWithVerificationCodeMutation.graphql
rename to gpt4free/quora/graphql/LoginWithVerificationCodeMutation.graphql
diff --git a/quora/graphql/MessageAddedSubscription.graphql b/gpt4free/quora/graphql/MessageAddedSubscription.graphql
similarity index 100%
rename from quora/graphql/MessageAddedSubscription.graphql
rename to gpt4free/quora/graphql/MessageAddedSubscription.graphql
diff --git a/quora/graphql/MessageDeletedSubscription.graphql b/gpt4free/quora/graphql/MessageDeletedSubscription.graphql
similarity index 100%
rename from quora/graphql/MessageDeletedSubscription.graphql
rename to gpt4free/quora/graphql/MessageDeletedSubscription.graphql
diff --git a/quora/graphql/MessageFragment.graphql b/gpt4free/quora/graphql/MessageFragment.graphql
similarity index 100%
rename from quora/graphql/MessageFragment.graphql
rename to gpt4free/quora/graphql/MessageFragment.graphql
diff --git a/quora/graphql/MessageRemoveVoteMutation.graphql b/gpt4free/quora/graphql/MessageRemoveVoteMutation.graphql
similarity index 100%
rename from quora/graphql/MessageRemoveVoteMutation.graphql
rename to gpt4free/quora/graphql/MessageRemoveVoteMutation.graphql
diff --git a/quora/graphql/MessageSetVoteMutation.graphql b/gpt4free/quora/graphql/MessageSetVoteMutation.graphql
similarity index 100%
rename from quora/graphql/MessageSetVoteMutation.graphql
rename to gpt4free/quora/graphql/MessageSetVoteMutation.graphql
diff --git a/quora/graphql/PoeBotCreateMutation.graphql b/gpt4free/quora/graphql/PoeBotCreateMutation.graphql
similarity index 100%
rename from quora/graphql/PoeBotCreateMutation.graphql
rename to gpt4free/quora/graphql/PoeBotCreateMutation.graphql
diff --git a/quora/graphql/PoeBotEditMutation.graphql b/gpt4free/quora/graphql/PoeBotEditMutation.graphql
similarity index 100%
rename from quora/graphql/PoeBotEditMutation.graphql
rename to gpt4free/quora/graphql/PoeBotEditMutation.graphql
diff --git a/quora/graphql/SendMessageMutation.graphql b/gpt4free/quora/graphql/SendMessageMutation.graphql
similarity index 100%
rename from quora/graphql/SendMessageMutation.graphql
rename to gpt4free/quora/graphql/SendMessageMutation.graphql
diff --git a/quora/graphql/SendVerificationCodeForLoginMutation.graphql b/gpt4free/quora/graphql/SendVerificationCodeForLoginMutation.graphql
similarity index 100%
rename from quora/graphql/SendVerificationCodeForLoginMutation.graphql
rename to gpt4free/quora/graphql/SendVerificationCodeForLoginMutation.graphql
diff --git a/quora/graphql/ShareMessagesMutation.graphql b/gpt4free/quora/graphql/ShareMessagesMutation.graphql
similarity index 100%
rename from quora/graphql/ShareMessagesMutation.graphql
rename to gpt4free/quora/graphql/ShareMessagesMutation.graphql
diff --git a/quora/graphql/SignupWithVerificationCodeMutation.graphql b/gpt4free/quora/graphql/SignupWithVerificationCodeMutation.graphql
similarity index 100%
rename from quora/graphql/SignupWithVerificationCodeMutation.graphql
rename to gpt4free/quora/graphql/SignupWithVerificationCodeMutation.graphql
diff --git a/quora/graphql/StaleChatUpdateMutation.graphql b/gpt4free/quora/graphql/StaleChatUpdateMutation.graphql
similarity index 100%
rename from quora/graphql/StaleChatUpdateMutation.graphql
rename to gpt4free/quora/graphql/StaleChatUpdateMutation.graphql
diff --git a/quora/graphql/SubscriptionsMutation.graphql b/gpt4free/quora/graphql/SubscriptionsMutation.graphql
similarity index 100%
rename from quora/graphql/SubscriptionsMutation.graphql
rename to gpt4free/quora/graphql/SubscriptionsMutation.graphql
diff --git a/quora/graphql/SummarizePlainPostQuery.graphql b/gpt4free/quora/graphql/SummarizePlainPostQuery.graphql
similarity index 100%
rename from quora/graphql/SummarizePlainPostQuery.graphql
rename to gpt4free/quora/graphql/SummarizePlainPostQuery.graphql
diff --git a/quora/graphql/SummarizeQuotePostQuery.graphql b/gpt4free/quora/graphql/SummarizeQuotePostQuery.graphql
similarity index 100%
rename from quora/graphql/SummarizeQuotePostQuery.graphql
rename to gpt4free/quora/graphql/SummarizeQuotePostQuery.graphql
diff --git a/quora/graphql/SummarizeSharePostQuery.graphql b/gpt4free/quora/graphql/SummarizeSharePostQuery.graphql
similarity index 100%
rename from quora/graphql/SummarizeSharePostQuery.graphql
rename to gpt4free/quora/graphql/SummarizeSharePostQuery.graphql
diff --git a/quora/graphql/UserSnippetFragment.graphql b/gpt4free/quora/graphql/UserSnippetFragment.graphql
similarity index 100%
rename from quora/graphql/UserSnippetFragment.graphql
rename to gpt4free/quora/graphql/UserSnippetFragment.graphql
diff --git a/quora/graphql/ViewerInfoQuery.graphql b/gpt4free/quora/graphql/ViewerInfoQuery.graphql
similarity index 100%
rename from quora/graphql/ViewerInfoQuery.graphql
rename to gpt4free/quora/graphql/ViewerInfoQuery.graphql
diff --git a/quora/graphql/ViewerStateFragment.graphql b/gpt4free/quora/graphql/ViewerStateFragment.graphql
similarity index 100%
rename from quora/graphql/ViewerStateFragment.graphql
rename to gpt4free/quora/graphql/ViewerStateFragment.graphql
diff --git a/quora/graphql/ViewerStateUpdatedSubscription.graphql b/gpt4free/quora/graphql/ViewerStateUpdatedSubscription.graphql
similarity index 100%
rename from quora/graphql/ViewerStateUpdatedSubscription.graphql
rename to gpt4free/quora/graphql/ViewerStateUpdatedSubscription.graphql
diff --git a/quora/graphql/__init__.py b/gpt4free/quora/graphql/__init__.py
similarity index 100%
rename from quora/graphql/__init__.py
rename to gpt4free/quora/graphql/__init__.py
diff --git a/quora/mail.py b/gpt4free/quora/mail.py
similarity index 93%
rename from quora/mail.py
rename to gpt4free/quora/mail.py
index e6ce96c2..864d9568 100644
--- a/quora/mail.py
+++ b/gpt4free/quora/mail.py
@@ -42,9 +42,7 @@ class Emailnator:
while True:
sleep(2)
- mail_token = self.client.post(
- "https://www.emailnator.com/message-list", json={"email": self.email}
- )
+ mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})
mail_token = loads(mail_token.text)["messageData"]
diff --git a/theb/README.md b/gpt4free/theb/README.md
similarity index 90%
rename from theb/README.md
rename to gpt4free/theb/README.md
index ca978fce..a4abdf62 100644
--- a/theb/README.md
+++ b/gpt4free/theb/README.md
@@ -1,9 +1,8 @@
### Example: `theb` (use like openai pypi package)
-
```python
# import library
-import theb
+from gpt4free import theb
# simple streaming completion
for token in theb.Completion.create('hello world'):
diff --git a/theb/__init__.py b/gpt4free/theb/__init__.py
similarity index 68%
rename from theb/__init__.py
rename to gpt4free/theb/__init__.py
index 726e025e..96053877 100644
--- a/theb/__init__.py
+++ b/gpt4free/theb/__init__.py
@@ -1,8 +1,12 @@
-from re import findall
from json import loads
from queue import Queue, Empty
+from re import findall
from threading import Thread
+from typing import Generator
+
from curl_cffi import requests
+from fake_useragent import UserAgent
+
class Completion:
# experimental
@@ -14,29 +18,29 @@ class Completion:
message_queue = Queue()
stream_completed = False
+ @staticmethod
def request(prompt: str):
headers = {
'authority': 'chatbot.theb.ai',
'content-type': 'application/json',
'origin': 'https://chatbot.theb.ai',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ 'user-agent': UserAgent().random,
}
- requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
- content_callback = Completion.handle_stream_response,
- json = {
- 'prompt': prompt,
- 'options': {}
- }
+ requests.post(
+ 'https://chatbot.theb.ai/api/chat-process',
+ headers=headers,
+ content_callback=Completion.handle_stream_response,
+ json={'prompt': prompt, 'options': {}},
)
Completion.stream_completed = True
@staticmethod
- def create(prompt: str):
+ def create(prompt: str) -> Generator[str, None, None]:
Thread(target=Completion.request, args=[prompt]).start()
- while Completion.stream_completed != True or not Completion.message_queue.empty():
+ while not Completion.stream_completed or not Completion.message_queue.empty():
try:
message = Completion.message_queue.get(timeout=0.01)
for message in findall(Completion.regex, message):
diff --git a/theb/theb_test.py b/gpt4free/theb/theb_test.py
similarity index 63%
rename from theb/theb_test.py
rename to gpt4free/theb/theb_test.py
index 177c970a..c57d5c62 100644
--- a/theb/theb_test.py
+++ b/gpt4free/theb/theb_test.py
@@ -1,4 +1,4 @@
import theb
for token in theb.Completion.create('hello world'):
- print(token, end='', flush=True)
\ No newline at end of file
+ print(token, end='', flush=True)
diff --git a/you/README.md b/gpt4free/you/README.md
similarity index 96%
rename from you/README.md
rename to gpt4free/you/README.md
index 25c20085..11b4723e 100644
--- a/you/README.md
+++ b/gpt4free/you/README.md
@@ -1,7 +1,8 @@
### Example: `you` (use like openai pypi package)
```python
-import you
+
+from gpt4free import you
# simple request with links and details
response = you.Completion.create(
diff --git a/you/__init__.py b/gpt4free/you/__init__.py
similarity index 64%
rename from you/__init__.py
rename to gpt4free/you/__init__.py
index 8bf31f0d..97b48464 100644
--- a/you/__init__.py
+++ b/gpt4free/you/__init__.py
@@ -1,28 +1,36 @@
+import json
import re
-from json import loads
+from typing import Optional, List, Dict, Any
from uuid import uuid4
from fake_useragent import UserAgent
+from pydantic import BaseModel
from tls_client import Session
+class PoeResponse(BaseModel):
+ text: Optional[str] = None
+ links: List[str] = []
+ extra: Dict[str, Any] = {}
+
+
class Completion:
@staticmethod
def create(
- prompt: str,
- page: int = 1,
- count: int = 10,
- safe_search: str = 'Moderate',
- on_shopping_page: bool = False,
- mkt: str = '',
- response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
- domain: str = 'youchat',
- query_trace_id: str = None,
- chat: list = None,
- include_links: bool = False,
- detailed: bool = False,
- debug: bool = False,
- ) -> dict:
+ prompt: str,
+ page: int = 1,
+ count: int = 10,
+ safe_search: str = 'Moderate',
+ on_shopping_page: bool = False,
+ mkt: str = '',
+ response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
+ domain: str = 'youchat',
+ query_trace_id: str = None,
+ chat: list = None,
+ include_links: bool = False,
+ detailed: bool = False,
+ debug: bool = False,
+ ) -> PoeResponse:
if chat is None:
chat = []
@@ -57,26 +65,28 @@ class Completion:
r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
).group()
third_party_search_results = re.search(
- r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group()
+ r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
+ ).group()
# slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
extra = {
- 'youChatSerpResults': loads(you_chat_serp_results),
+ 'youChatSerpResults': json.loads(you_chat_serp_results),
# 'slots' : loads(slots)
}
- return {
- 'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'),
- 'links': loads(third_party_search_results)['search']['third_party_search_results']
- if include_links
- else None,
- 'extra': extra if detailed else None,
- }
+ response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
+ if include_links:
+ response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
- @classmethod
- def __get_headers(cls) -> dict:
+ if detailed:
+ response.extra = extra
+
+ return response
+
+ @staticmethod
+ def __get_headers() -> dict:
return {
'authority': 'you.com',
'accept': 'text/event-stream',
@@ -93,6 +103,6 @@ class Completion:
'user-agent': UserAgent().random,
}
- @classmethod
- def __get_failure_response(cls) -> dict:
- return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})
+ @staticmethod
+ def __get_failure_response() -> PoeResponse:
+ return PoeResponse(text='Unable to fetch the response, Please try again.')
diff --git a/gui/query_methods.py b/gui/query_methods.py
index 1a4a3402..6225453b 100644
--- a/gui/query_methods.py
+++ b/gui/query_methods.py
@@ -3,11 +3,10 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
-import forefront, quora, theb, you
+from gpt4free import quora, forefront, theb, you
import random
-
def query_forefront(question: str) -> str:
# create an account
token = forefront.Account.create(logging=False)
@@ -15,65 +14,59 @@ def query_forefront(question: str) -> str:
response = ""
# get a response
try:
- for i in forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4'):
- response += i.completion.choices[0].text
-
- return response
-
+ return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4').text
except Exception as e:
# Return error message if an exception occurs
- return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ return (
+ f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ )
def query_quora(question: str) -> str:
token = quora.Account.create(logging=False, enable_bot_creation=True)
- response = quora.Completion.create(
- model='gpt-4',
- prompt=question,
- token=token
- )
-
- return response.completion.choices[0].tex
+ return quora.Completion.create(model='gpt-4', prompt=question, token=token).text
def query_theb(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
response = ""
try:
- result = theb.Completion.create(
- prompt = question)
- return result
-
+ return ''.join(theb.Completion.create(prompt=question))
+
except Exception as e:
# Return error message if an exception occurs
- return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ return (
+ f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ )
def query_you(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
try:
- result = you.Completion.create(
- prompt = question)
+ result = you.Completion.create(prompt=question)
return result["response"]
-
+
except Exception as e:
# Return error message if an exception occurs
- return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ return (
+ f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+ )
+
# Define a dictionary containing all query methods
avail_query_methods = {
- "Forefront": query_forefront,
- "Poe": query_quora,
- "Theb": query_theb,
- "You": query_you,
- # "Writesonic": query_writesonic,
- # "T3nsor": query_t3nsor,
- # "Phind": query_phind,
- # "Ora": query_ora,
- }
+ "Forefront": query_forefront,
+ "Poe": query_quora,
+ "Theb": query_theb,
+ "You": query_you,
+ # "Writesonic": query_writesonic,
+ # "T3nsor": query_t3nsor,
+ # "Phind": query_phind,
+ # "Ora": query_ora,
+}
+
def query(user_input: str, selected_method: str = "Random") -> str:
-
# If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
if selected_method != "Random" and selected_method in avail_query_methods:
try:
@@ -104,4 +97,3 @@ def query(user_input: str, selected_method: str = "Random") -> str:
query_methods_list.remove(chosen_query)
return result
-
diff --git a/gui/streamlit_app.py b/gui/streamlit_app.py
index d1975bbd..2dba0a7b 100644
--- a/gui/streamlit_app.py
+++ b/gui/streamlit_app.py
@@ -4,7 +4,7 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import streamlit as st
-from openai_rev import you
+from gpt4free import you
def get_answer(question: str) -> str:
diff --git a/gui/streamlit_chat_app.py b/gui/streamlit_chat_app.py
index dce8ef29..68011229 100644
--- a/gui/streamlit_chat_app.py
+++ b/gui/streamlit_chat_app.py
@@ -1,6 +1,6 @@
+import atexit
import os
import sys
-import atexit
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
@@ -9,9 +9,9 @@ from streamlit_chat import message
from query_methods import query, avail_query_methods
import pickle
-
conversations_file = "conversations.pkl"
+
def load_conversations():
try:
with open(conversations_file, "rb") as f:
@@ -31,11 +31,11 @@ def save_conversations(conversations, current_conversation):
break
if not updated:
conversations.append(current_conversation)
-
+
temp_conversations_file = "temp_" + conversations_file
with open(temp_conversations_file, "wb") as f:
pickle.dump(conversations, f)
-
+
os.replace(temp_conversations_file, conversations_file)
@@ -44,10 +44,10 @@ def exit_handler():
# Perform cleanup operations here, like saving data or closing open files.
save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+
# Register the exit_handler function to be called when the program is closing.
atexit.register(exit_handler)
-
st.header("Chat Placeholder")
if 'conversations' not in st.session_state:
@@ -61,7 +61,7 @@ if 'selected_conversation' not in st.session_state:
if 'input_field_key' not in st.session_state:
st.session_state['input_field_key'] = 0
-
+
if 'query_method' not in st.session_state:
st.session_state['query_method'] = query
@@ -69,19 +69,22 @@ if 'query_method' not in st.session_state:
if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
-
input_placeholder = st.empty()
-user_input = input_placeholder.text_input('You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}')
+user_input = input_placeholder.text_input(
+ 'You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}'
+)
submit_button = st.button("Submit")
if user_input or submit_button:
output = query(user_input, st.session_state['query_method'])
-
- st.session_state.current_conversation['user_inputs'].append(user_input)
- st.session_state.current_conversation['generated_responses'].append(output)
- save_conversations(st.session_state.conversations, st.session_state.current_conversation)
- user_input = input_placeholder.text_input('You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}') # Clear the input field
+ escaped_output = output.encode('utf-8').decode('unicode-escape')
+ st.session_state.current_conversation['user_inputs'].append(user_input)
+ st.session_state.current_conversation['generated_responses'].append(escaped_output)
+ save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+ user_input = input_placeholder.text_input(
+ 'You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}'
+ ) # Clear the input field
# Add a button to create a new conversation
if st.sidebar.button("New Conversation"):
@@ -89,11 +92,7 @@ if st.sidebar.button("New Conversation"):
st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
st.session_state['input_field_key'] += 1
-st.session_state['query_method'] = st.sidebar.selectbox(
- "Select API:",
- options=avail_query_methods,
- index=0
-)
+st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)
# Sidebar
st.sidebar.header("Conversation History")
diff --git a/pyproject.toml b/pyproject.toml
index 7d3be065..83df5dc5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,10 +2,10 @@
name = "openai-rev"
version = "0.1.0"
description = ""
-authors = ["Raju Komati "]
+authors = []
license = "GPL-3.0"
readme = "README.md"
-packages = [{ include = "openai_rev" }]
+packages = [{ include = "gpt4free" }]
exclude = ["**/*.txt"]
[tool.poetry.dependencies]
diff --git a/test.py b/test.py
index 4b39bd0a..0fd2ec8b 100644
--- a/test.py
+++ b/test.py
@@ -1,4 +1,4 @@
-import theb
+from gpt4free import theb
for token in theb.Completion.create('hello world'):
print(token, end='', flush=True)
diff --git a/testing/forefront_test.py b/testing/forefront_test.py
index 8d25ed89..b7b5c57c 100644
--- a/testing/forefront_test.py
+++ b/testing/forefront_test.py
@@ -1,4 +1,4 @@
-from openai_rev import forefront
+from gpt4free import forefront
# create an account
token = forefront.Account.create(logging=True)
diff --git a/testing/poe_account_create_test.py b/testing/poe_account_create_test.py
index 7072597f..ace2306e 100644
--- a/testing/poe_account_create_test.py
+++ b/testing/poe_account_create_test.py
@@ -6,8 +6,8 @@ from typing import Optional
from tls_client import Session as TLS
from twocaptcha import TwoCaptcha
-from openai_rev.quora import extract_formkey
-from openai_rev.quora.mail import Emailnator
+from gpt4free.quora import extract_formkey
+from gpt4free.quora.mail import Emailnator
solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
diff --git a/testing/poe_test.py b/testing/poe_test.py
index 809804f2..22d95f5f 100644
--- a/testing/poe_test.py
+++ b/testing/poe_test.py
@@ -1,6 +1,6 @@
from time import sleep
-from openai_rev import quora
+from gpt4free import quora
token = quora.Account.create(proxy=None, logging=True)
print('token', token)
diff --git a/testing/quora_test_2.py b/testing/quora_test_2.py
index 5d06f9ed..297ca7a1 100644
--- a/testing/quora_test_2.py
+++ b/testing/quora_test_2.py
@@ -1,4 +1,4 @@
-from openai_rev import quora
+from gpt4free import quora
token = quora.Account.create(logging=True, enable_bot_creation=True)
diff --git a/testing/test_main.py b/testing/test_main.py
index 612ec695..7c28f1d2 100644
--- a/testing/test_main.py
+++ b/testing/test_main.py
@@ -1,24 +1,27 @@
-from openai_rev import openai_rev, Provider, quora, forefront
+import gpt4free
+from gpt4free import Provider, quora, forefront
# usage You
-response = openai_rev.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
+response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
print(response)
# usage Poe
token = quora.Account.create(logging=False)
-response = openai_rev.Completion.create(
- Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT'
-)
+response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
print(response)
# usage forefront
token = forefront.Account.create(logging=False)
-response = openai_rev.Completion.create(
+response = gpt4free.Completion.create(
Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
)
print(response)
print(f'END')
# usage theb
-response = openai_rev.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
+response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
+print(response)
+
+# usage cocalc
+response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
print(response)
diff --git a/testing/you_test.py b/testing/you_test.py
index 34800301..1e9f6205 100644
--- a/testing/you_test.py
+++ b/testing/you_test.py
@@ -1,4 +1,4 @@
-from openai_rev import you
+from gpt4free import you
# simple request with links and details
response = you.Completion.create(prompt="hello world", detailed=True, include_links=True)
@@ -22,6 +22,6 @@ while True:
response = you.Completion.create(prompt=prompt, chat=chat)
- print("Bot:", response["response"])
+ print("Bot:", response.text)
- chat.append({"question": prompt, "answer": response["response"]})
+ chat.append({"question": prompt, "answer": response.text})