gpt4free/g4f/Provider/Opchatgpts.py

from __future__ import annotations

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


# Provider for opchatgpts.net, which exposes a gpt-3.5-turbo backed chatbot
# through a WordPress REST endpoint (/wp-json/ai-chatbot/v1/chat).
class Opchatgpts(BaseProvider):
    url                   = "https://opchatgpts.net"
    working               = True
    supports_gpt_35_turbo = True
    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        # Optional generation parameters with defaults.
        temperature = kwargs.get("temperature", 0.8)
        max_tokens = kwargs.get("max_tokens", 1024)
        system_prompt = kwargs.get(
            "system_prompt",
            "Converse as if you were an AI assistant. Be friendly, creative.")

        payload = _create_payload(
            messages      = messages,
            temperature   = temperature,
            max_tokens    = max_tokens,
            system_prompt = system_prompt)

        # The endpoint returns the complete reply in a single JSON response,
        # so this generator yields exactly once (no real streaming).
        response = requests.post(
            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
        response.raise_for_status()
        yield response.json()["reply"]


def _create_payload(
    messages: list[dict[str, str]],
    temperature: float,
    max_tokens: int, system_prompt: str) -> dict:
    # Payload shape expected by the site's ai-chatbot endpoint.
    return {
        "env"             : "chatbot",
        "session"         : "N/A",
        "prompt"          : "\n",
        "context"         : system_prompt,
        "messages"        : messages,
        # newMessage carries the most recent message in the conversation.
        "newMessage"      : messages[-1]["content"],
        "userName"        : '<div class="mwai-name-text">User:</div>',
        "aiName"          : '<div class="mwai-name-text">AI:</div>',
        "model"           : "gpt-3.5-turbo",
        "temperature"     : temperature,
        "maxTokens"       : max_tokens,
        "maxResults"      : 1,
        "apiKey"          : "",
        "service"         : "openai",
        "embeddingsIndex" : "",
        "stop"            : "",
    }
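

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how this provider could be called through g4f's
# Provider package. The import path, message format, and non-streaming
# behaviour are assumptions based on the code above; the remote endpoint
# itself is not verified here.
#
#   from g4f.Provider import Opchatgpts
#
#   messages = [{"role": "user", "content": "Say hello in one sentence."}]
#   for reply in Opchatgpts.create_completion(
#           model="gpt-3.5-turbo",
#           messages=messages,
#           stream=False,
#           temperature=0.7,
#           max_tokens=256):
#       print(reply)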