mirror of https://github.com/xtekky/gpt4free
- | Merge pull request #873
- | Move httpx requirement, Fix Ails and ChatgptAi Provider, Improve scripts (branch: pull/880/head, version 0.0.2.7)
commit
7ca1a59d95
@ -1,60 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import requests
|
||||
from .ChatgptLogin import ChatgptLogin
|
||||
|
||||
from ..typing import Any, CreateResult
|
||||
from .base_provider import BaseProvider
|
||||
|
||||
|
||||
class Opchatgpts(BaseProvider):
    """Provider backed by opchatgpts.net's WordPress ai-chatbot REST endpoint."""

    url = "https://opchatgpts.net"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield the assistant's reply for *messages*.

        Recognized kwargs:
            temperature (float): sampling temperature, default 0.8.
            max_tokens (int): response length cap, default 1024.
            system_prompt (str): system/context message for the chatbot.

        Raises:
            requests.HTTPError: if the endpoint returns an error status.
        """
        temperature = kwargs.get("temperature", 0.8)
        max_tokens = kwargs.get("max_tokens", 1024)
        system_prompt = kwargs.get(
            "system_prompt",
            "Converse as if you were an AI assistant. Be friendly, creative.")

        payload = _create_payload(
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            system_prompt=system_prompt)

        # timeout prevents an unresponsive server from hanging the caller forever
        # (the original call had no timeout at all)
        response = requests.post(
            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
            json=payload,
            timeout=60)
        response.raise_for_status()
        yield response.json()["reply"]
|
||||
|
||||
|
||||
def _create_payload(
|
||||
messages: list[dict[str, str]],
|
||||
temperature: float,
|
||||
max_tokens: int, system_prompt: str) -> dict:
|
||||
|
||||
return {
|
||||
"env" : "chatbot",
|
||||
"session" : "N/A",
|
||||
"prompt" : "\n",
|
||||
"context" : system_prompt,
|
||||
"messages" : messages,
|
||||
"newMessage" : messages[::-1][0]["content"],
|
||||
"userName" : '<div class="mwai-name-text">User:</div>',
|
||||
"aiName" : '<div class="mwai-name-text">AI:</div>',
|
||||
"model" : "gpt-3.5-turbo",
|
||||
"temperature" : temperature,
|
||||
"maxTokens" : max_tokens,
|
||||
"maxResults" : 1,
|
||||
"apiKey" : "",
|
||||
"service" : "openai",
|
||||
"embeddingsIndex" : "",
|
||||
"stop" : "",
|
||||
}
|
||||
class Opchatgpts(ChatgptLogin):
    """Opchatgpts provider that reuses ChatgptLogin's request logic with its own URL.

    NOTE(review): as this page shows both sides of a diff, this definition
    shadows the earlier ``Opchatgpts(BaseProvider)`` class; in the committed
    file only this slim subclass remains.
    """
    # base URL substituted into the inherited ChatgptLogin request flow
    url = "https://opchatgpts.net"
    # marks the provider as currently operational for provider discovery
    working = True
|
@ -0,0 +1,37 @@
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import asyncio
|
||||
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
|
||||
import g4f
|
||||
from g4f.Provider import AsyncProvider
|
||||
from testing.test_providers import get_providers
|
||||
from testing.log_time import log_time_async
|
||||
|
||||
async def create_async(provider: AsyncProvider):
    """Smoke-test one provider's ``create_async`` with a single-message chat.

    Args:
        provider: an async provider class exposing ``create_async``.

    Returns:
        The response string on success, or the raised exception object on
        failure — deliberately best-effort, since the caller just prints
        whatever comes back for every provider.
    """
    model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
    try:
        response = await log_time_async(
            provider.create_async,
            model=model,
            messages=[{"role": "user", "content": "Hello Assistant!"}]
        )
        # isinstance is the idiomatic type check (was: type(response) is str);
        # messages make the returned AssertionError self-describing when printed
        assert isinstance(response, str), "response is not a string"
        assert len(response) > 0, "response is empty"
        return response
    except Exception as e:
        return e
|
||||
|
||||
async def run_async():
    """Fire create_async at every working async provider concurrently and print each result."""
    async_providers: list[AsyncProvider] = [
        candidate for candidate in get_providers()
        if candidate.working and hasattr(candidate, "create_async")
    ]
    pending = [create_async(candidate) for candidate in async_providers]
    results = await asyncio.gather(*pending)
    # pair each provider with its gathered result (order is preserved by gather)
    for candidate, result in zip(async_providers, results):
        print(f"{candidate.__name__}:", result)
|
||||
|
||||
if __name__ == "__main__":
    # Guarded entry point: importing this script no longer triggers network
    # calls against every provider (previously this ran at import time).
    print("Total:", asyncio.run(log_time_async(run_async)))
|
Loading…
Reference in New Issue