# Mirror of https://github.com/xtekky/gpt4free.git — synced 2024-11-15 06:13:01 +00:00 (44 lines, 1.3 KiB, Python)
from __future__ import annotations
|
|
|
|
from .Openai import Openai
|
|
from ...typing import AsyncResult, Messages
|
|
|
|
class Groq(Openai):
    """Provider for Groq's OpenAI-compatible inference API.

    Reuses the generic ``Openai`` request logic and only overrides the
    API base URL plus the Groq-specific model catalogue.
    """

    label = "Groq"
    url = "https://console.groq.com/playground"
    # Provider is currently operational.
    working = True
    default_model = "mixtral-8x7b-32768"
    models = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "gemma-7b-it",
        "llama3-groq-70b-8192-tool-use-preview",
        "llama3-groq-8b-8192-tool-use-preview",
        "llama-3.1-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "llama-guard-3-8b",
        "llava-v1.5-7b-4096-preview",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "mixtral-8x7b-32768",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
    ]
    # Short names resolved to the full Groq model ids in ``models``.
    model_aliases = {
        "mixtral-8x7b": "mixtral-8x7b-32768",
        # NOTE(review): "llama2-70b-4096" is not in ``models`` above and
        # appears to have been retired by Groq; kept only so existing
        # callers using the alias do not break locally — confirm and drop.
        "llama2-70b": "llama2-70b-4096",
        # Aliases added for consistency with the ``models`` list.
        "gemma2-9b": "gemma2-9b-it",
        "gemma-7b": "gemma-7b-it",
        "llama-3.1-70b": "llama-3.1-70b-versatile",
        "llama-3.1-8b": "llama-3.1-8b-instant",
        "llama3-70b": "llama3-70b-8192",
        "llama3-8b": "llama3-8b-8192",
    }

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = "https://api.groq.com/openai/v1",
        **kwargs
    ) -> AsyncResult:
        """Create an async response generator against the Groq endpoint.

        Args:
            model: Model id or alias to use.
            messages: Conversation history to send.
            api_base: Base URL of Groq's OpenAI-compatible API.
            **kwargs: Extra options forwarded to ``Openai``.

        Returns:
            AsyncResult: The async generator produced by the parent class.
        """
        return super().create_async_generator(
            model, messages, api_base=api_base, **kwargs
        )
|