Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-11-19 03:25:32 +00:00.
commit
6d09dbf4a9
27
etc/testing/test_api.py
Normal file
27
etc/testing/test_api.py
Normal file
@ -0,0 +1,27 @@
|
||||
"""Smoke-test a local g4f API server through the legacy openai client."""
import openai

# A Hugging Face token doubles as the API key when embeddings are used;
# leave the placeholder untouched otherwise.
openai.api_key = "YOUR_HUGGING_FACE_TOKEN"  # Replace with your actual token

# Point the client at the local development server instead of api.openai.com.
openai.api_base = "http://localhost:1337/v1"


def main():
    """Request one chat completion and print it, handling both streamed
    and non-streamed response shapes."""
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    )
    if not isinstance(completion, dict):
        # Streaming: emit each delta chunk as soon as it arrives.
        for chunk in completion:
            piece = chunk["choices"][0]["delta"].get("content")
            if piece is not None:
                print(piece, end="", flush=True)
    else:
        # Non-streaming: the whole message came back at once.
        # NOTE(review): attribute access here assumes an OpenAIObject-style
        # dict subclass, not a plain dict — confirm against the server reply.
        print(completion.choices[0].message.content)


if __name__ == "__main__":
    main()
|
@ -83,28 +83,17 @@ class Api:
|
||||
model = item_data.get('model')
|
||||
stream = True if item_data.get("stream") == "True" else False
|
||||
messages = item_data.get('messages')
|
||||
conversation = item_data.get('conversation') if item_data.get('conversation') != None else None
|
||||
provider = item_data.get('provider').replace('g4f.Provider.', '')
|
||||
provider = item_data.get('provider', '').replace('g4f.Provider.', '')
|
||||
provider = provider if provider and provider != "Auto" else None
|
||||
if provider != None:
|
||||
provider = g4f.Provider.ProviderUtils.convert.get(provider)
|
||||
|
||||
try:
|
||||
if model == 'pi':
|
||||
response = g4f.ChatCompletion.create(
|
||||
model=model,
|
||||
stream=stream,
|
||||
messages=messages,
|
||||
conversation=conversation,
|
||||
provider = provider,
|
||||
ignored=self.list_ignored_providers)
|
||||
else:
|
||||
response = g4f.ChatCompletion.create(
|
||||
model=model,
|
||||
stream=stream,
|
||||
messages=messages,
|
||||
provider = provider,
|
||||
ignored=self.list_ignored_providers)
|
||||
ignored=self.list_ignored_providers
|
||||
)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
|
||||
@ -179,9 +168,12 @@ class Api:
|
||||
|
||||
content = json.dumps(end_completion_data, separators=(',', ':'))
|
||||
yield f'data: {content}\n\n'
|
||||
|
||||
except GeneratorExit:
|
||||
pass
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
|
||||
yield f'data: {content}\n\n'
|
||||
|
||||
return StreamingResponse(streaming(), media_type="text/event-stream")
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user