Improve code with AI

pull/1000/head
Heiner Lohaus 1 year ago
parent f7bb30036e
commit dfdb759639

@ -22,7 +22,10 @@ Improve the code in this file:
```py
{code}
```
Don't remove anything. Add type hints if possible.
Don't remove anything.
Add typehints if possible.
Don't add any typehints to kwargs.
Don't remove license comments.
"""
print("Create code...")
@ -30,7 +33,7 @@ response = []
for chunk in g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long,
messages=[{"role": "user", "content": prompt}],
timeout=0,
timeout=300,
stream=True
):
response.append(chunk)

@ -1,8 +1,10 @@
from __future__ import annotations
import asyncio, sys
import asyncio
import sys
from asyncio import AbstractEventLoop
from os import path
from typing import Dict, List
import browser_cookie3
# Change event loop policy on windows
@ -13,7 +15,7 @@ if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Local Cookie Storage
_cookies: dict[str, dict[str, str]] = {}
_cookies: Dict[str, Dict[str, str]] = {}
# If event loop is already running, handle nested event loops
# If "nest_asyncio" is installed, patch the event loop.
@ -34,11 +36,13 @@ def get_event_loop() -> AbstractEventLoop:
return event_loop
except ImportError:
raise RuntimeError(
'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.')
'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.'
)
# Load cookies for a domain from all supported browser.
# Cache the results in the "_cookies" variable
def get_cookies(cookie_domain: str) -> dict:
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
def get_cookies(cookie_domain: str) -> Dict[str, str]:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
try:
@ -49,15 +53,18 @@ def get_cookies(cookie_domain: str) -> dict:
return _cookies[cookie_domain]
def format_prompt(messages: List[Dict[str, str]], add_special_tokens: bool = False) -> str:
    """Join chat messages into a single prompt string.

    Each message dict must carry "role" and "content" keys. With a single
    message and add_special_tokens=False the content is returned verbatim;
    otherwise the messages are rendered as "Role: content" lines followed by
    a trailing "Assistant:" cue for the model to continue from.
    """
    if add_special_tokens or len(messages) > 1:
        # Generator (not a list) feeds join lazily; output is unchanged.
        formatted = "\n".join(
            "%s: %s" % (message["role"].capitalize(), message["content"])
            for message in messages
        )
        return f"{formatted}\nAssistant:"
    return messages[0]["content"]
def get_browser(user_data_dir: str = None):
from undetected_chromedriver import Chrome

@ -5,7 +5,7 @@ import json
import asyncio
from functools import partialmethod
from asyncio import Future, Queue
from typing import AsyncGenerator
from typing import AsyncGenerator, Union, Optional
from curl_cffi.requests import AsyncSession, Response
import curl_cffi
@ -37,7 +37,14 @@ class StreamResponse:
async def json(self, **kwargs) -> dict:
    """Read the complete response body and decode it as JSON.

    Keyword arguments are forwarded to json.loads unchanged.
    """
    body = await self.read()
    return json.loads(body, **kwargs)
async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes, None]:
async def iter_lines(
self, chunk_size: Optional[int] = None, decode_unicode: bool = False, delimiter: Optional[str] = None
) -> AsyncGenerator[bytes, None]:
"""
Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/
which is under the License: Apache 2.0
"""
pending: bytes = None
async for chunk in self.iter_content(
@ -60,7 +67,9 @@ class StreamResponse:
if pending is not None:
yield pending
async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes, None]:
async def iter_content(
self, chunk_size: Optional[int] = None, decode_unicode: bool = False
) -> AsyncGenerator[bytes, None]:
if chunk_size:
warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
if decode_unicode:
@ -76,14 +85,14 @@ class StreamResponse:
class StreamRequest:
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs: Union[bool, int, str]) -> None:
    """Prepare a streaming HTTP request; no network I/O happens here.

    session: the curl_cffi AsyncSession that will perform the transfer.
    method:  HTTP verb, e.g. "GET" or "POST".
    url:     target URL.
    kwargs:  extra request options forwarded to curl_cffi unchanged.
    """
    self.session: AsyncSession = session
    # Prefer the session's own loop; fall back to the currently running one.
    self.loop: asyncio.AbstractEventLoop = session.loop if session.loop else asyncio.get_running_loop()
    # Received body chunks are handed to the consumer through this queue.
    self.queue: Queue[bytes] = Queue()
    self.method: str = method
    self.url: str = url
    self.options: dict = kwargs
    # Set once the transfer is started; None until then.
    self.handle: Optional[curl_cffi.AsyncCurl] = None
def _on_content(self, data: bytes) -> None:
if not self.enter.done():
@ -134,10 +143,7 @@ class StreamRequest:
response.request = request
else:
response = self.session._parse_response(self.curl, request, _, header_buffer)
return StreamResponse(
response,
self.queue
)
return StreamResponse(response, self.queue)
async def __aenter__(self) -> StreamResponse:
return await self.fetch()
@ -163,10 +169,7 @@ class StreamRequest:
class StreamSession(AsyncSession):
    """AsyncSession variant whose request() returns a streaming request object."""

    def request(self, method: str, url: str, **kwargs) -> StreamRequest:
        """Build a StreamRequest; the transfer starts when it is awaited or entered."""
        return StreamRequest(self, method, url, **kwargs)

Loading…
Cancel
Save