Merge pull request #737 from AlephZero255/main

Fix empty H2o output
pull/742/head
xtekky 1 year ago committed by GitHub
commit 4133ac9200
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,106 +1,94 @@
import os
import json
from json import loads
from uuid import uuid4

import requests
from requests import Session

from ...typing import sha256, Dict, get_type_hints
# Base URL of the h2oGPT web UI this provider talks to.
url = 'https://gpt-gm.h2o.ai'

# Short model aliases exposed to callers of this provider.
model = ['falcon-40b', 'falcon-7b', 'llama-13b']

supports_stream = True
needs_auth = False

# Maps the short alias to the full model identifier expected by the
# h2o.ai backend's /conversation endpoint.
models = {
    'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
    'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
    'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
}
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Create a completion via the gpt-gm.h2o.ai web backend.

    Builds a plain-text conversation transcript from *messages*, accepts the
    site's ethics modal, opens a new conversation for the requested model,
    posts the transcript, and returns the generated text.

    :param model: short model alias; must be a key of the module-level
        ``models`` dict (e.g. ``'falcon-40b'``).
    :param messages: list of dicts with ``'role'`` and ``'content'`` keys.
    :param stream: accepted for interface compatibility; the response is
        parsed in full and returned at once regardless of this flag.
    :param kwargs: optional generation overrides — ``temperature``,
        ``truncate``, ``max_new_tokens``, ``do_sample``,
        ``repetition_penalty``, ``return_full_text``, ``id``, ``response_id``.
    :return: the ``generated_text`` string from the backend's final
        server-sent event.
    :raises KeyError: if *model* is not a known alias, or the final event
        lacks ``generated_text``.
    """
    # Flatten the chat history into "role: content" lines; the trailing
    # "assistant: " prompts the model to continue as the assistant.
    conversation = ''
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])
    conversation += 'assistant: '

    session = requests.Session()

    # Initial GET — presumably seeds the session cookies the site expects
    # before /settings is accepted; its response body is not used.
    # NOTE(review): confirm the cookie requirement against the live site.
    response = session.get("https://gpt-gm.h2o.ai/")

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
    }
    # Accept the ethics modal; without this the backend refuses conversations.
    data = {
        "ethicsModalAccepted": "true",
        "shareConversationsWithModelAuthors": "true",
        "ethicsModalAcceptedAt": "",
        "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        "searchEnabled": "true"
    }
    response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)

    # Subsequent calls are XHR-style JSON requests, hence the new header set.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/json",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://gpt-gm.h2o.ai/"
    }
    data = {
        "model": models[model]
    }
    # Open a conversation; the JSON response carries 'conversationId'.
    conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)

    data = {
        "inputs": conversation,
        "parameters": {
            "temperature": kwargs.get('temperature', 0.4),
            "truncate": kwargs.get('truncate', 2048),
            "max_new_tokens": kwargs.get('max_new_tokens', 1024),
            "do_sample": kwargs.get('do_sample', True),
            "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
            "return_full_text": kwargs.get('return_full_text', False)
        },
        "stream": True,
        "options": {
            "id": kwargs.get('id', str(uuid4())),
            "response_id": kwargs.get('response_id', str(uuid4())),
            "is_retry": False,
            "use_cache": False,
            "web_search_id": ""
        }
    }

    response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)

    # The body is a server-sent-event stream ("data: {...}" chunks); the
    # last chunk holds the complete 'generated_text', so split on the
    # "data:" marker and parse only the final JSON payload.
    generated_text = response.text.replace("\n", "").split("data:")
    generated_text = json.loads(generated_text[-1])

    return generated_text["generated_text"]
# Human-readable capability string, e.g. "g4f.Providers.H2o supports:
# (model: str, messages: list, stream: bool)" — introspects
# _create_completion's positional parameters and their annotations.
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}"
         for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
Loading…
Cancel
Save