fixed the situation where there is no openai api key...again

xssdoctor 2024-04-06 12:22:34 -04:00
parent eafc2df48c
commit 18acd5a319
2 changed files with 51 additions and 58 deletions

Changed file 1 of 2

@@ -38,12 +38,11 @@ class Standalone:
         if args is None:
             args = type('Args', (), {})()
         env_file = os.path.expanduser(env_file)
+        self.client = None
         load_dotenv(env_file)
-        assert 'OPENAI_API_KEY' in os.environ, "Error: OPENAI_API_KEY not found in environment variables. Please run fabric --setup and add a key."
-        api_key = os.environ['OPENAI_API_KEY']
-        base_url = os.environ.get(
-            'OPENAI_BASE_URL', 'https://api.openai.com/v1/')
-        self.client = OpenAI(api_key=api_key, base_url=base_url)
+        if "OPENAI_API_KEY" in os.environ:
+            api_key = os.environ['OPENAI_API_KEY']
+            self.client = OpenAI(api_key=api_key)
         self.local = False
         self.config_pattern_directory = config_directory
         self.pattern = pattern
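
The hunk above drops the hard assert on OPENAI_API_KEY and makes the OpenAI client optional. A minimal sketch of the resulting pattern, written as a free function rather than fabric's actual __init__ (the function name and the env-file path are illustrative assumptions):

import os

from dotenv import load_dotenv
from openai import OpenAI


def build_optional_client(env_file="~/.config/fabric/.env"):  # path is illustrative
    # Load whatever the env file provides, then only construct the OpenAI client
    # when a key is actually present; otherwise return None so callers can fall
    # back to Claude or local Ollama models instead of aborting.
    load_dotenv(os.path.expanduser(env_file))
    if "OPENAI_API_KEY" in os.environ:
        return OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    return None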
@@ -280,33 +279,26 @@ class Standalone:
     def fetch_available_models(self):
         gptlist = []
         fullOllamaList = []
         claudeList = ['claude-3-opus-20240229',
                       'claude-3-sonnet-20240229',
                       'claude-3-haiku-20240307',
                       'claude-2.1']
-        try:
-            models = [model.id.strip()
-                      for model in self.client.models.list().data]
-        except APIConnectionError as e:
-            if getattr(e.__cause__, 'args', [''])[0] == "Illegal header value b'Bearer '":
-                print("Error: Cannot connect to the OpenAI API Server because the API key is not set. Please run fabric --setup and add a key.")
-                claudeList = ['claude-3-opus-20240229', 'claude-3-sonnet-20240229',
-                              'claude-3-haiku-20240307', 'claude-2.1']
-            else:
-                print(
-                    f"Error: {e.message} trying to access {e.request.url}: {getattr(e.__cause__, 'args', [''])}")
-            sys.exit()
+        try:
+            if self.client:
+                models = [model.id.strip()
+                          for model in self.client.models.list().data]
+                if "/" in models[0] or "\\" in models[0]:
+                    gptlist = [item[item.rfind(
+                        "/") + 1:] if "/" in item else item[item.rfind("\\") + 1:] for item in models]
+                else:
+                    gptlist = [item.strip()
+                               for item in models if item.startswith("gpt")]
+                gptlist.sort()
+        except APIConnectionError as e:
+            print("OpenAI API key not set. Skipping GPT models.")
         except Exception as e:
             print(f"Error: {getattr(e.__context__, 'args', [''])[0]}")
             sys.exit()
-        if "/" in models[0] or "\\" in models[0]:
-            # lmstudio returns full paths to models. Iterate and truncate everything before and including the last slash
-            gptlist = [item[item.rfind(
-                "/") + 1:] if "/" in item else item[item.rfind("\\") + 1:] for item in models]
-        else:
-            # Keep items that start with "gpt"
-            gptlist = [item.strip()
-                       for item in models if item.startswith("gpt")]
-        gptlist.sort()
         import ollama
         try:
             default_modelollamaList = ollama.list()['models']
@@ -314,6 +306,7 @@ class Standalone:
                 fullOllamaList.append(model['name'])
         except:
             fullOllamaList = []
         return gptlist, fullOllamaList, claudeList
     def get_cli_input(self):
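
With the client now possibly None, fetch_available_models guards the OpenAI lookup and treats a connection failure as "no GPT models" instead of exiting. A standalone sketch of that guard-and-skip behaviour (not fabric's actual helper; the function name below is an illustrative assumption):

from typing import List, Optional

from openai import APIConnectionError, OpenAI


def list_gpt_models(client: Optional[OpenAI]) -> List[str]:
    # No API key means no client was built, so there is nothing to list.
    if client is None:
        return []
    try:
        models = [m.id.strip() for m in client.models.list().data]
    except APIConnectionError:
        print("OpenAI API key not set. Skipping GPT models.")
        return []
    # Keep only the GPT-prefixed model ids, sorted for stable output.
    return sorted(m for m in models if m.startswith("gpt"))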

Changed file 2 of 2

@@ -1,44 +1,44 @@
 framework: crewai
-topic: 'write me a 20 word essay on apples
+topic: 'give me the complete voting record of senator marco rubio
   '
 roles:
-  researcher:
-    backstory: Has an extensive background in conducting research using digital tools
-      to extract relevant information.
-    goal: Gather comprehensive information about apples
-    role: Researcher
+  data_researcher:
+    backstory: Skilled in using various data search tools to find accurate information.
+    goal: Gather relevant data on Senator Marco Rubio's voting record
+    role: Data Researcher
     tasks:
-      collect_information_on_apples:
-        description: Use digital tools to find credible sources of information on
-          apples covering history, types, and benefits.
-        expected_output: Collected data on apples, including historical background,
-          varieties, and health benefits.
+      data_collection:
+        description: Use provided search tools to collect voting records of Senator
          Marco Rubio from different sources.
+        expected_output: A collection of CSV, XML or other data files containing the
+          required information.
         tools:
         - ''
-  analyst:
-    backstory: Expert in analyzing large volumes of data to identify the most relevant
-      and interesting facts.
-    goal: Analyze gathered information to distill key points
-    role: Analyst
+  data_processor:
+    backstory: Expert in processing and cleaning raw data, preparing it for analysis
+      or presentation.
+    goal: Process and format collected data into a readable output
+    role: Data Processor
     tasks:
-      synthesize_information:
-        description: Review the collected data and extract the most pertinent facts
-          about apples, focusing on uniqueness and impact.
-        expected_output: A summary highlighting key facts about apples, such as nutritional
-          benefits, global popularity, and cultural significance.
+      data_processing:
+        description: Clean and process the collected voting records into a structured
+          JSON format.
+        expected_output: A JSON file containing Senator Marco Rubio's complete voting
+          record.
         tools:
         - ''
-  writer:
-    backstory: Specializes in creating short, impactful pieces of writing that capture
-      the essence of the subject matter.
-    goal: Craft a concise and engaging essay on apples
-    role: Writer
+  presenter:
+    backstory: Skilled in extracting and summarizing information, presenting it in
+      a clear and concise format.
+    goal: Generate the final output for user consumption
+    role: Presenter
     tasks:
-      write_essay:
-        description: Based on the analyzed data, write a compelling 20-word essay
-          on apples that encapsulates their essence and significance.
-        expected_output: An engaging 20-word essay on apples.
+      presentation_creation:
+        description: Create an easily digestible presentation from the processed data
+          on Senator Marco Rubio's voting record.
+        expected_output: A well-structured text or multimedia output that highlights
+          key aspects of Senator Marco Rubio's voting history.
         tools:
         - ''
 dependencies: []
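
The second file appears to be a crewai agent config whose topic moved from the apples essay to Senator Marco Rubio's voting record. Purely to illustrate the file's shape (not how fabric or crewai actually consume it; the file path below is hypothetical), it can be inspected with PyYAML:

import yaml

with open("agents.yaml") as fh:  # hypothetical path to a config shaped like the diff above
    cfg = yaml.safe_load(fh)

print("framework:", cfg["framework"])
print("topic:", cfg["topic"].strip())
for role_name, role in cfg["roles"].items():
    # Each role carries a goal plus a mapping of named tasks with expected outputs.
    print(f"{role_name}: {role['goal']}")
    for task_name, task in role.get("tasks", {}).items():
        print(f"  {task_name}: {task['expected_output']}")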