Mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-18 03:25:46 +00:00)
Some cleanup and job-specific names for CircleCI
commit 113d04dce3
parent 3668cf00cf
@@ -4,7 +4,7 @@ orbs:
   python: circleci/python@1.2
 
 jobs:
-  build-linux:
+  build-py-linux:
     docker:
       - image: circleci/python:3.8
     steps:
@@ -35,7 +35,7 @@ jobs:
           paths:
             - "*.whl"
 
-  build-macos:
+  build-py-macos:
     macos:
       xcode: "14.2.0"
     resource_class: macos.m1.large.gen1
@@ -66,7 +66,7 @@ jobs:
           paths:
             - "*.whl"
 
-  build-windows:
+  build-py-windows:
     executor:
       name: win/default
     steps:
@@ -132,13 +132,13 @@ jobs:
 
 workflows:
   version: 2
-  build-deploy:
+  build-py-deploy:
     jobs:
-      - build-linux
-      - build-macos
-      - build-windows
+      - build-py-linux
+      - build-py-macos
+      - build-py-windows
      - store-and-upload-wheels:
          requires:
-            - build-windows
-            - build-linux
-            - build-macos
+            - build-py-windows
+            - build-py-linux
+            - build-py-macos
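The rename above has to land consistently in both the `jobs:` section and every workflow reference, including the `requires:` lists. As a rough illustration of that invariant (a hypothetical helper, not part of this commit; it assumes PyYAML is installed and that the config lives at the usual `.circleci/config.yml` path), one could check the config like this:

# Hypothetical consistency check (not in the repository): after renaming jobs,
# verify every job referenced under `workflows` is still defined under `jobs:`.
import yaml  # assumes PyYAML is available


def check_circleci_job_names(config_path=".circleci/config.yml"):
    with open(config_path) as f:
        config = yaml.safe_load(f)

    defined = set(config.get("jobs", {}))
    referenced = set()
    for workflow in config.get("workflows", {}).values():
        if not isinstance(workflow, dict):
            continue  # skips the `version: 2` entry
        for entry in workflow.get("jobs", []):
            if isinstance(entry, str):
                referenced.add(entry)
            else:
                # e.g. {"store-and-upload-wheels": {"requires": [...]}}
                name, opts = next(iter(entry.items()))
                referenced.add(name)
                referenced.update((opts or {}).get("requires", []))

    missing = referenced - defined
    if missing:
        raise ValueError(f"workflow references undefined jobs: {sorted(missing)}")


if __name__ == "__main__":
    check_circleci_job_names()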
@@ -114,6 +114,7 @@ class GPT4All():
 
     @staticmethod
     def download_model(model_filename, model_path):
+        # TODO: Find good way of safely removing file that got interrupted.
         def get_download_url(model_filename):
             return f"https://gpt4all.io/models/{model_filename}"
 
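The new TODO concerns cleaning up a download that gets interrupted partway through. One common approach (a minimal sketch only, not the project's implementation; `download_model_safely` and the use of `requests` are assumptions for illustration) is to stream into a temporary file in the target directory and only rename it into place once the transfer finishes:

# Hypothetical sketch, not the repository's code: stream the model into a temp
# file and publish it atomically, so an interrupted download leaves no partial file.
import os
import tempfile

import requests  # assumed third-party dependency for this sketch


def download_model_safely(model_filename, model_path):
    url = f"https://gpt4all.io/models/{model_filename}"  # same URL scheme as get_download_url above
    destination = os.path.join(model_path, model_filename)

    # Create the temp file in the same directory so os.replace stays on one filesystem.
    fd, tmp_path = tempfile.mkstemp(dir=model_path, suffix=".partial")
    try:
        with os.fdopen(fd, "wb") as tmp, requests.get(url, stream=True) as resp:
            resp.raise_for_status()
            for chunk in resp.iter_content(chunk_size=1 << 20):
                tmp.write(chunk)
        os.replace(tmp_path, destination)  # only reached if the download completed
    except BaseException:
        # Interrupted (including Ctrl-C) or failed: remove the partial file.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise
    return destination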
@@ -41,4 +41,3 @@ def prompt_unloaded_llama():
 
     response = response.strip()
     assert response == "LLAMA ERROR: prompt won't work with an unloaded model!"
-