From 113d04dce323ae73f284b264178e0bfda0043f6d Mon Sep 17 00:00:00 2001
From: Richard Guo
Date: Wed, 10 May 2023 16:40:24 -0400
Subject: [PATCH] some cleanup and job-specific names for circleci

---
 .circleci/config.yml                       | 20 ++++++++++----------
 gpt4all-bindings/python/gpt4all/gpt4all.py |  1 +
 .../python/tests/test_pyllmodel.py         |  1 -
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index c32206b0..e1f3d86c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -4,7 +4,7 @@ orbs:
   python: circleci/python@1.2
 
 jobs:
-  build-linux:
+  build-py-linux:
     docker:
       - image: circleci/python:3.8
     steps:
@@ -35,7 +35,7 @@ jobs:
           paths:
             - "*.whl"
 
-  build-macos:
+  build-py-macos:
     macos:
       xcode: "14.2.0"
     resource_class: macos.m1.large.gen1
@@ -66,7 +66,7 @@ jobs:
           paths:
             - "*.whl"
 
-  build-windows:
+  build-py-windows:
     executor:
       name: win/default
     steps:
@@ -132,13 +132,13 @@ jobs:
 
 workflows:
   version: 2
-  build-deploy:
+  build-py-deploy:
     jobs:
-      - build-linux
-      - build-macos
-      - build-windows
+      - build-py-linux
+      - build-py-macos
+      - build-py-windows
       - store-and-upload-wheels:
          requires:
-            - build-windows
-            - build-linux
-            - build-macos
+            - build-py-windows
+            - build-py-linux
+            - build-py-macos
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
index a569be94..7e93a3b0 100644
--- a/gpt4all-bindings/python/gpt4all/gpt4all.py
+++ b/gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -114,6 +114,7 @@ class GPT4All():
 
     @staticmethod
     def download_model(model_filename, model_path):
+        # TODO: Find a good way of safely removing a file whose download was interrupted.
 
         def get_download_url(model_filename):
             return f"https://gpt4all.io/models/{model_filename}"
diff --git a/gpt4all-bindings/python/tests/test_pyllmodel.py b/gpt4all-bindings/python/tests/test_pyllmodel.py
index 2208c425..5535cb7b 100644
--- a/gpt4all-bindings/python/tests/test_pyllmodel.py
+++ b/gpt4all-bindings/python/tests/test_pyllmodel.py
@@ -41,4 +41,3 @@ def prompt_unloaded_llama():
 
     response = response.strip()
     assert response == "LLAMA ERROR: prompt won't work with an unloaded model!"
-    
\ No newline at end of file
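
Note: the TODO added in download_model above concerns cleanup of partially downloaded model files. One possible approach (a minimal sketch, not the bindings' actual implementation) is to stream the download into a temporary file and only rename it into place once complete, so an interrupted run never leaves a corrupt model file behind. Apart from the gpt4all.io URL scheme taken from the patch, everything here is an assumption: the helper name download_model_safely, the use of requests, and the cleanup strategy.

# Hypothetical sketch of the TODO above; not the actual gpt4all bindings
# code. Assumes the `requests` package is installed.
import os
import tempfile

import requests


def download_model_safely(model_filename, model_path):
    # URL scheme taken from get_download_url() in the patch above.
    url = f"https://gpt4all.io/models/{model_filename}"
    dest = os.path.join(model_path, model_filename)
    # Stream into a temp file in the destination directory so the final
    # os.replace() is a same-filesystem (atomic) rename.
    fd, tmp_path = tempfile.mkstemp(dir=model_path)
    try:
        with os.fdopen(fd, "wb") as tmp, requests.get(url, stream=True) as resp:
            resp.raise_for_status()
            for chunk in resp.iter_content(chunk_size=1 << 20):
                tmp.write(chunk)
        os.replace(tmp_path, dest)  # file becomes visible only when complete
    except BaseException:
        # Covers KeyboardInterrupt too: an interrupted download leaves no
        # partial file behind.
        os.remove(tmp_path)
        raise
    return dest

With this shape, the destination path is either absent or a fully written file; keeping the temp file in the same directory ensures the rename does not cross filesystems.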