diff --git a/gpt4all-backend/scripts/convert_bert_hf_to_gguf.py b/gpt4all-backend/scripts/convert_bert_hf_to_gguf.py
old mode 100644
new mode 100755
index f3fa8a2d..16d6f457
--- a/gpt4all-backend/scripts/convert_bert_hf_to_gguf.py
+++ b/gpt4all-backend/scripts/convert_bert_hf_to_gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from __future__ import annotations
 
 import json
diff --git a/gpt4all-backend/scripts/convert_mpt_hf_to_gguf.py b/gpt4all-backend/scripts/convert_mpt_hf_to_gguf.py
old mode 100644
new mode 100755
index 1b5d1367..a49ceb36
--- a/gpt4all-backend/scripts/convert_mpt_hf_to_gguf.py
+++ b/gpt4all-backend/scripts/convert_mpt_hf_to_gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Convert Hugging Face fine-tuned bloom-like models to ggml format
 #
 # Usage:
@@ -44,7 +45,7 @@ def bytes_to_unicode():
 
 
 if not 3 <= len(sys.argv) < 5:
-    print("Usage: python {} model-name dir-output [ftype]".format(Path(__file__).name))
+    print("Usage: {} model-name dir-output [ftype]".format(Path(__file__).name))
     print("  model-name: name of the model to convert. Example: 'bigscience/bloomz-560m'")
     print("  dir-output: directory where the output file will be written")
     print("  ftype == 0 -> float32")
diff --git a/gpt4all-backend/scripts/convert_replit_hf_to_gguf.py b/gpt4all-backend/scripts/convert_replit_hf_to_gguf.py
old mode 100644
new mode 100755
index e436e8ac..38b504ad
--- a/gpt4all-backend/scripts/convert_replit_hf_to_gguf.py
+++ b/gpt4all-backend/scripts/convert_replit_hf_to_gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from __future__ import annotations
 
 import json