INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
CMAKEFLAGS =
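# CMAKEFLAGS is forwarded verbatim to the backend's CMake configure step (see
# the llmodel.o target below), e.g.:
#   make CMAKEFLAGS="-DCMAKE_BUILD_TYPE=Release"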
ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif
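# The ifndef guards let these detected values be pre-set from the environment
# or overridden on the command line, e.g.:
#   make UNAME_S=Linux UNAME_M=aarch64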
CCV := $(shell $(CC) --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)
# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
	ifneq ($(UNAME_P),arm)
		SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
		ifeq ($(SYSCTL_M),1)
			# UNAME_P := arm
			# UNAME_M := arm64
			warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
		endif
	endif
endif
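# If the warning above fires, the detected values can be corrected by hand,
# mirroring the commented-out assignments:
#   make UNAME_P=arm UNAME_M=arm64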
#
# Compile flags
#
# keep standard at C11 and C++17
CFLAGS   = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c11 -fPIC
CXXFLAGS = -I. -I../../gpt4all-backend/llama.cpp -I../../gpt4all-backend -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS =
# warnings
CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
	CFLAGS   += -pthread
	CXXFLAGS += -pthread
endif
# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
# feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
	# Use all CPU extensions that are available:
	CFLAGS   += -march=native -mtune=native
	CXXFLAGS += -march=native -mtune=native
endif
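# Note: -march=native/-mtune=native tie the binary to the build machine's CPU;
# the result may not run on older hardware. For redistributable builds,
# consider overriding CFLAGS/CXXFLAGS with a fixed baseline instead.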
ifneq ($(filter ppc64%,$(UNAME_M)),)
	POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
	ifneq (,$(findstring POWER9,$(POWER9_M)))
		CFLAGS   += -mcpu=power9
		CXXFLAGS += -mcpu=power9
	endif
	# Require c++23's std::byteswap for big-endian support.
	ifeq ($(UNAME_M),ppc64)
		CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
	endif
endif
ifndef LLAMA_NO_ACCELERATE
	# Mac M1 - include Accelerate framework.
	# `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost at prediction time.
	ifeq ($(UNAME_S),Darwin)
		CFLAGS  += -DGGML_USE_ACCELERATE
		LDFLAGS += -framework Accelerate
	endif
endif
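# To opt out of Accelerate on macOS, define the guard variable:
#   make LLAMA_NO_ACCELERATE=1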
ifdef LLAMA_OPENBLAS
	CFLAGS  += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas
	LDFLAGS += -lopenblas
endif
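# Enable the OpenBLAS-backed build (assumes OpenBLAS headers live in
# /usr/local/include/openblas; adjust the include path above if yours differ):
#   make LLAMA_OPENBLAS=1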
ifdef LLAMA_GPROF
	CFLAGS   += -pg
	CXXFLAGS += -pg
endif
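# Enable gprof profiling instrumentation (adds -pg to the compile flags):
#   make LLAMA_GPROF=1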
ifneq ($(filter aarch64%,$(UNAME_M)),)
	CFLAGS   += -mcpu=native
	CXXFLAGS += -mcpu=native
endif
ifneq ($(filter armv6%,$(UNAME_M)),)
	# Raspberry Pi 1, 2, 3
	CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif
ifneq ($(filter armv7%,$(UNAME_M)),)
	# Raspberry Pi 4
	CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif
ifneq ($(filter armv8%,$(UNAME_M)),)
	# Raspberry Pi 4
	CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif
#
# Print build information
#
$(info I go-gpt4all build info:)
$(info I UNAME_S:    $(UNAME_S))
$(info I UNAME_P:    $(UNAME_P))
$(info I UNAME_M:    $(UNAME_M))
$(info I CFLAGS:     $(CFLAGS))
$(info I CXXFLAGS:   $(CXXFLAGS))
$(info I LDFLAGS:    $(LDFLAGS))
$(info I CMAKEFLAGS: $(CMAKEFLAGS))
$(info I CC:         $(CCV))
$(info I CXX:        $(CXXV))
$(info )
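# llmodel.o drives an out-of-tree CMake build of the C++ backend in buildllm/,
# then copies the resulting object files up so the archive step can find them.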
llmodel.o:
	[ -e buildllm ] || mkdir buildllm
	cd buildllm && cmake ../../../gpt4all-backend/ $(CMAKEFLAGS) && make
	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel_c.cpp.o ../llmodel_c.o
	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel.cpp.o ../llmodel.o
clean:
	rm -f *.o
	rm -f *.a
	rm -rf buildllm
	rm -rf example/main
binding.o: binding.cpp binding.h
	$(CXX) $(CXXFLAGS) binding.cpp -o binding.o -c $(LDFLAGS)
libgpt4all.a: binding.o llmodel.o
	ar src libgpt4all.a llmodel.o binding.o
test: libgpt4all.a
	@C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go test -v ./...
example/main: libgpt4all.a
	C_INCLUDE_PATH=$(INCLUDE_PATH) LIBRARY_PATH=$(LIBRARY_PATH) go build -o example/main ./example/
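# Typical entry points:
#   make libgpt4all.a    # build the static library
#   make test            # run the Go test suite against it
#   make example/main    # build the example program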