diff --git a/gpt4all-backend/llama.cpp.cmake b/gpt4all-backend/llama.cpp.cmake
index c3dbf01a..89d63e54 100644
--- a/gpt4all-backend/llama.cpp.cmake
+++ b/gpt4all-backend/llama.cpp.cmake
@@ -381,7 +381,7 @@ function(include_ggml DIRECTORY SUFFIX WITH_LLAMA)
         message(STATUS "x86 detected")
         if (MSVC)
             if (LLAMA_AVX512)
-                target_compile_definitions(ggml${SUFFIX} PRIVATE
+                target_compile_options(ggml${SUFFIX} PRIVATE
                     $<$<COMPILE_LANGUAGE:C>:/arch:AVX512>
                     $<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)
                 # MSVC has no compile-time flags enabling specific
@@ -399,11 +399,11 @@ function(include_ggml DIRECTORY SUFFIX WITH_LLAMA)
                         $<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
                 endif()
             elseif (LLAMA_AVX2)
-                target_compile_definitions(ggml${SUFFIX} PRIVATE
+                target_compile_options(ggml${SUFFIX} PRIVATE
                     $<$<COMPILE_LANGUAGE:C>:/arch:AVX2>
                     $<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>)
             elseif (LLAMA_AVX)
-                target_compile_definitions(ggml${SUFFIX} PRIVATE
+                target_compile_options(ggml${SUFFIX} PRIVATE
                     $<$<COMPILE_LANGUAGE:C>:/arch:AVX>
                     $<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
             endif()
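
For reference on why the patch swaps the command: `target_compile_definitions` turns every item into a preprocessor define (emitted with a `/D`/`-D` prefix), so MSVC's `/arch:` switches were never reaching the compiler as actual flags, whereas `target_compile_options` passes them through verbatim. Below is a minimal sketch of the corrected pattern; the project name, `ggml_demo` target, and source file are illustrative stand-ins, not the repository's `ggml${SUFFIX}` target.

```cmake
# Minimal sketch, assuming an illustrative target rather than the real one.
cmake_minimum_required(VERSION 3.13)
project(arch_flag_demo C CXX)

add_library(ggml_demo STATIC ggml.c)

if (MSVC)
    # /arch:AVX512 is a compiler switch, so it must go through
    # target_compile_options; target_compile_definitions would emit it as a
    # preprocessor define and MSVC would never see it as a flag.
    target_compile_options(ggml_demo PRIVATE
        $<$<COMPILE_LANGUAGE:C>:/arch:AVX512>
        $<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)

    # __AVX512VNNI__ really is a preprocessor macro that MSVC does not define
    # on its own, so target_compile_definitions stays correct for it.
    target_compile_definitions(ggml_demo PRIVATE
        $<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>
        $<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
endif()
```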