cmake_minimum_required(VERSION 3.16)

# Export all symbols on Windows so consumers do not need per-symbol
# __declspec(dllexport) annotations when linking the shared libraries below.
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
if(APPLE)
    option(BUILD_UNIVERSAL "Build a Universal binary on macOS" ON)
    if(BUILD_UNIVERSAL)
        # Build a Universal binary on macOS
        # This requires that the found Qt library is compiled as Universal binaries.
        set(CMAKE_OSX_ARCHITECTURES "arm64;x86_64" CACHE STRING "" FORCE)
    else()
        # Build for the host architecture on macOS
        set(CMAKE_OSX_ARCHITECTURES "${CMAKE_HOST_SYSTEM_PROCESSOR}" CACHE STRING "" FORCE)
    endif()
endif()

# Include the binary directory for the generated header file
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
# Library version; feeds the project() version and the llmodel SOVERSION below.
set(LLMODEL_VERSION_MAJOR 0)
set(LLMODEL_VERSION_MINOR 2)
set(LLMODEL_VERSION_PATCH 0)
set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}")
project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Put shared libraries next to the executables so the runtime dlopen() of the
# per-variant implementation libraries finds them without extra search paths.
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(BUILD_SHARED_LIBS ON)
# Check for IPO support; IPO_SUPPORTED is applied per-target in prepare_target.
include(CheckIPOSupported)
check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_ERROR)
if(NOT IPO_SUPPORTED)
    message(WARNING "Interprocedural optimization is not supported by your toolchain! This will lead to bigger file sizes and worse performance: ${IPO_ERROR}")
else()
    message(STATUS "Interprocedural optimization support detected")
endif()

include(llama.cpp.cmake)

# Each variant builds the full set of implementation libraries:
# "default" with AVX2/F16C/FMA enabled, "avxonly" restricted to plain AVX.
set(BUILD_VARIANTS default avxonly)

set(CMAKE_VERBOSE_MAKEFILE ON)
# Go through each build variant
foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
    # Determine flags: the avxonly variant disables the newer SIMD extensions
    # so the library still loads on CPUs without AVX2/F16C/FMA.
    if(BUILD_VARIANT STREQUAL avxonly)
        set(GPT4ALL_ALLOW_NON_AVX NO)
    else()
        set(GPT4ALL_ALLOW_NON_AVX YES)
    endif()
    set(LLAMA_AVX2 ${GPT4ALL_ALLOW_NON_AVX})
    set(LLAMA_F16C ${GPT4ALL_ALLOW_NON_AVX})
    set(LLAMA_FMA ${GPT4ALL_ALLOW_NON_AVX})

    # Include GGML: one copy of ggml/llama per pinned upstream snapshot,
    # suffixed with the snapshot date and the build variant.
    include_ggml(llama.cpp-mainline -mainline-${BUILD_VARIANT} ON)
    include_ggml(llama.cpp-230511 -230511-${BUILD_VARIANT} ON)
    include_ggml(llama.cpp-230519 -230519-${BUILD_VARIANT} ON)

    # Function for preparing individual implementations.
    # Arguments:
    #   TARGET_NAME - base name of the implementation target (variant suffix is appended)
    #   BASE_LIB    - base name of the ggml/llama library to link against
    function(prepare_target TARGET_NAME BASE_LIB)
        set(TARGET_NAME ${TARGET_NAME}-${BUILD_VARIANT})
        message(STATUS "Configuring model implementation target ${TARGET_NAME}")
        # Link to ggml/llama
        target_link_libraries(${TARGET_NAME}
            PRIVATE ${BASE_LIB}-${BUILD_VARIANT})
        # Let it know about its build variant
        target_compile_definitions(${TARGET_NAME}
            PRIVATE GGML_BUILD_VARIANT="${BUILD_VARIANT}")
        # Enable IPO if possible
        set_property(TARGET ${TARGET_NAME}
                     PROPERTY INTERPROCEDURAL_OPTIMIZATION ${IPO_SUPPORTED})
    endfunction()

    # Add each individual implementation. LLAMA_VERSIONS/LLAMA_DATE select
    # which llama model file versions each snapshot is responsible for.
    add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED
        llamamodel.cpp llmodel_shared.cpp)
    target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
        LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)
    prepare_target(llamamodel-mainline llama-mainline)

    add_library(llamamodel-230519-${BUILD_VARIANT} SHARED
        llamamodel.cpp llmodel_shared.cpp)
    target_compile_definitions(llamamodel-230519-${BUILD_VARIANT} PRIVATE
        LLAMA_VERSIONS===2 LLAMA_DATE=230519)
    prepare_target(llamamodel-230519 llama-230519)

    add_library(llamamodel-230511-${BUILD_VARIANT} SHARED
        llamamodel.cpp llmodel_shared.cpp)
    target_compile_definitions(llamamodel-230511-${BUILD_VARIANT} PRIVATE
        LLAMA_VERSIONS=<=1 LLAMA_DATE=230511)
    prepare_target(llamamodel-230511 llama-230511)

    add_library(gptj-${BUILD_VARIANT} SHARED
        gptj.cpp utils.h utils.cpp llmodel_shared.cpp)
    prepare_target(gptj ggml-230511)

    add_library(mpt-${BUILD_VARIANT} SHARED
        mpt.cpp utils.h utils.cpp llmodel_shared.cpp)
    prepare_target(mpt ggml-230511)

    add_library(replit-${BUILD_VARIANT} SHARED
        replit.cpp utils.h utils.cpp llmodel_shared.cpp)
    prepare_target(replit ggml-230511)
endforeach()
# The dispatcher library: loads the per-variant implementation libraries at
# runtime and exposes the C and C++ llmodel APIs.
add_library(llmodel
    llmodel.h llmodel.cpp llmodel_shared.cpp
    llmodel_c.h llmodel_c.cpp
    dlhandle.h
)
# Tell the loader which shared-library suffix to look for on this platform.
target_compile_definitions(llmodel PRIVATE LIB_FILE_EXT="${CMAKE_SHARED_LIBRARY_SUFFIX}")
# Version the shared library; SOVERSION tracks the major version so the
# soname only changes on ABI-breaking releases.
set_target_properties(llmodel PROPERTIES
                      VERSION ${PROJECT_VERSION}
                      SOVERSION ${PROJECT_VERSION_MAJOR})

set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)