Skip to content

Commit

Permalink
[llama-cpp] Create a new port (#120)
Browse files Browse the repository at this point in the history
* circleci: update port list

* [llama-cpp] create a new port

* [llama-cpp] update to b1213

* [llama-cpp] update to b1273

* gh-actions: test llama-cpp

* [llama-cpp] fix apple build

* [llama-cpp] correct build info
  • Loading branch information
luncliff committed Sep 29, 2023
1 parent 4702082 commit d6c866b
Show file tree
Hide file tree
Showing 8 changed files with 295 additions and 3 deletions.
2 changes: 1 addition & 1 deletion .circleci/port-setup.txt
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,4 @@ opencl:x64-windows
egl-registry:x64-windows
opengl-registry:x64-windows
vulkan-headers:x64-windows
nlohmann-json:x64-windows
nlohmann-json:x64-windows
2 changes: 1 addition & 1 deletion .circleci/port-windows.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
grpc[codegen]:x64-windows
onnxruntime[directml,xnnpack,test]:x64-windows
onnxruntime[directml,xnnpack,test]:x64-windows
55 changes: 55 additions & 0 deletions ports/llama-cpp/fix-cmake.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
diff --git a/scripts/build-info.cmake b/scripts/build-info.cmake
index c86ab43..0571c20 100644
--- a/scripts/build-info.cmake
+++ b/scripts/build-info.cmake
@@ -1,9 +1,5 @@
set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.h.in")
set(HEADER_FILE "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h")
-set(BUILD_NUMBER 0)
-set(BUILD_COMMIT "unknown")
-set(BUILD_COMPILER "unknown")
-set(BUILD_TARGET "unknown")

# Look for git
find_package(Git)
@@ -18,22 +14,7 @@ if(NOT Git_FOUND)
endif()

# Get the commit count and hash
-if(Git_FOUND)
- execute_process(
- COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- OUTPUT_VARIABLE HEAD
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- execute_process(
- COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- OUTPUT_VARIABLE COUNT
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- set(BUILD_COMMIT ${HEAD})
- set(BUILD_NUMBER ${COUNT})
-endif()
+# see portfile.cmake

if(MSVC)
set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
@@ -72,3 +53,16 @@ if(EXISTS ${HEADER_FILE})
else()
configure_file(${TEMPLATE_FILE} ${HEADER_FILE})
endif()
+
+if(NOT DEFINED BUILD_NUMBER)
+ set(BUILD_NUMBER 0)
+endif()
+if(NOT DEFINED BUILD_COMMIT)
+ set(BUILD_COMMIT "unknown")
+endif()
+if(NOT DEFINED BUILD_COMPILER)
+ set(BUILD_COMPILER "unknown")
+endif()
+if(NOT DEFINED BUILD_TARGET)
+ set(BUILD_TARGET "unknown")
+endif()
67 changes: 67 additions & 0 deletions ports/llama-cpp/fix-openblas.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 11ebe9e..8516e59 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -137,8 +137,8 @@ if (LLAMA_OPENBLAS)
$ENV{OpenBLAS_HOME}
$ENV{OpenBLAS_HOME}/include
)
- find_path(OPENBLAS_INC NAMES cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS})
- add_compile_options(-I${OPENBLAS_INC})
+ find_path(OPENBLAS_INC NAMES cblas.h openblas/cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS})
+ include_directories(${OPENBLAS_INC})
else()
message(WARNING "OpenBLAS not found")
endif()
diff --git a/ggml.c b/ggml.c
index 8505889..6b5344c 100644
--- a/ggml.c
+++ b/ggml.c
@@ -146,7 +146,7 @@ inline static void* ggml_aligned_malloc(size_t size) {
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#elif defined(GGML_USE_OPENBLAS)
-#include <cblas.h>
+#include <openblas/cblas.h>
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#endif
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8516e59..cb607b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -139,6 +139,8 @@ if (LLAMA_OPENBLAS)
)
find_path(OPENBLAS_INC NAMES cblas.h openblas/cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS})
include_directories(${OPENBLAS_INC})
+ find_library(OPENBLAS_LIB NAMES openblas REQUIRED)
+ link_libraries(${OPENBLAS_LIB})
else()
message(WARNING "OpenBLAS not found")
endif()
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cb607b2..f4fd773 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -123,7 +123,7 @@ if (LLAMA_OPENBLAS)

add_compile_definitions(GGML_USE_OPENBLAS)
add_link_options(${BLAS_LIBRARIES})
- set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} openblas)
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} BLAS::BLAS)

# find header file
set(OPENBLAS_INCLUDE_SEARCH_PATHS
@@ -340,7 +340,11 @@ if (GGML_CUDA_SOURCES)
set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
endif()

-
+install(TARGETS ggml llama
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+)
#
# programs, examples and tests
#
105 changes: 105 additions & 0 deletions ports/llama-cpp/portfile.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
# vcpkg portfile for llama-cpp (ggerganov/llama.cpp, release tag b1273).
# Builds the ggml/llama libraries with a platform-appropriate BLAS backend,
# installs the CLI tools, and relocates the Python conversion scripts.

# llama.cpp's Windows build here only supports dynamic library linkage.
if(VCPKG_TARGET_IS_WINDOWS)
    vcpkg_check_linkage(ONLY_DYNAMIC_LIBRARY)
endif()

vcpkg_from_github(
    OUT_SOURCE_PATH SOURCE_PATH
    REPO ggerganov/llama.cpp
    REF b1273 # commit 99115f3fa654b593099c6719ad30e3f54ce231e1
    SHA512 2b3e8fd9673647f59a4fa96621afe2f77ab10a2bee88a96b662b493beb2b66f17c854c1077f01f8ea8998d0296f92225d3033aae0adc756810f80caf45b9a456
    HEAD_REF master
    PATCHES
        # fix-openblas.patch
        fix-cmake.patch # defer BUILD_COMMIT/BUILD_NUMBER defaults so this portfile can inject them
)

vcpkg_find_acquire_program(PKGCONFIG)
message(STATUS "Using pkgconfig: ${PKGCONFIG}")

# Select the BLAS vendor for FindBLAS per target platform.
# check https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors
if(VCPKG_TARGET_IS_WINDOWS)
    list(APPEND BLAS_OPTIONS -DLLAMA_BLAS_VENDOR=OpenBLAS)
elseif(VCPKG_TARGET_IS_OSX OR VCPKG_TARGET_IS_IOS)
    list(APPEND BLAS_OPTIONS -DLLAMA_BLAS_VENDOR=Apple)
else()
    # todo: Intel MKL, ARM, ACML, etc...
    list(APPEND BLAS_OPTIONS -DLLAMA_BLAS_VENDOR=Generic)
endif()

# NOTE(review): AVX-512 is enabled unconditionally for every x64 target; the
# resulting binaries will fault on CPUs without AVX-512 — confirm this matches
# the intended deployment hardware.
if(VCPKG_TARGET_ARCHITECTURE STREQUAL "x64")
    list(APPEND ARCH_OPTIONS
        -DLLAMA_AVX512=ON -DLLAMA_AVX512_VBMI=ON -DLLAMA_AVX512_VNNI=ON
        # -DLLAMA_AVX2=ON
        # -DLLAMA_AVX=ON
    )
endif()

vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
    FEATURES
        cublas  LLAMA_CUBLAS
        cublas  LLAMA_CUDA_F16
        clblast LLAMA_CLBLAST
        mpi     LLAMA_MPI
        test    LLAMA_BUILD_TESTS
)

# LLAMA_STATIC mirrors the CRT linkage requested by the triplet.
string(COMPARE EQUAL "${VCPKG_CRT_LINKAGE}" "static" USE_STATIC)

# Accelerate framework and Metal are only meaningful on Apple platforms.
if(VCPKG_TARGET_IS_OSX OR VCPKG_TARGET_IS_IOS)
    set(TARGET_IS_APPLE ON)
else()
    set(TARGET_IS_APPLE OFF)
endif()

# BUILD_COMMIT/BUILD_NUMBER are fed in here because fix-cmake.patch removed the
# configure-time `git` probing (vcpkg sources are not a git checkout).
# The whole -D option is quoted; embedding quotes inside the argument
# (e.g. -DBUILD_NUMBER:STRING="1273") would make the literal quote characters
# part of the cached value.
vcpkg_cmake_configure(
    SOURCE_PATH "${SOURCE_PATH}"
    OPTIONS
        ${ARCH_OPTIONS}
        ${FEATURE_OPTIONS}
        -DLLAMA_ACCELERATE=${TARGET_IS_APPLE}
        -DLLAMA_METAL=${TARGET_IS_APPLE}
        -DLLAMA_STATIC=${USE_STATIC}
        -DLLAMA_BLAS=ON
        ${BLAS_OPTIONS}
        "-DPKG_CONFIG_EXECUTABLE:FILEPATH=${PKGCONFIG}"
        "-DBUILD_COMMIT:STRING=99115f3fa654b593099c6719ad30e3f54ce231e1"
        "-DBUILD_NUMBER:STRING=1273"
    OPTIONS_RELEASE
        -DLLAMA_METAL_NDEBUG=ON
)
vcpkg_cmake_build(TARGET "llama" LOGFILE_BASE build-llama)
vcpkg_cmake_install()
vcpkg_cmake_config_fixup(CONFIG_PATH "lib/cmake/Llama" PACKAGE_NAME "Llama")
vcpkg_copy_pdbs()

# Upstream installs many example/tool executables; keep them as port tools.
vcpkg_copy_tools(TOOL_NAMES
    baby-llama beam-search benchmark convert-llama2c-to-ggml embd-input-test embedding llama-bench
    main perplexity quantize-stats quantize save-load-state server simple speculative train-text-from-scratch
    AUTO_CLEAN
)
if("test" IN_LIST FEATURES)
    vcpkg_copy_tools(TOOL_NAMES
        test-grad0 test-grammar-parser test-llama-grammar test-quantize-fns test-quantize-perf
        test-sampling test-tokenizer-0-falcon test-tokenizer-0-llama test-tokenizer-1-llama
        AUTO_CLEAN
    )
endif()

# Upstream's install rules (added by fix-cmake.patch) do not install the header.
file(INSTALL "${SOURCE_PATH}/llama.h" DESTINATION "${CURRENT_PACKAGES_DIR}/include")

# Relocate the model-conversion helper scripts out of bin/ into tools/.
file(INSTALL "${CURRENT_PACKAGES_DIR}/bin/convert.py"
             "${CURRENT_PACKAGES_DIR}/bin/convert-lora-to-ggml.py"
     DESTINATION "${CURRENT_PACKAGES_DIR}/tools/${PORT}"
)

file(REMOVE_RECURSE
    "${CURRENT_PACKAGES_DIR}/debug/include"
)
file(REMOVE
    "${CURRENT_PACKAGES_DIR}/bin/convert.py"
    "${CURRENT_PACKAGES_DIR}/debug/bin/convert.py"
    "${CURRENT_PACKAGES_DIR}/bin/convert-lora-to-ggml.py"
    "${CURRENT_PACKAGES_DIR}/debug/bin/convert-lora-to-ggml.py"
)

vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/LICENSE")
51 changes: 51 additions & 0 deletions ports/llama-cpp/vcpkg.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
{
"name": "llama-cpp",
"version-string": "b1273",
"description": "Port of Facebook's LLaMA model in C/C++",
"homepage": "https://github.com/ggerganov/llama.cpp",
"supports": "windows | linux | osx",
"dependencies": [
{
"name": "openblas",
"platform": "windows"
},
{
"name": "vcpkg-cmake",
"host": true
},
{
"name": "vcpkg-cmake-config",
"host": true
}
],
"features": {
"clblast": {
"description": "Use CLBlast",
"dependencies": [
"clblast"
]
},
"cublas": {
"description": "Use CUDA",
"dependencies": [
"cudnn"
]
},
"mpi": {
"description": "Use MPI",
"dependencies": [
{
"name": "msmpi",
"platform": "windows"
},
{
"name": "openmpi",
"platform": "!windows"
}
]
},
"test": {
"description": "Build tests"
}
}
}
9 changes: 8 additions & 1 deletion ports/zlib-ng/portfile.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,13 @@ vcpkg_from_github(
SHA512 1c19a62bb00727ac49049c299fb70060da95b5fafa448144ae4133372ec8c3da15cef6c1303485290f269b23c580696554ca0383dba3e1f9609f65c332981988
HEAD_REF master
)
if(VCPKG_TARGET_ARCHITECTURE STREQUAL "x64")
# list(APPEND ARCH_OPTIONS)
elseif(VCPKG_TARGET_ARCHITECTURE STREQUAL "x86")
# list(APPEND ARCH_OPTIONS)
elseif(VCPKG_TARGET_ARCHITECTURE STREQUAL "arm64")
# list(APPEND ARCH_OPTIONS)
endif()

vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
FEATURES
Expand All @@ -14,7 +21,7 @@ vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
vcpkg_cmake_configure(
SOURCE_PATH "${SOURCE_PATH}"
OPTIONS
${FEATURE_OPTIONS}
${ARCH_OPTIONS} ${FEATURE_OPTIONS}
-DZLIB_ENABLE_TESTS=OFF
-DWITH_NEW_STRATEGIES=ON
-DWITH_NATIVE_INSTRUCTIONS=OFF # `-march=native` breaks `check_c_source_compiles`
Expand Down
7 changes: 7 additions & 0 deletions test/vcpkg.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,13 @@
"name": "apple-nio-ssl",
"platform": "osx | ios"
},
{
"name": "llama-cpp",
"features": [
"test"
],
"platform": "windows | osx"
},
"openssl3",
{
"name": "vcpkg-cmake",
Expand Down

0 comments on commit d6c866b

Please sign in to comment.