@@ -28,6 +28,7 @@ if (LLAMA_BUILD)
     # which point to directories outside the build tree to the install RPATH
     set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
     set(CMAKE_INSTALL_RPATH "$ORIGIN")
+    set(CMAKE_SKIP_RPATH FALSE)
 
     # Building llama
     if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
@@ -52,6 +53,10 @@ if (LLAMA_BUILD)
         FRAMEWORK DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
         RESOURCE DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
     )
+    set_target_properties(llama PROPERTIES
+        INSTALL_RPATH "$ORIGIN"
+        BUILD_WITH_INSTALL_RPATH TRUE
+    )
     install(
         TARGETS ggml
         LIBRARY DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
@@ -60,6 +65,10 @@ if (LLAMA_BUILD)
         FRAMEWORK DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
         RESOURCE DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
     )
+    set_target_properties(ggml PROPERTIES
+        INSTALL_RPATH "$ORIGIN"
+        BUILD_WITH_INSTALL_RPATH TRUE
+    )
     # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
     if (WIN32 AND (LLAMA_CUDA OR LLAMA_CUBLAS))
         install(
@@ -93,5 +102,9 @@ if (LLAMA_BUILD)
             FRAMEWORK DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
             RESOURCE DESTINATION ${LLAMA_CPP_PYTHON_INSTALL_DIR}
         )
+        set_target_properties(llava PROPERTIES
+            INSTALL_RPATH "$ORIGIN"
+            BUILD_WITH_INSTALL_RPATH TRUE
+        )
     endif()
 endif()