diff --git a/examples/models/llama/CMakeLists.txt b/examples/models/llama/CMakeLists.txt
index e6d45424bd4..12385f32d20 100644
--- a/examples/models/llama/CMakeLists.txt
+++ b/examples/models/llama/CMakeLists.txt
@@ -111,7 +111,8 @@ target_link_options_shared_lib(quantized_ops_lib)
 list(APPEND link_libraries quantized_kernels quantized_ops_lib)
 
 if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
-  list(APPEND link_libraries $<LINK_LIBRARY:WHOLE_ARCHIVE,custom_ops>)
+  target_link_options_shared_lib(custom_ops)
+  list(APPEND link_libraries custom_ops)
 endif()
 
 if(EXECUTORCH_BUILD_TORCHAO)
diff --git a/examples/models/llama/README.md b/examples/models/llama/README.md
index 39e4c79873b..f9d3921ba05 100644
--- a/examples/models/llama/README.md
+++ b/examples/models/llama/README.md
@@ -427,7 +427,7 @@ cmake -DPYTHON_EXECUTABLE=python \
     -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
     -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
     -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
+    -DEXECUTORCH_BUILD_XNNPACK=OFF \
     -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
     -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
     -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \