Skip to content

Commit a3da7c1

Browse files
authored
Fix Reorg Issues (#657)
Signed-off-by: letonghan <[email protected]>
1 parent bea9bb0 commit a3da7c1

File tree

5 files changed

+13
-4
lines changed

5 files changed

+13
-4
lines changed

comps/intent_detection/langchain/intent_detection.py

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@
 
 from langchain import LLMChain, PromptTemplate
 from langchain_community.llms import HuggingFaceEndpoint
-from template import IntentTemplate
 
 from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice
 

comps/llms/text-generation/vllm/langchain/dependency/build_docker_vllm.sh

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ fi
 
 # Build the docker image for vLLM based on the hardware mode
 if [ "$hw_mode" = "hpu" ]; then
-    docker build -f docker/Dockerfile.Intel_HPU -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
+    docker build -f docker/Dockerfile.intel_hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 else
     git clone https://github.com/vllm-project/vllm.git
     cd ./vllm/

comps/lvms/tgi-llava/template.py

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+class ChatTemplate:
+
+    @staticmethod
+    def generate_multimodal_rag_on_videos_prompt(question: str, context: str):
+        template = """The transcript associated with the image is '{context}'. {question}"""
+        return template.format(context=context, question=question)

tests/embeddings/test_embeddings_tei_langchain.sh

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ function build_docker_images() {
 }
 
 function start_service() {
-    tei_endpoint=5001=
+    tei_endpoint=5001
     model="BAAI/bge-base-en-v1.5"
     unset http_proxy
     docker run -d --name="test-comps-embedding-tei-endpoint" -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model

tests/intent_detection/test_intent_detection_langchain.sh

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ function start_service() {
 
 function validate_microservice() {
     intent_port=5043
-    result=$(http_proxy="" curl http://${ip_address}:${intent_port}/v1/chat/intent\
+    result=$(http_proxy="" curl http://localhost:${intent_port}/v1/chat/intent\
     -X POST \
     -d '{"query":"What is Deep Learning?","max_new_tokens":10,"top_k":1,"temperature":0.001,"streaming":false}' \
     -H 'Content-Type: application/json')

0 commit comments

Comments (0)