@@ -11,14 +11,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
     cd $WORKPATH
     echo $(pwd)
-    docker build --no-cache -t opea/video-llama-lvm-server:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/dependency/Dockerfile .
+    docker build --no-cache -t opea/video-llama-lvm-server:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/dependency/Dockerfile .
     if [ $? -ne 0 ]; then
         echo "opea/video-llama-lvm-server built fail"
         exit 1
     else
         echo "opea/video-llama-lvm-server built successful"
     fi
-    docker build --no-cache -t opea/lvm-video-llama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/Dockerfile .
+    docker build --no-cache -t opea/lvm-video-llama:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/Dockerfile .
     if [ $? -ne 0 ]; then
         echo "opea/lvm-video-llama built fail"
         exit 1
@@ -41,23 +41,22 @@ function start_service() {
         -e https_proxy=$https_proxy \
         -e no_proxy=$no_proxy \
         -e llm_download="True" \
-        -v "/home/$USER/.cache:/home/user/.cache" \
-        -v video-llama-model:/home/user/model \
-        opea/video-llama-lvm-server:latest
+        opea/video-llama-lvm-server:comps

     docker run -d --name="test-comps-lvm-video-llama" -p $server_port:9000 \
         --ipc=host \
         -e http_proxy=$http_proxy \
         -e https_proxy=$https_proxy \
         -e no_proxy=$no_proxy \
         -e LVM_ENDPOINT=$LVM_ENDPOINT \
-        opea/lvm-video-llama:latest
+        opea/lvm-video-llama:comps

     echo "Waiting for the LVM service to start"
+
     # check whether lvm dependency is fully ready
     n=0
     until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
-        docker logs test-comps-lvm-video-llama-dependency >> ${LOG_PATH}/lvm-video-llama-dependency.log
+        docker logs test-comps-lvm-video-llama-dependency &> ${LOG_PATH}/lvm-video-llama-dependency.log
         n=$((n+1))
         if grep -q "Uvicorn running on" ${LOG_PATH}/lvm-video-llama-dependency.log; then
             break
@@ -69,7 +68,7 @@ function start_service() {
     # check whether lvm service is fully ready
     n=0
     until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
-        docker logs test-comps-lvm-video-llama >> ${LOG_PATH}/lvm-video-llama.log
+        docker logs test-comps-lvm-video-llama &> ${LOG_PATH}/lvm-video-llama.log
         n=$((n+1))
         if grep -q "Uvicorn running on" ${LOG_PATH}/lvm-video-llama.log; then
             break
@@ -88,8 +87,8 @@ function validate_microservice() {
         echo "Result correct."
     else
         echo "Result wrong."
-        docker logs test-comps-lvm-video-llama-dependency >> ${LOG_PATH}/video-llama-dependency.log
-        docker logs test-comps-lvm-video-llama >> ${LOG_PATH}/video-llama.log
+        docker logs test-comps-lvm-video-llama-dependency &> ${LOG_PATH}/lvm-video-llama-dependency.log
+        docker logs test-comps-lvm-video-llama &> ${LOG_PATH}/lvm-video-llama.log
         exit 1
     fi
 }
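
Note on the ">>" to "&>" change in the readiness loops: docker logs replays a container's entire log on every invocation, and uvicorn's default logging writes the "Uvicorn running on" banner to stderr, so the old ">>" form appended stdout only (the grep could never match) and re-appended the full log on every poll. The new "&>" form truncates the file and captures both streams each iteration. A minimal sketch of the difference, using a hypothetical container name and log path not taken from this patch:

    docker logs demo >> poll.log   # appends stdout only; stderr is not captured,
                                   # and each poll re-appends the whole log
    docker logs demo &> poll.log   # truncates poll.log with both stdout and stderr,
                                   # so grep checks exactly the current log contents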