4 changes: 2 additions & 2 deletions chart/templates/backends/llamacpp.yaml
@@ -10,8 +10,8 @@ metadata:
 spec:
   command:
   - ./llama-server
-  image: ghcr.io/ggerganov/llama.cpp
-  version: server
+  image: {{ .Values.backendRuntime.llamacpp.image.repository }}
+  version: {{ .Values.backendRuntime.llamacpp.image.tag }}
   # Do not edit the preset argument name unless you know what you're doing.
   # Free to add more arguments with your requirements.
   recommendedConfigs:
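With the image now read from values, the llama.cpp backend can be pinned at install time instead of by editing the template. A minimal sketch of such an override (the release name llmaz and the server-cuda tag are illustrative assumptions, not part of this PR):

    helm install llmaz ./chart \
      --set backendRuntime.llamacpp.image.repository=ghcr.io/ggerganov/llama.cpp \
      --set backendRuntime.llamacpp.image.tag=server-cuda   # hypothetical CUDA build tag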
4 changes: 2 additions & 2 deletions chart/templates/backends/ollama.yaml
@@ -11,8 +11,8 @@ spec:
   command:
   - sh
   - -c
-  image: ollama/ollama
-  version: latest
+  image: {{ .Values.backendRuntime.ollama.image.repository }}
+  version: {{ .Values.backendRuntime.ollama.image.tag }}
   envs:
   - name: OLLAMA_HOST
     value: 0.0.0.0:8080
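The same pattern repeats for each backend below. To verify that the defaults still render identically after templating, the chart can be rendered locally; a quick check, assuming the repository layout shown here and that values.global.yaml carries the defaults:

    helm template llmaz ./chart -f ./chart/values.global.yaml \
      --show-only templates/backends/ollama.yaml
    # expected in the output with default values:
    #   image: ollama/ollama
    #   version: latest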
4 changes: 2 additions & 2 deletions chart/templates/backends/sglang.yaml
@@ -12,8 +12,8 @@ spec:
   - python3
   - -m
   - sglang.launch_server
-  image: lmsysorg/sglang
-  version: v0.2.10-cu121
+  image: {{ .Values.backendRuntime.sglang.image.repository }}
+  version: {{ .Values.backendRuntime.sglang.image.tag }}
   # Do not edit the preset argument name unless you know what you're doing.
   # Free to add more arguments with your requirements.
   recommendedConfigs:
4 changes: 2 additions & 2 deletions chart/templates/backends/tgi.yaml
@@ -8,8 +8,8 @@ metadata:
     app.kubernetes.io/created-by: llmaz
   name: tgi
 spec:
-  image: ghcr.io/huggingface/text-generation-inference
-  version: 2.3.1
+  image: {{ .Values.backendRuntime.tgi.image.repository }}
+  version: {{ .Values.backendRuntime.tgi.image.tag }}
   # Do not edit the preset argument name unless you know what you're doing.
   # Free to add more arguments with your requirements.
   recommendedConfigs:
4 changes: 2 additions & 2 deletions chart/templates/backends/vllm.yaml
@@ -12,8 +12,8 @@ spec:
   - python3
   - -m
   - vllm.entrypoints.openai.api_server
-  image: vllm/vllm-openai
-  version: v0.7.3
+  image: {{ .Values.backendRuntime.vllm.image.repository }}
+  version: {{ .Values.backendRuntime.vllm.image.tag }}
   lifecycle:
     preStop:
       exec:
2 changes: 1 addition & 1 deletion chart/templates/lws/leaderworkerset.yaml
@@ -16681,7 +16681,7 @@ spec:
         - --zap-log-level=2
         command:
         - /manager
-        image: registry.k8s.io/lws/lws:v0.5.0
+        image: {{ .Values.backendRuntime.image.repository }}:{{ .Values.backendRuntime.image.tag }}
         livenessProbe:
           httpGet:
             path: /healthz

Member: I see that this part of the helm chart has been rolled back. Is this an omission, or do we not want users to freely configure the lws image during installation by default? 🤔 @kerthcet

Member: First of all, this part is not right, because we should use the lws config. Secondly, updating the image is not enough; we may need to upgrade the CRDs as well. So if we want to upgrade lws, let's follow the lws upgrade steps.

Member: Thanks for the reply! That makes a lot of sense.
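For readers following the thread: lws ships its CRDs alongside the controller, so bumping only the image tag in this chart would not perform a full upgrade. A hedged sketch of the manifest-based route the reviewer points to (the release URL follows lws's published pattern; verify against the lws release notes before use):

    # Re-apply the full lws release manifests (CRDs + controller)
    # instead of only changing the controller image tag.
    kubectl apply --server-side -f \
      https://github.com/kubernetes-sigs/lws/releases/download/v0.5.0/manifests.yaml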
23 changes: 23 additions & 0 deletions chart/values.global.yaml
@@ -2,6 +2,29 @@ fullnameOverride: "llmaz"
 
 backendRuntime:
   install: true
+  llamacpp:
+    image:
+      repository: ghcr.io/ggerganov/llama.cpp
+      tag: server
+  ollama:
+    image:
+      repository: ollama/ollama
+      tag: latest
+  sglang:
+    image:
+      repository: lmsysorg/sglang
+      tag: v0.2.10-cu121
+  tgi:
+    image:
+      repository: ghcr.io/huggingface/text-generation-inference
+      tag: 2.3.1
+  vllm:
+    image:
+      repository: vllm/vllm-openai
+      tag: v0.7.3
 
 leaderWorkerSet:
   install: true
+  image:
+    repository: registry.k8s.io/lws/lws
+    tag: v0.5.0
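Taken together, these defaults can now be overridden from a single user-supplied values file rather than by patching templates. A minimal sketch (the file name and newer tags are illustrative assumptions, not from this PR):

    # my-values.yaml -- override only what differs from the defaults
    backendRuntime:
      vllm:
        image:
          tag: v0.8.0   # hypothetical newer vllm tag
      tgi:
        image:
          repository: registry.example.com/text-generation-inference

Applied with something like helm install llmaz ./chart -f my-values.yaml; whether values.global.yaml is picked up automatically depends on how the chart is packaged, so it may need to be passed with an additional -f flag.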