Commit 8de53da

smaller and non-gated models for docs (#5378)

1 parent fac17ac commit 8de53da

File tree

6 files changed: +51 -44 lines

docs/backend/native_api.ipynb

Lines changed: 21 additions & 14 deletions

@@ -49,7 +49,7 @@
 "\n",
 "\n",
 "server_process, port = launch_server_cmd(\n",
-" \"python -m sglang.launch_server --model-path meta-llama/Llama-3.2-1B-Instruct --host 0.0.0.0\"\n",
+" \"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0\"\n",
 ")\n",
 "\n",
 "wait_for_server(f\"http://localhost:{port}\")"
@@ -105,9 +105,9 @@
 "response = requests.get(url)\n",
 "response_json = response.json()\n",
 "print_highlight(response_json)\n",
-"assert response_json[\"model_path\"] == \"meta-llama/Llama-3.2-1B-Instruct\"\n",
+"assert response_json[\"model_path\"] == \"qwen/qwen2.5-0.5b-instruct\"\n",
 "assert response_json[\"is_generation\"] is True\n",
-"assert response_json[\"tokenizer_path\"] == \"meta-llama/Llama-3.2-1B-Instruct\"\n",
+"assert response_json[\"tokenizer_path\"] == \"qwen/qwen2.5-0.5b-instruct\"\n",
 "assert response_json.keys() == {\"model_path\", \"is_generation\", \"tokenizer_path\"}"
 ]
 },
@@ -213,7 +213,7 @@
 "# successful update with same architecture and size\n",
 "\n",
 "url = f\"http://localhost:{port}/update_weights_from_disk\"\n",
-"data = {\"model_path\": \"meta-llama/Llama-3.2-1B\"}\n",
+"data = {\"model_path\": \"qwen/qwen2.5-0.5b-instruct\"}\n",
 "\n",
 "response = requests.post(url, json=data)\n",
 "print_highlight(response.text)\n",
@@ -230,19 +230,28 @@
 "# failed update with different parameter size or wrong name\n",
 "\n",
 "url = f\"http://localhost:{port}/update_weights_from_disk\"\n",
-"data = {\"model_path\": \"meta-llama/Llama-3.2-1B-wrong\"}\n",
+"data = {\"model_path\": \"qwen/qwen2.5-0.5b-instruct-wrong\"}\n",
 "\n",
 "response = requests.post(url, json=data)\n",
 "response_json = response.json()\n",
 "print_highlight(response_json)\n",
 "assert response_json[\"success\"] is False\n",
 "assert response_json[\"message\"] == (\n",
 " \"Failed to get weights iterator: \"\n",
-" \"meta-llama/Llama-3.2-1B-wrong\"\n",
+" \"qwen/qwen2.5-0.5b-instruct-wrong\"\n",
 " \" (repository not found).\"\n",
 ")"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"terminate_process(server_process)"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -259,11 +268,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"terminate_process(server_process)\n",
-"\n",
 "embedding_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct \\\n",
+"python3 -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct \\\n",
 " --host 0.0.0.0 --is-embedding\n",
 "\"\"\"\n",
 ")\n",
@@ -280,7 +287,7 @@
 "# successful encode for embedding model\n",
 "\n",
 "url = f\"http://localhost:{port}/encode\"\n",
-"data = {\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"text\": \"Once upon a time\"}\n",
+"data = {\"model\": \"Alibaba-NLP/gte-Qwen2-1.5B-instruct\", \"text\": \"Once upon a time\"}\n",
 "\n",
 "response = requests.post(url, json=data)\n",
 "response_json = response.json()\n",
@@ -318,7 +325,7 @@
 "\n",
 "reward_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python -m sglang.launch_server --model-path Skywork/Skywork-Reward-Llama-3.1-8B-v0.2 --host 0.0.0.0 --is-embedding\n",
+"python3 -m sglang.launch_server --model-path Skywork/Skywork-Reward-Llama-3.1-8B-v0.2 --host 0.0.0.0 --is-embedding\n",
 "\"\"\"\n",
 ")\n",
 "\n",
@@ -383,7 +390,7 @@
 "outputs": [],
 "source": [
 "expert_record_server_process, port = launch_server_cmd(\n",
-" \"python -m sglang.launch_server --model-path Qwen/Qwen1.5-MoE-A2.7B --host 0.0.0.0\"\n",
+" \"python3 -m sglang.launch_server --model-path Qwen/Qwen1.5-MoE-A2.7B --host 0.0.0.0\"\n",
 ")\n",
 "\n",
 "wait_for_server(f\"http://localhost:{port}\")"
@@ -449,7 +456,7 @@
 "source": [
 "tokenizer_free_server_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python3 -m sglang.launch_server --model-path meta-llama/Llama-3.2-1B-Instruct --skip-tokenizer-init\n",
+"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --skip-tokenizer-init\n",
 "\"\"\"\n",
 ")\n",
 "\n",
@@ -464,7 +471,7 @@
 "source": [
 "from transformers import AutoTokenizer\n",
 "\n",
-"tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.2-1B-Instruct\")\n",
+"tokenizer = AutoTokenizer.from_pretrained(\"qwen/qwen2.5-0.5b-instruct\")\n",
 "\n",
 "input_text = \"What is the capital of France?\"\n",
 "\n",

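Pulled out of the notebook JSON for readability, this is the flow the updated cells now exercise: a minimal sketch, assuming the helpers live in sglang.utils as elsewhere in these docs, and that the model-info endpoint is /get_model_info (it sits just above the hunk shown).

import requests

from sglang.utils import launch_server_cmd, terminate_process, wait_for_server

server_process, port = launch_server_cmd(
    "python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")

# The notebook now asserts against the Qwen path instead of the gated Llama one.
info = requests.get(f"http://localhost:{port}/get_model_info").json()
assert info["model_path"] == "qwen/qwen2.5-0.5b-instruct"

# Reloading weights from disk works for a same-architecture checkpoint;
# a wrong repo name makes the endpoint report success=False instead.
update = requests.post(
    f"http://localhost:{port}/update_weights_from_disk",
    json={"model_path": "qwen/qwen2.5-0.5b-instruct"},
).json()

terminate_process(server_process)
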
docs/backend/offline_engine_api.ipynb

Lines changed: 1 addition & 1 deletion

@@ -83,7 +83,7 @@
 " nest_asyncio.apply()\n",
 "\n",
 "\n",
-"llm = sgl.Engine(model_path=\"meta-llama/Meta-Llama-3.1-8B-Instruct\")"
+"llm = sgl.Engine(model_path=\"qwen/qwen2.5-0.5b-instruct\")"
 ]
 },
 {

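For context, the surrounding cell constructs the engine for offline generation. A sketch of that flow with the new model, assuming Engine.generate accepts a prompt list plus a sampling-params dict and returns dicts with a "text" field, as the offline-engine docs describe:

import sglang as sgl

llm = sgl.Engine(model_path="qwen/qwen2.5-0.5b-instruct")

prompts = ["The capital of France is"]
sampling_params = {"temperature": 0.8, "max_new_tokens": 32}

# Each output is a dict carrying the generated text for its prompt.
outputs = llm.generate(prompts, sampling_params)
for prompt, output in zip(prompts, outputs):
    print(prompt, "->", output["text"])

llm.shutdown()  # release GPU memory when finished
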
docs/backend/openai_api_completions.ipynb

Lines changed: 10 additions & 10 deletions

@@ -44,7 +44,7 @@
 "\n",
 "\n",
 "server_process, port = launch_server_cmd(\n",
-" \"python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct --host 0.0.0.0\"\n",
+" \"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0 --mem-fraction-static 0.8\"\n",
 ")\n",
 "\n",
 "wait_for_server(f\"http://localhost:{port}\")\n",
@@ -75,7 +75,7 @@
 "client = openai.Client(base_url=f\"http://127.0.0.1:{port}/v1\", api_key=\"None\")\n",
 "\n",
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " messages=[\n",
 " {\"role\": \"user\", \"content\": \"List 3 countries and their capitals.\"},\n",
 " ],\n",
@@ -104,7 +104,7 @@
 "outputs": [],
 "source": [
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " messages=[\n",
 " {\n",
 " \"role\": \"system\",\n",
@@ -143,7 +143,7 @@
 "outputs": [],
 "source": [
 "stream = client.chat.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " messages=[{\"role\": \"user\", \"content\": \"Say this is a test\"}],\n",
 " stream=True,\n",
 ")\n",
@@ -169,7 +169,7 @@
 "outputs": [],
 "source": [
 "response = client.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " prompt=\"List 3 countries and their capitals.\",\n",
 " temperature=0,\n",
 " max_tokens=64,\n",
@@ -198,7 +198,7 @@
 "outputs": [],
 "source": [
 "response = client.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " prompt=\"Write a short story about a space explorer.\",\n",
 " temperature=0.7, # Moderate temperature for creative writing\n",
 " max_tokens=150, # Longer response for a story\n",
@@ -257,7 +257,7 @@
 " \"method\": \"POST\",\n",
 " \"url\": \"/chat/completions\",\n",
 " \"body\": {\n",
-" \"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" \"model\": \"qwen/qwen2.5-0.5b-instruct\",\n",
 " \"messages\": [\n",
 " {\"role\": \"user\", \"content\": \"Tell me a joke about programming\"}\n",
 " ],\n",
@@ -269,7 +269,7 @@
 " \"method\": \"POST\",\n",
 " \"url\": \"/chat/completions\",\n",
 " \"body\": {\n",
-" \"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" \"model\": \"qwen/qwen2.5-0.5b-instruct\",\n",
 " \"messages\": [{\"role\": \"user\", \"content\": \"What is Python?\"}],\n",
 " \"max_tokens\": 50,\n",
 " },\n",
@@ -362,7 +362,7 @@
 " \"method\": \"POST\",\n",
 " \"url\": \"/chat/completions\",\n",
 " \"body\": {\n",
-" \"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" \"model\": \"qwen/qwen2.5-0.5b-instruct\",\n",
 " \"messages\": [\n",
 " {\n",
 " \"role\": \"system\",\n",
@@ -439,7 +439,7 @@
 " \"method\": \"POST\",\n",
 " \"url\": \"/chat/completions\",\n",
 " \"body\": {\n",
-" \"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" \"model\": \"qwen/qwen2.5-0.5b-instruct\",\n",
 " \"messages\": [\n",
 " {\n",
 " \"role\": \"system\",\n",

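Nine of these ten hunks are the same one-line model swap; the launch hunk additionally adds --mem-fraction-static 0.8. The streaming cell, for instance, now reads as below once the server is up (a sketch; the port placeholder stands in for the one launch_server_cmd returns):

import openai

port = 30000  # placeholder; use the port returned by launch_server_cmd
client = openai.Client(base_url=f"http://127.0.0.1:{port}/v1", api_key="None")

stream = client.chat.completions.create(
    model="qwen/qwen2.5-0.5b-instruct",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
)
for chunk in stream:
    # Delta content can be None on the role and stop chunks.
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
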
docs/backend/openai_api_embeddings.ipynb

Lines changed: 6 additions & 6 deletions

@@ -40,7 +40,7 @@
 "\n",
 "embedding_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct \\\n",
+"python3 -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct \\\n",
 " --host 0.0.0.0 --is-embedding\n",
 "\"\"\"\n",
 ")\n",
@@ -66,7 +66,7 @@
 "text = \"Once upon a time\"\n",
 "\n",
 "curl_text = f\"\"\"curl -s http://localhost:{port}/v1/embeddings \\\n",
-" -d '{{\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"input\": \"{text}\"}}'\"\"\"\n",
+" -d '{{\"model\": \"Alibaba-NLP/gte-Qwen2-1.5B-instruct\", \"input\": \"{text}\"}}'\"\"\"\n",
 "\n",
 "text_embedding = json.loads(subprocess.check_output(curl_text, shell=True))[\"data\"][0][\n",
 " \"embedding\"\n",
@@ -94,7 +94,7 @@
 "\n",
 "response = requests.post(\n",
 " f\"http://localhost:{port}/v1/embeddings\",\n",
-" json={\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"input\": text},\n",
+" json={\"model\": \"Alibaba-NLP/gte-Qwen2-1.5B-instruct\", \"input\": text},\n",
 ")\n",
 "\n",
 "text_embedding = response.json()[\"data\"][0][\"embedding\"]\n",
@@ -121,7 +121,7 @@
 "\n",
 "# Text embedding example\n",
 "response = client.embeddings.create(\n",
-" model=\"Alibaba-NLP/gte-Qwen2-7B-instruct\",\n",
+" model=\"Alibaba-NLP/gte-Qwen2-1.5B-instruct\",\n",
 " input=text,\n",
 ")\n",
 "\n",
@@ -150,11 +150,11 @@
 "\n",
 "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n",
 "\n",
-"tokenizer = AutoTokenizer.from_pretrained(\"Alibaba-NLP/gte-Qwen2-7B-instruct\")\n",
+"tokenizer = AutoTokenizer.from_pretrained(\"Alibaba-NLP/gte-Qwen2-1.5B-instruct\")\n",
 "input_ids = tokenizer.encode(text)\n",
 "\n",
 "curl_ids = f\"\"\"curl -s http://localhost:{port}/v1/embeddings \\\n",
-" -d '{{\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"input\": {json.dumps(input_ids)}}}'\"\"\"\n",
+" -d '{{\"model\": \"Alibaba-NLP/gte-Qwen2-1.5B-instruct\", \"input\": {json.dumps(input_ids)}}}'\"\"\"\n",
 "\n",
 "input_ids_embedding = json.loads(subprocess.check_output(curl_ids, shell=True))[\"data\"][\n",
 " 0\n",

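Every hunk here swaps gte-Qwen2-7B-instruct for the 1.5B variant. The requests-based cell, reassembled from the pieces above (a sketch; the port placeholder stands in for the one launch_server_cmd returns):

import requests

port = 30000  # placeholder; use the port returned by launch_server_cmd
response = requests.post(
    f"http://localhost:{port}/v1/embeddings",
    json={"model": "Alibaba-NLP/gte-Qwen2-1.5B-instruct", "input": "Once upon a time"},
)
# The response follows the OpenAI embeddings shape: {"data": [{"embedding": [...]}]}.
text_embedding = response.json()["data"][0]["embedding"]
print(f"embedding dimension: {len(text_embedding)}")
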
docs/backend/openai_api_vision.ipynb

Lines changed: 7 additions & 7 deletions

@@ -29,7 +29,7 @@
 "\n",
 "Launch the server in your terminal and wait for it to initialize.\n",
 "\n",
-"**Remember to add** `--chat-template llama_3_vision` **to specify the [vision chat template](https://docs.sglang.ai/backend/openai_api_vision.html#Chat-Template), otherwise, the server will only support text (images won’t be passed in), which can lead to degraded performance.**\n",
+"**Remember to add** `--chat-template` **for example** `--chat-template=qwen2-vl` **to specify the [vision chat template](https://docs.sglang.ai/backend/openai_api_vision.html#Chat-Template), otherwise, the server will only support text (images won’t be passed in), which can lead to degraded performance.**\n",
 "\n",
 "We need to specify `--chat-template` for vision language models because the chat template provided in Hugging Face tokenizer only supports text."
 ]
@@ -51,8 +51,8 @@
 "\n",
 "vision_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python3 -m sglang.launch_server --model-path meta-llama/Llama-3.2-11B-Vision-Instruct \\\n",
-" --chat-template=llama_3_vision\n",
+"python3 -m sglang.launch_server --model-path Qwen/Qwen2.5-VL-7B-Instruct \\\n",
+" --chat-template=qwen2-vl\n",
 "\"\"\"\n",
 ")\n",
 "\n",
@@ -79,7 +79,7 @@
 "curl_command = f\"\"\"\n",
 "curl -s http://localhost:{port}/v1/chat/completions \\\\\n",
 " -d '{{\n",
-" \"model\": \"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
+" \"model\": \"Qwen/Qwen2.5-VL-7B-Instruct\",\n",
 " \"messages\": [\n",
 " {{\n",
 " \"role\": \"user\",\n",
@@ -127,7 +127,7 @@
 "url = f\"http://localhost:{port}/v1/chat/completions\"\n",
 "\n",
 "data = {\n",
-" \"model\": \"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
+" \"model\": \"Qwen/Qwen2.5-VL-7B-Instruct\",\n",
 " \"messages\": [\n",
 " {\n",
 " \"role\": \"user\",\n",
@@ -167,7 +167,7 @@
 "client = OpenAI(base_url=f\"http://localhost:{port}/v1\", api_key=\"None\")\n",
 "\n",
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
+" model=\"Qwen/Qwen2.5-VL-7B-Instruct\",\n",
 " messages=[\n",
 " {\n",
 " \"role\": \"user\",\n",
@@ -211,7 +211,7 @@
 "client = OpenAI(base_url=f\"http://localhost:{port}/v1\", api_key=\"None\")\n",
 "\n",
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
+" model=\"Qwen/Qwen2.5-VL-7B-Instruct\",\n",
 " messages=[\n",
 " {\n",
 " \"role\": \"user\",\n",

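Put together, a request against the new Qwen2.5-VL server looks like this sketch. The image URL is a placeholder, not something from the commit, and the content-list format follows the standard OpenAI vision schema:

from openai import OpenAI

port = 30000  # placeholder; use the port returned by launch_server_cmd
client = OpenAI(base_url=f"http://localhost:{port}/v1", api_key="None")

response = client.chat.completions.create(
    model="Qwen/Qwen2.5-VL-7B-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                # Hypothetical image URL, for illustration only.
                {"type": "image_url", "image_url": {"url": "https://example.com/image.png"}},
            ],
        }
    ],
)
print(response.choices[0].message.content)
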
docs/backend/send_request.ipynb

Lines changed: 6 additions & 6 deletions

@@ -35,11 +35,11 @@
 "\n",
 "# This is equivalent to running the following command in your terminal\n",
 "\n",
-"# python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct --host 0.0.0.0\n",
+"# python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0\n",
 "\n",
 "server_process, port = launch_server_cmd(\n",
 " \"\"\"\n",
-"python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \\\n",
+"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct \\\n",
 " --host 0.0.0.0\n",
 "\"\"\"\n",
 ")\n",
@@ -65,7 +65,7 @@
 "curl_command = f\"\"\"\n",
 "curl -s http://localhost:{port}/v1/chat/completions \\\n",
 " -H \"Content-Type: application/json\" \\\n",
-" -d '{{\"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\", \"messages\": [{{\"role\": \"user\", \"content\": \"What is the capital of France?\"}}]}}'\n",
+" -d '{{\"model\": \"qwen/qwen2.5-0.5b-instruct\", \"messages\": [{{\"role\": \"user\", \"content\": \"What is the capital of France?\"}}]}}'\n",
 "\"\"\"\n",
 "\n",
 "response = json.loads(subprocess.check_output(curl_command, shell=True))\n",
@@ -90,7 +90,7 @@
 "url = f\"http://localhost:{port}/v1/chat/completions\"\n",
 "\n",
 "data = {\n",
-" \"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" \"model\": \"qwen/qwen2.5-0.5b-instruct\",\n",
 " \"messages\": [{\"role\": \"user\", \"content\": \"What is the capital of France?\"}],\n",
 "}\n",
 "\n",
@@ -116,7 +116,7 @@
 "client = openai.Client(base_url=f\"http://127.0.0.1:{port}/v1\", api_key=\"None\")\n",
 "\n",
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " messages=[\n",
 " {\"role\": \"user\", \"content\": \"List 3 countries and their capitals.\"},\n",
 " ],\n",
@@ -145,7 +145,7 @@
 "\n",
 "# Use stream=True for streaming responses\n",
 "response = client.chat.completions.create(\n",
-" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+" model=\"qwen/qwen2.5-0.5b-instruct\",\n",
 " messages=[\n",
 " {\"role\": \"user\", \"content\": \"List 3 countries and their capitals.\"},\n",
 " ],\n",

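The curl cell, reassembled outside the notebook JSON (a sketch; the port placeholder stands in for the real one, and the doubled braces are f-string escapes):

import json
import subprocess

port = 30000  # placeholder; use the port returned by launch_server_cmd
curl_command = f"""
curl -s http://localhost:{port}/v1/chat/completions \\
  -H "Content-Type: application/json" \\
  -d '{{"model": "qwen/qwen2.5-0.5b-instruct", "messages": [{{"role": "user", "content": "What is the capital of France?"}}]}}'
"""

response = json.loads(subprocess.check_output(curl_command, shell=True))
print(response["choices"][0]["message"]["content"])
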