Skip to content

Commit a497224

Browse files
committed
chore: only add models that seem to be working
1 parent 629127e commit a497224

File tree

3 files changed

+5
-119
lines changed

3 files changed

+5
-119
lines changed

.github/workflows/update.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@ jobs:
1818
with:
1919
go-version-file: go.mod
2020
- run: go run ./cmd/openrouter/main.go
21-
- run: go run ./cmd/huggingface/main.go
21+
# we need to add this back when we know that the providers/models all work
22+
# - run: go run ./cmd/huggingface/main.go
2223
- uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v5
2324
with:
2425
commit_message: "chore: auto-update generated files"

cmd/huggingface/main.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,8 @@ func findContextWindow(model Model) int64 {
9999
return 0
100100
}
101101

102+
// WARN: DO NOT USE
103+
// for now we have a subset list of models we use
102104
func main() {
103105
modelsResp, err := fetchHuggingFaceModels()
104106
if err != nil {

internal/providers/configs/huggingface.json

Lines changed: 1 addition & 118 deletions
Original file line numberDiff line numberDiff line change
@@ -20,19 +20,6 @@
2020
"has_reasoning_efforts": false,
2121
"supports_attachments": false
2222
},
23-
{
24-
"id": "Qwen/Qwen3-235B-A22B-Instruct-2507:cerebras",
25-
"name": "Qwen/Qwen3-235B-A22B-Instruct-2507 (cerebras)",
26-
"cost_per_1m_in": 0.6,
27-
"cost_per_1m_out": 1.2,
28-
"cost_per_1m_in_cached": 0,
29-
"cost_per_1m_out_cached": 0,
30-
"context_window": 262144,
31-
"default_max_tokens": 8192,
32-
"can_reason": false,
33-
"has_reasoning_efforts": false,
34-
"supports_attachments": false
35-
},
3623
{
3724
"id": "Qwen/Qwen3-235B-A22B-Instruct-2507:fireworks-ai",
3825
"name": "Qwen/Qwen3-235B-A22B-Instruct-2507 (fireworks-ai)",
@@ -72,19 +59,6 @@
7259
"has_reasoning_efforts": false,
7360
"supports_attachments": false
7461
},
75-
{
76-
"id": "Qwen/Qwen3-32B:groq",
77-
"name": "Qwen/Qwen3-32B (groq)",
78-
"cost_per_1m_in": 0.29,
79-
"cost_per_1m_out": 0.59,
80-
"cost_per_1m_in_cached": 0,
81-
"cost_per_1m_out_cached": 0,
82-
"context_window": 131072,
83-
"default_max_tokens": 8192,
84-
"can_reason": false,
85-
"has_reasoning_efforts": false,
86-
"supports_attachments": false
87-
},
8862
{
8963
"id": "Qwen/Qwen3-Coder-480B-A35B-Instruct:cerebras",
9064
"name": "Qwen/Qwen3-Coder-480B-A35B-Instruct (cerebras)",
@@ -111,32 +85,6 @@
11185
"has_reasoning_efforts": false,
11286
"supports_attachments": false
11387
},
114-
{
115-
"id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B:groq",
116-
"name": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B (groq)",
117-
"cost_per_1m_in": 0.75,
118-
"cost_per_1m_out": 0.99,
119-
"cost_per_1m_in_cached": 0,
120-
"cost_per_1m_out_cached": 0,
121-
"context_window": 131072,
122-
"default_max_tokens": 8192,
123-
"can_reason": false,
124-
"has_reasoning_efforts": false,
125-
"supports_attachments": false
126-
},
127-
{
128-
"id": "deepseek-ai/DeepSeek-V3:fireworks-ai",
129-
"name": "deepseek-ai/DeepSeek-V3 (fireworks-ai)",
130-
"cost_per_1m_in": 0.9,
131-
"cost_per_1m_out": 0.9,
132-
"cost_per_1m_in_cached": 0,
133-
"cost_per_1m_out_cached": 0,
134-
"context_window": 131072,
135-
"default_max_tokens": 8192,
136-
"can_reason": false,
137-
"has_reasoning_efforts": false,
138-
"supports_attachments": false
139-
},
14088
{
14189
"id": "deepseek-ai/DeepSeek-V3-0324:fireworks-ai",
14290
"name": "deepseek-ai/DeepSeek-V3-0324 (fireworks-ai)",
@@ -163,32 +111,6 @@
163111
"has_reasoning_efforts": false,
164112
"supports_attachments": false
165113
},
166-
{
167-
"id": "google/gemma-2-9b-it:groq",
168-
"name": "google/gemma-2-9b-it (groq)",
169-
"cost_per_1m_in": 0.2,
170-
"cost_per_1m_out": 0.2,
171-
"cost_per_1m_in_cached": 0,
172-
"cost_per_1m_out_cached": 0,
173-
"context_window": 8192,
174-
"default_max_tokens": 2048,
175-
"can_reason": false,
176-
"has_reasoning_efforts": false,
177-
"supports_attachments": false
178-
},
179-
{
180-
"id": "meta-llama/Llama-3.1-405B-Instruct:fireworks-ai",
181-
"name": "meta-llama/Llama-3.1-405B-Instruct (fireworks-ai)",
182-
"cost_per_1m_in": 3,
183-
"cost_per_1m_out": 3,
184-
"cost_per_1m_in_cached": 0,
185-
"cost_per_1m_out_cached": 0,
186-
"context_window": 131072,
187-
"default_max_tokens": 8192,
188-
"can_reason": false,
189-
"has_reasoning_efforts": false,
190-
"supports_attachments": false
191-
},
192114
{
193115
"id": "meta-llama/Llama-3.1-70B-Instruct:fireworks-ai",
194116
"name": "meta-llama/Llama-3.1-70B-Instruct (fireworks-ai)",
@@ -228,19 +150,6 @@
228150
"has_reasoning_efforts": false,
229151
"supports_attachments": false
230152
},
231-
{
232-
"id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct:cerebras",
233-
"name": "meta-llama/Llama-4-Maverick-17B-128E-Instruct (cerebras)",
234-
"cost_per_1m_in": 0.2,
235-
"cost_per_1m_out": 0.6,
236-
"cost_per_1m_in_cached": 0,
237-
"cost_per_1m_out_cached": 0,
238-
"context_window": 1048576,
239-
"default_max_tokens": 8192,
240-
"can_reason": false,
241-
"has_reasoning_efforts": false,
242-
"supports_attachments": false
243-
},
244153
{
245154
"id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct:fireworks-ai",
246155
"name": "meta-llama/Llama-4-Maverick-17B-128E-Instruct (fireworks-ai)",
@@ -267,32 +176,6 @@
267176
"has_reasoning_efforts": false,
268177
"supports_attachments": false
269178
},
270-
{
271-
"id": "meta-llama/Llama-4-Scout-17B-16E-Instruct:cerebras",
272-
"name": "meta-llama/Llama-4-Scout-17B-16E-Instruct (cerebras)",
273-
"cost_per_1m_in": 0.65,
274-
"cost_per_1m_out": 0.85,
275-
"cost_per_1m_in_cached": 0,
276-
"cost_per_1m_out_cached": 0,
277-
"context_window": 1048576,
278-
"default_max_tokens": 8192,
279-
"can_reason": false,
280-
"has_reasoning_efforts": false,
281-
"supports_attachments": false
282-
},
283-
{
284-
"id": "meta-llama/Llama-4-Scout-17B-16E-Instruct:fireworks-ai",
285-
"name": "meta-llama/Llama-4-Scout-17B-16E-Instruct (fireworks-ai)",
286-
"cost_per_1m_in": 0.15,
287-
"cost_per_1m_out": 0.6,
288-
"cost_per_1m_in_cached": 0,
289-
"cost_per_1m_out_cached": 0,
290-
"context_window": 1048576,
291-
"default_max_tokens": 8192,
292-
"can_reason": false,
293-
"has_reasoning_efforts": false,
294-
"supports_attachments": false
295-
},
296179
{
297180
"id": "meta-llama/Llama-4-Scout-17B-16E-Instruct:groq",
298181
"name": "meta-llama/Llama-4-Scout-17B-16E-Instruct (groq)",
@@ -428,4 +311,4 @@
428311
"HTTP-Referer": "https://charm.land",
429312
"X-Title": "Crush"
430313
}
431-
}
314+
}

0 commit comments

Comments (0)