# NOTE(review): the lines below were GitHub web-UI page chrome ("Skip to
# content", the run title, "Workflow file for this run") pasted in with the
# workflow; kept here as comments so the file remains valid YAML.
#
# Skip to content
# GLM-4 32B
# GLM-4 32B #6
# Workflow file for this run
# Runs Spanish-language LLM canary and high-school STEM test prompts against
# a small matrix of cheap OpenRouter-hosted models via the ./chat-llm.js driver.
name: Spanish tests

# `on` is a YAML 1.1 boolean-looking key; generic parsers read it as `true`,
# but GitHub's workflow loader handles it (suppress yamllint `truthy` here).
on: [workflow_dispatch, push, pull_request]

jobs:
  # NOTE(review): this job runs the single-turn canary first and then the
  # multi-turn one — presumably the single-turn file acts as a smoke test
  # before the multi-turn run; confirm that running both here is intended.
  canary-multi-turn:
    runs-on: ubuntu-22.04
    timeout-minutes: 3
    strategy:
      max-parallel: 3
      # Keep running the remaining models even if one model fails.
      fail-fast: false
      matrix:
        # Inline comments give OpenRouter input/output pricing per 1M tokens
        # and the advertised context window.
        model:
          - meta-llama/llama-3.2-3b-instruct # $0.0200/$0.0200 [ 128K]
          - google/gemma-3-4b-it # $0.0200/$0.0400 [ 128K]
          - mistralai/ministral-3b # $0.0400/$0.0400 [ 128K]
          - z-ai/glm-4-32b # $0.1000/$0.1000 [ 128K]
          - ibm-granite/granite-4.0-h-micro # $0.0170/$0.1100 [ 128K]
          - qwen/qwen3-8b # $0.0350/$0.1380 [ 128K]
    steps:
      - uses: actions/checkout@v4
      - run: ./chat-llm.js tests/es/canary-single-turn.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}
      - run: ./chat-llm.js tests/es/canary-multi-turn.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}

  # High-school-level STEM questions in Spanish against the same model matrix.
  high-school-stem:
    runs-on: ubuntu-22.04
    timeout-minutes: 5
    strategy:
      max-parallel: 3
      # Keep running the remaining models even if one model fails.
      fail-fast: false
      matrix:
        # Same model list as the canary job (inline comments: pricing per 1M
        # tokens and context window). Kept duplicated rather than using YAML
        # anchors, which workflow tooling does not reliably support.
        model:
          - meta-llama/llama-3.2-3b-instruct # $0.0200/$0.0200 [ 128K]
          - google/gemma-3-4b-it # $0.0200/$0.0400 [ 128K]
          - mistralai/ministral-3b # $0.0400/$0.0400 [ 128K]
          - z-ai/glm-4-32b # $0.1000/$0.1000 [ 128K]
          - ibm-granite/granite-4.0-h-micro # $0.0170/$0.1100 [ 128K]
          - qwen/qwen3-8b # $0.0350/$0.1380 [ 128K]
    steps:
      - uses: actions/checkout@v4
      - run: ./chat-llm.js tests/es/high-school-stem.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}