diff --git a/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/README.md b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/README.md new file mode 100644 index 0000000000..a547548987 --- /dev/null +++ b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/README.md @@ -0,0 +1,20 @@ +# 10L MLP3x Int6 Baseline (non-record) + +Non-record submission. Local MLX smoke test confirming pipeline works end-to-end. + +## Config +- 10 layers, 512 dim, 8 heads, 4 KV heads +- MLP 3x expansion (hidden=1536), relu² +- int6 quantization, zlib-9 compression +- Trained on Apple Silicon (MLX), 200 iterations only + +## Score +val_bpb: 2.3517 (200 iterations — not a competitive score) + +## Planned improvements +- zstd-22 compression +- Sliding window eval (stride=64) +- Muon WD=0.04 +- SmearGate + BigramHash +- SWA over last 40% of warmdown +- Full 10-min run on 8xH100 diff --git a/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/submission.json b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/submission.json new file mode 100644 index 0000000000..7e714584a1 --- /dev/null +++ b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/submission.json @@ -0,0 +1,6 @@ +{ + "name": "Your Name", + "github_id": "your_github_username", + "val_bpb": 2.3517, + "notes": "Non-record submission. 10-layer, 3x MLP, int6 quant baseline run on Apple Silicon MLX. 200 iterations smoke test only \u2014 full H100 run pending compute grant." +} diff --git a/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_gpt_mlx.py b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_gpt_mlx.py new file mode 100644 index 0000000000..7b9e935aa6 --- /dev/null +++ b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_gpt_mlx.py @@ -0,0 +1,1104 @@ +#!/usr/bin/env python3 +""" +The `train_gpt.py` and `train_gpt_mlx.py` scripts are intended as good launching-off points for new participants, not SOTA configs. We'll accept PRs that tune, improve, or simplify these scripts without significantly increasing complexity, but competitive submissions should stay in the `/records` folder. + +Hard stop: To keep readable for newcomers, let's make sure `train_gpt.py` and `train_gpt_mlx.py` never are longer than 1500 lines. +""" +from __future__ import annotations + +import glob +import json +import math +import os +import pickle +import sys +import time +import uuid +import zlib +from collections.abc import Callable +from pathlib import Path + +import numpy as np +import sentencepiece as spm + +import mlx.core as mx +import mlx.nn as nn +import mlx.optimizers as optim +from mlx.utils import tree_flatten, tree_unflatten + +# ============================================================================== +# SHARD FORMAT + COMPUTE DTYPE +# ============================================================================== + +COMPUTE_DTYPE = mx.bfloat16 + +# ============================================================================== +# HYPERPARAMETERS +# ============================================================================== +# Default Simple Baseline run: +# - 9 transformer blocks at width 512 +# - 8 attention heads with 4 KV heads (GQA) and 2x MLP expansion +# - vocab size 1024, sequence length 1024, tied embeddings +# - 524,288 train tokens per step for 20,000 iterations with a ~10 minute cap +class Hyperparameters: + # Data / tokenizer. 
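+    # Every field below reads an environment variable with a code default, so runs can be
+    # reconfigured without editing this file. A hypothetical short smoke-test invocation
+    # (values are illustrative only, not the submitted configuration):
+    #   ITERATIONS=200 VAL_LOSS_EVERY=100 NUM_LAYERS=10 MLP_MULT=3 python train_gpt_mlx.py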
+ data_path: str = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + tokenizer_path: str = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id: str = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed: int = int(os.environ.get("SEED", 1337)) + + # Training loop. These defaults now mirror train_gpt.py on a single process. + iterations: int = int(os.environ.get("ITERATIONS", 20_000)) + val_loss_every: int = int(os.environ.get("VAL_LOSS_EVERY", 0)) + # Validation always uses the full fineweb_val split. + val_batch_size: int = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + train_log_every: int = int(os.environ.get("TRAIN_LOG_EVERY", 200)) + train_batch_tokens: int = int(os.environ.get("TRAIN_BATCH_TOKENS", 524_288)) + grad_accum_steps: int = int(os.environ.get("GRAD_ACCUM_STEPS", 8)) + train_seq_len: int = int(os.environ.get("TRAIN_SEQ_LEN", os.environ.get("TRAIN_MAX_SEQ_LEN", 1024))) + # Chunk each logical MLX microbatch into smaller sub-batches to reduce peak + # memory pressure without changing the effective optimizer batch. + mlx_max_microbatch_tokens: int = int(os.environ.get("MLX_MAX_MICROBATCH_TOKENS", 8_192)) + # Force MLX to materialize the graph after every sub-batch, preventing lazy + # graph buildup across accumulation steps. Keeps peak memory low on 16GB machines. + # Disable on 32GB+ unified memory for better throughput (MLX_EAGER_EVAL=0). + mlx_eager_eval: bool = bool(int(os.environ.get("MLX_EAGER_EVAL", "1"))) + warmup_steps: int = int(os.environ.get("WARMUP_STEPS", 20)) + warmdown_iters: int = int(os.environ.get("WARMDOWN_ITERS", 1200)) + max_wallclock_seconds: float = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + + # Model (defaults match the current baseline setup). + vocab_size: int = int(os.environ.get("VOCAB_SIZE", 1024)) + num_layers: int = int(os.environ.get("NUM_LAYERS", 9)) + model_dim: int = int(os.environ.get("MODEL_DIM", 512)) + num_heads: int = int(os.environ.get("NUM_HEADS", 8)) + num_kv_heads: int = int(os.environ.get("NUM_KV_HEADS", 4)) + mlp_mult: int = int(os.environ.get("MLP_MULT", 2)) + tie_embeddings: bool = bool(int(os.environ.get("TIE_EMBEDDINGS", "1"))) + tied_embed_init_std: float = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005)) + logit_chunk_tokens: int = int(os.environ.get("LOGIT_CHUNK_TOKENS", 0)) + logit_softcap: float = float(os.environ.get("LOGIT_SOFTCAP", 30.0)) + rope_base: float = float(os.environ.get("ROPE_BASE", 10000.0)) + qk_gain_init: float = float(os.environ.get("QK_GAIN_INIT", 1.5)) + + # Optimizer. We keep the same per-group defaults as train_gpt.py. 
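+    # The three learning rates below map onto the SplitOptimizers groups defined further down:
+    # tied_embed_lr drives Adam on the tied embedding, matrix_lr drives Muon on 2D block
+    # matrices, and scalar_lr drives Adam on the per-channel gains, resid_mix and skip_weights.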
+ beta1: float = float(os.environ.get("BETA1", 0.9)) + beta2: float = float(os.environ.get("BETA2", 0.95)) + adam_eps: float = float(os.environ.get("ADAM_EPS", 1e-8)) + tied_embed_lr: float = float(os.environ.get("TIED_EMBED_LR", 0.05)) + matrix_lr: float = float(os.environ.get("MATRIX_LR", 0.04)) + scalar_lr: float = float(os.environ.get("SCALAR_LR", 0.04)) + muon_momentum: float = float(os.environ.get("MUON_MOMENTUM", 0.95)) + muon_backend_steps: int = int(os.environ.get("MUON_BACKEND_STEPS", 5)) + muon_momentum_warmup_start: float = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.85)) + muon_momentum_warmup_steps: int = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 500)) + grad_clip_norm: float = float(os.environ.get("GRAD_CLIP_NORM", 0.0)) + + out_dir: str = os.environ.get("OUT_DIR", "logs") + + @property + def train_files(self) -> str: + return f"{self.data_path}/fineweb_train_*.bin" + + @property + def val_files(self) -> str: + return f"{self.data_path}/fineweb_val_*.bin" + + @property + def microbatch_tokens(self) -> int: + return self.train_batch_tokens // self.grad_accum_steps + + def lr_mul(self, step: int, elapsed_ms: float) -> float: + if self.warmdown_iters <= 0: + return 1.0 + if self.max_wallclock_seconds <= 0: + warmdown_start = max(self.iterations - self.warmdown_iters, 0) + return max((self.iterations - step) / max(self.warmdown_iters, 1), 0.0) if warmdown_start <= step < self.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = self.warmdown_iters * step_ms + remaining_ms = max(1000.0 * self.max_wallclock_seconds - elapsed_ms, 0.0) + return remaining_ms / max(warmdown_ms, 1e-9) if remaining_ms <= warmdown_ms else 1.0 + + +CONTROL_TENSOR_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "CONTROL_TENSOR_NAME_PATTERNS", + "attn_scale,attn_scales,mlp_scale,mlp_scales,resid_mix,resid_mixes,q_gain,skip_weight,skip_weights", + ).split(",") + if pattern +) +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "INT8_KEEP_FLOAT_FP32_NAME_PATTERNS", + ",".join(CONTROL_TENSOR_NAME_PATTERNS), + ).split(",") + if pattern +) + + +def token_chunks(total_tokens: int, seq_len: int, max_chunk_tokens: int) -> list[int]: + usable_total = (total_tokens // seq_len) * seq_len + if usable_total <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + usable_chunk = max((max_chunk_tokens // seq_len) * seq_len, seq_len) + chunks: list[int] = [] + remaining = usable_total + while remaining > 0: + chunk = min(remaining, usable_chunk) + chunks.append(chunk) + remaining -= chunk + return chunks + + +def accumulate_flat_grads( + accum: dict[str, mx.array] | None, + grads_tree: dict, + scale: float, +) -> dict[str, mx.array]: + flat = dict(tree_flatten(grads_tree)) + if accum is None: + return {k: g * scale for k, g in flat.items()} + for k, g in flat.items(): + accum[k] = accum[k] + g * scale + return accum + + +# ============================================================================== +# MATH HELPERS +# ============================================================================== + +def rms_norm(x: mx.array, eps: float = 1e-6) -> mx.array: + return (x * mx.rsqrt(mx.mean(x * x, axis=-1, keepdims=True) + eps)).astype(x.dtype) + + +def zeropower_newtonschulz5(g: mx.array, steps: int, eps: float = 1e-7) -> mx.array: + # Orthogonalize a 2D update matrix with a fast Newton-Schulz iteration. + # Muon uses this to normalize matrix-shaped gradients before applying them. 
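+    # Intuition (a sketch, not an exact claim about the tuned quintic coefficients below): for
+    # g = U S V^T, the iteration pushes the singular values toward 1, so the output approximates
+    # the orthogonal factor U V^T of the update.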
+ # Background on Muon: https://kellerjordan.github.io/posts/muon/ + a, b, c = 3.4445, -4.7750, 2.0315 + x = g.astype(mx.float32) + x = x / (mx.sqrt(mx.sum(x * x)) + eps) + transposed = x.shape[0] > x.shape[1] + if transposed: + x = x.T + for _ in range(steps): + a_mat = x @ x.T + b_mat = b * a_mat + c * (a_mat @ a_mat) + x = a * x + b_mat @ x + if transposed: + x = x.T + return x.astype(g.dtype) + + +def load_data_shard(path: Path) -> np.ndarray: + header_bytes = 256 * np.dtype(" None: + self.file_idx = (self.file_idx + 1) % len(self.files) + if self.file_idx == 0: + self.epoch += 1 + if self.log_fn is not None: + self.log_fn( + f"WARNING: starting epoch:{self.epoch} " + f"dataset:{self.dataset_name} train_shards:{len(self.files)}" + ) + self.tokens = load_data_shard(self.files[self.file_idx]) + self.pos = 0 + + def take(self, n: int) -> np.ndarray: + chunks: list[np.ndarray] = [] + left = n + while left > 0: + if self.pos >= self.tokens.size: + self.next_file() + k = min(left, int(self.tokens.size - self.pos)) + chunks.append(self.tokens[self.pos : self.pos + k]) + self.pos += k + left -= k + return chunks[0] if len(chunks) == 1 else np.concatenate(chunks, axis=0) + + +class TokenLoader: + def __init__( + self, + pattern: str, + log_fn: Callable[[str], None] | None = None, + dataset_name: str = "", + ): + self.stream = TokenStream(pattern, log_fn=log_fn, dataset_name=dataset_name) + + def next_batch(self, batch_tokens: int, seq_len: int) -> tuple[mx.array, mx.array]: + usable = (batch_tokens // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + chunk = self.stream.take(usable + 1) + x = chunk[:-1].reshape(-1, seq_len) + y = chunk[1:].reshape(-1, seq_len) + return mx.array(x, dtype=mx.int32), mx.array(y, dtype=mx.int32) + + +# ============================================================================== +# MODEL BLOCKS +# ============================================================================== + +class CastedLinear(nn.Module): + def __init__(self, in_dim: int, out_dim: int): + super().__init__() + self.weight = nn.Linear(in_dim, out_dim, bias=False).weight.astype(mx.float32) + + def __call__(self, x: mx.array) -> mx.array: + return x @ self.weight.astype(x.dtype).T + + +class RMSNormNoWeight(nn.Module): + # MLX module wrapper around the functional RMSNorm helper so it composes nicely in blocks. 
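+    # For reference, the helper computes x * rsqrt(mean(x**2, axis=-1) + eps) with no learned
+    # gain, which is why this wrapper carries no parameters of its own.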
+ def __call__(self, x: mx.array) -> mx.array: + return rms_norm(x) + + +class CausalSelfAttention(nn.Module): + # - separate q/k/v projections + # - RMSNorm on q and k before attention + # - RoPE on q and k + # - causal masked SDPA + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + if dim % num_heads != 0: + raise ValueError("model_dim must be divisible by num_heads") + if num_heads % num_kv_heads != 0: + raise ValueError("num_heads must be divisible by num_kv_heads") + self.num_heads = num_heads + self.num_kv_heads = num_kv_heads + self.head_dim = dim // num_heads + if self.head_dim % 2 != 0: + raise ValueError("head_dim must be even for RoPE") + kv_dim = self.num_kv_heads * self.head_dim + self.c_q = CastedLinear(dim, dim) + self.c_k = CastedLinear(dim, kv_dim) + self.c_v = CastedLinear(dim, kv_dim) + self.proj = CastedLinear(dim, dim) + self.q_gain = mx.ones((num_heads,), dtype=mx.float32) * qk_gain_init + self.rope = nn.RoPE(self.head_dim, traditional=False, base=rope_base) + self.scale = self.head_dim ** -0.5 + + def __call__(self, x: mx.array) -> mx.array: + bsz, seqlen, dim = x.shape + q = self.c_q(x).reshape(bsz, seqlen, self.num_heads, self.head_dim).transpose(0, 2, 1, 3) + k = self.c_k(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + v = self.c_v(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + + q = self.rope(rms_norm(q).astype(COMPUTE_DTYPE)) + k = self.rope(rms_norm(k).astype(COMPUTE_DTYPE)) + q = q * self.q_gain.astype(q.dtype)[None, :, None, None] + y = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask="causal") + y = y.transpose(0, 2, 1, 3).reshape(bsz, seqlen, dim) + return self.proj(y) + + +class MLP(nn.Module): + # Baseline MLP uses relu^2 instead of GELU/SiLU. It is cheap and works well in this setup. 
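+    # Worked shape example (illustrative): with dim=512 and mlp_mult=3 the hidden width is 1536,
+    # so fc maps 512 -> 1536, proj maps 1536 -> 512, and the forward pass is proj(relu(fc(x)) ** 2).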
+ def __init__(self, dim: int, mlp_mult: int): + super().__init__() + hidden = dim * mlp_mult + self.fc = CastedLinear(dim, hidden) + self.proj = CastedLinear(hidden, dim) + + def __call__(self, x: mx.array) -> mx.array: + x = nn.relu(self.fc(x)) + return self.proj(x * x) + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + mlp_mult: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + self.attn_norm = RMSNormNoWeight() + self.mlp_norm = RMSNormNoWeight() + self.attn = CausalSelfAttention(dim, num_heads, num_kv_heads, rope_base, qk_gain_init) + self.mlp = MLP(dim, mlp_mult) + self.attn_scale = mx.ones((dim,), dtype=mx.float32) + self.mlp_scale = mx.ones((dim,), dtype=mx.float32) + self.resid_mix = mx.array(np.stack((np.ones((dim,), dtype=np.float32), np.zeros((dim,), dtype=np.float32)))) + + def __call__(self, x: mx.array, x0: mx.array) -> mx.array: + mix = self.resid_mix.astype(x.dtype) + x = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + attn_out = self.attn(self.attn_norm(x)) + x = x + self.attn_scale.astype(x.dtype)[None, None, :] * attn_out + x = x + self.mlp_scale.astype(x.dtype)[None, None, :] * self.mlp(self.mlp_norm(x)) + return x + + +class GPT(nn.Module): + # - token embedding + RMSNorm + # - encoder half accumulates skip tensors + # - decoder half consumes reversed skips with learned skip_weights + # - tied embeddings for the LM head (the baseline default setup) + def __init__(self, vocab_size: int, num_layers: int, dim: int, num_heads: int, num_kv_heads: int, mlp_mult: int, + logit_chunk_tokens: int, logit_softcap: float, rope_base: float, tied_embed_init_std: float, + qk_gain_init: float): + super().__init__() + if logit_softcap <= 0.0: + raise ValueError(f"logit_softcap must be positive, got {logit_softcap}") + self.logit_chunk_tokens = logit_chunk_tokens + self.logit_softcap = logit_softcap + + self.tok_emb = nn.Embedding(vocab_size, dim) + self.num_encoder_layers = num_layers // 2 + self.num_decoder_layers = num_layers - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = mx.ones((self.num_skip_weights, dim), dtype=mx.float32) + self.blocks = [ + Block(dim, num_heads, num_kv_heads, mlp_mult, rope_base, qk_gain_init) + for i in range(num_layers) + ] + self.final_norm = RMSNormNoWeight() + + for b in self.blocks: + b.attn.proj.weight = mx.zeros_like(b.attn.proj.weight) + b.mlp.proj.weight = mx.zeros_like(b.mlp.proj.weight) + self.tok_emb.weight = ( + mx.random.normal(self.tok_emb.weight.shape, dtype=mx.float32) * tied_embed_init_std + ).astype(COMPUTE_DTYPE) + + def softcap(self, logits: mx.array) -> mx.array: + c = self.logit_softcap + return c * mx.tanh(logits / c) + + def __call__(self, input_ids: mx.array) -> mx.array: + x = rms_norm(self.tok_emb(input_ids).astype(COMPUTE_DTYPE)) + x0 = x + skips: list[mx.array] = [] + + for i in range(self.num_encoder_layers): + x = self.blocks[i](x, x0) + skips.append(x) + for i in range(self.num_decoder_layers): + # Odd layer counts have one more decoder block than encoder block. The baseline only + # applies a skip connection when one exists, then runs the remaining decoder block(s) + # without an added skip. 
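+            # Example (illustrative): with 9 layers there are 4 encoder and 5 decoder blocks and
+            # 4 skip_weights rows; decoder steps 0-3 consume the stored skips in reverse order and
+            # the final decoder step runs with no skip added.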
+ if skips: + x = x + self.skip_weights[i].astype(x.dtype)[None, None, :] * skips.pop() + x = self.blocks[self.num_encoder_layers + i](x, x0) + return self.final_norm(x) + + def loss(self, input_ids: mx.array, target_ids: mx.array) -> mx.array: + # Cross-entropy over flattened tokens. We keep optional logit chunking because it is a useful + # memory knob on Macs, but the common path is chunk_tokens=0 (single matmul + CE). + x = self(input_ids).reshape(-1, self.tok_emb.weight.shape[1]) + y = target_ids.reshape(-1) + if self.logit_chunk_tokens <= 0 or x.shape[0] <= self.logit_chunk_tokens: + logits_proj = x @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + return nn.losses.cross_entropy(logits.astype(mx.float32), y, reduction="mean") + + loss_sum = mx.array(0.0, dtype=mx.float32) + n = int(x.shape[0]) + for s in range(0, n, self.logit_chunk_tokens): + e = min(s + self.logit_chunk_tokens, n) + logits_proj = x[s:e] @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + loss_sum = loss_sum + nn.losses.cross_entropy(logits.astype(mx.float32), y[s:e], reduction="sum") + return loss_sum / float(n) + +# ============================================================================== +# OPTIMIZERS (MUON + ADAM SPLIT) +# ============================================================================== +class Muon: + # Muon applies SGD-momentum to matrix gradients, then orthogonalizes the result before the + # parameter update. + def __init__(self, keys: list[str], params: dict[str, mx.array], args: Hyperparameters): + self.keys = keys + self.args = args + self.buffers = {k: mx.zeros_like(params[k]) for k in keys} + + def step(self, params: dict[str, mx.array], grads: dict[str, mx.array], step: int, lr_mul: float) -> dict[str, mx.array]: + if self.args.muon_momentum_warmup_steps: + t = min(step / self.args.muon_momentum_warmup_steps, 1.0) + momentum = (1.0 - t) * self.args.muon_momentum_warmup_start + t * self.args.muon_momentum + else: + momentum = self.args.muon_momentum + lr = self.args.matrix_lr * lr_mul + out: dict[str, mx.array] = {} + for k in self.keys: + p = params[k] + g = grads[k] + buf = momentum * self.buffers[k] + g + self.buffers[k] = buf + g_eff = g + momentum * buf + g_ortho = zeropower_newtonschulz5(g_eff, self.args.muon_backend_steps) + scale = math.sqrt(max(1.0, float(p.shape[0]) / float(p.shape[1]))) + out[k] = p - lr * (g_ortho * scale).astype(p.dtype) + return out + + +class SplitOptimizers: + # - embeddings: Adam with the tied-embedding LR + # - block matrices (2D): Muon + # - block scalars + skip weights: Adam + # This preserves the high-level optimization behavior even though MLX internals differ. 
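+    # Concretely (illustrative): tok_emb.weight goes to adam_embed, blocks.*.attn.c_q.weight and
+    # the other 2D block matrices go to Muon, while q_gain, attn_scale, mlp_scale, resid_mix and
+    # skip_weights fall into the Adam scalar group via CONTROL_TENSOR_NAME_PATTERNS.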
+ def __init__(self, model: GPT, args: Hyperparameters): + self.args = args + params = dict(tree_flatten(model.parameters())) + self.embed_key = "tok_emb.weight" + self.matrix_keys = [ + k + for k, p in params.items() + if k.startswith("blocks.") and p.ndim == 2 and not any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS) + ] + self.scalar_keys = [ + k + for k, p in params.items() + if k == "skip_weights" or (k.startswith("blocks.") and (p.ndim < 2 or any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS))) + ] + + self.muon = Muon(self.matrix_keys, params, args) + self.adam_embed = optim.Adam( + learning_rate=args.tied_embed_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + self.adam_scalar = optim.Adam( + learning_rate=args.scalar_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + + def step(self, model: GPT, grads_tree: dict, step: int, lr_mul: float) -> None: + params = dict(tree_flatten(model.parameters())) + grads = dict(tree_flatten(grads_tree)) + updated = dict(params) + + updated.update(self.muon.step(params, grads, step=step, lr_mul=lr_mul)) + + self.adam_embed.learning_rate = self.args.tied_embed_lr * lr_mul + updated.update( + self.adam_embed.apply_gradients( + {self.embed_key: grads[self.embed_key]}, + {self.embed_key: params[self.embed_key]}, + ) + ) + + self.adam_scalar.learning_rate = self.args.scalar_lr * lr_mul + scalar_grads = {k: grads[k] for k in self.scalar_keys} + scalar_params = {k: params[k] for k in self.scalar_keys} + updated.update(self.adam_scalar.apply_gradients(scalar_grads, scalar_params)) + + model.update(tree_unflatten(list(updated.items()))) + +# ============================================================================== +# QUANTIZATION (INT8 + ZLIB) +# ============================================================================== +# - per-row int8 for 2D float tensors +# - per-tensor int8 for other float tensors +# - fp16 passthrough for small float tensors +# - exact passthrough for non-floats + +MX_DTYPE_FROM_NAME = { + "float32": mx.float32, + "float16": mx.float16, + "bfloat16": mx.bfloat16, +} + +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = np.float16 +INT8_PER_ROW_SCALE_DTYPE = np.float16 +INT8_CLIP_PERCENTILE = 99.99984 +INT8_CLIP_Q = INT8_CLIP_PERCENTILE / 100.0 + + +def _np_float32(arr: mx.array) -> np.ndarray: + return np.array(arr.astype(mx.float32), dtype=np.float32, copy=False) + + +def keep_float_array(name: str, arr: mx.array, passthrough_orig_dtypes: dict[str, str]) -> np.ndarray: + if any(pattern in name for pattern in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + return np.ascontiguousarray(_np_float32(arr)) + if arr.dtype in {mx.float32, mx.bfloat16}: + passthrough_orig_dtypes[name] = str(arr.dtype).split(".")[-1] + return np.ascontiguousarray(np.array(arr.astype(mx.float16), dtype=INT8_KEEP_FLOAT_STORE_DTYPE, copy=False)) + return np.ascontiguousarray(np.array(arr, copy=True)) + + +def quantize_float_array(arr: mx.array) -> tuple[np.ndarray, np.ndarray]: + f32 = _np_float32(arr) + if f32.ndim == 2: + # Matrices get one scale per row, which usually tracks output-channel + # ranges much better than a single tensor-wide scale. 
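+        # Per-row scheme (sketch): for row r, c_r is the clipped-quantile abs bound, the scale is
+        # s_r = max(c_r / 127, 1 / 127), and q[r] = round(clip(x[r], -c_r, c_r) / s_r), kept in [-127, 127].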
+ clip_abs = np.quantile(np.abs(f32), INT8_CLIP_Q, axis=1) if f32.size else np.empty((f32.shape[0],), dtype=np.float32) + clipped = np.clip(f32, -clip_abs[:, None], clip_abs[:, None]) + scale = np.maximum(clip_abs / 127.0, 1.0 / 127.0).astype(np.float32, copy=False) + q = np.clip(np.round(clipped / scale[:, None]), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), np.ascontiguousarray(scale.astype(INT8_PER_ROW_SCALE_DTYPE, copy=False)) + + # Vectors / scalars use a simpler per-tensor scale. + clip_abs = float(np.quantile(np.abs(f32).reshape(-1), INT8_CLIP_Q)) if f32.size else 0.0 + scale = np.array(clip_abs / 127.0 if clip_abs > 0.0 else 1.0, dtype=np.float32) + q = np.clip(np.round(np.clip(f32, -clip_abs, clip_abs) / scale), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), scale + + +def quantize_state_dict_int8(flat_state: dict[str, mx.array]) -> tuple[dict[str, object], dict[str, int]]: + quantized: dict[str, np.ndarray] = {} + scales: dict[str, np.ndarray] = {} + dtypes: dict[str, str] = {} + passthrough: dict[str, np.ndarray] = {} + passthrough_orig_dtypes: dict[str, str] = {} + qmeta: dict[str, dict[str, object]] = {} + stats = dict.fromkeys( + ("param_count", "num_tensors", "num_float_tensors", "num_nonfloat_tensors", "baseline_tensor_bytes", "int8_payload_bytes"), + 0, + ) + for name, arr in flat_state.items(): + stats["param_count"] += int(arr.size) + stats["num_tensors"] += 1 + stats["baseline_tensor_bytes"] += int(arr.nbytes) + if not mx.issubdtype(arr.dtype, mx.floating): + stats["num_nonfloat_tensors"] += 1 + passthrough[name] = np.ascontiguousarray(np.array(arr)) + stats["int8_payload_bytes"] += int(passthrough[name].nbytes) + continue + + # Small float tensors are cheap enough to keep directly. We still downcast + # fp32/bf16 passthrough tensors to fp16 so metadata does not dominate size. + if int(arr.size) <= INT8_KEEP_FLOAT_MAX_NUMEL: + kept = keep_float_array(name, arr, passthrough_orig_dtypes) + passthrough[name] = kept + stats["int8_payload_bytes"] += int(kept.nbytes) + continue + + stats["num_float_tensors"] += 1 + q, s = quantize_float_array(arr) + if s.ndim > 0: + qmeta[name] = {"scheme": "per_row", "axis": 0} + quantized[name] = q + scales[name] = s + dtypes[name] = str(arr.dtype).split(".")[-1] + stats["int8_payload_bytes"] += int(q.nbytes + s.nbytes) + obj: dict[str, object] = { + "__quant_format__": "int8_clean_per_row_v1", + "quantized": quantized, + "scales": scales, + "dtypes": dtypes, + "passthrough": passthrough, + } + if qmeta: + obj["qmeta"] = qmeta + if passthrough_orig_dtypes: + obj["passthrough_orig_dtypes"] = passthrough_orig_dtypes + return obj, stats + + +def dequantize_state_dict_int8(quant_obj: dict[str, object]) -> dict[str, mx.array]: + out: dict[str, mx.array] = {} + qmeta = quant_obj.get("qmeta", {}) + passthrough_orig_dtypes = quant_obj.get("passthrough_orig_dtypes", {}) + for name, q in quant_obj["quantized"].items(): + q_np = np.asarray(q, dtype=np.int8) + dtype_name = quant_obj["dtypes"][name] + scale = np.asarray(quant_obj["scales"][name], dtype=np.float32) + if qmeta.get(name, {}).get("scheme") == "per_row" or scale.ndim > 0: + # Broadcast the saved row scale back across trailing dimensions. 
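+            # Illustrative shapes: a (512, 1536) int8 matrix carries a (512,) scale vector, which
+            # the reshape turns into (512, 1) so the multiply recovers float rows as q[r] * s_r.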
+ out_arr = q_np.astype(np.float32) * scale.reshape((q_np.shape[0],) + (1,) * (q_np.ndim - 1)) + else: + out_arr = q_np.astype(np.float32) * float(scale) + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[dtype_name]) + for name, arr in quant_obj["passthrough"].items(): + # Restore small tensors, undoing the temporary fp16 storage cast if needed. + out_arr = np.array(arr, copy=True) + orig_dtype = passthrough_orig_dtypes.get(name) + if isinstance(orig_dtype, str): + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[orig_dtype]) + else: + out[name] = mx.array(out_arr) + return out + + +def build_sentencepiece_luts( + sp: spm.SentencePieceProcessor, vocab_size: int +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_lut = np.zeros((table_size,), dtype=np.int16) + has_leading_space_lut = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_lut = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_lut[token_id] = False + if sp.is_byte(token_id): + base_bytes_lut[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("▁"): + has_leading_space_lut[token_id] = True + piece = piece[1:] + base_bytes_lut[token_id] = len(piece.encode("utf-8")) + return base_bytes_lut, has_leading_space_lut, is_boundary_token_lut + + +def validate_dataset_tokenizer_pair(data_path: str, tokenizer_path: str) -> tuple[str, int, int | None]: + # The shard directory and tokenizer are coupled: val_bpb is only meaningful if we + # decode bytes with the exact tokenizer that produced the shards. The manifest + # lets the training script fail fast on accidental dataset/tokenizer mismatches. 
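+    # The lookup below assumes a manifest layout like the following (hypothetical example, shown
+    # only to document the keys this function reads):
+    #   <data root>/manifest.json containing
+    #     {"datasets": [{"name": "fineweb10B_sp1024", "tokenizer_name": "fineweb_1024_bpe",
+    #                    "stats": {"files_train": 103}}],
+    #      "tokenizers": [{"name": "fineweb_1024_bpe", "model_path": ".../fineweb_1024_bpe.model"}]}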
+ dataset_dir = Path(data_path).resolve() + actual_train_files = len(list(dataset_dir.glob("fineweb_train_*.bin"))) + if len(dataset_dir.parents) < 2: + return dataset_dir.name, actual_train_files, None + manifest_path = dataset_dir.parents[1] / "manifest.json" + if not manifest_path.is_file(): + return dataset_dir.name, actual_train_files, None + + manifest = json.loads(manifest_path.read_text(encoding="utf-8")) + dataset_entry = next((x for x in manifest.get("datasets", []) if x.get("name") == dataset_dir.name), None) + if dataset_entry is None: + return dataset_dir.name, actual_train_files, None + + tokenizer_name = dataset_entry.get("tokenizer_name") + tokenizer_entry = ( + next((x for x in manifest.get("tokenizers", []) if x.get("name") == tokenizer_name), None) + if tokenizer_name + else None + ) + expected_name = Path((tokenizer_entry or {}).get("model_path") or (tokenizer_entry or {}).get("path") or "").name + if expected_name and Path(tokenizer_path).name != expected_name: + raise ValueError(f"{dataset_dir.name} expects tokenizer {expected_name}, got {Path(tokenizer_path).name}") + expected_train_files = (dataset_entry.get("stats") or {}).get("files_train") + if expected_train_files is not None: + expected_train_files = int(expected_train_files) + if actual_train_files > expected_train_files: + raise ValueError( + f"{dataset_dir.name} has more train shards than expected: found {actual_train_files}, " + f"manifest says {expected_train_files}" + ) + return dataset_dir.name, actual_train_files, expected_train_files + + +def load_validation_tokens(pattern: str, seq_len: int) -> np.ndarray: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + # The export pipeline writes the fixed first-50k-doc validation set to fineweb_val_*. 
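+    # Trimming sketch: with TRAIN_SEQ_LEN=1024 and, say, 10,000,000 concatenated val tokens,
+    # usable = 9,999,360 and we keep usable + 1 tokens so the shifted (input, target) views built
+    # in eval_val stay aligned on full sequences.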
+ tokens = np.ascontiguousarray(np.concatenate([load_data_shard(file) for file in files], axis=0)) + usable = ((tokens.size - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split is too short for TRAIN_SEQ_LEN={seq_len}") + return tokens[: usable + 1] + + +def loss_and_grad_chunked( + args: Hyperparameters, + train_loader: TokenLoader, + compiled_loss_and_grad, +) -> tuple[mx.array, dict]: + chunk_sizes = token_chunks(args.microbatch_tokens, args.train_seq_len, args.mlx_max_microbatch_tokens) + total_tokens = float(sum(chunk_sizes)) + loss_value = mx.array(0.0, dtype=mx.float32) + grad_accum: dict[str, mx.array] | None = None + for chunk_tokens in chunk_sizes: + x, y = train_loader.next_batch(chunk_tokens, args.train_seq_len) + loss, grads = compiled_loss_and_grad(x, y) + scale = float(y.size) / total_tokens + loss_value = loss_value + loss.astype(mx.float32) * scale + grad_accum = accumulate_flat_grads(grad_accum, grads, scale) + if args.mlx_eager_eval: + mx.eval(loss_value, grad_accum) # materialize each chunk to cap peak memory + return loss_value, tree_unflatten(list(grad_accum.items())) + + +def eval_val( + args: Hyperparameters, + compiled_loss, + val_tokens: np.ndarray, + base_bytes_lut: np.ndarray, + has_leading_space_lut: np.ndarray, + is_boundary_token_lut: np.ndarray, + log_fn: Callable[[str], None] | None = None, +) -> tuple[float, float]: + # Validation computes two metrics: + # - val_loss: token cross-entropy (natural log) + # - val_bpb: tokenizer-agnostic compression metric used by the challenge + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + val_batch_seqs = val_batch_tokens // args.train_seq_len + total_seqs = (val_tokens.size - 1) // args.train_seq_len + total_batches = max((total_seqs + val_batch_seqs - 1) // val_batch_seqs, 1) + total_loss_sum = 0.0 + total_tokens = 0.0 + total_bytes = 0.0 + for batch_idx, batch_seq_start in enumerate(range(0, total_seqs, val_batch_seqs), start=1): + batch_seq_end = min(batch_seq_start + val_batch_seqs, total_seqs) + raw_start = batch_seq_start * args.train_seq_len + raw_end = batch_seq_end * args.train_seq_len + 1 + chunk = val_tokens[raw_start:raw_end] + x_np = chunk[:-1].reshape(-1, args.train_seq_len) + y_np = chunk[1:].reshape(-1, args.train_seq_len) + x = mx.array(x_np, dtype=mx.int32) + y = mx.array(y_np, dtype=mx.int32) + chunk_token_count = float(y.size) + batch_loss = compiled_loss(x, y).astype(mx.float32) + mx.eval(batch_loss) + total_loss_sum += float(batch_loss.item()) * chunk_token_count + prev_ids = x_np.reshape(-1) + tgt_ids = y_np.reshape(-1) + bytes_np = base_bytes_lut[tgt_ids].astype(np.int16, copy=True) + bytes_np += ( + has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids] + ).astype(np.int16, copy=False) + total_tokens += chunk_token_count + total_bytes += float(bytes_np.astype(np.float64).sum()) + if log_fn is not None and total_batches > 1 and ( + batch_idx == 1 or batch_idx == total_batches or batch_idx % 25 == 0 + ): + log_fn(f"val_progress:{batch_idx}/{total_batches}") + val_loss = total_loss_sum / total_tokens + bits_per_token = val_loss / math.log(2.0) + val_bpb = bits_per_token * (total_tokens / total_bytes) + return val_loss, val_bpb + +# ----------------------------- +# TRAINING +# 
----------------------------- + +def clip_grad_tree(grads_tree: dict, max_norm: float) -> dict: + if max_norm <= 0: + return grads_tree + flat = dict(tree_flatten(grads_tree)) + total_sq = 0.0 + for grad in flat.values(): + total_sq += float(np.sum(np.square(_np_float32(grad)), dtype=np.float64)) + if total_sq <= 0.0: + return grads_tree + total_norm = math.sqrt(total_sq) + if total_norm <= max_norm: + return grads_tree + scale = max_norm / (total_norm + 1e-12) + return tree_unflatten([(k, g * scale) for k, g in flat.items()]) + + +def main() -> None: + # ============================================================================== + # TOKENIZER + VALIDATION METRIC SETUP + # ============================================================================== + args = Hyperparameters() + out_dir = Path(args.out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + logfile = out_dir / f"{args.run_id}.txt" + print(logfile) + + def log(msg: str, console: bool = True) -> None: + if console: + print(msg) + with logfile.open("a", encoding="utf-8") as f: + print(msg, file=f) + + code = Path(__file__).read_text(encoding="utf-8") + log(code, console=False) + log("=" * 100, console=False) + log(f"Running Python {sys.version}", console=False) + log(f"Running MLX {mx.__version__}", console=False) + log("=" * 100, console=False) + + if not args.tie_embeddings: + raise NotImplementedError("train_gpt_mlx.py only supports tied embeddings") + if not args.tokenizer_path.endswith(".model"): + raise ValueError(f"TOKENIZER_PATH must point to a SentencePiece .model file: {args.tokenizer_path}") + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError( + f"VOCAB_SIZE={args.vocab_size} does not match tokenizer vocab_size={int(sp.vocab_size())}" + ) + dataset_name, actual_train_files, expected_train_files = validate_dataset_tokenizer_pair( + args.data_path, + args.tokenizer_path, + ) + val_tokens = load_validation_tokens(args.val_files, args.train_seq_len) + + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts( + sp, args.vocab_size + ) + + # ============================================================================== + # TRAINING SETUP + # ============================================================================== + mx.random.seed(args.seed) + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + # ============================================================================== + # MODEL + OPTIMIZER SETUP + # ============================================================================== + model = GPT( + vocab_size=args.vocab_size, + num_layers=args.num_layers, + dim=args.model_dim, + num_heads=args.num_heads, + num_kv_heads=args.num_kv_heads, + mlp_mult=args.mlp_mult, + logit_chunk_tokens=args.logit_chunk_tokens, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + tied_embed_init_std=args.tied_embed_init_std, + qk_gain_init=args.qk_gain_init, + ) + opt = SplitOptimizers(model, args) + + # ============================================================================== + # COMPILED TRAIN / EVAL FUNCTIONS (MLX) + # ============================================================================== + # The crucial MLX detail is capture scope: this model contains non-trainable arrays too (for example + # inside RoPE modules), so compiling only against trainable parameters throws "uncaptured inputs". 
+ # Compiling the model-bound functions and capturing the full model state fixes that while still + # returning gradients only for trainable parameters via nn.value_and_grad(...). + compiled_loss = mx.compile(lambda x, y: model.loss(x, y), inputs=model.state, outputs=model.state) + compiled_loss_and_grad = mx.compile( + nn.value_and_grad(model, lambda x, y: model.loss(x, y)), + inputs=model.state, + outputs=model.state, + ) + + # Print config once so logs are self-describing. + n_params = sum(int(np.prod(p.shape)) for _, p in tree_flatten(model.parameters())) + log(f"run_id:{args.run_id}") + log(f"mlx_version:{mx.__version__}") + log(f"train_loader:shards pattern={args.train_files}") + log(f"val_loader:shards pattern={args.val_files} tokens:{val_tokens.size - 1}") + if expected_train_files is None: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}") + elif actual_train_files < expected_train_files: + log( + f"WARNING: train_loader:subset dataset:{dataset_name} " + f"train_shards:{actual_train_files}/{expected_train_files} " + f"new epochs will arrive sooner than the full dataset" + ) + else: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}/{expected_train_files}") + log(f"tokenizer_path:{args.tokenizer_path}") + log( + f"model_params:{n_params} vocab_size:{args.vocab_size} layers:{args.num_layers} " + f"dim:{args.model_dim} heads:{args.num_heads} kv_heads:{args.num_kv_heads} " + f"seq_len:{args.train_seq_len} tie_embeddings:{args.tie_embeddings}" + ) + log( + f"iterations:{args.iterations} train_batch_tokens:{args.train_batch_tokens} grad_accum_steps:{args.grad_accum_steps} " + f"microbatch_tokens:{args.microbatch_tokens} microbatch_batch_size:{args.microbatch_tokens // args.train_seq_len} " + f"val_batch_size:{args.val_batch_size} " + f"warmup_steps:{args.warmup_steps} max_wallclock_seconds:{args.max_wallclock_seconds:.3f}" + ) + log(f"mlx_max_microbatch_tokens:{args.mlx_max_microbatch_tokens}") + log( + f"optimizer:muon+adam muon_matrix_params:{len(opt.matrix_keys)} scalar_params:{len(opt.scalar_keys)} " + f"embed_lr:{args.tied_embed_lr} " + f"matrix_lr:{args.matrix_lr} scalar_lr:{args.scalar_lr} " + f"muon_momentum:{args.muon_momentum} muon_steps:{args.muon_backend_steps}" + ) + log(f"val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path={args.tokenizer_path}") + log(f"compute_dtype:{COMPUTE_DTYPE} compile:True") + log( + f"dtypes tok_emb:{model.tok_emb.weight.dtype} " + f"linear_weight:{model.blocks[0].attn.c_q.weight.dtype} " + f"skip_weights:{model.skip_weights.dtype}" + ) + + # ============================================================================== + # TRAINING LOOP + # ============================================================================== + if args.warmup_steps > 0: + # Warmup should only prime MLX compile/allocation paths. Updating parameters here forces us + # to snapshot and restore model/optimizer state, which is expensive on unified-memory Macs. + # Instead we run the real train shapes, force the loss/grads to materialize, and then reset + # the loader so measured training still starts from the true init and token window. 
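+        # Note: each warmup pass below still runs the full grad_accum_steps x chunked-microbatch
+        # shapes, so the graphs compiled here match the ones used in the timed loop.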
+ for warmup_step in range(args.warmup_steps): + accum: dict[str, mx.array] | None = None + warmup_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + warmup_loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + mx.eval(warmup_loss, accum) + mx.synchronize() + if args.warmup_steps <= 20 or (warmup_step + 1) % 10 == 0 or warmup_step + 1 == args.warmup_steps: + log(f"warmup_step:{warmup_step + 1}/{args.warmup_steps}") + + # Prime the standalone eval graph once too. It is compiled separately from value_and_grad. + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + warm_val_seqs = min(val_batch_tokens // args.train_seq_len, (val_tokens.size - 1) // args.train_seq_len) + warm_chunk = val_tokens[: warm_val_seqs * args.train_seq_len + 1] + x_val = mx.array(warm_chunk[:-1].reshape(-1, args.train_seq_len), dtype=mx.int32) + y_val = mx.array(warm_chunk[1:].reshape(-1, args.train_seq_len), dtype=mx.int32) + warm_val_loss = compiled_loss(x_val, y_val) + mx.eval(warm_val_loss) + mx.synchronize() + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + train_time_ms = 0.0 + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + stop_after_step: int | None = None + t0 = time.perf_counter() + step = 0 + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + train_time_ms += 1000.0 * (time.perf_counter() - t0) + # Validation always scans the same fixed full validation split. 
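+            # Reminder of what eval_val returns: val_loss is mean token cross-entropy in nats, and
+            # val_bpb = (val_loss / ln 2) * (total_tokens / total_bytes), i.e. bits per UTF-8 byte
+            # of the decoded validation text.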
+ val_loss, val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + if step % 25 == 0 or last_step: + log( + f"step:{step}/{args.iterations} val_loss:{val_loss:.4f} val_bpb:{val_bpb:.4f} " + f"train_time:{train_time_ms:.0f}ms step_avg:{train_time_ms / max(step, 1):.2f}ms" + ) + t0 = time.perf_counter() + if last_step: + if stop_after_step is not None and step < args.iterations: + log(f"stopping_early: wallclock_cap train_time:{train_time_ms:.0f}ms step:{step}/{args.iterations}") + break + + lr_mul = args.lr_mul(step, train_time_ms + 1000.0 * (time.perf_counter() - t0)) + step_t0 = time.perf_counter() + + accum: dict[str, mx.array] | None = None + train_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + train_loss = train_loss + loss.astype(mx.float32) * grad_scale + if args.mlx_eager_eval: + mx.eval(train_loss, accum) # materialize each microbatch to cap peak memory + + grads = tree_unflatten(list(accum.items())) + grads = clip_grad_tree(grads, args.grad_clip_norm) + train_loss_value = float(train_loss.item()) + opt.step(model, grads, step=step, lr_mul=lr_mul) + mx.synchronize() + + step_ms = 1000.0 * (time.perf_counter() - step_t0) + approx_train_time_ms = train_time_ms + 1000.0 * (time.perf_counter() - t0) + tok_s = args.train_batch_tokens / (step_ms / 1000.0) + step += 1 + if args.train_log_every > 0 and (step <= 10 or step % args.train_log_every == 0 or stop_after_step is not None): + log( + f"step:{step}/{args.iterations} train_loss:{train_loss_value:.4f} " + f"train_time:{approx_train_time_ms:.0f}ms step_avg:{approx_train_time_ms / step:.2f}ms tok_s:{tok_s:.0f}" + ) + if max_wallclock_ms is not None and stop_after_step is None and approx_train_time_ms >= max_wallclock_ms: + stop_after_step = step + + # ============================================================================== + # FINAL SERIALIZATION + QUANTIZED ROUNDTRIP EVAL + # ============================================================================== + # We always write a raw artifact and a quantized artifact, then validate the + # quantized roundtrip directly by loading the dequantized tensors back into the + # model and running one final validation pass. 
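+    # Loading sketch for the quantized artifact (mirrors the roundtrip below; path is illustrative):
+    #   obj = pickle.loads(zlib.decompress(Path(p).read_bytes()))
+    #   model.update(tree_unflatten(list(dequantize_state_dict_int8(obj).items())))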
+ out_path = out_dir / f"{args.run_id}_mlx_model.npz" + flat_state = {k: v for k, v in tree_flatten(model.state)} + mx.savez(str(out_path), **flat_state) + log(f"saved_model:{out_path} bytes:{out_path.stat().st_size}") + + quant_obj, quant_stats = quantize_state_dict_int8(flat_state) + quant_raw = pickle.dumps(quant_obj, protocol=pickle.HIGHEST_PROTOCOL) + quant_blob = zlib.compress(quant_raw, level=9) + quant_serialized_bytes = len(quant_raw) + quant_path = out_dir / f"{args.run_id}_mlx_model.int8.ptz" + with quant_path.open("wb") as f: + f.write(quant_blob) + quant_file_bytes = quant_path.stat().st_size + ratio = quant_stats["baseline_tensor_bytes"] / max(quant_stats["int8_payload_bytes"], 1) + log( + f"serialized_model_int8_zlib:{quant_file_bytes} bytes " + f"(payload:{quant_stats['int8_payload_bytes']} raw_pickle:{quant_serialized_bytes} payload_ratio:{ratio:.2f}x)" + ) + + with quant_path.open("rb") as f: + quant_blob_disk = f.read() + quant_flat = dequantize_state_dict_int8(pickle.loads(zlib.decompress(quant_blob_disk))) + model.update(tree_unflatten(list(quant_flat.items()))) + q_t0 = time.perf_counter() + q_val_loss, q_val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + q_eval_ms = 1000.0 * (time.perf_counter() - q_t0) + log(f"final_int8_zlib_roundtrip val_loss:{q_val_loss:.4f} val_bpb:{q_val_bpb:.4f} eval_time:{q_eval_ms:.0f}ms") + log(f"final_int8_zlib_roundtrip_exact val_loss:{q_val_loss:.8f} val_bpb:{q_val_bpb:.8f}") + + +if __name__ == "__main__": + main() diff --git a/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_log.txt b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_log.txt new file mode 100644 index 0000000000..9070a1d73f --- /dev/null +++ b/records/track_10min_16mb/2026-03-22_10L_MLP3x_Int6_Baseline/train_log.txt @@ -0,0 +1,7158 @@ +#!/usr/bin/env python3 +""" +The `train_gpt.py` and `train_gpt_mlx.py` scripts are intended as good launching-off points for new participants, not SOTA configs. We'll accept PRs that tune, improve, or simplify these scripts without significantly increasing complexity, but competitive submissions should stay in the `/records` folder. + +Hard stop: To keep readable for newcomers, let's make sure `train_gpt.py` and `train_gpt_mlx.py` never are longer than 1500 lines. +""" +from __future__ import annotations + +import glob +import json +import math +import os +import pickle +import sys +import time +import uuid +import zlib +from collections.abc import Callable +from pathlib import Path + +import numpy as np +import sentencepiece as spm + +import mlx.core as mx +import mlx.nn as nn +import mlx.optimizers as optim +from mlx.utils import tree_flatten, tree_unflatten + +# ============================================================================== +# SHARD FORMAT + COMPUTE DTYPE +# ============================================================================== + +COMPUTE_DTYPE = mx.bfloat16 + +# ============================================================================== +# HYPERPARAMETERS +# ============================================================================== +# Default Simple Baseline run: +# - 9 transformer blocks at width 512 +# - 8 attention heads with 4 KV heads (GQA) and 2x MLP expansion +# - vocab size 1024, sequence length 1024, tied embeddings +# - 524,288 train tokens per step for 20,000 iterations with a ~10 minute cap +class Hyperparameters: + # Data / tokenizer. 
+ data_path: str = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + tokenizer_path: str = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id: str = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed: int = int(os.environ.get("SEED", 1337)) + + # Training loop. These defaults now mirror train_gpt.py on a single process. + iterations: int = int(os.environ.get("ITERATIONS", 20_000)) + val_loss_every: int = int(os.environ.get("VAL_LOSS_EVERY", 0)) + # Validation always uses the full fineweb_val split. + val_batch_size: int = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + train_log_every: int = int(os.environ.get("TRAIN_LOG_EVERY", 200)) + train_batch_tokens: int = int(os.environ.get("TRAIN_BATCH_TOKENS", 524_288)) + grad_accum_steps: int = int(os.environ.get("GRAD_ACCUM_STEPS", 8)) + train_seq_len: int = int(os.environ.get("TRAIN_SEQ_LEN", os.environ.get("TRAIN_MAX_SEQ_LEN", 1024))) + # Chunk each logical MLX microbatch into smaller sub-batches to reduce peak + # memory pressure without changing the effective optimizer batch. + mlx_max_microbatch_tokens: int = int(os.environ.get("MLX_MAX_MICROBATCH_TOKENS", 8_192)) + # Force MLX to materialize the graph after every sub-batch, preventing lazy + # graph buildup across accumulation steps. Keeps peak memory low on 16GB machines. + # Disable on 32GB+ unified memory for better throughput (MLX_EAGER_EVAL=0). + mlx_eager_eval: bool = bool(int(os.environ.get("MLX_EAGER_EVAL", "1"))) + warmup_steps: int = int(os.environ.get("WARMUP_STEPS", 20)) + warmdown_iters: int = int(os.environ.get("WARMDOWN_ITERS", 1200)) + max_wallclock_seconds: float = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + + # Model (defaults match the current baseline setup). + vocab_size: int = int(os.environ.get("VOCAB_SIZE", 1024)) + num_layers: int = int(os.environ.get("NUM_LAYERS", 9)) + model_dim: int = int(os.environ.get("MODEL_DIM", 512)) + num_heads: int = int(os.environ.get("NUM_HEADS", 8)) + num_kv_heads: int = int(os.environ.get("NUM_KV_HEADS", 4)) + mlp_mult: int = int(os.environ.get("MLP_MULT", 2)) + tie_embeddings: bool = bool(int(os.environ.get("TIE_EMBEDDINGS", "1"))) + tied_embed_init_std: float = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005)) + logit_chunk_tokens: int = int(os.environ.get("LOGIT_CHUNK_TOKENS", 0)) + logit_softcap: float = float(os.environ.get("LOGIT_SOFTCAP", 30.0)) + rope_base: float = float(os.environ.get("ROPE_BASE", 10000.0)) + qk_gain_init: float = float(os.environ.get("QK_GAIN_INIT", 1.5)) + + # Optimizer. We keep the same per-group defaults as train_gpt.py. 
+ beta1: float = float(os.environ.get("BETA1", 0.9)) + beta2: float = float(os.environ.get("BETA2", 0.95)) + adam_eps: float = float(os.environ.get("ADAM_EPS", 1e-8)) + tied_embed_lr: float = float(os.environ.get("TIED_EMBED_LR", 0.05)) + matrix_lr: float = float(os.environ.get("MATRIX_LR", 0.04)) + scalar_lr: float = float(os.environ.get("SCALAR_LR", 0.04)) + muon_momentum: float = float(os.environ.get("MUON_MOMENTUM", 0.95)) + muon_backend_steps: int = int(os.environ.get("MUON_BACKEND_STEPS", 5)) + muon_momentum_warmup_start: float = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.85)) + muon_momentum_warmup_steps: int = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 500)) + grad_clip_norm: float = float(os.environ.get("GRAD_CLIP_NORM", 0.0)) + + out_dir: str = os.environ.get("OUT_DIR", "logs") + + @property + def train_files(self) -> str: + return f"{self.data_path}/fineweb_train_*.bin" + + @property + def val_files(self) -> str: + return f"{self.data_path}/fineweb_val_*.bin" + + @property + def microbatch_tokens(self) -> int: + return self.train_batch_tokens // self.grad_accum_steps + + def lr_mul(self, step: int, elapsed_ms: float) -> float: + if self.warmdown_iters <= 0: + return 1.0 + if self.max_wallclock_seconds <= 0: + warmdown_start = max(self.iterations - self.warmdown_iters, 0) + return max((self.iterations - step) / max(self.warmdown_iters, 1), 0.0) if warmdown_start <= step < self.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = self.warmdown_iters * step_ms + remaining_ms = max(1000.0 * self.max_wallclock_seconds - elapsed_ms, 0.0) + return remaining_ms / max(warmdown_ms, 1e-9) if remaining_ms <= warmdown_ms else 1.0 + + +CONTROL_TENSOR_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "CONTROL_TENSOR_NAME_PATTERNS", + "attn_scale,attn_scales,mlp_scale,mlp_scales,resid_mix,resid_mixes,q_gain,skip_weight,skip_weights", + ).split(",") + if pattern +) +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "INT8_KEEP_FLOAT_FP32_NAME_PATTERNS", + ",".join(CONTROL_TENSOR_NAME_PATTERNS), + ).split(",") + if pattern +) + + +def token_chunks(total_tokens: int, seq_len: int, max_chunk_tokens: int) -> list[int]: + usable_total = (total_tokens // seq_len) * seq_len + if usable_total <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + usable_chunk = max((max_chunk_tokens // seq_len) * seq_len, seq_len) + chunks: list[int] = [] + remaining = usable_total + while remaining > 0: + chunk = min(remaining, usable_chunk) + chunks.append(chunk) + remaining -= chunk + return chunks + + +def accumulate_flat_grads( + accum: dict[str, mx.array] | None, + grads_tree: dict, + scale: float, +) -> dict[str, mx.array]: + flat = dict(tree_flatten(grads_tree)) + if accum is None: + return {k: g * scale for k, g in flat.items()} + for k, g in flat.items(): + accum[k] = accum[k] + g * scale + return accum + + +# ============================================================================== +# MATH HELPERS +# ============================================================================== + +def rms_norm(x: mx.array, eps: float = 1e-6) -> mx.array: + return (x * mx.rsqrt(mx.mean(x * x, axis=-1, keepdims=True) + eps)).astype(x.dtype) + + +def zeropower_newtonschulz5(g: mx.array, steps: int, eps: float = 1e-7) -> mx.array: + # Orthogonalize a 2D update matrix with a fast Newton-Schulz iteration. + # Muon uses this to normalize matrix-shaped gradients before applying them. 
+ # Background on Muon: https://kellerjordan.github.io/posts/muon/ + a, b, c = 3.4445, -4.7750, 2.0315 + x = g.astype(mx.float32) + x = x / (mx.sqrt(mx.sum(x * x)) + eps) + transposed = x.shape[0] > x.shape[1] + if transposed: + x = x.T + for _ in range(steps): + a_mat = x @ x.T + b_mat = b * a_mat + c * (a_mat @ a_mat) + x = a * x + b_mat @ x + if transposed: + x = x.T + return x.astype(g.dtype) + + +def load_data_shard(path: Path) -> np.ndarray: + header_bytes = 256 * np.dtype(" None: + self.file_idx = (self.file_idx + 1) % len(self.files) + if self.file_idx == 0: + self.epoch += 1 + if self.log_fn is not None: + self.log_fn( + f"WARNING: starting epoch:{self.epoch} " + f"dataset:{self.dataset_name} train_shards:{len(self.files)}" + ) + self.tokens = load_data_shard(self.files[self.file_idx]) + self.pos = 0 + + def take(self, n: int) -> np.ndarray: + chunks: list[np.ndarray] = [] + left = n + while left > 0: + if self.pos >= self.tokens.size: + self.next_file() + k = min(left, int(self.tokens.size - self.pos)) + chunks.append(self.tokens[self.pos : self.pos + k]) + self.pos += k + left -= k + return chunks[0] if len(chunks) == 1 else np.concatenate(chunks, axis=0) + + +class TokenLoader: + def __init__( + self, + pattern: str, + log_fn: Callable[[str], None] | None = None, + dataset_name: str = "", + ): + self.stream = TokenStream(pattern, log_fn=log_fn, dataset_name=dataset_name) + + def next_batch(self, batch_tokens: int, seq_len: int) -> tuple[mx.array, mx.array]: + usable = (batch_tokens // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + chunk = self.stream.take(usable + 1) + x = chunk[:-1].reshape(-1, seq_len) + y = chunk[1:].reshape(-1, seq_len) + return mx.array(x, dtype=mx.int32), mx.array(y, dtype=mx.int32) + + +# ============================================================================== +# MODEL BLOCKS +# ============================================================================== + +class CastedLinear(nn.Module): + def __init__(self, in_dim: int, out_dim: int): + super().__init__() + self.weight = nn.Linear(in_dim, out_dim, bias=False).weight.astype(mx.float32) + + def __call__(self, x: mx.array) -> mx.array: + return x @ self.weight.astype(x.dtype).T + + +class RMSNormNoWeight(nn.Module): + # MLX module wrapper around the functional RMSNorm helper so it composes nicely in blocks. 
+ def __call__(self, x: mx.array) -> mx.array: + return rms_norm(x) + + +class CausalSelfAttention(nn.Module): + # - separate q/k/v projections + # - RMSNorm on q and k before attention + # - RoPE on q and k + # - causal masked SDPA + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + if dim % num_heads != 0: + raise ValueError("model_dim must be divisible by num_heads") + if num_heads % num_kv_heads != 0: + raise ValueError("num_heads must be divisible by num_kv_heads") + self.num_heads = num_heads + self.num_kv_heads = num_kv_heads + self.head_dim = dim // num_heads + if self.head_dim % 2 != 0: + raise ValueError("head_dim must be even for RoPE") + kv_dim = self.num_kv_heads * self.head_dim + self.c_q = CastedLinear(dim, dim) + self.c_k = CastedLinear(dim, kv_dim) + self.c_v = CastedLinear(dim, kv_dim) + self.proj = CastedLinear(dim, dim) + self.q_gain = mx.ones((num_heads,), dtype=mx.float32) * qk_gain_init + self.rope = nn.RoPE(self.head_dim, traditional=False, base=rope_base) + self.scale = self.head_dim ** -0.5 + + def __call__(self, x: mx.array) -> mx.array: + bsz, seqlen, dim = x.shape + q = self.c_q(x).reshape(bsz, seqlen, self.num_heads, self.head_dim).transpose(0, 2, 1, 3) + k = self.c_k(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + v = self.c_v(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + + q = self.rope(rms_norm(q).astype(COMPUTE_DTYPE)) + k = self.rope(rms_norm(k).astype(COMPUTE_DTYPE)) + q = q * self.q_gain.astype(q.dtype)[None, :, None, None] + y = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask="causal") + y = y.transpose(0, 2, 1, 3).reshape(bsz, seqlen, dim) + return self.proj(y) + + +class MLP(nn.Module): + # Baseline MLP uses relu^2 instead of GELU/SiLU. It is cheap and works well in this setup. 
+ def __init__(self, dim: int, mlp_mult: int): + super().__init__() + hidden = dim * mlp_mult + self.fc = CastedLinear(dim, hidden) + self.proj = CastedLinear(hidden, dim) + + def __call__(self, x: mx.array) -> mx.array: + x = nn.relu(self.fc(x)) + return self.proj(x * x) + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + mlp_mult: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + self.attn_norm = RMSNormNoWeight() + self.mlp_norm = RMSNormNoWeight() + self.attn = CausalSelfAttention(dim, num_heads, num_kv_heads, rope_base, qk_gain_init) + self.mlp = MLP(dim, mlp_mult) + self.attn_scale = mx.ones((dim,), dtype=mx.float32) + self.mlp_scale = mx.ones((dim,), dtype=mx.float32) + self.resid_mix = mx.array(np.stack((np.ones((dim,), dtype=np.float32), np.zeros((dim,), dtype=np.float32)))) + + def __call__(self, x: mx.array, x0: mx.array) -> mx.array: + mix = self.resid_mix.astype(x.dtype) + x = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + attn_out = self.attn(self.attn_norm(x)) + x = x + self.attn_scale.astype(x.dtype)[None, None, :] * attn_out + x = x + self.mlp_scale.astype(x.dtype)[None, None, :] * self.mlp(self.mlp_norm(x)) + return x + + +class GPT(nn.Module): + # - token embedding + RMSNorm + # - encoder half accumulates skip tensors + # - decoder half consumes reversed skips with learned skip_weights + # - tied embeddings for the LM head (the baseline default setup) + def __init__(self, vocab_size: int, num_layers: int, dim: int, num_heads: int, num_kv_heads: int, mlp_mult: int, + logit_chunk_tokens: int, logit_softcap: float, rope_base: float, tied_embed_init_std: float, + qk_gain_init: float): + super().__init__() + if logit_softcap <= 0.0: + raise ValueError(f"logit_softcap must be positive, got {logit_softcap}") + self.logit_chunk_tokens = logit_chunk_tokens + self.logit_softcap = logit_softcap + + self.tok_emb = nn.Embedding(vocab_size, dim) + self.num_encoder_layers = num_layers // 2 + self.num_decoder_layers = num_layers - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = mx.ones((self.num_skip_weights, dim), dtype=mx.float32) + self.blocks = [ + Block(dim, num_heads, num_kv_heads, mlp_mult, rope_base, qk_gain_init) + for i in range(num_layers) + ] + self.final_norm = RMSNormNoWeight() + + for b in self.blocks: + b.attn.proj.weight = mx.zeros_like(b.attn.proj.weight) + b.mlp.proj.weight = mx.zeros_like(b.mlp.proj.weight) + self.tok_emb.weight = ( + mx.random.normal(self.tok_emb.weight.shape, dtype=mx.float32) * tied_embed_init_std + ).astype(COMPUTE_DTYPE) + + def softcap(self, logits: mx.array) -> mx.array: + c = self.logit_softcap + return c * mx.tanh(logits / c) + + def __call__(self, input_ids: mx.array) -> mx.array: + x = rms_norm(self.tok_emb(input_ids).astype(COMPUTE_DTYPE)) + x0 = x + skips: list[mx.array] = [] + + for i in range(self.num_encoder_layers): + x = self.blocks[i](x, x0) + skips.append(x) + for i in range(self.num_decoder_layers): + # Odd layer counts have one more decoder block than encoder block. The baseline only + # applies a skip connection when one exists, then runs the remaining decoder block(s) + # without an added skip. 
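+            # Worked example with the baseline num_layers=9: 4 encoder blocks produce 4 skip
+            # tensors, decoder iterations i=0..3 each pop one, and the final decoder block
+            # (i=4) finds `skips` empty and runs without an added skip.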
+ if skips: + x = x + self.skip_weights[i].astype(x.dtype)[None, None, :] * skips.pop() + x = self.blocks[self.num_encoder_layers + i](x, x0) + return self.final_norm(x) + + def loss(self, input_ids: mx.array, target_ids: mx.array) -> mx.array: + # Cross-entropy over flattened tokens. We keep optional logit chunking because it is a useful + # memory knob on Macs, but the common path is chunk_tokens=0 (single matmul + CE). + x = self(input_ids).reshape(-1, self.tok_emb.weight.shape[1]) + y = target_ids.reshape(-1) + if self.logit_chunk_tokens <= 0 or x.shape[0] <= self.logit_chunk_tokens: + logits_proj = x @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + return nn.losses.cross_entropy(logits.astype(mx.float32), y, reduction="mean") + + loss_sum = mx.array(0.0, dtype=mx.float32) + n = int(x.shape[0]) + for s in range(0, n, self.logit_chunk_tokens): + e = min(s + self.logit_chunk_tokens, n) + logits_proj = x[s:e] @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + loss_sum = loss_sum + nn.losses.cross_entropy(logits.astype(mx.float32), y[s:e], reduction="sum") + return loss_sum / float(n) + +# ============================================================================== +# OPTIMIZERS (MUON + ADAM SPLIT) +# ============================================================================== +class Muon: + # Muon applies SGD-momentum to matrix gradients, then orthogonalizes the result before the + # parameter update. + def __init__(self, keys: list[str], params: dict[str, mx.array], args: Hyperparameters): + self.keys = keys + self.args = args + self.buffers = {k: mx.zeros_like(params[k]) for k in keys} + + def step(self, params: dict[str, mx.array], grads: dict[str, mx.array], step: int, lr_mul: float) -> dict[str, mx.array]: + if self.args.muon_momentum_warmup_steps: + t = min(step / self.args.muon_momentum_warmup_steps, 1.0) + momentum = (1.0 - t) * self.args.muon_momentum_warmup_start + t * self.args.muon_momentum + else: + momentum = self.args.muon_momentum + lr = self.args.matrix_lr * lr_mul + out: dict[str, mx.array] = {} + for k in self.keys: + p = params[k] + g = grads[k] + buf = momentum * self.buffers[k] + g + self.buffers[k] = buf + g_eff = g + momentum * buf + g_ortho = zeropower_newtonschulz5(g_eff, self.args.muon_backend_steps) + scale = math.sqrt(max(1.0, float(p.shape[0]) / float(p.shape[1]))) + out[k] = p - lr * (g_ortho * scale).astype(p.dtype) + return out + + +class SplitOptimizers: + # - embeddings: Adam with the tied-embedding LR + # - block matrices (2D): Muon + # - block scalars + skip weights: Adam + # This preserves the high-level optimization behavior even though MLX internals differ. 
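+    # Illustrative routing implied by the key filters below:
+    #   tok_emb.weight           -> Adam with the tied-embedding LR
+    #   blocks.3.attn.c_q.weight -> Muon (2D block matrix, not a control tensor)
+    #   blocks.3.attn_scale      -> scalar Adam (matches a control-tensor name pattern)
+    #   skip_weights             -> scalar Adam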
+ def __init__(self, model: GPT, args: Hyperparameters): + self.args = args + params = dict(tree_flatten(model.parameters())) + self.embed_key = "tok_emb.weight" + self.matrix_keys = [ + k + for k, p in params.items() + if k.startswith("blocks.") and p.ndim == 2 and not any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS) + ] + self.scalar_keys = [ + k + for k, p in params.items() + if k == "skip_weights" or (k.startswith("blocks.") and (p.ndim < 2 or any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS))) + ] + + self.muon = Muon(self.matrix_keys, params, args) + self.adam_embed = optim.Adam( + learning_rate=args.tied_embed_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + self.adam_scalar = optim.Adam( + learning_rate=args.scalar_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + + def step(self, model: GPT, grads_tree: dict, step: int, lr_mul: float) -> None: + params = dict(tree_flatten(model.parameters())) + grads = dict(tree_flatten(grads_tree)) + updated = dict(params) + + updated.update(self.muon.step(params, grads, step=step, lr_mul=lr_mul)) + + self.adam_embed.learning_rate = self.args.tied_embed_lr * lr_mul + updated.update( + self.adam_embed.apply_gradients( + {self.embed_key: grads[self.embed_key]}, + {self.embed_key: params[self.embed_key]}, + ) + ) + + self.adam_scalar.learning_rate = self.args.scalar_lr * lr_mul + scalar_grads = {k: grads[k] for k in self.scalar_keys} + scalar_params = {k: params[k] for k in self.scalar_keys} + updated.update(self.adam_scalar.apply_gradients(scalar_grads, scalar_params)) + + model.update(tree_unflatten(list(updated.items()))) + +# ============================================================================== +# QUANTIZATION (INT8 + ZLIB) +# ============================================================================== +# - per-row int8 for 2D float tensors +# - per-tensor int8 for other float tensors +# - fp16 passthrough for small float tensors +# - exact passthrough for non-floats + +MX_DTYPE_FROM_NAME = { + "float32": mx.float32, + "float16": mx.float16, + "bfloat16": mx.bfloat16, +} + +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = np.float16 +INT8_PER_ROW_SCALE_DTYPE = np.float16 +INT8_CLIP_PERCENTILE = 99.99984 +INT8_CLIP_Q = INT8_CLIP_PERCENTILE / 100.0 + + +def _np_float32(arr: mx.array) -> np.ndarray: + return np.array(arr.astype(mx.float32), dtype=np.float32, copy=False) + + +def keep_float_array(name: str, arr: mx.array, passthrough_orig_dtypes: dict[str, str]) -> np.ndarray: + if any(pattern in name for pattern in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + return np.ascontiguousarray(_np_float32(arr)) + if arr.dtype in {mx.float32, mx.bfloat16}: + passthrough_orig_dtypes[name] = str(arr.dtype).split(".")[-1] + return np.ascontiguousarray(np.array(arr.astype(mx.float16), dtype=INT8_KEEP_FLOAT_STORE_DTYPE, copy=False)) + return np.ascontiguousarray(np.array(arr, copy=True)) + + +def quantize_float_array(arr: mx.array) -> tuple[np.ndarray, np.ndarray]: + f32 = _np_float32(arr) + if f32.ndim == 2: + # Matrices get one scale per row, which usually tracks output-channel + # ranges much better than a single tensor-wide scale. 
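+        # Worked example (illustrative numbers): if a row's clipping threshold comes out as
+        # clip_abs = 0.5, then scale = 0.5 / 127 ~= 0.003937, a weight of 0.30 quantizes to
+        # round(0.30 / scale) = 76, and dequantization recovers 76 * scale ~= 0.299.
+        # The max(..., 1 / 127) floor keeps an all-zero row from producing a zero scale.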
+ clip_abs = np.quantile(np.abs(f32), INT8_CLIP_Q, axis=1) if f32.size else np.empty((f32.shape[0],), dtype=np.float32) + clipped = np.clip(f32, -clip_abs[:, None], clip_abs[:, None]) + scale = np.maximum(clip_abs / 127.0, 1.0 / 127.0).astype(np.float32, copy=False) + q = np.clip(np.round(clipped / scale[:, None]), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), np.ascontiguousarray(scale.astype(INT8_PER_ROW_SCALE_DTYPE, copy=False)) + + # Vectors / scalars use a simpler per-tensor scale. + clip_abs = float(np.quantile(np.abs(f32).reshape(-1), INT8_CLIP_Q)) if f32.size else 0.0 + scale = np.array(clip_abs / 127.0 if clip_abs > 0.0 else 1.0, dtype=np.float32) + q = np.clip(np.round(np.clip(f32, -clip_abs, clip_abs) / scale), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), scale + + +def quantize_state_dict_int8(flat_state: dict[str, mx.array]) -> tuple[dict[str, object], dict[str, int]]: + quantized: dict[str, np.ndarray] = {} + scales: dict[str, np.ndarray] = {} + dtypes: dict[str, str] = {} + passthrough: dict[str, np.ndarray] = {} + passthrough_orig_dtypes: dict[str, str] = {} + qmeta: dict[str, dict[str, object]] = {} + stats = dict.fromkeys( + ("param_count", "num_tensors", "num_float_tensors", "num_nonfloat_tensors", "baseline_tensor_bytes", "int8_payload_bytes"), + 0, + ) + for name, arr in flat_state.items(): + stats["param_count"] += int(arr.size) + stats["num_tensors"] += 1 + stats["baseline_tensor_bytes"] += int(arr.nbytes) + if not mx.issubdtype(arr.dtype, mx.floating): + stats["num_nonfloat_tensors"] += 1 + passthrough[name] = np.ascontiguousarray(np.array(arr)) + stats["int8_payload_bytes"] += int(passthrough[name].nbytes) + continue + + # Small float tensors are cheap enough to keep directly. We still downcast + # fp32/bf16 passthrough tensors to fp16 so metadata does not dominate size. + if int(arr.size) <= INT8_KEEP_FLOAT_MAX_NUMEL: + kept = keep_float_array(name, arr, passthrough_orig_dtypes) + passthrough[name] = kept + stats["int8_payload_bytes"] += int(kept.nbytes) + continue + + stats["num_float_tensors"] += 1 + q, s = quantize_float_array(arr) + if s.ndim > 0: + qmeta[name] = {"scheme": "per_row", "axis": 0} + quantized[name] = q + scales[name] = s + dtypes[name] = str(arr.dtype).split(".")[-1] + stats["int8_payload_bytes"] += int(q.nbytes + s.nbytes) + obj: dict[str, object] = { + "__quant_format__": "int8_clean_per_row_v1", + "quantized": quantized, + "scales": scales, + "dtypes": dtypes, + "passthrough": passthrough, + } + if qmeta: + obj["qmeta"] = qmeta + if passthrough_orig_dtypes: + obj["passthrough_orig_dtypes"] = passthrough_orig_dtypes + return obj, stats + + +def dequantize_state_dict_int8(quant_obj: dict[str, object]) -> dict[str, mx.array]: + out: dict[str, mx.array] = {} + qmeta = quant_obj.get("qmeta", {}) + passthrough_orig_dtypes = quant_obj.get("passthrough_orig_dtypes", {}) + for name, q in quant_obj["quantized"].items(): + q_np = np.asarray(q, dtype=np.int8) + dtype_name = quant_obj["dtypes"][name] + scale = np.asarray(quant_obj["scales"][name], dtype=np.float32) + if qmeta.get(name, {}).get("scheme") == "per_row" or scale.ndim > 0: + # Broadcast the saved row scale back across trailing dimensions. 
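+            # e.g. a (512, 1536) int8 matrix stored with a (512,) per-row scale is multiplied
+            # by scale.reshape(512, 1), broadcasting one scale value across each row.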
+ out_arr = q_np.astype(np.float32) * scale.reshape((q_np.shape[0],) + (1,) * (q_np.ndim - 1)) + else: + out_arr = q_np.astype(np.float32) * float(scale) + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[dtype_name]) + for name, arr in quant_obj["passthrough"].items(): + # Restore small tensors, undoing the temporary fp16 storage cast if needed. + out_arr = np.array(arr, copy=True) + orig_dtype = passthrough_orig_dtypes.get(name) + if isinstance(orig_dtype, str): + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[orig_dtype]) + else: + out[name] = mx.array(out_arr) + return out + + +def build_sentencepiece_luts( + sp: spm.SentencePieceProcessor, vocab_size: int +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_lut = np.zeros((table_size,), dtype=np.int16) + has_leading_space_lut = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_lut = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_lut[token_id] = False + if sp.is_byte(token_id): + base_bytes_lut[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("▁"): + has_leading_space_lut[token_id] = True + piece = piece[1:] + base_bytes_lut[token_id] = len(piece.encode("utf-8")) + return base_bytes_lut, has_leading_space_lut, is_boundary_token_lut + + +def validate_dataset_tokenizer_pair(data_path: str, tokenizer_path: str) -> tuple[str, int, int | None]: + # The shard directory and tokenizer are coupled: val_bpb is only meaningful if we + # decode bytes with the exact tokenizer that produced the shards. The manifest + # lets the training script fail fast on accidental dataset/tokenizer mismatches. 
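+    # Illustrative manifest shape this check relies on (field names come from the lookups
+    # below; the concrete values here are only examples):
+    #   {"datasets":   [{"name": "fineweb10B_sp1024", "tokenizer_name": "fineweb_1024_bpe",
+    #                    "stats": {"files_train": 195}}],
+    #    "tokenizers": [{"name": "fineweb_1024_bpe",
+    #                    "model_path": "./data/tokenizers/fineweb_1024_bpe.model"}]}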
+ dataset_dir = Path(data_path).resolve() + actual_train_files = len(list(dataset_dir.glob("fineweb_train_*.bin"))) + if len(dataset_dir.parents) < 2: + return dataset_dir.name, actual_train_files, None + manifest_path = dataset_dir.parents[1] / "manifest.json" + if not manifest_path.is_file(): + return dataset_dir.name, actual_train_files, None + + manifest = json.loads(manifest_path.read_text(encoding="utf-8")) + dataset_entry = next((x for x in manifest.get("datasets", []) if x.get("name") == dataset_dir.name), None) + if dataset_entry is None: + return dataset_dir.name, actual_train_files, None + + tokenizer_name = dataset_entry.get("tokenizer_name") + tokenizer_entry = ( + next((x for x in manifest.get("tokenizers", []) if x.get("name") == tokenizer_name), None) + if tokenizer_name + else None + ) + expected_name = Path((tokenizer_entry or {}).get("model_path") or (tokenizer_entry or {}).get("path") or "").name + if expected_name and Path(tokenizer_path).name != expected_name: + raise ValueError(f"{dataset_dir.name} expects tokenizer {expected_name}, got {Path(tokenizer_path).name}") + expected_train_files = (dataset_entry.get("stats") or {}).get("files_train") + if expected_train_files is not None: + expected_train_files = int(expected_train_files) + if actual_train_files > expected_train_files: + raise ValueError( + f"{dataset_dir.name} has more train shards than expected: found {actual_train_files}, " + f"manifest says {expected_train_files}" + ) + return dataset_dir.name, actual_train_files, expected_train_files + + +def load_validation_tokens(pattern: str, seq_len: int) -> np.ndarray: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + # The export pipeline writes the fixed first-50k-doc validation set to fineweb_val_*. 
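+    # The slice below keeps floor((N - 1) / seq_len) * seq_len + 1 tokens so that the inputs and
+    # the one-token-shifted targets both reshape into full (num_seqs, seq_len) windows.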
+ tokens = np.ascontiguousarray(np.concatenate([load_data_shard(file) for file in files], axis=0)) + usable = ((tokens.size - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split is too short for TRAIN_SEQ_LEN={seq_len}") + return tokens[: usable + 1] + + +def loss_and_grad_chunked( + args: Hyperparameters, + train_loader: TokenLoader, + compiled_loss_and_grad, +) -> tuple[mx.array, dict]: + chunk_sizes = token_chunks(args.microbatch_tokens, args.train_seq_len, args.mlx_max_microbatch_tokens) + total_tokens = float(sum(chunk_sizes)) + loss_value = mx.array(0.0, dtype=mx.float32) + grad_accum: dict[str, mx.array] | None = None + for chunk_tokens in chunk_sizes: + x, y = train_loader.next_batch(chunk_tokens, args.train_seq_len) + loss, grads = compiled_loss_and_grad(x, y) + scale = float(y.size) / total_tokens + loss_value = loss_value + loss.astype(mx.float32) * scale + grad_accum = accumulate_flat_grads(grad_accum, grads, scale) + if args.mlx_eager_eval: + mx.eval(loss_value, grad_accum) # materialize each chunk to cap peak memory + return loss_value, tree_unflatten(list(grad_accum.items())) + + +def eval_val( + args: Hyperparameters, + compiled_loss, + val_tokens: np.ndarray, + base_bytes_lut: np.ndarray, + has_leading_space_lut: np.ndarray, + is_boundary_token_lut: np.ndarray, + log_fn: Callable[[str], None] | None = None, +) -> tuple[float, float]: + # Validation computes two metrics: + # - val_loss: token cross-entropy (natural log) + # - val_bpb: tokenizer-agnostic compression metric used by the challenge + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + val_batch_seqs = val_batch_tokens // args.train_seq_len + total_seqs = (val_tokens.size - 1) // args.train_seq_len + total_batches = max((total_seqs + val_batch_seqs - 1) // val_batch_seqs, 1) + total_loss_sum = 0.0 + total_tokens = 0.0 + total_bytes = 0.0 + for batch_idx, batch_seq_start in enumerate(range(0, total_seqs, val_batch_seqs), start=1): + batch_seq_end = min(batch_seq_start + val_batch_seqs, total_seqs) + raw_start = batch_seq_start * args.train_seq_len + raw_end = batch_seq_end * args.train_seq_len + 1 + chunk = val_tokens[raw_start:raw_end] + x_np = chunk[:-1].reshape(-1, args.train_seq_len) + y_np = chunk[1:].reshape(-1, args.train_seq_len) + x = mx.array(x_np, dtype=mx.int32) + y = mx.array(y_np, dtype=mx.int32) + chunk_token_count = float(y.size) + batch_loss = compiled_loss(x, y).astype(mx.float32) + mx.eval(batch_loss) + total_loss_sum += float(batch_loss.item()) * chunk_token_count + prev_ids = x_np.reshape(-1) + tgt_ids = y_np.reshape(-1) + bytes_np = base_bytes_lut[tgt_ids].astype(np.int16, copy=True) + bytes_np += ( + has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids] + ).astype(np.int16, copy=False) + total_tokens += chunk_token_count + total_bytes += float(bytes_np.astype(np.float64).sum()) + if log_fn is not None and total_batches > 1 and ( + batch_idx == 1 or batch_idx == total_batches or batch_idx % 25 == 0 + ): + log_fn(f"val_progress:{batch_idx}/{total_batches}") + val_loss = total_loss_sum / total_tokens + bits_per_token = val_loss / math.log(2.0) + val_bpb = bits_per_token * (total_tokens / total_bytes) + return val_loss, val_bpb + +# ----------------------------- +# TRAINING +# 
----------------------------- + +def clip_grad_tree(grads_tree: dict, max_norm: float) -> dict: + if max_norm <= 0: + return grads_tree + flat = dict(tree_flatten(grads_tree)) + total_sq = 0.0 + for grad in flat.values(): + total_sq += float(np.sum(np.square(_np_float32(grad)), dtype=np.float64)) + if total_sq <= 0.0: + return grads_tree + total_norm = math.sqrt(total_sq) + if total_norm <= max_norm: + return grads_tree + scale = max_norm / (total_norm + 1e-12) + return tree_unflatten([(k, g * scale) for k, g in flat.items()]) + + +def main() -> None: + # ============================================================================== + # TOKENIZER + VALIDATION METRIC SETUP + # ============================================================================== + args = Hyperparameters() + out_dir = Path(args.out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + logfile = out_dir / f"{args.run_id}.txt" + print(logfile) + + def log(msg: str, console: bool = True) -> None: + if console: + print(msg) + with logfile.open("a", encoding="utf-8") as f: + print(msg, file=f) + + code = Path(__file__).read_text(encoding="utf-8") + log(code, console=False) + log("=" * 100, console=False) + log(f"Running Python {sys.version}", console=False) + log(f"Running MLX {mx.__version__}", console=False) + log("=" * 100, console=False) + + if not args.tie_embeddings: + raise NotImplementedError("train_gpt_mlx.py only supports tied embeddings") + if not args.tokenizer_path.endswith(".model"): + raise ValueError(f"TOKENIZER_PATH must point to a SentencePiece .model file: {args.tokenizer_path}") + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError( + f"VOCAB_SIZE={args.vocab_size} does not match tokenizer vocab_size={int(sp.vocab_size())}" + ) + dataset_name, actual_train_files, expected_train_files = validate_dataset_tokenizer_pair( + args.data_path, + args.tokenizer_path, + ) + val_tokens = load_validation_tokens(args.val_files, args.train_seq_len) + + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts( + sp, args.vocab_size + ) + + # ============================================================================== + # TRAINING SETUP + # ============================================================================== + mx.random.seed(args.seed) + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + # ============================================================================== + # MODEL + OPTIMIZER SETUP + # ============================================================================== + model = GPT( + vocab_size=args.vocab_size, + num_layers=args.num_layers, + dim=args.model_dim, + num_heads=args.num_heads, + num_kv_heads=args.num_kv_heads, + mlp_mult=args.mlp_mult, + logit_chunk_tokens=args.logit_chunk_tokens, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + tied_embed_init_std=args.tied_embed_init_std, + qk_gain_init=args.qk_gain_init, + ) + opt = SplitOptimizers(model, args) + + # ============================================================================== + # COMPILED TRAIN / EVAL FUNCTIONS (MLX) + # ============================================================================== + # The crucial MLX detail is capture scope: this model contains non-trainable arrays too (for example + # inside RoPE modules), so compiling only against trainable parameters throws "uncaptured inputs". 
+ # Compiling the model-bound functions and capturing the full model state fixes that while still + # returning gradients only for trainable parameters via nn.value_and_grad(...). + compiled_loss = mx.compile(lambda x, y: model.loss(x, y), inputs=model.state, outputs=model.state) + compiled_loss_and_grad = mx.compile( + nn.value_and_grad(model, lambda x, y: model.loss(x, y)), + inputs=model.state, + outputs=model.state, + ) + + # Print config once so logs are self-describing. + n_params = sum(int(np.prod(p.shape)) for _, p in tree_flatten(model.parameters())) + log(f"run_id:{args.run_id}") + log(f"mlx_version:{mx.__version__}") + log(f"train_loader:shards pattern={args.train_files}") + log(f"val_loader:shards pattern={args.val_files} tokens:{val_tokens.size - 1}") + if expected_train_files is None: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}") + elif actual_train_files < expected_train_files: + log( + f"WARNING: train_loader:subset dataset:{dataset_name} " + f"train_shards:{actual_train_files}/{expected_train_files} " + f"new epochs will arrive sooner than the full dataset" + ) + else: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}/{expected_train_files}") + log(f"tokenizer_path:{args.tokenizer_path}") + log( + f"model_params:{n_params} vocab_size:{args.vocab_size} layers:{args.num_layers} " + f"dim:{args.model_dim} heads:{args.num_heads} kv_heads:{args.num_kv_heads} " + f"seq_len:{args.train_seq_len} tie_embeddings:{args.tie_embeddings}" + ) + log( + f"iterations:{args.iterations} train_batch_tokens:{args.train_batch_tokens} grad_accum_steps:{args.grad_accum_steps} " + f"microbatch_tokens:{args.microbatch_tokens} microbatch_batch_size:{args.microbatch_tokens // args.train_seq_len} " + f"val_batch_size:{args.val_batch_size} " + f"warmup_steps:{args.warmup_steps} max_wallclock_seconds:{args.max_wallclock_seconds:.3f}" + ) + log(f"mlx_max_microbatch_tokens:{args.mlx_max_microbatch_tokens}") + log( + f"optimizer:muon+adam muon_matrix_params:{len(opt.matrix_keys)} scalar_params:{len(opt.scalar_keys)} " + f"embed_lr:{args.tied_embed_lr} " + f"matrix_lr:{args.matrix_lr} scalar_lr:{args.scalar_lr} " + f"muon_momentum:{args.muon_momentum} muon_steps:{args.muon_backend_steps}" + ) + log(f"val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path={args.tokenizer_path}") + log(f"compute_dtype:{COMPUTE_DTYPE} compile:True") + log( + f"dtypes tok_emb:{model.tok_emb.weight.dtype} " + f"linear_weight:{model.blocks[0].attn.c_q.weight.dtype} " + f"skip_weights:{model.skip_weights.dtype}" + ) + + # ============================================================================== + # TRAINING LOOP + # ============================================================================== + if args.warmup_steps > 0: + # Warmup should only prime MLX compile/allocation paths. Updating parameters here forces us + # to snapshot and restore model/optimizer state, which is expensive on unified-memory Macs. + # Instead we run the real train shapes, force the loss/grads to materialize, and then reset + # the loader so measured training still starts from the true init and token window. 
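+        # (The first call to an mx.compile'd function is when MLX actually traces and compiles
+        # the graph for these shapes, so paying that cost here keeps the timed steps comparable.)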
+ for warmup_step in range(args.warmup_steps): + accum: dict[str, mx.array] | None = None + warmup_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + warmup_loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + mx.eval(warmup_loss, accum) + mx.synchronize() + if args.warmup_steps <= 20 or (warmup_step + 1) % 10 == 0 or warmup_step + 1 == args.warmup_steps: + log(f"warmup_step:{warmup_step + 1}/{args.warmup_steps}") + + # Prime the standalone eval graph once too. It is compiled separately from value_and_grad. + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + warm_val_seqs = min(val_batch_tokens // args.train_seq_len, (val_tokens.size - 1) // args.train_seq_len) + warm_chunk = val_tokens[: warm_val_seqs * args.train_seq_len + 1] + x_val = mx.array(warm_chunk[:-1].reshape(-1, args.train_seq_len), dtype=mx.int32) + y_val = mx.array(warm_chunk[1:].reshape(-1, args.train_seq_len), dtype=mx.int32) + warm_val_loss = compiled_loss(x_val, y_val) + mx.eval(warm_val_loss) + mx.synchronize() + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + train_time_ms = 0.0 + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + stop_after_step: int | None = None + t0 = time.perf_counter() + step = 0 + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + train_time_ms += 1000.0 * (time.perf_counter() - t0) + # Validation always scans the same fixed full validation split. 
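+            # For reference, eval_val reports bits per byte as
+            # val_bpb = (val_loss / ln 2) * (total_tokens / total_bytes), counting the UTF-8
+            # bytes each target token decodes to via the SentencePiece LUTs built above.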
+ val_loss, val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + if step % 25 == 0 or last_step: + log( + f"step:{step}/{args.iterations} val_loss:{val_loss:.4f} val_bpb:{val_bpb:.4f} " + f"train_time:{train_time_ms:.0f}ms step_avg:{train_time_ms / max(step, 1):.2f}ms" + ) + t0 = time.perf_counter() + if last_step: + if stop_after_step is not None and step < args.iterations: + log(f"stopping_early: wallclock_cap train_time:{train_time_ms:.0f}ms step:{step}/{args.iterations}") + break + + lr_mul = args.lr_mul(step, train_time_ms + 1000.0 * (time.perf_counter() - t0)) + step_t0 = time.perf_counter() + + accum: dict[str, mx.array] | None = None + train_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + train_loss = train_loss + loss.astype(mx.float32) * grad_scale + if args.mlx_eager_eval: + mx.eval(train_loss, accum) # materialize each microbatch to cap peak memory + + grads = tree_unflatten(list(accum.items())) + grads = clip_grad_tree(grads, args.grad_clip_norm) + train_loss_value = float(train_loss.item()) + opt.step(model, grads, step=step, lr_mul=lr_mul) + mx.synchronize() + + step_ms = 1000.0 * (time.perf_counter() - step_t0) + approx_train_time_ms = train_time_ms + 1000.0 * (time.perf_counter() - t0) + tok_s = args.train_batch_tokens / (step_ms / 1000.0) + step += 1 + if args.train_log_every > 0 and (step <= 10 or step % args.train_log_every == 0 or stop_after_step is not None): + log( + f"step:{step}/{args.iterations} train_loss:{train_loss_value:.4f} " + f"train_time:{approx_train_time_ms:.0f}ms step_avg:{approx_train_time_ms / step:.2f}ms tok_s:{tok_s:.0f}" + ) + if max_wallclock_ms is not None and stop_after_step is None and approx_train_time_ms >= max_wallclock_ms: + stop_after_step = step + + # ============================================================================== + # FINAL SERIALIZATION + QUANTIZED ROUNDTRIP EVAL + # ============================================================================== + # We always write a raw artifact and a quantized artifact, then validate the + # quantized roundtrip directly by loading the dequantized tensors back into the + # model and running one final validation pass. 
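+    # Sketch of the consumer-side load path (it mirrors the roundtrip check below):
+    #   obj = pickle.loads(zlib.decompress(quant_path.read_bytes()))
+    #   model.update(tree_unflatten(list(dequantize_state_dict_int8(obj).items())))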
+ out_path = out_dir / f"{args.run_id}_mlx_model.npz" + flat_state = {k: v for k, v in tree_flatten(model.state)} + mx.savez(str(out_path), **flat_state) + log(f"saved_model:{out_path} bytes:{out_path.stat().st_size}") + + quant_obj, quant_stats = quantize_state_dict_int8(flat_state) + quant_raw = pickle.dumps(quant_obj, protocol=pickle.HIGHEST_PROTOCOL) + quant_blob = zlib.compress(quant_raw, level=9) + quant_serialized_bytes = len(quant_raw) + quant_path = out_dir / f"{args.run_id}_mlx_model.int8.ptz" + with quant_path.open("wb") as f: + f.write(quant_blob) + quant_file_bytes = quant_path.stat().st_size + ratio = quant_stats["baseline_tensor_bytes"] / max(quant_stats["int8_payload_bytes"], 1) + log( + f"serialized_model_int8_zlib:{quant_file_bytes} bytes " + f"(payload:{quant_stats['int8_payload_bytes']} raw_pickle:{quant_serialized_bytes} payload_ratio:{ratio:.2f}x)" + ) + + with quant_path.open("rb") as f: + quant_blob_disk = f.read() + quant_flat = dequantize_state_dict_int8(pickle.loads(zlib.decompress(quant_blob_disk))) + model.update(tree_unflatten(list(quant_flat.items()))) + q_t0 = time.perf_counter() + q_val_loss, q_val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + q_eval_ms = 1000.0 * (time.perf_counter() - q_t0) + log(f"final_int8_zlib_roundtrip val_loss:{q_val_loss:.4f} val_bpb:{q_val_bpb:.4f} eval_time:{q_eval_ms:.0f}ms") + log(f"final_int8_zlib_roundtrip_exact val_loss:{q_val_loss:.8f} val_bpb:{q_val_bpb:.8f}") + + +if __name__ == "__main__": + main() + +==================================================================================================== +Running Python 3.14.0 (main, Oct 7 2025, 09:34:52) [Clang 17.0.0 (clang-1700.3.19.1)] +Running MLX 0.31.1 +==================================================================================================== +run_id:smoke_test +mlx_version:0.31.1 +train_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_train_*.bin +val_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_val_*.bin tokens:62021632 +WARNING: train_loader:subset dataset:fineweb10B_sp1024 train_shards:1/195 new epochs will arrive sooner than the full dataset +tokenizer_path:./data/tokenizers/fineweb_1024_bpe.model +model_params:17059912 vocab_size:1024 layers:9 dim:512 heads:8 kv_heads:4 seq_len:1024 tie_embeddings:True +iterations:200 train_batch_tokens:8192 grad_accum_steps:8 microbatch_tokens:1024 microbatch_batch_size:1 val_batch_size:8192 warmup_steps:20 max_wallclock_seconds:600.000 +mlx_max_microbatch_tokens:8192 +optimizer:muon+adam muon_matrix_params:54 scalar_params:37 embed_lr:0.05 matrix_lr:0.04 scalar_lr:0.04 muon_momentum:0.95 muon_steps:5 +val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path=./data/tokenizers/fineweb_1024_bpe.model +compute_dtype:mlx.core.bfloat16 compile:True +dtypes tok_emb:mlx.core.bfloat16 linear_weight:mlx.core.float32 skip_weights:mlx.core.float32 +warmup_step:1/20 +warmup_step:2/20 +warmup_step:3/20 +warmup_step:4/20 +warmup_step:5/20 +warmup_step:6/20 +warmup_step:7/20 +warmup_step:8/20 +warmup_step:9/20 +warmup_step:10/20 +warmup_step:11/20 +warmup_step:12/20 +warmup_step:13/20 +warmup_step:14/20 +warmup_step:15/20 +warmup_step:16/20 +warmup_step:17/20 +warmup_step:18/20 +warmup_step:19/20 +warmup_step:20/20 +step:1/200 train_loss:6.9428 train_time:552ms step_avg:552.33ms tok_s:14833 +step:2/200 train_loss:18.7874 train_time:1306ms step_avg:653.19ms tok_s:10869 +step:3/200 train_loss:14.5884 
train_time:1940ms step_avg:646.59ms tok_s:12938 +step:4/200 train_loss:10.7145 train_time:2566ms step_avg:641.56ms tok_s:13081 +step:5/200 train_loss:8.1853 train_time:3249ms step_avg:649.82ms tok_s:12001 +step:6/200 train_loss:6.9953 train_time:3877ms step_avg:646.20ms tok_s:13048 +step:7/200 train_loss:6.5758 train_time:4501ms step_avg:643.00ms tok_s:13138 +step:8/200 train_loss:6.5355 train_time:5124ms step_avg:640.48ms tok_s:13157 +step:9/200 train_loss:6.3934 train_time:5748ms step_avg:638.66ms tok_s:13131 +step:10/200 train_loss:6.3565 train_time:6371ms step_avg:637.11ms tok_s:13150 +#!/usr/bin/env python3 +""" +The `train_gpt.py` and `train_gpt_mlx.py` scripts are intended as good launching-off points for new participants, not SOTA configs. We'll accept PRs that tune, improve, or simplify these scripts without significantly increasing complexity, but competitive submissions should stay in the `/records` folder. + +Hard stop: To keep readable for newcomers, let's make sure `train_gpt.py` and `train_gpt_mlx.py` never are longer than 1500 lines. +""" +from __future__ import annotations + +import glob +import json +import math +import os +import pickle +import sys +import time +import uuid +import zlib +from collections.abc import Callable +from pathlib import Path + +import numpy as np +import sentencepiece as spm + +import mlx.core as mx +import mlx.nn as nn +import mlx.optimizers as optim +from mlx.utils import tree_flatten, tree_unflatten + +# ============================================================================== +# SHARD FORMAT + COMPUTE DTYPE +# ============================================================================== + +COMPUTE_DTYPE = mx.bfloat16 + +# ============================================================================== +# HYPERPARAMETERS +# ============================================================================== +# Default Simple Baseline run: +# - 9 transformer blocks at width 512 +# - 8 attention heads with 4 KV heads (GQA) and 2x MLP expansion +# - vocab size 1024, sequence length 1024, tied embeddings +# - 524,288 train tokens per step for 20,000 iterations with a ~10 minute cap +class Hyperparameters: + # Data / tokenizer. + data_path: str = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + tokenizer_path: str = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id: str = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed: int = int(os.environ.get("SEED", 1337)) + + # Training loop. These defaults now mirror train_gpt.py on a single process. + iterations: int = int(os.environ.get("ITERATIONS", 20_000)) + val_loss_every: int = int(os.environ.get("VAL_LOSS_EVERY", 0)) + # Validation always uses the full fineweb_val split. + val_batch_size: int = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + train_log_every: int = int(os.environ.get("TRAIN_LOG_EVERY", 200)) + train_batch_tokens: int = int(os.environ.get("TRAIN_BATCH_TOKENS", 524_288)) + grad_accum_steps: int = int(os.environ.get("GRAD_ACCUM_STEPS", 8)) + train_seq_len: int = int(os.environ.get("TRAIN_SEQ_LEN", os.environ.get("TRAIN_MAX_SEQ_LEN", 1024))) + # Chunk each logical MLX microbatch into smaller sub-batches to reduce peak + # memory pressure without changing the effective optimizer batch. + mlx_max_microbatch_tokens: int = int(os.environ.get("MLX_MAX_MICROBATCH_TOKENS", 8_192)) + # Force MLX to materialize the graph after every sub-batch, preventing lazy + # graph buildup across accumulation steps. Keeps peak memory low on 16GB machines. 
+ # Disable on 32GB+ unified memory for better throughput (MLX_EAGER_EVAL=0). + mlx_eager_eval: bool = bool(int(os.environ.get("MLX_EAGER_EVAL", "1"))) + warmup_steps: int = int(os.environ.get("WARMUP_STEPS", 20)) + warmdown_iters: int = int(os.environ.get("WARMDOWN_ITERS", 1200)) + max_wallclock_seconds: float = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + + # Model (defaults match the current baseline setup). + vocab_size: int = int(os.environ.get("VOCAB_SIZE", 1024)) + num_layers: int = int(os.environ.get("NUM_LAYERS", 9)) + model_dim: int = int(os.environ.get("MODEL_DIM", 512)) + num_heads: int = int(os.environ.get("NUM_HEADS", 8)) + num_kv_heads: int = int(os.environ.get("NUM_KV_HEADS", 4)) + mlp_mult: int = int(os.environ.get("MLP_MULT", 2)) + tie_embeddings: bool = bool(int(os.environ.get("TIE_EMBEDDINGS", "1"))) + tied_embed_init_std: float = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005)) + logit_chunk_tokens: int = int(os.environ.get("LOGIT_CHUNK_TOKENS", 0)) + logit_softcap: float = float(os.environ.get("LOGIT_SOFTCAP", 30.0)) + rope_base: float = float(os.environ.get("ROPE_BASE", 10000.0)) + qk_gain_init: float = float(os.environ.get("QK_GAIN_INIT", 1.5)) + + # Optimizer. We keep the same per-group defaults as train_gpt.py. + beta1: float = float(os.environ.get("BETA1", 0.9)) + beta2: float = float(os.environ.get("BETA2", 0.95)) + adam_eps: float = float(os.environ.get("ADAM_EPS", 1e-8)) + tied_embed_lr: float = float(os.environ.get("TIED_EMBED_LR", 0.05)) + matrix_lr: float = float(os.environ.get("MATRIX_LR", 0.04)) + scalar_lr: float = float(os.environ.get("SCALAR_LR", 0.04)) + muon_momentum: float = float(os.environ.get("MUON_MOMENTUM", 0.95)) + muon_backend_steps: int = int(os.environ.get("MUON_BACKEND_STEPS", 5)) + muon_momentum_warmup_start: float = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.85)) + muon_momentum_warmup_steps: int = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 500)) + grad_clip_norm: float = float(os.environ.get("GRAD_CLIP_NORM", 0.0)) + + out_dir: str = os.environ.get("OUT_DIR", "logs") + + @property + def train_files(self) -> str: + return f"{self.data_path}/fineweb_train_*.bin" + + @property + def val_files(self) -> str: + return f"{self.data_path}/fineweb_val_*.bin" + + @property + def microbatch_tokens(self) -> int: + return self.train_batch_tokens // self.grad_accum_steps + + def lr_mul(self, step: int, elapsed_ms: float) -> float: + if self.warmdown_iters <= 0: + return 1.0 + if self.max_wallclock_seconds <= 0: + warmdown_start = max(self.iterations - self.warmdown_iters, 0) + return max((self.iterations - step) / max(self.warmdown_iters, 1), 0.0) if warmdown_start <= step < self.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = self.warmdown_iters * step_ms + remaining_ms = max(1000.0 * self.max_wallclock_seconds - elapsed_ms, 0.0) + return remaining_ms / max(warmdown_ms, 1e-9) if remaining_ms <= warmdown_ms else 1.0 + + +CONTROL_TENSOR_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "CONTROL_TENSOR_NAME_PATTERNS", + "attn_scale,attn_scales,mlp_scale,mlp_scales,resid_mix,resid_mixes,q_gain,skip_weight,skip_weights", + ).split(",") + if pattern +) +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = tuple( + pattern + for pattern in os.environ.get( + "INT8_KEEP_FLOAT_FP32_NAME_PATTERNS", + ",".join(CONTROL_TENSOR_NAME_PATTERNS), + ).split(",") + if pattern +) + + +def token_chunks(total_tokens: int, seq_len: int, max_chunk_tokens: int) -> list[int]: + usable_total = (total_tokens // seq_len) 
* seq_len + if usable_total <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + usable_chunk = max((max_chunk_tokens // seq_len) * seq_len, seq_len) + chunks: list[int] = [] + remaining = usable_total + while remaining > 0: + chunk = min(remaining, usable_chunk) + chunks.append(chunk) + remaining -= chunk + return chunks + + +def accumulate_flat_grads( + accum: dict[str, mx.array] | None, + grads_tree: dict, + scale: float, +) -> dict[str, mx.array]: + flat = dict(tree_flatten(grads_tree)) + if accum is None: + return {k: g * scale for k, g in flat.items()} + for k, g in flat.items(): + accum[k] = accum[k] + g * scale + return accum + + +# ============================================================================== +# MATH HELPERS +# ============================================================================== + +def rms_norm(x: mx.array, eps: float = 1e-6) -> mx.array: + return (x * mx.rsqrt(mx.mean(x * x, axis=-1, keepdims=True) + eps)).astype(x.dtype) + + +def zeropower_newtonschulz5(g: mx.array, steps: int, eps: float = 1e-7) -> mx.array: + # Orthogonalize a 2D update matrix with a fast Newton-Schulz iteration. + # Muon uses this to normalize matrix-shaped gradients before applying them. + # Background on Muon: https://kellerjordan.github.io/posts/muon/ + a, b, c = 3.4445, -4.7750, 2.0315 + x = g.astype(mx.float32) + x = x / (mx.sqrt(mx.sum(x * x)) + eps) + transposed = x.shape[0] > x.shape[1] + if transposed: + x = x.T + for _ in range(steps): + a_mat = x @ x.T + b_mat = b * a_mat + c * (a_mat @ a_mat) + x = a * x + b_mat @ x + if transposed: + x = x.T + return x.astype(g.dtype) + + +def load_data_shard(path: Path) -> np.ndarray: + header_bytes = 256 * np.dtype(" None: + self.file_idx = (self.file_idx + 1) % len(self.files) + if self.file_idx == 0: + self.epoch += 1 + if self.log_fn is not None: + self.log_fn( + f"WARNING: starting epoch:{self.epoch} " + f"dataset:{self.dataset_name} train_shards:{len(self.files)}" + ) + self.tokens = load_data_shard(self.files[self.file_idx]) + self.pos = 0 + + def take(self, n: int) -> np.ndarray: + chunks: list[np.ndarray] = [] + left = n + while left > 0: + if self.pos >= self.tokens.size: + self.next_file() + k = min(left, int(self.tokens.size - self.pos)) + chunks.append(self.tokens[self.pos : self.pos + k]) + self.pos += k + left -= k + return chunks[0] if len(chunks) == 1 else np.concatenate(chunks, axis=0) + + +class TokenLoader: + def __init__( + self, + pattern: str, + log_fn: Callable[[str], None] | None = None, + dataset_name: str = "", + ): + self.stream = TokenStream(pattern, log_fn=log_fn, dataset_name=dataset_name) + + def next_batch(self, batch_tokens: int, seq_len: int) -> tuple[mx.array, mx.array]: + usable = (batch_tokens // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"token budget too small for seq_len={seq_len}") + chunk = self.stream.take(usable + 1) + x = chunk[:-1].reshape(-1, seq_len) + y = chunk[1:].reshape(-1, seq_len) + return mx.array(x, dtype=mx.int32), mx.array(y, dtype=mx.int32) + + +# ============================================================================== +# MODEL BLOCKS +# ============================================================================== + +class CastedLinear(nn.Module): + def __init__(self, in_dim: int, out_dim: int): + super().__init__() + self.weight = nn.Linear(in_dim, out_dim, bias=False).weight.astype(mx.float32) + + def __call__(self, x: mx.array) -> mx.array: + return x @ self.weight.astype(x.dtype).T + + +class RMSNormNoWeight(nn.Module): + # MLX 
module wrapper around the functional RMSNorm helper so it composes nicely in blocks. + def __call__(self, x: mx.array) -> mx.array: + return rms_norm(x) + + +class CausalSelfAttention(nn.Module): + # - separate q/k/v projections + # - RMSNorm on q and k before attention + # - RoPE on q and k + # - causal masked SDPA + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + if dim % num_heads != 0: + raise ValueError("model_dim must be divisible by num_heads") + if num_heads % num_kv_heads != 0: + raise ValueError("num_heads must be divisible by num_kv_heads") + self.num_heads = num_heads + self.num_kv_heads = num_kv_heads + self.head_dim = dim // num_heads + if self.head_dim % 2 != 0: + raise ValueError("head_dim must be even for RoPE") + kv_dim = self.num_kv_heads * self.head_dim + self.c_q = CastedLinear(dim, dim) + self.c_k = CastedLinear(dim, kv_dim) + self.c_v = CastedLinear(dim, kv_dim) + self.proj = CastedLinear(dim, dim) + self.q_gain = mx.ones((num_heads,), dtype=mx.float32) * qk_gain_init + self.rope = nn.RoPE(self.head_dim, traditional=False, base=rope_base) + self.scale = self.head_dim ** -0.5 + + def __call__(self, x: mx.array) -> mx.array: + bsz, seqlen, dim = x.shape + q = self.c_q(x).reshape(bsz, seqlen, self.num_heads, self.head_dim).transpose(0, 2, 1, 3) + k = self.c_k(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + v = self.c_v(x).reshape(bsz, seqlen, self.num_kv_heads, self.head_dim).transpose(0, 2, 1, 3) + + q = self.rope(rms_norm(q).astype(COMPUTE_DTYPE)) + k = self.rope(rms_norm(k).astype(COMPUTE_DTYPE)) + q = q * self.q_gain.astype(q.dtype)[None, :, None, None] + y = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask="causal") + y = y.transpose(0, 2, 1, 3).reshape(bsz, seqlen, dim) + return self.proj(y) + + +class MLP(nn.Module): + # Baseline MLP uses relu^2 instead of GELU/SiLU. It is cheap and works well in this setup. 
+ def __init__(self, dim: int, mlp_mult: int): + super().__init__() + hidden = dim * mlp_mult + self.fc = CastedLinear(dim, hidden) + self.proj = CastedLinear(hidden, dim) + + def __call__(self, x: mx.array) -> mx.array: + x = nn.relu(self.fc(x)) + return self.proj(x * x) + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + num_kv_heads: int, + mlp_mult: int, + rope_base: float, + qk_gain_init: float, + ): + super().__init__() + self.attn_norm = RMSNormNoWeight() + self.mlp_norm = RMSNormNoWeight() + self.attn = CausalSelfAttention(dim, num_heads, num_kv_heads, rope_base, qk_gain_init) + self.mlp = MLP(dim, mlp_mult) + self.attn_scale = mx.ones((dim,), dtype=mx.float32) + self.mlp_scale = mx.ones((dim,), dtype=mx.float32) + self.resid_mix = mx.array(np.stack((np.ones((dim,), dtype=np.float32), np.zeros((dim,), dtype=np.float32)))) + + def __call__(self, x: mx.array, x0: mx.array) -> mx.array: + mix = self.resid_mix.astype(x.dtype) + x = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + attn_out = self.attn(self.attn_norm(x)) + x = x + self.attn_scale.astype(x.dtype)[None, None, :] * attn_out + x = x + self.mlp_scale.astype(x.dtype)[None, None, :] * self.mlp(self.mlp_norm(x)) + return x + + +class GPT(nn.Module): + # - token embedding + RMSNorm + # - encoder half accumulates skip tensors + # - decoder half consumes reversed skips with learned skip_weights + # - tied embeddings for the LM head (the baseline default setup) + def __init__(self, vocab_size: int, num_layers: int, dim: int, num_heads: int, num_kv_heads: int, mlp_mult: int, + logit_chunk_tokens: int, logit_softcap: float, rope_base: float, tied_embed_init_std: float, + qk_gain_init: float): + super().__init__() + if logit_softcap <= 0.0: + raise ValueError(f"logit_softcap must be positive, got {logit_softcap}") + self.logit_chunk_tokens = logit_chunk_tokens + self.logit_softcap = logit_softcap + + self.tok_emb = nn.Embedding(vocab_size, dim) + self.num_encoder_layers = num_layers // 2 + self.num_decoder_layers = num_layers - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = mx.ones((self.num_skip_weights, dim), dtype=mx.float32) + self.blocks = [ + Block(dim, num_heads, num_kv_heads, mlp_mult, rope_base, qk_gain_init) + for i in range(num_layers) + ] + self.final_norm = RMSNormNoWeight() + + for b in self.blocks: + b.attn.proj.weight = mx.zeros_like(b.attn.proj.weight) + b.mlp.proj.weight = mx.zeros_like(b.mlp.proj.weight) + self.tok_emb.weight = ( + mx.random.normal(self.tok_emb.weight.shape, dtype=mx.float32) * tied_embed_init_std + ).astype(COMPUTE_DTYPE) + + def softcap(self, logits: mx.array) -> mx.array: + c = self.logit_softcap + return c * mx.tanh(logits / c) + + def __call__(self, input_ids: mx.array) -> mx.array: + x = rms_norm(self.tok_emb(input_ids).astype(COMPUTE_DTYPE)) + x0 = x + skips: list[mx.array] = [] + + for i in range(self.num_encoder_layers): + x = self.blocks[i](x, x0) + skips.append(x) + for i in range(self.num_decoder_layers): + # Odd layer counts have one more decoder block than encoder block. The baseline only + # applies a skip connection when one exists, then runs the remaining decoder block(s) + # without an added skip. 
+ if skips: + x = x + self.skip_weights[i].astype(x.dtype)[None, None, :] * skips.pop() + x = self.blocks[self.num_encoder_layers + i](x, x0) + return self.final_norm(x) + + def loss(self, input_ids: mx.array, target_ids: mx.array) -> mx.array: + # Cross-entropy over flattened tokens. We keep optional logit chunking because it is a useful + # memory knob on Macs, but the common path is chunk_tokens=0 (single matmul + CE). + x = self(input_ids).reshape(-1, self.tok_emb.weight.shape[1]) + y = target_ids.reshape(-1) + if self.logit_chunk_tokens <= 0 or x.shape[0] <= self.logit_chunk_tokens: + logits_proj = x @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + return nn.losses.cross_entropy(logits.astype(mx.float32), y, reduction="mean") + + loss_sum = mx.array(0.0, dtype=mx.float32) + n = int(x.shape[0]) + for s in range(0, n, self.logit_chunk_tokens): + e = min(s + self.logit_chunk_tokens, n) + logits_proj = x[s:e] @ self.tok_emb.weight.astype(x.dtype).T + logits = self.softcap(logits_proj) + loss_sum = loss_sum + nn.losses.cross_entropy(logits.astype(mx.float32), y[s:e], reduction="sum") + return loss_sum / float(n) + +# ============================================================================== +# OPTIMIZERS (MUON + ADAM SPLIT) +# ============================================================================== +class Muon: + # Muon applies SGD-momentum to matrix gradients, then orthogonalizes the result before the + # parameter update. + def __init__(self, keys: list[str], params: dict[str, mx.array], args: Hyperparameters): + self.keys = keys + self.args = args + self.buffers = {k: mx.zeros_like(params[k]) for k in keys} + + def step(self, params: dict[str, mx.array], grads: dict[str, mx.array], step: int, lr_mul: float) -> dict[str, mx.array]: + if self.args.muon_momentum_warmup_steps: + t = min(step / self.args.muon_momentum_warmup_steps, 1.0) + momentum = (1.0 - t) * self.args.muon_momentum_warmup_start + t * self.args.muon_momentum + else: + momentum = self.args.muon_momentum + lr = self.args.matrix_lr * lr_mul + out: dict[str, mx.array] = {} + for k in self.keys: + p = params[k] + g = grads[k] + buf = momentum * self.buffers[k] + g + self.buffers[k] = buf + g_eff = g + momentum * buf + g_ortho = zeropower_newtonschulz5(g_eff, self.args.muon_backend_steps) + scale = math.sqrt(max(1.0, float(p.shape[0]) / float(p.shape[1]))) + out[k] = p - lr * (g_ortho * scale).astype(p.dtype) + return out + + +class SplitOptimizers: + # - embeddings: Adam with the tied-embedding LR + # - block matrices (2D): Muon + # - block scalars + skip weights: Adam + # This preserves the high-level optimization behavior even though MLX internals differ. 
+ def __init__(self, model: GPT, args: Hyperparameters): + self.args = args + params = dict(tree_flatten(model.parameters())) + self.embed_key = "tok_emb.weight" + self.matrix_keys = [ + k + for k, p in params.items() + if k.startswith("blocks.") and p.ndim == 2 and not any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS) + ] + self.scalar_keys = [ + k + for k, p in params.items() + if k == "skip_weights" or (k.startswith("blocks.") and (p.ndim < 2 or any(pattern in k for pattern in CONTROL_TENSOR_NAME_PATTERNS))) + ] + + self.muon = Muon(self.matrix_keys, params, args) + self.adam_embed = optim.Adam( + learning_rate=args.tied_embed_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + self.adam_scalar = optim.Adam( + learning_rate=args.scalar_lr, + betas=[args.beta1, args.beta2], + eps=args.adam_eps, + bias_correction=True, + ) + + def step(self, model: GPT, grads_tree: dict, step: int, lr_mul: float) -> None: + params = dict(tree_flatten(model.parameters())) + grads = dict(tree_flatten(grads_tree)) + updated = dict(params) + + updated.update(self.muon.step(params, grads, step=step, lr_mul=lr_mul)) + + self.adam_embed.learning_rate = self.args.tied_embed_lr * lr_mul + updated.update( + self.adam_embed.apply_gradients( + {self.embed_key: grads[self.embed_key]}, + {self.embed_key: params[self.embed_key]}, + ) + ) + + self.adam_scalar.learning_rate = self.args.scalar_lr * lr_mul + scalar_grads = {k: grads[k] for k in self.scalar_keys} + scalar_params = {k: params[k] for k in self.scalar_keys} + updated.update(self.adam_scalar.apply_gradients(scalar_grads, scalar_params)) + + model.update(tree_unflatten(list(updated.items()))) + +# ============================================================================== +# QUANTIZATION (INT8 + ZLIB) +# ============================================================================== +# - per-row int8 for 2D float tensors +# - per-tensor int8 for other float tensors +# - fp16 passthrough for small float tensors +# - exact passthrough for non-floats + +MX_DTYPE_FROM_NAME = { + "float32": mx.float32, + "float16": mx.float16, + "bfloat16": mx.bfloat16, +} + +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = np.float16 +INT8_PER_ROW_SCALE_DTYPE = np.float16 +INT8_CLIP_PERCENTILE = 99.99984 +INT8_CLIP_Q = INT8_CLIP_PERCENTILE / 100.0 + + +def _np_float32(arr: mx.array) -> np.ndarray: + return np.array(arr.astype(mx.float32), dtype=np.float32, copy=False) + + +def keep_float_array(name: str, arr: mx.array, passthrough_orig_dtypes: dict[str, str]) -> np.ndarray: + if any(pattern in name for pattern in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + return np.ascontiguousarray(_np_float32(arr)) + if arr.dtype in {mx.float32, mx.bfloat16}: + passthrough_orig_dtypes[name] = str(arr.dtype).split(".")[-1] + return np.ascontiguousarray(np.array(arr.astype(mx.float16), dtype=INT8_KEEP_FLOAT_STORE_DTYPE, copy=False)) + return np.ascontiguousarray(np.array(arr, copy=True)) + + +def quantize_float_array(arr: mx.array) -> tuple[np.ndarray, np.ndarray]: + f32 = _np_float32(arr) + if f32.ndim == 2: + # Matrices get one scale per row, which usually tracks output-channel + # ranges much better than a single tensor-wide scale. 
+ clip_abs = np.quantile(np.abs(f32), INT8_CLIP_Q, axis=1) if f32.size else np.empty((f32.shape[0],), dtype=np.float32) + clipped = np.clip(f32, -clip_abs[:, None], clip_abs[:, None]) + scale = np.maximum(clip_abs / 127.0, 1.0 / 127.0).astype(np.float32, copy=False) + q = np.clip(np.round(clipped / scale[:, None]), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), np.ascontiguousarray(scale.astype(INT8_PER_ROW_SCALE_DTYPE, copy=False)) + + # Vectors / scalars use a simpler per-tensor scale. + clip_abs = float(np.quantile(np.abs(f32).reshape(-1), INT8_CLIP_Q)) if f32.size else 0.0 + scale = np.array(clip_abs / 127.0 if clip_abs > 0.0 else 1.0, dtype=np.float32) + q = np.clip(np.round(np.clip(f32, -clip_abs, clip_abs) / scale), -127, 127).astype(np.int8, copy=False) + return np.ascontiguousarray(q), scale + + +def quantize_state_dict_int8(flat_state: dict[str, mx.array]) -> tuple[dict[str, object], dict[str, int]]: + quantized: dict[str, np.ndarray] = {} + scales: dict[str, np.ndarray] = {} + dtypes: dict[str, str] = {} + passthrough: dict[str, np.ndarray] = {} + passthrough_orig_dtypes: dict[str, str] = {} + qmeta: dict[str, dict[str, object]] = {} + stats = dict.fromkeys( + ("param_count", "num_tensors", "num_float_tensors", "num_nonfloat_tensors", "baseline_tensor_bytes", "int8_payload_bytes"), + 0, + ) + for name, arr in flat_state.items(): + stats["param_count"] += int(arr.size) + stats["num_tensors"] += 1 + stats["baseline_tensor_bytes"] += int(arr.nbytes) + if not mx.issubdtype(arr.dtype, mx.floating): + stats["num_nonfloat_tensors"] += 1 + passthrough[name] = np.ascontiguousarray(np.array(arr)) + stats["int8_payload_bytes"] += int(passthrough[name].nbytes) + continue + + # Small float tensors are cheap enough to keep directly. We still downcast + # fp32/bf16 passthrough tensors to fp16 so metadata does not dominate size. + if int(arr.size) <= INT8_KEEP_FLOAT_MAX_NUMEL: + kept = keep_float_array(name, arr, passthrough_orig_dtypes) + passthrough[name] = kept + stats["int8_payload_bytes"] += int(kept.nbytes) + continue + + stats["num_float_tensors"] += 1 + q, s = quantize_float_array(arr) + if s.ndim > 0: + qmeta[name] = {"scheme": "per_row", "axis": 0} + quantized[name] = q + scales[name] = s + dtypes[name] = str(arr.dtype).split(".")[-1] + stats["int8_payload_bytes"] += int(q.nbytes + s.nbytes) + obj: dict[str, object] = { + "__quant_format__": "int8_clean_per_row_v1", + "quantized": quantized, + "scales": scales, + "dtypes": dtypes, + "passthrough": passthrough, + } + if qmeta: + obj["qmeta"] = qmeta + if passthrough_orig_dtypes: + obj["passthrough_orig_dtypes"] = passthrough_orig_dtypes + return obj, stats + + +def dequantize_state_dict_int8(quant_obj: dict[str, object]) -> dict[str, mx.array]: + out: dict[str, mx.array] = {} + qmeta = quant_obj.get("qmeta", {}) + passthrough_orig_dtypes = quant_obj.get("passthrough_orig_dtypes", {}) + for name, q in quant_obj["quantized"].items(): + q_np = np.asarray(q, dtype=np.int8) + dtype_name = quant_obj["dtypes"][name] + scale = np.asarray(quant_obj["scales"][name], dtype=np.float32) + if qmeta.get(name, {}).get("scheme") == "per_row" or scale.ndim > 0: + # Broadcast the saved row scale back across trailing dimensions. 
+ out_arr = q_np.astype(np.float32) * scale.reshape((q_np.shape[0],) + (1,) * (q_np.ndim - 1)) + else: + out_arr = q_np.astype(np.float32) * float(scale) + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[dtype_name]) + for name, arr in quant_obj["passthrough"].items(): + # Restore small tensors, undoing the temporary fp16 storage cast if needed. + out_arr = np.array(arr, copy=True) + orig_dtype = passthrough_orig_dtypes.get(name) + if isinstance(orig_dtype, str): + out[name] = mx.array(out_arr, dtype=MX_DTYPE_FROM_NAME[orig_dtype]) + else: + out[name] = mx.array(out_arr) + return out + + +def build_sentencepiece_luts( + sp: spm.SentencePieceProcessor, vocab_size: int +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_lut = np.zeros((table_size,), dtype=np.int16) + has_leading_space_lut = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_lut = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_lut[token_id] = False + if sp.is_byte(token_id): + base_bytes_lut[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("▁"): + has_leading_space_lut[token_id] = True + piece = piece[1:] + base_bytes_lut[token_id] = len(piece.encode("utf-8")) + return base_bytes_lut, has_leading_space_lut, is_boundary_token_lut + + +def validate_dataset_tokenizer_pair(data_path: str, tokenizer_path: str) -> tuple[str, int, int | None]: + # The shard directory and tokenizer are coupled: val_bpb is only meaningful if we + # decode bytes with the exact tokenizer that produced the shards. The manifest + # lets the training script fail fast on accidental dataset/tokenizer mismatches. 
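+    # The lookups below imply a manifest shaped roughly like this (sketch only, not an
+    # authoritative schema):
+    #   {"datasets":   [{"name": "fineweb10B_sp1024", "tokenizer_name": "...",
+    #                    "stats": {"files_train": 195}}],
+    #    "tokenizers": [{"name": "...", "model_path": ".../fineweb_1024_bpe.model"}]}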
+ dataset_dir = Path(data_path).resolve() + actual_train_files = len(list(dataset_dir.glob("fineweb_train_*.bin"))) + if len(dataset_dir.parents) < 2: + return dataset_dir.name, actual_train_files, None + manifest_path = dataset_dir.parents[1] / "manifest.json" + if not manifest_path.is_file(): + return dataset_dir.name, actual_train_files, None + + manifest = json.loads(manifest_path.read_text(encoding="utf-8")) + dataset_entry = next((x for x in manifest.get("datasets", []) if x.get("name") == dataset_dir.name), None) + if dataset_entry is None: + return dataset_dir.name, actual_train_files, None + + tokenizer_name = dataset_entry.get("tokenizer_name") + tokenizer_entry = ( + next((x for x in manifest.get("tokenizers", []) if x.get("name") == tokenizer_name), None) + if tokenizer_name + else None + ) + expected_name = Path((tokenizer_entry or {}).get("model_path") or (tokenizer_entry or {}).get("path") or "").name + if expected_name and Path(tokenizer_path).name != expected_name: + raise ValueError(f"{dataset_dir.name} expects tokenizer {expected_name}, got {Path(tokenizer_path).name}") + expected_train_files = (dataset_entry.get("stats") or {}).get("files_train") + if expected_train_files is not None: + expected_train_files = int(expected_train_files) + if actual_train_files > expected_train_files: + raise ValueError( + f"{dataset_dir.name} has more train shards than expected: found {actual_train_files}, " + f"manifest says {expected_train_files}" + ) + return dataset_dir.name, actual_train_files, expected_train_files + + +def load_validation_tokens(pattern: str, seq_len: int) -> np.ndarray: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + # The export pipeline writes the fixed first-50k-doc validation set to fineweb_val_*. 
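+    # Keep floor((N - 1) / seq_len) * seq_len + 1 tokens so that inputs (chunk[:-1]) and
+    # shifted targets (chunk[1:]) both tile into an exact number of full sequences.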
+ tokens = np.ascontiguousarray(np.concatenate([load_data_shard(file) for file in files], axis=0)) + usable = ((tokens.size - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split is too short for TRAIN_SEQ_LEN={seq_len}") + return tokens[: usable + 1] + + +def loss_and_grad_chunked( + args: Hyperparameters, + train_loader: TokenLoader, + compiled_loss_and_grad, +) -> tuple[mx.array, dict]: + chunk_sizes = token_chunks(args.microbatch_tokens, args.train_seq_len, args.mlx_max_microbatch_tokens) + total_tokens = float(sum(chunk_sizes)) + loss_value = mx.array(0.0, dtype=mx.float32) + grad_accum: dict[str, mx.array] | None = None + for chunk_tokens in chunk_sizes: + x, y = train_loader.next_batch(chunk_tokens, args.train_seq_len) + loss, grads = compiled_loss_and_grad(x, y) + scale = float(y.size) / total_tokens + loss_value = loss_value + loss.astype(mx.float32) * scale + grad_accum = accumulate_flat_grads(grad_accum, grads, scale) + if args.mlx_eager_eval: + mx.eval(loss_value, grad_accum) # materialize each chunk to cap peak memory + return loss_value, tree_unflatten(list(grad_accum.items())) + + +def eval_val( + args: Hyperparameters, + compiled_loss, + val_tokens: np.ndarray, + base_bytes_lut: np.ndarray, + has_leading_space_lut: np.ndarray, + is_boundary_token_lut: np.ndarray, + log_fn: Callable[[str], None] | None = None, +) -> tuple[float, float]: + # Validation computes two metrics: + # - val_loss: token cross-entropy (natural log) + # - val_bpb: tokenizer-agnostic compression metric used by the challenge + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + val_batch_seqs = val_batch_tokens // args.train_seq_len + total_seqs = (val_tokens.size - 1) // args.train_seq_len + total_batches = max((total_seqs + val_batch_seqs - 1) // val_batch_seqs, 1) + total_loss_sum = 0.0 + total_tokens = 0.0 + total_bytes = 0.0 + for batch_idx, batch_seq_start in enumerate(range(0, total_seqs, val_batch_seqs), start=1): + batch_seq_end = min(batch_seq_start + val_batch_seqs, total_seqs) + raw_start = batch_seq_start * args.train_seq_len + raw_end = batch_seq_end * args.train_seq_len + 1 + chunk = val_tokens[raw_start:raw_end] + x_np = chunk[:-1].reshape(-1, args.train_seq_len) + y_np = chunk[1:].reshape(-1, args.train_seq_len) + x = mx.array(x_np, dtype=mx.int32) + y = mx.array(y_np, dtype=mx.int32) + chunk_token_count = float(y.size) + batch_loss = compiled_loss(x, y).astype(mx.float32) + mx.eval(batch_loss) + total_loss_sum += float(batch_loss.item()) * chunk_token_count + prev_ids = x_np.reshape(-1) + tgt_ids = y_np.reshape(-1) + bytes_np = base_bytes_lut[tgt_ids].astype(np.int16, copy=True) + bytes_np += ( + has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids] + ).astype(np.int16, copy=False) + total_tokens += chunk_token_count + total_bytes += float(bytes_np.astype(np.float64).sum()) + if log_fn is not None and total_batches > 1 and ( + batch_idx == 1 or batch_idx == total_batches or batch_idx % 25 == 0 + ): + log_fn(f"val_progress:{batch_idx}/{total_batches}") + val_loss = total_loss_sum / total_tokens + bits_per_token = val_loss / math.log(2.0) + val_bpb = bits_per_token * (total_tokens / total_bytes) + return val_loss, val_bpb + +# ----------------------------- +# TRAINING +# 
----------------------------- + +def clip_grad_tree(grads_tree: dict, max_norm: float) -> dict: + if max_norm <= 0: + return grads_tree + flat = dict(tree_flatten(grads_tree)) + total_sq = 0.0 + for grad in flat.values(): + total_sq += float(np.sum(np.square(_np_float32(grad)), dtype=np.float64)) + if total_sq <= 0.0: + return grads_tree + total_norm = math.sqrt(total_sq) + if total_norm <= max_norm: + return grads_tree + scale = max_norm / (total_norm + 1e-12) + return tree_unflatten([(k, g * scale) for k, g in flat.items()]) + + +def main() -> None: + # ============================================================================== + # TOKENIZER + VALIDATION METRIC SETUP + # ============================================================================== + args = Hyperparameters() + out_dir = Path(args.out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + logfile = out_dir / f"{args.run_id}.txt" + print(logfile) + + def log(msg: str, console: bool = True) -> None: + if console: + print(msg) + with logfile.open("a", encoding="utf-8") as f: + print(msg, file=f) + + code = Path(__file__).read_text(encoding="utf-8") + log(code, console=False) + log("=" * 100, console=False) + log(f"Running Python {sys.version}", console=False) + log(f"Running MLX {mx.__version__}", console=False) + log("=" * 100, console=False) + + if not args.tie_embeddings: + raise NotImplementedError("train_gpt_mlx.py only supports tied embeddings") + if not args.tokenizer_path.endswith(".model"): + raise ValueError(f"TOKENIZER_PATH must point to a SentencePiece .model file: {args.tokenizer_path}") + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError( + f"VOCAB_SIZE={args.vocab_size} does not match tokenizer vocab_size={int(sp.vocab_size())}" + ) + dataset_name, actual_train_files, expected_train_files = validate_dataset_tokenizer_pair( + args.data_path, + args.tokenizer_path, + ) + val_tokens = load_validation_tokens(args.val_files, args.train_seq_len) + + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts( + sp, args.vocab_size + ) + + # ============================================================================== + # TRAINING SETUP + # ============================================================================== + mx.random.seed(args.seed) + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + # ============================================================================== + # MODEL + OPTIMIZER SETUP + # ============================================================================== + model = GPT( + vocab_size=args.vocab_size, + num_layers=args.num_layers, + dim=args.model_dim, + num_heads=args.num_heads, + num_kv_heads=args.num_kv_heads, + mlp_mult=args.mlp_mult, + logit_chunk_tokens=args.logit_chunk_tokens, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + tied_embed_init_std=args.tied_embed_init_std, + qk_gain_init=args.qk_gain_init, + ) + opt = SplitOptimizers(model, args) + + # ============================================================================== + # COMPILED TRAIN / EVAL FUNCTIONS (MLX) + # ============================================================================== + # The crucial MLX detail is capture scope: this model contains non-trainable arrays too (for example + # inside RoPE modules), so compiling only against trainable parameters throws "uncaptured inputs". 
+ # Compiling the model-bound functions and capturing the full model state fixes that while still + # returning gradients only for trainable parameters via nn.value_and_grad(...). + compiled_loss = mx.compile(lambda x, y: model.loss(x, y), inputs=model.state, outputs=model.state) + compiled_loss_and_grad = mx.compile( + nn.value_and_grad(model, lambda x, y: model.loss(x, y)), + inputs=model.state, + outputs=model.state, + ) + + # Print config once so logs are self-describing. + n_params = sum(int(np.prod(p.shape)) for _, p in tree_flatten(model.parameters())) + log(f"run_id:{args.run_id}") + log(f"mlx_version:{mx.__version__}") + log(f"train_loader:shards pattern={args.train_files}") + log(f"val_loader:shards pattern={args.val_files} tokens:{val_tokens.size - 1}") + if expected_train_files is None: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}") + elif actual_train_files < expected_train_files: + log( + f"WARNING: train_loader:subset dataset:{dataset_name} " + f"train_shards:{actual_train_files}/{expected_train_files} " + f"new epochs will arrive sooner than the full dataset" + ) + else: + log(f"train_loader:dataset:{dataset_name} train_shards:{actual_train_files}/{expected_train_files}") + log(f"tokenizer_path:{args.tokenizer_path}") + log( + f"model_params:{n_params} vocab_size:{args.vocab_size} layers:{args.num_layers} " + f"dim:{args.model_dim} heads:{args.num_heads} kv_heads:{args.num_kv_heads} " + f"seq_len:{args.train_seq_len} tie_embeddings:{args.tie_embeddings}" + ) + log( + f"iterations:{args.iterations} train_batch_tokens:{args.train_batch_tokens} grad_accum_steps:{args.grad_accum_steps} " + f"microbatch_tokens:{args.microbatch_tokens} microbatch_batch_size:{args.microbatch_tokens // args.train_seq_len} " + f"val_batch_size:{args.val_batch_size} " + f"warmup_steps:{args.warmup_steps} max_wallclock_seconds:{args.max_wallclock_seconds:.3f}" + ) + log(f"mlx_max_microbatch_tokens:{args.mlx_max_microbatch_tokens}") + log( + f"optimizer:muon+adam muon_matrix_params:{len(opt.matrix_keys)} scalar_params:{len(opt.scalar_keys)} " + f"embed_lr:{args.tied_embed_lr} " + f"matrix_lr:{args.matrix_lr} scalar_lr:{args.scalar_lr} " + f"muon_momentum:{args.muon_momentum} muon_steps:{args.muon_backend_steps}" + ) + log(f"val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path={args.tokenizer_path}") + log(f"compute_dtype:{COMPUTE_DTYPE} compile:True") + log( + f"dtypes tok_emb:{model.tok_emb.weight.dtype} " + f"linear_weight:{model.blocks[0].attn.c_q.weight.dtype} " + f"skip_weights:{model.skip_weights.dtype}" + ) + + # ============================================================================== + # TRAINING LOOP + # ============================================================================== + if args.warmup_steps > 0: + # Warmup should only prime MLX compile/allocation paths. Updating parameters here forces us + # to snapshot and restore model/optimizer state, which is expensive on unified-memory Macs. + # Instead we run the real train shapes, force the loss/grads to materialize, and then reset + # the loader so measured training still starts from the true init and token window. 
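+        # The measured wallclock (t0) only starts after this block and the loader reset
+        # below, so warmup/compile time never counts against max_wallclock_seconds.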
+ for warmup_step in range(args.warmup_steps): + accum: dict[str, mx.array] | None = None + warmup_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + warmup_loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + mx.eval(warmup_loss, accum) + mx.synchronize() + if args.warmup_steps <= 20 or (warmup_step + 1) % 10 == 0 or warmup_step + 1 == args.warmup_steps: + log(f"warmup_step:{warmup_step + 1}/{args.warmup_steps}") + + # Prime the standalone eval graph once too. It is compiled separately from value_and_grad. + val_batch_tokens = args.val_batch_size // args.grad_accum_steps + if val_batch_tokens < args.train_seq_len: + raise ValueError( + "VAL_BATCH_SIZE must provide at least one sequence; " + f"got VAL_BATCH_SIZE={args.val_batch_size}, GRAD_ACCUM_STEPS={args.grad_accum_steps}, " + f"TRAIN_SEQ_LEN={args.train_seq_len}" + ) + warm_val_seqs = min(val_batch_tokens // args.train_seq_len, (val_tokens.size - 1) // args.train_seq_len) + warm_chunk = val_tokens[: warm_val_seqs * args.train_seq_len + 1] + x_val = mx.array(warm_chunk[:-1].reshape(-1, args.train_seq_len), dtype=mx.int32) + y_val = mx.array(warm_chunk[1:].reshape(-1, args.train_seq_len), dtype=mx.int32) + warm_val_loss = compiled_loss(x_val, y_val) + mx.eval(warm_val_loss) + mx.synchronize() + + train_loader = TokenLoader(args.train_files, log_fn=log, dataset_name=dataset_name) + + train_time_ms = 0.0 + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + stop_after_step: int | None = None + t0 = time.perf_counter() + step = 0 + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + train_time_ms += 1000.0 * (time.perf_counter() - t0) + # Validation always scans the same fixed full validation split. 
+ val_loss, val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + if step % 25 == 0 or last_step: + log( + f"step:{step}/{args.iterations} val_loss:{val_loss:.4f} val_bpb:{val_bpb:.4f} " + f"train_time:{train_time_ms:.0f}ms step_avg:{train_time_ms / max(step, 1):.2f}ms" + ) + t0 = time.perf_counter() + if last_step: + if stop_after_step is not None and step < args.iterations: + log(f"stopping_early: wallclock_cap train_time:{train_time_ms:.0f}ms step:{step}/{args.iterations}") + break + + lr_mul = args.lr_mul(step, train_time_ms + 1000.0 * (time.perf_counter() - t0)) + step_t0 = time.perf_counter() + + accum: dict[str, mx.array] | None = None + train_loss = mx.array(0.0, dtype=mx.float32) + grad_scale = 1.0 / args.grad_accum_steps + for _ in range(args.grad_accum_steps): + loss, grads = loss_and_grad_chunked(args, train_loader, compiled_loss_and_grad) + accum = accumulate_flat_grads(accum, grads, grad_scale) + train_loss = train_loss + loss.astype(mx.float32) * grad_scale + if args.mlx_eager_eval: + mx.eval(train_loss, accum) # materialize each microbatch to cap peak memory + + grads = tree_unflatten(list(accum.items())) + grads = clip_grad_tree(grads, args.grad_clip_norm) + train_loss_value = float(train_loss.item()) + opt.step(model, grads, step=step, lr_mul=lr_mul) + mx.synchronize() + + step_ms = 1000.0 * (time.perf_counter() - step_t0) + approx_train_time_ms = train_time_ms + 1000.0 * (time.perf_counter() - t0) + tok_s = args.train_batch_tokens / (step_ms / 1000.0) + step += 1 + if args.train_log_every > 0 and (step <= 10 or step % args.train_log_every == 0 or stop_after_step is not None): + log( + f"step:{step}/{args.iterations} train_loss:{train_loss_value:.4f} " + f"train_time:{approx_train_time_ms:.0f}ms step_avg:{approx_train_time_ms / step:.2f}ms tok_s:{tok_s:.0f}" + ) + if max_wallclock_ms is not None and stop_after_step is None and approx_train_time_ms >= max_wallclock_ms: + stop_after_step = step + + # ============================================================================== + # FINAL SERIALIZATION + QUANTIZED ROUNDTRIP EVAL + # ============================================================================== + # We always write a raw artifact and a quantized artifact, then validate the + # quantized roundtrip directly by loading the dequantized tensors back into the + # model and running one final validation pass. 
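+    # Reloading the .int8.ptz artifact offline mirrors the roundtrip below (sketch):
+    #   obj  = pickle.loads(zlib.decompress(Path(p).read_bytes()))
+    #   flat = dequantize_state_dict_int8(obj)
+    #   model.update(tree_unflatten(list(flat.items())))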
+ out_path = out_dir / f"{args.run_id}_mlx_model.npz" + flat_state = {k: v for k, v in tree_flatten(model.state)} + mx.savez(str(out_path), **flat_state) + log(f"saved_model:{out_path} bytes:{out_path.stat().st_size}") + + quant_obj, quant_stats = quantize_state_dict_int8(flat_state) + quant_raw = pickle.dumps(quant_obj, protocol=pickle.HIGHEST_PROTOCOL) + quant_blob = zlib.compress(quant_raw, level=9) + quant_serialized_bytes = len(quant_raw) + quant_path = out_dir / f"{args.run_id}_mlx_model.int8.ptz" + with quant_path.open("wb") as f: + f.write(quant_blob) + quant_file_bytes = quant_path.stat().st_size + ratio = quant_stats["baseline_tensor_bytes"] / max(quant_stats["int8_payload_bytes"], 1) + log( + f"serialized_model_int8_zlib:{quant_file_bytes} bytes " + f"(payload:{quant_stats['int8_payload_bytes']} raw_pickle:{quant_serialized_bytes} payload_ratio:{ratio:.2f}x)" + ) + + with quant_path.open("rb") as f: + quant_blob_disk = f.read() + quant_flat = dequantize_state_dict_int8(pickle.loads(zlib.decompress(quant_blob_disk))) + model.update(tree_unflatten(list(quant_flat.items()))) + q_t0 = time.perf_counter() + q_val_loss, q_val_bpb = eval_val( + args, + compiled_loss, + val_tokens, + base_bytes_lut, + has_leading_space_lut, + is_boundary_token_lut, + log_fn=log, + ) + q_eval_ms = 1000.0 * (time.perf_counter() - q_t0) + log(f"final_int8_zlib_roundtrip val_loss:{q_val_loss:.4f} val_bpb:{q_val_bpb:.4f} eval_time:{q_eval_ms:.0f}ms") + log(f"final_int8_zlib_roundtrip_exact val_loss:{q_val_loss:.8f} val_bpb:{q_val_bpb:.8f}") + + +if __name__ == "__main__": + main() + +==================================================================================================== +Running Python 3.14.0 (main, Oct 7 2025, 09:34:52) [Clang 17.0.0 (clang-1700.3.19.1)] +Running MLX 0.31.1 +==================================================================================================== +run_id:smoke_test +mlx_version:0.31.1 +train_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_train_*.bin +val_loader:shards pattern=./data/datasets/fineweb10B_sp1024/fineweb_val_*.bin tokens:62021632 +WARNING: train_loader:subset dataset:fineweb10B_sp1024 train_shards:1/195 new epochs will arrive sooner than the full dataset +tokenizer_path:./data/tokenizers/fineweb_1024_bpe.model +model_params:17059912 vocab_size:1024 layers:9 dim:512 heads:8 kv_heads:4 seq_len:1024 tie_embeddings:True +iterations:200 train_batch_tokens:8192 grad_accum_steps:8 microbatch_tokens:1024 microbatch_batch_size:1 val_batch_size:8192 warmup_steps:20 max_wallclock_seconds:600.000 +mlx_max_microbatch_tokens:8192 +optimizer:muon+adam muon_matrix_params:54 scalar_params:37 embed_lr:0.05 matrix_lr:0.04 scalar_lr:0.04 muon_momentum:0.95 muon_steps:5 +val_bpb:enabled tokenizer_kind=sentencepiece tokenizer_path=./data/tokenizers/fineweb_1024_bpe.model +compute_dtype:mlx.core.bfloat16 compile:True +dtypes tok_emb:mlx.core.bfloat16 linear_weight:mlx.core.float32 skip_weights:mlx.core.float32 +warmup_step:1/20 +warmup_step:2/20 +warmup_step:3/20 +warmup_step:4/20 +warmup_step:5/20 +warmup_step:6/20 +warmup_step:7/20 +warmup_step:8/20 +warmup_step:9/20 +warmup_step:10/20 +warmup_step:11/20 +warmup_step:12/20 +warmup_step:13/20 +warmup_step:14/20 +warmup_step:15/20 +warmup_step:16/20 +warmup_step:17/20 +warmup_step:18/20 +warmup_step:19/20 +warmup_step:20/20 +step:1/200 train_loss:6.9428 train_time:558ms step_avg:558.00ms tok_s:14681 +step:2/200 train_loss:18.7861 train_time:1189ms step_avg:594.38ms tok_s:12999 +step:3/200 train_loss:14.6162 
train_time:1838ms step_avg:612.66ms tok_s:12622
+step:4/200 train_loss:10.5331 train_time:2467ms step_avg:616.83ms tok_s:13021
+step:5/200 train_loss:8.0576 train_time:3097ms step_avg:619.43ms tok_s:13011
+step:6/200 train_loss:6.9987 train_time:3726ms step_avg:621.04ms tok_s:13026
+step:7/200 train_loss:6.6060 train_time:4357ms step_avg:622.38ms tok_s:12999
+step:8/200 train_loss:6.5708 train_time:4987ms step_avg:623.41ms tok_s:12994
+step:9/200 train_loss:6.4000 train_time:5617ms step_avg:624.16ms tok_s:13005
+step:10/200 train_loss:6.3442 train_time:6246ms step_avg:624.58ms tok_s:13040
+step:200/200 train_loss:3.8797 train_time:138198ms step_avg:690.99ms tok_s:10638
+val_progress:1/60568
+val_progress:25/60568
+[... val_progress logged every 25 batches; repetitive lines elided ...]
+val_progress:40575/60568
+val_progress:40600/60568 +val_progress:40625/60568 +val_progress:40650/60568 +val_progress:40675/60568 +val_progress:40700/60568 +val_progress:40725/60568 +val_progress:40750/60568 +val_progress:40775/60568 +val_progress:40800/60568 +val_progress:40825/60568 +val_progress:40850/60568 +val_progress:40875/60568 +val_progress:40900/60568 +val_progress:40925/60568 +val_progress:40950/60568 +val_progress:40975/60568 +val_progress:41000/60568 +val_progress:41025/60568 +val_progress:41050/60568 +val_progress:41075/60568 +val_progress:41100/60568 +val_progress:41125/60568 +val_progress:41150/60568 +val_progress:41175/60568 +val_progress:41200/60568 +val_progress:41225/60568 +val_progress:41250/60568 +val_progress:41275/60568 +val_progress:41300/60568 +val_progress:41325/60568 +val_progress:41350/60568 +val_progress:41375/60568 +val_progress:41400/60568 +val_progress:41425/60568 +val_progress:41450/60568 +val_progress:41475/60568 +val_progress:41500/60568 +val_progress:41525/60568 +val_progress:41550/60568 +val_progress:41575/60568 +val_progress:41600/60568 +val_progress:41625/60568 +val_progress:41650/60568 +val_progress:41675/60568 +val_progress:41700/60568 +val_progress:41725/60568 +val_progress:41750/60568 +val_progress:41775/60568 +val_progress:41800/60568 +val_progress:41825/60568 +val_progress:41850/60568 +val_progress:41875/60568 +val_progress:41900/60568 +val_progress:41925/60568 +val_progress:41950/60568 +val_progress:41975/60568 +val_progress:42000/60568 +val_progress:42025/60568 +val_progress:42050/60568 +val_progress:42075/60568 +val_progress:42100/60568 +val_progress:42125/60568 +val_progress:42150/60568 +val_progress:42175/60568 +val_progress:42200/60568 +val_progress:42225/60568 +val_progress:42250/60568 +val_progress:42275/60568 +val_progress:42300/60568 +val_progress:42325/60568 +val_progress:42350/60568 +val_progress:42375/60568 +val_progress:42400/60568 +val_progress:42425/60568 +val_progress:42450/60568 +val_progress:42475/60568 +val_progress:42500/60568 +val_progress:42525/60568 +val_progress:42550/60568 +val_progress:42575/60568 +val_progress:42600/60568 +val_progress:42625/60568 +val_progress:42650/60568 +val_progress:42675/60568 +val_progress:42700/60568 +val_progress:42725/60568 +val_progress:42750/60568 +val_progress:42775/60568 +val_progress:42800/60568 +val_progress:42825/60568 +val_progress:42850/60568 +val_progress:42875/60568 +val_progress:42900/60568 +val_progress:42925/60568 +val_progress:42950/60568 +val_progress:42975/60568 +val_progress:43000/60568 +val_progress:43025/60568 +val_progress:43050/60568 +val_progress:43075/60568 +val_progress:43100/60568 +val_progress:43125/60568 +val_progress:43150/60568 +val_progress:43175/60568 +val_progress:43200/60568 +val_progress:43225/60568 +val_progress:43250/60568 +val_progress:43275/60568 +val_progress:43300/60568 +val_progress:43325/60568 +val_progress:43350/60568 +val_progress:43375/60568 +val_progress:43400/60568 +val_progress:43425/60568 +val_progress:43450/60568 +val_progress:43475/60568 +val_progress:43500/60568 +val_progress:43525/60568 +val_progress:43550/60568 +val_progress:43575/60568 +val_progress:43600/60568 +val_progress:43625/60568 +val_progress:43650/60568 +val_progress:43675/60568 +val_progress:43700/60568 +val_progress:43725/60568 +val_progress:43750/60568 +val_progress:43775/60568 +val_progress:43800/60568 +val_progress:43825/60568 +val_progress:43850/60568 +val_progress:43875/60568 +val_progress:43900/60568 +val_progress:43925/60568 +val_progress:43950/60568 +val_progress:43975/60568 
+val_progress:44000/60568 +val_progress:44025/60568 +val_progress:44050/60568 +val_progress:44075/60568 +val_progress:44100/60568 +val_progress:44125/60568 +val_progress:44150/60568 +val_progress:44175/60568 +val_progress:44200/60568 +val_progress:44225/60568 +val_progress:44250/60568 +val_progress:44275/60568 +val_progress:44300/60568 +val_progress:44325/60568 +val_progress:44350/60568 +val_progress:44375/60568 +val_progress:44400/60568 +val_progress:44425/60568 +val_progress:44450/60568 +val_progress:44475/60568 +val_progress:44500/60568 +val_progress:44525/60568 +val_progress:44550/60568 +val_progress:44575/60568 +val_progress:44600/60568 +val_progress:44625/60568 +val_progress:44650/60568 +val_progress:44675/60568 +val_progress:44700/60568 +val_progress:44725/60568 +val_progress:44750/60568 +val_progress:44775/60568 +val_progress:44800/60568 +val_progress:44825/60568 +val_progress:44850/60568 +val_progress:44875/60568 +val_progress:44900/60568 +val_progress:44925/60568 +val_progress:44950/60568 +val_progress:44975/60568 +val_progress:45000/60568 +val_progress:45025/60568 +val_progress:45050/60568 +val_progress:45075/60568 +val_progress:45100/60568 +val_progress:45125/60568 +val_progress:45150/60568 +val_progress:45175/60568 +val_progress:45200/60568 +val_progress:45225/60568 +val_progress:45250/60568 +val_progress:45275/60568 +val_progress:45300/60568 +val_progress:45325/60568 +val_progress:45350/60568 +val_progress:45375/60568 +val_progress:45400/60568 +val_progress:45425/60568 +val_progress:45450/60568 +val_progress:45475/60568 +val_progress:45500/60568 +val_progress:45525/60568 +val_progress:45550/60568 +val_progress:45575/60568 +val_progress:45600/60568 +val_progress:45625/60568 +val_progress:45650/60568 +val_progress:45675/60568 +val_progress:45700/60568 +val_progress:45725/60568 +val_progress:45750/60568 +val_progress:45775/60568 +val_progress:45800/60568 +val_progress:45825/60568 +val_progress:45850/60568 +val_progress:45875/60568 +val_progress:45900/60568 +val_progress:45925/60568 +val_progress:45950/60568 +val_progress:45975/60568 +val_progress:46000/60568 +val_progress:46025/60568 +val_progress:46050/60568 +val_progress:46075/60568 +val_progress:46100/60568 +val_progress:46125/60568 +val_progress:46150/60568 +val_progress:46175/60568 +val_progress:46200/60568 +val_progress:46225/60568 +val_progress:46250/60568 +val_progress:46275/60568 +val_progress:46300/60568 +val_progress:46325/60568 +val_progress:46350/60568 +val_progress:46375/60568 +val_progress:46400/60568 +val_progress:46425/60568 +val_progress:46450/60568 +val_progress:46475/60568 +val_progress:46500/60568 +val_progress:46525/60568 +val_progress:46550/60568 +val_progress:46575/60568 +val_progress:46600/60568 +val_progress:46625/60568 +val_progress:46650/60568 +val_progress:46675/60568 +val_progress:46700/60568 +val_progress:46725/60568 +val_progress:46750/60568 +val_progress:46775/60568 +val_progress:46800/60568 +val_progress:46825/60568 +val_progress:46850/60568 +val_progress:46875/60568 +val_progress:46900/60568 +val_progress:46925/60568 +val_progress:46950/60568 +val_progress:46975/60568 +val_progress:47000/60568 +val_progress:47025/60568 +val_progress:47050/60568 +val_progress:47075/60568 +val_progress:47100/60568 +val_progress:47125/60568 +val_progress:47150/60568 +val_progress:47175/60568 +val_progress:47200/60568 +val_progress:47225/60568 +val_progress:47250/60568 +val_progress:47275/60568 +val_progress:47300/60568 +val_progress:47325/60568 +val_progress:47350/60568 +val_progress:47375/60568 
+val_progress:47400/60568 +val_progress:47425/60568 +val_progress:47450/60568 +val_progress:47475/60568 +val_progress:47500/60568 +val_progress:47525/60568 +val_progress:47550/60568 +val_progress:47575/60568 +val_progress:47600/60568 +val_progress:47625/60568 +val_progress:47650/60568 +val_progress:47675/60568 +val_progress:47700/60568 +val_progress:47725/60568 +val_progress:47750/60568 +val_progress:47775/60568 +val_progress:47800/60568 +val_progress:47825/60568 +val_progress:47850/60568 +val_progress:47875/60568 +val_progress:47900/60568 +val_progress:47925/60568 +val_progress:47950/60568 +val_progress:47975/60568 +val_progress:48000/60568 +val_progress:48025/60568 +val_progress:48050/60568 +val_progress:48075/60568 +val_progress:48100/60568 +val_progress:48125/60568 +val_progress:48150/60568 +val_progress:48175/60568 +val_progress:48200/60568 +val_progress:48225/60568 +val_progress:48250/60568 +val_progress:48275/60568 +val_progress:48300/60568 +val_progress:48325/60568 +val_progress:48350/60568 +val_progress:48375/60568 +val_progress:48400/60568 +val_progress:48425/60568 +val_progress:48450/60568 +val_progress:48475/60568 +val_progress:48500/60568 +val_progress:48525/60568 +val_progress:48550/60568 +val_progress:48575/60568 +val_progress:48600/60568 +val_progress:48625/60568 +val_progress:48650/60568 +val_progress:48675/60568 +val_progress:48700/60568 +val_progress:48725/60568 +val_progress:48750/60568 +val_progress:48775/60568 +val_progress:48800/60568 +val_progress:48825/60568 +val_progress:48850/60568 +val_progress:48875/60568 +val_progress:48900/60568 +val_progress:48925/60568 +val_progress:48950/60568 +val_progress:48975/60568 +val_progress:49000/60568 +val_progress:49025/60568 +val_progress:49050/60568 +val_progress:49075/60568 +val_progress:49100/60568 +val_progress:49125/60568 +val_progress:49150/60568 +val_progress:49175/60568 +val_progress:49200/60568 +val_progress:49225/60568 +val_progress:49250/60568 +val_progress:49275/60568 +val_progress:49300/60568 +val_progress:49325/60568 +val_progress:49350/60568 +val_progress:49375/60568 +val_progress:49400/60568 +val_progress:49425/60568 +val_progress:49450/60568 +val_progress:49475/60568 +val_progress:49500/60568 +val_progress:49525/60568 +val_progress:49550/60568 +val_progress:49575/60568 +val_progress:49600/60568 +val_progress:49625/60568 +val_progress:49650/60568 +val_progress:49675/60568 +val_progress:49700/60568 +val_progress:49725/60568 +val_progress:49750/60568 +val_progress:49775/60568 +val_progress:49800/60568 +val_progress:49825/60568 +val_progress:49850/60568 +val_progress:49875/60568 +val_progress:49900/60568 +val_progress:49925/60568 +val_progress:49950/60568 +val_progress:49975/60568 +val_progress:50000/60568 +val_progress:50025/60568 +val_progress:50050/60568 +val_progress:50075/60568 +val_progress:50100/60568 +val_progress:50125/60568 +val_progress:50150/60568 +val_progress:50175/60568 +val_progress:50200/60568 +val_progress:50225/60568 +val_progress:50250/60568 +val_progress:50275/60568 +val_progress:50300/60568 +val_progress:50325/60568 +val_progress:50350/60568 +val_progress:50375/60568 +val_progress:50400/60568 +val_progress:50425/60568 +val_progress:50450/60568 +val_progress:50475/60568 +val_progress:50500/60568 +val_progress:50525/60568 +val_progress:50550/60568 +val_progress:50575/60568 +val_progress:50600/60568 +val_progress:50625/60568 +val_progress:50650/60568 +val_progress:50675/60568 +val_progress:50700/60568 +val_progress:50725/60568 +val_progress:50750/60568 +val_progress:50775/60568 
+val_progress:50800/60568 +val_progress:50825/60568 +val_progress:50850/60568 +val_progress:50875/60568 +val_progress:50900/60568 +val_progress:50925/60568 +val_progress:50950/60568 +val_progress:50975/60568 +val_progress:51000/60568 +val_progress:51025/60568 +val_progress:51050/60568 +val_progress:51075/60568 +val_progress:51100/60568 +val_progress:51125/60568 +val_progress:51150/60568 +val_progress:51175/60568 +val_progress:51200/60568 +val_progress:51225/60568 +val_progress:51250/60568 +val_progress:51275/60568 +val_progress:51300/60568 +val_progress:51325/60568 +val_progress:51350/60568 +val_progress:51375/60568 +val_progress:51400/60568 +val_progress:51425/60568 +val_progress:51450/60568 +val_progress:51475/60568 +val_progress:51500/60568 +val_progress:51525/60568 +val_progress:51550/60568 +val_progress:51575/60568 +val_progress:51600/60568 +val_progress:51625/60568 +val_progress:51650/60568 +val_progress:51675/60568 +val_progress:51700/60568 +val_progress:51725/60568 +val_progress:51750/60568 +val_progress:51775/60568 +val_progress:51800/60568 +val_progress:51825/60568 +val_progress:51850/60568 +val_progress:51875/60568 +val_progress:51900/60568 +val_progress:51925/60568 +val_progress:51950/60568 +val_progress:51975/60568 +val_progress:52000/60568 +val_progress:52025/60568 +val_progress:52050/60568 +val_progress:52075/60568 +val_progress:52100/60568 +val_progress:52125/60568 +val_progress:52150/60568 +val_progress:52175/60568 +val_progress:52200/60568 +val_progress:52225/60568 +val_progress:52250/60568 +val_progress:52275/60568 +val_progress:52300/60568 +val_progress:52325/60568 +val_progress:52350/60568 +val_progress:52375/60568 +val_progress:52400/60568 +val_progress:52425/60568 +val_progress:52450/60568 +val_progress:52475/60568 +val_progress:52500/60568 +val_progress:52525/60568 +val_progress:52550/60568 +val_progress:52575/60568 +val_progress:52600/60568 +val_progress:52625/60568 +val_progress:52650/60568 +val_progress:52675/60568 +val_progress:52700/60568 +val_progress:52725/60568 +val_progress:52750/60568 +val_progress:52775/60568 +val_progress:52800/60568 +val_progress:52825/60568 +val_progress:52850/60568 +val_progress:52875/60568 +val_progress:52900/60568 +val_progress:52925/60568 +val_progress:52950/60568 +val_progress:52975/60568 +val_progress:53000/60568 +val_progress:53025/60568 +val_progress:53050/60568 +val_progress:53075/60568 +val_progress:53100/60568 +val_progress:53125/60568 +val_progress:53150/60568 +val_progress:53175/60568 +val_progress:53200/60568 +val_progress:53225/60568 +val_progress:53250/60568 +val_progress:53275/60568 +val_progress:53300/60568 +val_progress:53325/60568 +val_progress:53350/60568 +val_progress:53375/60568 +val_progress:53400/60568 +val_progress:53425/60568 +val_progress:53450/60568 +val_progress:53475/60568 +val_progress:53500/60568 +val_progress:53525/60568 +val_progress:53550/60568 +val_progress:53575/60568 +val_progress:53600/60568 +val_progress:53625/60568 +val_progress:53650/60568 +val_progress:53675/60568 +val_progress:53700/60568 +val_progress:53725/60568 +val_progress:53750/60568 +val_progress:53775/60568 +val_progress:53800/60568 +val_progress:53825/60568 +val_progress:53850/60568 +val_progress:53875/60568 +val_progress:53900/60568 +val_progress:53925/60568 +val_progress:53950/60568 +val_progress:53975/60568 +val_progress:54000/60568 +val_progress:54025/60568 +val_progress:54050/60568 +val_progress:54075/60568 +val_progress:54100/60568 +val_progress:54125/60568 +val_progress:54150/60568 +val_progress:54175/60568 
+val_progress:54200/60568 +val_progress:54225/60568 +val_progress:54250/60568 +val_progress:54275/60568 +val_progress:54300/60568 +val_progress:54325/60568 +val_progress:54350/60568 +val_progress:54375/60568 +val_progress:54400/60568 +val_progress:54425/60568 +val_progress:54450/60568 +val_progress:54475/60568 +val_progress:54500/60568 +val_progress:54525/60568 +val_progress:54550/60568 +val_progress:54575/60568 +val_progress:54600/60568 +val_progress:54625/60568 +val_progress:54650/60568 +val_progress:54675/60568 +val_progress:54700/60568 +val_progress:54725/60568 +val_progress:54750/60568 +val_progress:54775/60568 +val_progress:54800/60568 +val_progress:54825/60568 +val_progress:54850/60568 +val_progress:54875/60568 +val_progress:54900/60568 +val_progress:54925/60568 +val_progress:54950/60568 +val_progress:54975/60568 +val_progress:55000/60568 +val_progress:55025/60568 +val_progress:55050/60568 +val_progress:55075/60568 +val_progress:55100/60568 +val_progress:55125/60568 +val_progress:55150/60568 +val_progress:55175/60568 +val_progress:55200/60568 +val_progress:55225/60568 +val_progress:55250/60568 +val_progress:55275/60568 +val_progress:55300/60568 +val_progress:55325/60568 +val_progress:55350/60568 +val_progress:55375/60568 +val_progress:55400/60568 +val_progress:55425/60568 +val_progress:55450/60568 +val_progress:55475/60568 +val_progress:55500/60568 +val_progress:55525/60568 +val_progress:55550/60568 +val_progress:55575/60568 +val_progress:55600/60568 +val_progress:55625/60568 +val_progress:55650/60568 +val_progress:55675/60568 +val_progress:55700/60568 +val_progress:55725/60568 +val_progress:55750/60568 +val_progress:55775/60568 +val_progress:55800/60568 +val_progress:55825/60568 +val_progress:55850/60568 +val_progress:55875/60568 +val_progress:55900/60568 +val_progress:55925/60568 +val_progress:55950/60568 +val_progress:55975/60568 +val_progress:56000/60568 +val_progress:56025/60568 +val_progress:56050/60568 +val_progress:56075/60568 +val_progress:56100/60568 +val_progress:56125/60568 +val_progress:56150/60568 +val_progress:56175/60568 +val_progress:56200/60568 +val_progress:56225/60568 +val_progress:56250/60568 +val_progress:56275/60568 +val_progress:56300/60568 +val_progress:56325/60568 +val_progress:56350/60568 +val_progress:56375/60568 +val_progress:56400/60568 +val_progress:56425/60568 +val_progress:56450/60568 +val_progress:56475/60568 +val_progress:56500/60568 +val_progress:56525/60568 +val_progress:56550/60568 +val_progress:56575/60568 +val_progress:56600/60568 +val_progress:56625/60568 +val_progress:56650/60568 +val_progress:56675/60568 +val_progress:56700/60568 +val_progress:56725/60568 +val_progress:56750/60568 +val_progress:56775/60568 +val_progress:56800/60568 +val_progress:56825/60568 +val_progress:56850/60568 +val_progress:56875/60568 +val_progress:56900/60568 +val_progress:56925/60568 +val_progress:56950/60568 +val_progress:56975/60568 +val_progress:57000/60568 +val_progress:57025/60568 +val_progress:57050/60568 +val_progress:57075/60568 +val_progress:57100/60568 +val_progress:57125/60568 +val_progress:57150/60568 +val_progress:57175/60568 +val_progress:57200/60568 +val_progress:57225/60568 +val_progress:57250/60568 +val_progress:57275/60568 +val_progress:57300/60568 +val_progress:57325/60568 +val_progress:57350/60568 +val_progress:57375/60568 +val_progress:57400/60568 +val_progress:57425/60568 +val_progress:57450/60568 +val_progress:57475/60568 +val_progress:57500/60568 +val_progress:57525/60568 +val_progress:57550/60568 +val_progress:57575/60568 
+val_progress:57600/60568 +val_progress:57625/60568 +val_progress:57650/60568 +val_progress:57675/60568 +val_progress:57700/60568 +val_progress:57725/60568 +val_progress:57750/60568 +val_progress:57775/60568 +val_progress:57800/60568 +val_progress:57825/60568 +val_progress:57850/60568 +val_progress:57875/60568 +val_progress:57900/60568 +val_progress:57925/60568 +val_progress:57950/60568 +val_progress:57975/60568 +val_progress:58000/60568 +val_progress:58025/60568 +val_progress:58050/60568 +val_progress:58075/60568 +val_progress:58100/60568 +val_progress:58125/60568 +val_progress:58150/60568 +val_progress:58175/60568 +val_progress:58200/60568 +val_progress:58225/60568 +val_progress:58250/60568 +val_progress:58275/60568 +val_progress:58300/60568 +val_progress:58325/60568 +val_progress:58350/60568 +val_progress:58375/60568 +val_progress:58400/60568 +val_progress:58425/60568 +val_progress:58450/60568 +val_progress:58475/60568 +val_progress:58500/60568 +val_progress:58525/60568 +val_progress:58550/60568 +val_progress:58575/60568 +val_progress:58600/60568 +val_progress:58625/60568 +val_progress:58650/60568 +val_progress:58675/60568 +val_progress:58700/60568 +val_progress:58725/60568 +val_progress:58750/60568 +val_progress:58775/60568 +val_progress:58800/60568 +val_progress:58825/60568 +val_progress:58850/60568 +val_progress:58875/60568 +val_progress:58900/60568 +val_progress:58925/60568 +val_progress:58950/60568 +val_progress:58975/60568 +val_progress:59000/60568 +val_progress:59025/60568 +val_progress:59050/60568 +val_progress:59075/60568 +val_progress:59100/60568 +val_progress:59125/60568 +val_progress:59150/60568 +val_progress:59175/60568 +val_progress:59200/60568 +val_progress:59225/60568 +val_progress:59250/60568 +val_progress:59275/60568 +val_progress:59300/60568 +val_progress:59325/60568 +val_progress:59350/60568 +val_progress:59375/60568 +val_progress:59400/60568 +val_progress:59425/60568 +val_progress:59450/60568 +val_progress:59475/60568 +val_progress:59500/60568 +val_progress:59525/60568 +val_progress:59550/60568 +val_progress:59575/60568 +val_progress:59600/60568 +val_progress:59625/60568 +val_progress:59650/60568 +val_progress:59675/60568 +val_progress:59700/60568 +val_progress:59725/60568 +val_progress:59750/60568 +val_progress:59775/60568 +val_progress:59800/60568 +val_progress:59825/60568 +val_progress:59850/60568 +val_progress:59875/60568 +val_progress:59900/60568 +val_progress:59925/60568 +val_progress:59950/60568 +val_progress:59975/60568 +val_progress:60000/60568 +val_progress:60025/60568 +val_progress:60050/60568 +val_progress:60075/60568 +val_progress:60100/60568 +val_progress:60125/60568 +val_progress:60150/60568 +val_progress:60175/60568 +val_progress:60200/60568 +val_progress:60225/60568 +val_progress:60250/60568 +val_progress:60275/60568 +val_progress:60300/60568 +val_progress:60325/60568 +val_progress:60350/60568 +val_progress:60375/60568 +val_progress:60400/60568 +val_progress:60425/60568 +val_progress:60450/60568 +val_progress:60475/60568 +val_progress:60500/60568 +val_progress:60525/60568 +val_progress:60550/60568 +val_progress:60568/60568 +step:200/200 val_loss:3.9694 val_bpb:2.3509 train_time:138199ms step_avg:691.00ms +saved_model:logs/smoke_test_mlx_model.npz bytes:67212188 +serialized_model_int8_zlib:10610003 bytes (payload:17178912 raw_pickle:17188361 payload_ratio:3.91x) +val_progress:1/60568 +val_progress:25/60568 +val_progress:50/60568 +val_progress:75/60568 +val_progress:100/60568 +val_progress:125/60568 +val_progress:150/60568 +val_progress:175/60568 
+val_progress:200/60568 +val_progress:225/60568 +val_progress:250/60568 +val_progress:275/60568 +val_progress:300/60568 +val_progress:325/60568 +val_progress:350/60568 +val_progress:375/60568 +val_progress:400/60568 +val_progress:425/60568 +val_progress:450/60568 +val_progress:475/60568 +val_progress:500/60568 +val_progress:525/60568 +val_progress:550/60568 +val_progress:575/60568 +val_progress:600/60568 +val_progress:625/60568 +val_progress:650/60568 +val_progress:675/60568 +val_progress:700/60568 +val_progress:725/60568 +val_progress:750/60568 +val_progress:775/60568 +val_progress:800/60568 +val_progress:825/60568 +val_progress:850/60568 +val_progress:875/60568 +val_progress:900/60568 +val_progress:925/60568 +val_progress:950/60568 +val_progress:975/60568 +val_progress:1000/60568 +val_progress:1025/60568 +val_progress:1050/60568 +val_progress:1075/60568 +val_progress:1100/60568 +val_progress:1125/60568 +val_progress:1150/60568 +val_progress:1175/60568 +val_progress:1200/60568 +val_progress:1225/60568 +val_progress:1250/60568 +val_progress:1275/60568 +val_progress:1300/60568 +val_progress:1325/60568 +val_progress:1350/60568 +val_progress:1375/60568 +val_progress:1400/60568 +val_progress:1425/60568 +val_progress:1450/60568 +val_progress:1475/60568 +val_progress:1500/60568 +val_progress:1525/60568 +val_progress:1550/60568 +val_progress:1575/60568 +val_progress:1600/60568 +val_progress:1625/60568 +val_progress:1650/60568 +val_progress:1675/60568 +val_progress:1700/60568 +val_progress:1725/60568 +val_progress:1750/60568 +val_progress:1775/60568 +val_progress:1800/60568 +val_progress:1825/60568 +val_progress:1850/60568 +val_progress:1875/60568 +val_progress:1900/60568 +val_progress:1925/60568 +val_progress:1950/60568 +val_progress:1975/60568 +val_progress:2000/60568 +val_progress:2025/60568 +val_progress:2050/60568 +val_progress:2075/60568 +val_progress:2100/60568 +val_progress:2125/60568 +val_progress:2150/60568 +val_progress:2175/60568 +val_progress:2200/60568 +val_progress:2225/60568 +val_progress:2250/60568 +val_progress:2275/60568 +val_progress:2300/60568 +val_progress:2325/60568 +val_progress:2350/60568 +val_progress:2375/60568 +val_progress:2400/60568 +val_progress:2425/60568 +val_progress:2450/60568 +val_progress:2475/60568 +val_progress:2500/60568 +val_progress:2525/60568 +val_progress:2550/60568 +val_progress:2575/60568 +val_progress:2600/60568 +val_progress:2625/60568 +val_progress:2650/60568 +val_progress:2675/60568 +val_progress:2700/60568 +val_progress:2725/60568 +val_progress:2750/60568 +val_progress:2775/60568 +val_progress:2800/60568 +val_progress:2825/60568 +val_progress:2850/60568 +val_progress:2875/60568 +val_progress:2900/60568 +val_progress:2925/60568 +val_progress:2950/60568 +val_progress:2975/60568 +val_progress:3000/60568 +val_progress:3025/60568 +val_progress:3050/60568 +val_progress:3075/60568 +val_progress:3100/60568 +val_progress:3125/60568 +val_progress:3150/60568 +val_progress:3175/60568 +val_progress:3200/60568 +val_progress:3225/60568 +val_progress:3250/60568 +val_progress:3275/60568 +val_progress:3300/60568 +val_progress:3325/60568 +val_progress:3350/60568 +val_progress:3375/60568 +val_progress:3400/60568 +val_progress:3425/60568 +val_progress:3450/60568 +val_progress:3475/60568 +val_progress:3500/60568 +val_progress:3525/60568 +val_progress:3550/60568 +val_progress:3575/60568 +val_progress:3600/60568 +val_progress:3625/60568 +val_progress:3650/60568 +val_progress:3675/60568 +val_progress:3700/60568 +val_progress:3725/60568 +val_progress:3750/60568 
+val_progress:3775/60568 +val_progress:3800/60568 +val_progress:3825/60568 +val_progress:3850/60568 +val_progress:3875/60568 +val_progress:3900/60568 +val_progress:3925/60568 +val_progress:3950/60568 +val_progress:3975/60568 +val_progress:4000/60568 +val_progress:4025/60568 +val_progress:4050/60568 +val_progress:4075/60568 +val_progress:4100/60568 +val_progress:4125/60568 +val_progress:4150/60568 +val_progress:4175/60568 +val_progress:4200/60568 +val_progress:4225/60568 +val_progress:4250/60568 +val_progress:4275/60568 +val_progress:4300/60568 +val_progress:4325/60568 +val_progress:4350/60568 +val_progress:4375/60568 +val_progress:4400/60568 +val_progress:4425/60568 +val_progress:4450/60568 +val_progress:4475/60568 +val_progress:4500/60568 +val_progress:4525/60568 +val_progress:4550/60568 +val_progress:4575/60568 +val_progress:4600/60568 +val_progress:4625/60568 +val_progress:4650/60568 +val_progress:4675/60568 +val_progress:4700/60568 +val_progress:4725/60568 +val_progress:4750/60568 +val_progress:4775/60568 +val_progress:4800/60568 +val_progress:4825/60568 +val_progress:4850/60568 +val_progress:4875/60568 +val_progress:4900/60568 +val_progress:4925/60568 +val_progress:4950/60568 +val_progress:4975/60568 +val_progress:5000/60568 +val_progress:5025/60568 +val_progress:5050/60568 +val_progress:5075/60568 +val_progress:5100/60568 +val_progress:5125/60568 +val_progress:5150/60568 +val_progress:5175/60568 +val_progress:5200/60568 +val_progress:5225/60568 +val_progress:5250/60568 +val_progress:5275/60568 +val_progress:5300/60568 +val_progress:5325/60568 +val_progress:5350/60568 +val_progress:5375/60568 +val_progress:5400/60568 +val_progress:5425/60568 +val_progress:5450/60568 +val_progress:5475/60568 +val_progress:5500/60568 +val_progress:5525/60568 +val_progress:5550/60568 +val_progress:5575/60568 +val_progress:5600/60568 +val_progress:5625/60568 +val_progress:5650/60568 +val_progress:5675/60568 +val_progress:5700/60568 +val_progress:5725/60568 +val_progress:5750/60568 +val_progress:5775/60568 +val_progress:5800/60568 +val_progress:5825/60568 +val_progress:5850/60568 +val_progress:5875/60568 +val_progress:5900/60568 +val_progress:5925/60568 +val_progress:5950/60568 +val_progress:5975/60568 +val_progress:6000/60568 +val_progress:6025/60568 +val_progress:6050/60568 +val_progress:6075/60568 +val_progress:6100/60568 +val_progress:6125/60568 +val_progress:6150/60568 +val_progress:6175/60568 +val_progress:6200/60568 +val_progress:6225/60568 +val_progress:6250/60568 +val_progress:6275/60568 +val_progress:6300/60568 +val_progress:6325/60568 +val_progress:6350/60568 +val_progress:6375/60568 +val_progress:6400/60568 +val_progress:6425/60568 +val_progress:6450/60568 +val_progress:6475/60568 +val_progress:6500/60568 +val_progress:6525/60568 +val_progress:6550/60568 +val_progress:6575/60568 +val_progress:6600/60568 +val_progress:6625/60568 +val_progress:6650/60568 +val_progress:6675/60568 +val_progress:6700/60568 +val_progress:6725/60568 +val_progress:6750/60568 +val_progress:6775/60568 +val_progress:6800/60568 +val_progress:6825/60568 +val_progress:6850/60568 +val_progress:6875/60568 +val_progress:6900/60568 +val_progress:6925/60568 +val_progress:6950/60568 +val_progress:6975/60568 +val_progress:7000/60568 +val_progress:7025/60568 +val_progress:7050/60568 +val_progress:7075/60568 +val_progress:7100/60568 +val_progress:7125/60568 +val_progress:7150/60568 +val_progress:7175/60568 +val_progress:7200/60568 +val_progress:7225/60568 +val_progress:7250/60568 +val_progress:7275/60568 +val_progress:7300/60568 
+val_progress:7325/60568 +val_progress:7350/60568 +val_progress:7375/60568 +val_progress:7400/60568 +val_progress:7425/60568 +val_progress:7450/60568 +val_progress:7475/60568 +val_progress:7500/60568 +val_progress:7525/60568 +val_progress:7550/60568 +val_progress:7575/60568 +val_progress:7600/60568 +val_progress:7625/60568 +val_progress:7650/60568 +val_progress:7675/60568 +val_progress:7700/60568 +val_progress:7725/60568 +val_progress:7750/60568 +val_progress:7775/60568 +val_progress:7800/60568 +val_progress:7825/60568 +val_progress:7850/60568 +val_progress:7875/60568 +val_progress:7900/60568 +val_progress:7925/60568 +val_progress:7950/60568 +val_progress:7975/60568 +val_progress:8000/60568 +val_progress:8025/60568 +val_progress:8050/60568 +val_progress:8075/60568 +val_progress:8100/60568 +val_progress:8125/60568 +val_progress:8150/60568 +val_progress:8175/60568 +val_progress:8200/60568 +val_progress:8225/60568 +val_progress:8250/60568 +val_progress:8275/60568 +val_progress:8300/60568 +val_progress:8325/60568 +val_progress:8350/60568 +val_progress:8375/60568 +val_progress:8400/60568 +val_progress:8425/60568 +val_progress:8450/60568 +val_progress:8475/60568 +val_progress:8500/60568 +val_progress:8525/60568 +val_progress:8550/60568 +val_progress:8575/60568 +val_progress:8600/60568 +val_progress:8625/60568 +val_progress:8650/60568 +val_progress:8675/60568 +val_progress:8700/60568 +val_progress:8725/60568 +val_progress:8750/60568 +val_progress:8775/60568 +val_progress:8800/60568 +val_progress:8825/60568 +val_progress:8850/60568 +val_progress:8875/60568 +val_progress:8900/60568 +val_progress:8925/60568 +val_progress:8950/60568 +val_progress:8975/60568 +val_progress:9000/60568 +val_progress:9025/60568 +val_progress:9050/60568 +val_progress:9075/60568 +val_progress:9100/60568 +val_progress:9125/60568 +val_progress:9150/60568 +val_progress:9175/60568 +val_progress:9200/60568 +val_progress:9225/60568 +val_progress:9250/60568 +val_progress:9275/60568 +val_progress:9300/60568 +val_progress:9325/60568 +val_progress:9350/60568 +val_progress:9375/60568 +val_progress:9400/60568 +val_progress:9425/60568 +val_progress:9450/60568 +val_progress:9475/60568 +val_progress:9500/60568 +val_progress:9525/60568 +val_progress:9550/60568 +val_progress:9575/60568 +val_progress:9600/60568 +val_progress:9625/60568 +val_progress:9650/60568 +val_progress:9675/60568 +val_progress:9700/60568 +val_progress:9725/60568 +val_progress:9750/60568 +val_progress:9775/60568 +val_progress:9800/60568 +val_progress:9825/60568 +val_progress:9850/60568 +val_progress:9875/60568 +val_progress:9900/60568 +val_progress:9925/60568 +val_progress:9950/60568 +val_progress:9975/60568 +val_progress:10000/60568 +val_progress:10025/60568 +val_progress:10050/60568 +val_progress:10075/60568 +val_progress:10100/60568 +val_progress:10125/60568 +val_progress:10150/60568 +val_progress:10175/60568 +val_progress:10200/60568 +val_progress:10225/60568 +val_progress:10250/60568 +val_progress:10275/60568 +val_progress:10300/60568 +val_progress:10325/60568 +val_progress:10350/60568 +val_progress:10375/60568 +val_progress:10400/60568 +val_progress:10425/60568 +val_progress:10450/60568 +val_progress:10475/60568 +val_progress:10500/60568 +val_progress:10525/60568 +val_progress:10550/60568 +val_progress:10575/60568 +val_progress:10600/60568 +val_progress:10625/60568 +val_progress:10650/60568 +val_progress:10675/60568 +val_progress:10700/60568 +val_progress:10725/60568 +val_progress:10750/60568 +val_progress:10775/60568 +val_progress:10800/60568 
+val_progress:10825/60568 +val_progress:10850/60568 +val_progress:10875/60568 +val_progress:10900/60568 +val_progress:10925/60568 +val_progress:10950/60568 +val_progress:10975/60568 +val_progress:11000/60568 +val_progress:11025/60568 +val_progress:11050/60568 +val_progress:11075/60568 +val_progress:11100/60568 +val_progress:11125/60568 +val_progress:11150/60568 +val_progress:11175/60568 +val_progress:11200/60568 +val_progress:11225/60568 +val_progress:11250/60568 +val_progress:11275/60568 +val_progress:11300/60568 +val_progress:11325/60568 +val_progress:11350/60568 +val_progress:11375/60568 +val_progress:11400/60568 +val_progress:11425/60568 +val_progress:11450/60568 +val_progress:11475/60568 +val_progress:11500/60568 +val_progress:11525/60568 +val_progress:11550/60568 +val_progress:11575/60568 +val_progress:11600/60568 +val_progress:11625/60568 +val_progress:11650/60568 +val_progress:11675/60568 +val_progress:11700/60568 +val_progress:11725/60568 +val_progress:11750/60568 +val_progress:11775/60568 +val_progress:11800/60568 +val_progress:11825/60568 +val_progress:11850/60568 +val_progress:11875/60568 +val_progress:11900/60568 +val_progress:11925/60568 +val_progress:11950/60568 +val_progress:11975/60568 +val_progress:12000/60568 +val_progress:12025/60568 +val_progress:12050/60568 +val_progress:12075/60568 +val_progress:12100/60568 +val_progress:12125/60568 +val_progress:12150/60568 +val_progress:12175/60568 +val_progress:12200/60568 +val_progress:12225/60568 +val_progress:12250/60568 +val_progress:12275/60568 +val_progress:12300/60568 +val_progress:12325/60568 +val_progress:12350/60568 +val_progress:12375/60568 +val_progress:12400/60568 +val_progress:12425/60568 +val_progress:12450/60568 +val_progress:12475/60568 +val_progress:12500/60568 +val_progress:12525/60568 +val_progress:12550/60568 +val_progress:12575/60568 +val_progress:12600/60568 +val_progress:12625/60568 +val_progress:12650/60568 +val_progress:12675/60568 +val_progress:12700/60568 +val_progress:12725/60568 +val_progress:12750/60568 +val_progress:12775/60568 +val_progress:12800/60568 +val_progress:12825/60568 +val_progress:12850/60568 +val_progress:12875/60568 +val_progress:12900/60568 +val_progress:12925/60568 +val_progress:12950/60568 +val_progress:12975/60568 +val_progress:13000/60568 +val_progress:13025/60568 +val_progress:13050/60568 +val_progress:13075/60568 +val_progress:13100/60568 +val_progress:13125/60568 +val_progress:13150/60568 +val_progress:13175/60568 +val_progress:13200/60568 +val_progress:13225/60568 +val_progress:13250/60568 +val_progress:13275/60568 +val_progress:13300/60568 +val_progress:13325/60568 +val_progress:13350/60568 +val_progress:13375/60568 +val_progress:13400/60568 +val_progress:13425/60568 +val_progress:13450/60568 +val_progress:13475/60568 +val_progress:13500/60568 +val_progress:13525/60568 +val_progress:13550/60568 +val_progress:13575/60568 +val_progress:13600/60568 +val_progress:13625/60568 +val_progress:13650/60568 +val_progress:13675/60568 +val_progress:13700/60568 +val_progress:13725/60568 +val_progress:13750/60568 +val_progress:13775/60568 +val_progress:13800/60568 +val_progress:13825/60568 +val_progress:13850/60568 +val_progress:13875/60568 +val_progress:13900/60568 +val_progress:13925/60568 +val_progress:13950/60568 +val_progress:13975/60568 +val_progress:14000/60568 +val_progress:14025/60568 +val_progress:14050/60568 +val_progress:14075/60568 +val_progress:14100/60568 +val_progress:14125/60568 +val_progress:14150/60568 +val_progress:14175/60568 +val_progress:14200/60568 
+val_progress:14225/60568 +val_progress:14250/60568 +val_progress:14275/60568 +val_progress:14300/60568 +val_progress:14325/60568 +val_progress:14350/60568 +val_progress:14375/60568 +val_progress:14400/60568 +val_progress:14425/60568 +val_progress:14450/60568 +val_progress:14475/60568 +val_progress:14500/60568 +val_progress:14525/60568 +val_progress:14550/60568 +val_progress:14575/60568 +val_progress:14600/60568 +val_progress:14625/60568 +val_progress:14650/60568 +val_progress:14675/60568 +val_progress:14700/60568 +val_progress:14725/60568 +val_progress:14750/60568 +val_progress:14775/60568 +val_progress:14800/60568 +val_progress:14825/60568 +val_progress:14850/60568 +val_progress:14875/60568 +val_progress:14900/60568 +val_progress:14925/60568 +val_progress:14950/60568 +val_progress:14975/60568 +val_progress:15000/60568 +val_progress:15025/60568 +val_progress:15050/60568 +val_progress:15075/60568 +val_progress:15100/60568 +val_progress:15125/60568 +val_progress:15150/60568 +val_progress:15175/60568 +val_progress:15200/60568 +val_progress:15225/60568 +val_progress:15250/60568 +val_progress:15275/60568 +val_progress:15300/60568 +val_progress:15325/60568 +val_progress:15350/60568 +val_progress:15375/60568 +val_progress:15400/60568 +val_progress:15425/60568 +val_progress:15450/60568 +val_progress:15475/60568 +val_progress:15500/60568 +val_progress:15525/60568 +val_progress:15550/60568 +val_progress:15575/60568 +val_progress:15600/60568 +val_progress:15625/60568 +val_progress:15650/60568 +val_progress:15675/60568 +val_progress:15700/60568 +val_progress:15725/60568 +val_progress:15750/60568 +val_progress:15775/60568 +val_progress:15800/60568 +val_progress:15825/60568 +val_progress:15850/60568 +val_progress:15875/60568 +val_progress:15900/60568 +val_progress:15925/60568 +val_progress:15950/60568 +val_progress:15975/60568 +val_progress:16000/60568 +val_progress:16025/60568 +val_progress:16050/60568 +val_progress:16075/60568 +val_progress:16100/60568 +val_progress:16125/60568 +val_progress:16150/60568 +val_progress:16175/60568 +val_progress:16200/60568 +val_progress:16225/60568 +val_progress:16250/60568 +val_progress:16275/60568 +val_progress:16300/60568 +val_progress:16325/60568 +val_progress:16350/60568 +val_progress:16375/60568 +val_progress:16400/60568 +val_progress:16425/60568 +val_progress:16450/60568 +val_progress:16475/60568 +val_progress:16500/60568 +val_progress:16525/60568 +val_progress:16550/60568 +val_progress:16575/60568 +val_progress:16600/60568 +val_progress:16625/60568 +val_progress:16650/60568 +val_progress:16675/60568 +val_progress:16700/60568 +val_progress:16725/60568 +val_progress:16750/60568 +val_progress:16775/60568 +val_progress:16800/60568 +val_progress:16825/60568 +val_progress:16850/60568 +val_progress:16875/60568 +val_progress:16900/60568 +val_progress:16925/60568 +val_progress:16950/60568 +val_progress:16975/60568 +val_progress:17000/60568 +val_progress:17025/60568 +val_progress:17050/60568 +val_progress:17075/60568 +val_progress:17100/60568 +val_progress:17125/60568 +val_progress:17150/60568 +val_progress:17175/60568 +val_progress:17200/60568 +val_progress:17225/60568 +val_progress:17250/60568 +val_progress:17275/60568 +val_progress:17300/60568 +val_progress:17325/60568 +val_progress:17350/60568 +val_progress:17375/60568 +val_progress:17400/60568 +val_progress:17425/60568 +val_progress:17450/60568 +val_progress:17475/60568 +val_progress:17500/60568 +val_progress:17525/60568 +val_progress:17550/60568 +val_progress:17575/60568 +val_progress:17600/60568 
+val_progress:17625/60568 +val_progress:17650/60568 +val_progress:17675/60568 +val_progress:17700/60568 +val_progress:17725/60568 +val_progress:17750/60568 +val_progress:17775/60568 +val_progress:17800/60568 +val_progress:17825/60568 +val_progress:17850/60568 +val_progress:17875/60568 +val_progress:17900/60568 +val_progress:17925/60568 +val_progress:17950/60568 +val_progress:17975/60568 +val_progress:18000/60568 +val_progress:18025/60568 +val_progress:18050/60568 +val_progress:18075/60568 +val_progress:18100/60568 +val_progress:18125/60568 +val_progress:18150/60568 +val_progress:18175/60568 +val_progress:18200/60568 +val_progress:18225/60568 +val_progress:18250/60568 +val_progress:18275/60568 +val_progress:18300/60568 +val_progress:18325/60568 +val_progress:18350/60568 +val_progress:18375/60568 +val_progress:18400/60568 +val_progress:18425/60568 +val_progress:18450/60568 +val_progress:18475/60568 +val_progress:18500/60568 +val_progress:18525/60568 +val_progress:18550/60568 +val_progress:18575/60568 +val_progress:18600/60568 +val_progress:18625/60568 +val_progress:18650/60568 +val_progress:18675/60568 +val_progress:18700/60568 +val_progress:18725/60568 +val_progress:18750/60568 +val_progress:18775/60568 +val_progress:18800/60568 +val_progress:18825/60568 +val_progress:18850/60568 +val_progress:18875/60568 +val_progress:18900/60568 +val_progress:18925/60568 +val_progress:18950/60568 +val_progress:18975/60568 +val_progress:19000/60568 +val_progress:19025/60568 +val_progress:19050/60568 +val_progress:19075/60568 +val_progress:19100/60568 +val_progress:19125/60568 +val_progress:19150/60568 +val_progress:19175/60568 +val_progress:19200/60568 +val_progress:19225/60568 +val_progress:19250/60568 +val_progress:19275/60568 +val_progress:19300/60568 +val_progress:19325/60568 +val_progress:19350/60568 +val_progress:19375/60568 +val_progress:19400/60568 +val_progress:19425/60568 +val_progress:19450/60568 +val_progress:19475/60568 +val_progress:19500/60568 +val_progress:19525/60568 +val_progress:19550/60568 +val_progress:19575/60568 +val_progress:19600/60568 +val_progress:19625/60568 +val_progress:19650/60568 +val_progress:19675/60568 +val_progress:19700/60568 +val_progress:19725/60568 +val_progress:19750/60568 +val_progress:19775/60568 +val_progress:19800/60568 +val_progress:19825/60568 +val_progress:19850/60568 +val_progress:19875/60568 +val_progress:19900/60568 +val_progress:19925/60568 +val_progress:19950/60568 +val_progress:19975/60568 +val_progress:20000/60568 +val_progress:20025/60568 +val_progress:20050/60568 +val_progress:20075/60568 +val_progress:20100/60568 +val_progress:20125/60568 +val_progress:20150/60568 +val_progress:20175/60568 +val_progress:20200/60568 +val_progress:20225/60568 +val_progress:20250/60568 +val_progress:20275/60568 +val_progress:20300/60568 +val_progress:20325/60568 +val_progress:20350/60568 +val_progress:20375/60568 +val_progress:20400/60568 +val_progress:20425/60568 +val_progress:20450/60568 +val_progress:20475/60568 +val_progress:20500/60568 +val_progress:20525/60568 +val_progress:20550/60568 +val_progress:20575/60568 +val_progress:20600/60568 +val_progress:20625/60568 +val_progress:20650/60568 +val_progress:20675/60568 +val_progress:20700/60568 +val_progress:20725/60568 +val_progress:20750/60568 +val_progress:20775/60568 +val_progress:20800/60568 +val_progress:20825/60568 +val_progress:20850/60568 +val_progress:20875/60568 +val_progress:20900/60568 +val_progress:20925/60568 +val_progress:20950/60568 +val_progress:20975/60568 +val_progress:21000/60568 
+val_progress:21025/60568 +val_progress:21050/60568 +val_progress:21075/60568 +val_progress:21100/60568 +val_progress:21125/60568 +val_progress:21150/60568 +val_progress:21175/60568 +val_progress:21200/60568 +val_progress:21225/60568 +val_progress:21250/60568 +val_progress:21275/60568 +val_progress:21300/60568 +val_progress:21325/60568 +val_progress:21350/60568 +val_progress:21375/60568 +val_progress:21400/60568 +val_progress:21425/60568 +val_progress:21450/60568 +val_progress:21475/60568 +val_progress:21500/60568 +val_progress:21525/60568 +val_progress:21550/60568 +val_progress:21575/60568 +val_progress:21600/60568 +val_progress:21625/60568 +val_progress:21650/60568 +val_progress:21675/60568 +val_progress:21700/60568 +val_progress:21725/60568 +val_progress:21750/60568 +val_progress:21775/60568 +val_progress:21800/60568 +val_progress:21825/60568 +val_progress:21850/60568 +val_progress:21875/60568 +val_progress:21900/60568 +val_progress:21925/60568 +val_progress:21950/60568 +val_progress:21975/60568 +val_progress:22000/60568 +val_progress:22025/60568 +val_progress:22050/60568 +val_progress:22075/60568 +val_progress:22100/60568 +val_progress:22125/60568 +val_progress:22150/60568 +val_progress:22175/60568 +val_progress:22200/60568 +val_progress:22225/60568 +val_progress:22250/60568 +val_progress:22275/60568 +val_progress:22300/60568 +val_progress:22325/60568 +val_progress:22350/60568 +val_progress:22375/60568 +val_progress:22400/60568 +val_progress:22425/60568 +val_progress:22450/60568 +val_progress:22475/60568 +val_progress:22500/60568 +val_progress:22525/60568 +val_progress:22550/60568 +val_progress:22575/60568 +val_progress:22600/60568 +val_progress:22625/60568 +val_progress:22650/60568 +val_progress:22675/60568 +val_progress:22700/60568 +val_progress:22725/60568 +val_progress:22750/60568 +val_progress:22775/60568 +val_progress:22800/60568 +val_progress:22825/60568 +val_progress:22850/60568 +val_progress:22875/60568 +val_progress:22900/60568 +val_progress:22925/60568 +val_progress:22950/60568 +val_progress:22975/60568 +val_progress:23000/60568 +val_progress:23025/60568 +val_progress:23050/60568 +val_progress:23075/60568 +val_progress:23100/60568 +val_progress:23125/60568 +val_progress:23150/60568 +val_progress:23175/60568 +val_progress:23200/60568 +val_progress:23225/60568 +val_progress:23250/60568 +val_progress:23275/60568 +val_progress:23300/60568 +val_progress:23325/60568 +val_progress:23350/60568 +val_progress:23375/60568 +val_progress:23400/60568 +val_progress:23425/60568 +val_progress:23450/60568 +val_progress:23475/60568 +val_progress:23500/60568 +val_progress:23525/60568 +val_progress:23550/60568 +val_progress:23575/60568 +val_progress:23600/60568 +val_progress:23625/60568 +val_progress:23650/60568 +val_progress:23675/60568 +val_progress:23700/60568 +val_progress:23725/60568 +val_progress:23750/60568 +val_progress:23775/60568 +val_progress:23800/60568 +val_progress:23825/60568 +val_progress:23850/60568 +val_progress:23875/60568 +val_progress:23900/60568 +val_progress:23925/60568 +val_progress:23950/60568 +val_progress:23975/60568 +val_progress:24000/60568 +val_progress:24025/60568 +val_progress:24050/60568 +val_progress:24075/60568 +val_progress:24100/60568 +val_progress:24125/60568 +val_progress:24150/60568 +val_progress:24175/60568 +val_progress:24200/60568 +val_progress:24225/60568 +val_progress:24250/60568 +val_progress:24275/60568 +val_progress:24300/60568 +val_progress:24325/60568 +val_progress:24350/60568 +val_progress:24375/60568 +val_progress:24400/60568 
+val_progress:24425/60568 ... val_progress:60568/60568  (repeated per-step progress lines, advancing in increments of 25, elided)
+final_int8_zlib_roundtrip val_loss:3.9708 val_bpb:2.3517 eval_time:1011771ms
+final_int8_zlib_roundtrip_exact val_loss:3.97082455 val_bpb:2.35174597
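For readers sanity-checking the final log lines: the sketch below applies the standard nats-to-bits-per-byte conversion to the reported numbers. The ~2.44 bytes-per-token ratio is not printed anywhere in the log; it is inferred here from the two reported figures, so treat it as an assumption rather than the evaluator's exact constant.

```python
import math

# Figures copied verbatim from the final eval lines above.
val_loss_nats = 3.97082455   # mean cross-entropy per token, in nats
val_bpb = 2.35174597         # reported bits-per-byte

# Convert nats/token to bits/token, then back out the implied
# bytes-per-token ratio of the validation split.
bits_per_token = val_loss_nats / math.log(2)        # ~5.729 bits/token
implied_bytes_per_token = bits_per_token / val_bpb  # ~2.436 bytes/token (inferred, not logged)

print(f"bits/token  = {bits_per_token:.4f}")
print(f"bytes/token = {implied_bytes_per_token:.4f}")
```

The two reported figures are mutually consistent under this conversion, which is a quick way to catch unit mix-ups (nats vs. bits, tokens vs. bytes) when comparing against the val_bpb quoted in the README and submission.json.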