From 5db964ebd28636a3cdea422373b44d6d6e92f9e1 Mon Sep 17 00:00:00 2001 From: Viraj Deshwal Date: Thu, 2 Apr 2026 15:31:52 -0700 Subject: [PATCH] Non-Record: Unified Attention + FA3 + 1hr training (val_bpb=1.1088) --- .../README.md | 79 + .../submission.json | 12 + .../train.log | 2287 +++++++++++++++++ .../train_gpt.py | 1644 ++++++++++++ 4 files changed, 4022 insertions(+) create mode 100644 records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/README.md create mode 100644 records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/submission.json create mode 100644 records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train.log create mode 100644 records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train_gpt.py diff --git a/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/README.md b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/README.md new file mode 100644 index 0000000000..218c1df7b9 --- /dev/null +++ b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/README.md @@ -0,0 +1,79 @@ +# Non-Record: Unified Attention + FA3 + Legal TTT (1-hour training) + +**val_bpb: 1.1088** | **~15.82 MB** | 8×H100 SXM | 1 hour training + +Same architecture as our record submission [PR #1202](https://github.com/openai/parameter-golf/pull/1202) (val_bpb 1.1412, 10-min). This run trains for 1 hour to explore unified attention's scaling behavior with unlimited compute. + +## Results + +| Run | step_avg | steps | Pre-TTT bpb | **Post-TTT bpb** | Training time | +|-----|----------|-------|-------------|-----------------|---------------| +| Record (PR #1202, 3-seed mean) | 49.6ms | 12,100 | 1.1643 | **1.1412** | 10 min | +| **This run (1 hour)** | 51.3ms | 72,000 | 1.1326 | **1.1088** | 60 min | +| **Improvement** | | +59,900 steps | -0.0317 | **-0.0324** | | + +Beats the current unlimited compute SOTA (1.1239, 1-bit quantization, 2hr training) by 0.015 BPB in half the training time. + +## What's Different From the Record Submission + +Only the training schedule changes. Architecture and eval are identical: + +| Parameter | Record (10 min) | This run (1 hour) | +|-----------|----------------|-------------------| +| ITERATIONS | 20,000 (wall-clock limited) | 72,000 | +| MAX_WALLCLOCK_SECONDS | 600 | 3,600 | +| WARMDOWN_ITERS | 3,500 | 10,000 | +| QAT_START_FRACTION | 0.15 | 0.85 | +| EMA_DECAY | 0.997 | 0.997 | +| Everything else | identical | identical | + +The key change: with more steps, we can train clean (no QAT noise) for 85% of the run and still give QAT 10,800 steps to converge. The 10-min run needs QAT at 15% to fit enough QAT steps in the budget. + +## Scaling Observation + +The model plateaus at peak LR around step 48,000 (val_bpb ~1.223). The real gains come from the warmdown phase (steps 62,000-72,000) where the LR decays and the model refines. With 10,000 warmdown steps (vs ~1,100 in the 10-min run), the model has 9x more refinement steps. + +Pre-warmdown base quality: +- 10-min run at step 10,000: val_bpb ~1.248 +- This run at step 48,000: val_bpb ~1.223 + +Post-warmdown + quantization: +- 10-min run: 1.1647 mixed roundtrip +- This run: 1.1326 mixed roundtrip + +The warmdown benefit scales with more steps, and unified attention benefits from longer training just as standard architectures do. 
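+
+For concreteness, here is a minimal sketch of the step-count view of this run's schedule. It is illustrative only (`lr_scale` and `qat_active` are hypothetical helpers, not functions from `train_gpt.py`); the script's `lr_mul` additionally keys the warmdown off wall-clock time when `MAX_WALLCLOCK_SECONDS` is set, and applies a 100-step LR warmup:
+
+```python
+# Sketch of this run's flat-then-linear-warmdown LR and late QAT switch.
+ITERATIONS, WARMDOWN_ITERS, LR_WARMUP_STEPS, QAT_START_FRACTION = 72_000, 10_000, 100, 0.85
+
+def lr_scale(step: int) -> float:
+    if step < LR_WARMUP_STEPS:
+        return (step + 1) / LR_WARMUP_STEPS                 # brief linear warmup
+    return min((ITERATIONS - step) / WARMDOWN_ITERS, 1.0)   # flat until step 62,000, then linear to 0
+
+def qat_active(step: int) -> bool:
+    return step / ITERATIONS >= QAT_START_FRACTION          # int6 fake-quant over the last 10,800 steps
+```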
+ +## Key Innovation: Unified Attention + +Unified Attention ([Deshwal, 2026](https://github.com/ReinforceAI/yocto)) replaces separate Q/K/V projections with a single W_unified matrix. 67% fewer attention projection parameters, reallocated to the MLP. Attention is a routing mechanism; the FFN does the heavy lifting. In the 16 MB budget, we trade 2.28 MB of routing for 2.49 MB of computation. + +See [PR #1202](https://github.com/openai/parameter-golf/pull/1202) for full architecture details, ablation, and negative results. + +## Requirements + +```bash +pip install flash_attn_3 --find-links https://windreamer.github.io/flash-attention3-wheels/cu128_torch280 +pip install sentencepiece zstandard +``` + +## Run Command + +```bash +RUN_ID=r1_k11_d528_fa3_1hour \ +ITERATIONS=72000 \ +MAX_WALLCLOCK_SECONDS=3600 \ +WARMDOWN_ITERS=10000 \ +QAT_START_FRACTION=0.85 \ +EMA_DECAY=0.997 \ +NUM_UNIQUE_LAYERS=11 MODEL_DIM=528 NUM_HEADS=4 \ +VE_LAYERS=9,10 \ +TRAIN_BATCH_TOKENS=524288 \ +SLIDING_WINDOW_EVAL=0 \ +LEGAL_TTT_EPOCHS=3 \ +TTT_LORA_ATTN=0 \ +torchrun --standalone --nproc_per_node=8 train_gpt.py +``` + +## Credits + +Same as [PR #1202](https://github.com/openai/parameter-golf/pull/1202). Unified Attention and FA3 head-dim padding are this work (Viraj Deshwal, Reinforce AI). All other techniques credited in the record submission README. \ No newline at end of file diff --git a/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/submission.json b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/submission.json new file mode 100644 index 0000000000..ff5ea2750b --- /dev/null +++ b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/submission.json @@ -0,0 +1,12 @@ +{ + "name": "Viraj Deshwal", + "github_id": "VirajDeshwal", + "val_bpb": 1.1088, + "training_time_seconds": 3600, + "eval_time_seconds": 501, + "gpu": "8xH100 80GB SXM", + "pytorch_version": "2.8.0+cu128", + "track": "non_record_16mb", + "summary": "Unified Attention + FA3 + Legal TTT, 1-hour training. Same architecture as record submission PR #1202 (1.1412, 10-min). Longer training reveals unified attention's scaling behavior." +} + diff --git a/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train.log b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train.log new file mode 100644 index 0000000000..1f0299b322 --- /dev/null +++ b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train.log @@ -0,0 +1,2287 @@ +from __future__ import annotations + +import copy +import glob +import io +import math +import os +import random +import subprocess +import sys +import time +import traceback +import uuid +import zlib +import lzma +from pathlib import Path + +import numpy as np +import sentencepiece as spm +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch import Tensor, nn +from torch.nn.parallel import DistributedDataParallel as DDP + +try: + from flash_attn_interface import flash_attn_func as _flash_attn_func +except ImportError: + raise ImportError( + "Flash Attention 3 (Hopper) is required. Install with:\n" + " pip install flash_attn_3 --find-links https://windreamer.github.io/flash-attention3-wheels/cu128_torch280\n" + "Or see requirements.txt for details." 
+ ) + + +import logging + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s | %(levelname)-5s | %(message)s", + datefmt="%H:%M:%S", +) +logger = logging.getLogger("yocto-golf") + +def log_architecture(model, args): + n = sum(p.numel() for p in model.parameters()) + logger.info(f"YOCTO d={args.model_dim} K={args.num_unique_layers} heads={args.num_heads} params={n:,}") + +class Hyperparameters: + data_path = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + train_files = os.path.join(data_path, "fineweb_train_*.bin") + val_files = os.path.join(data_path, "fineweb_val_*.bin") + tokenizer_path = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed = int(os.environ.get("SEED", 1337)) + + val_batch_size = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + val_loss_every = int(os.environ.get("VAL_LOSS_EVERY", 1000)) + train_log_every = int(os.environ.get("TRAIN_LOG_EVERY", 200)) + + iterations = int(os.environ.get("ITERATIONS", 20000)) + warmdown_iters = int(os.environ.get("WARMDOWN_ITERS", 3500)) + warmup_steps = int(os.environ.get("WARMUP_STEPS", 20)) + train_batch_tokens = int(os.environ.get("TRAIN_BATCH_TOKENS", 524_288)) + train_seq_len = int(os.environ.get("TRAIN_SEQ_LEN", 2048)) + max_wallclock_seconds = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + + # ── Yocto architecture ── + vocab_size = int(os.environ.get("VOCAB_SIZE", 1024)) + model_dim = int(os.environ.get("MODEL_DIM", 552)) + num_heads = int(os.environ.get("NUM_HEADS", 4)) + num_unique_layers = int(os.environ.get("NUM_UNIQUE_LAYERS", 10)) + num_recurrences = int(os.environ.get("NUM_RECURRENCES", 1)) + mlp_mult = int(os.environ.get("MLP_MULT", 3)) + use_swiglu = bool(int(os.environ.get("USE_SWIGLU", "1"))) + tie_embeddings = bool(int(os.environ.get("TIE_EMBEDDINGS", "1"))) + rope_base = float(os.environ.get("ROPE_BASE", 10000.0)) + logit_softcap = float(os.environ.get("LOGIT_SOFTCAP", 30.0)) + seeking_gain_init = float(os.environ.get("SEEKING_GAIN_INIT", 1.5)) + rope_fraction = float(os.environ.get("ROPE_FRACTION", 1.0)) # 1.0 = full RoPE, 0.5 = half partial RoPE + + # ── Optimizer ── + embed_lr = float(os.environ.get("EMBED_LR", 0.6)) + head_lr = float(os.environ.get("HEAD_LR", 0.008)) + tied_embed_lr = float(os.environ.get("TIED_EMBED_LR", 0.035)) + tied_embed_init_std = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005)) + matrix_lr = float(os.environ.get("MATRIX_LR", 0.025)) + scalar_lr = float(os.environ.get("SCALAR_LR", 0.025)) + muon_momentum = float(os.environ.get("MUON_MOMENTUM", 0.99)) + muon_backend_steps = int(os.environ.get("MUON_BACKEND_STEPS", 5)) + muon_momentum_warmup_start = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.92)) + muon_momentum_warmup_steps = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 1500)) + beta1 = float(os.environ.get("BETA1", 0.9)) + beta2 = float(os.environ.get("BETA2", 0.95)) + adam_eps = float(os.environ.get("ADAM_EPS", 1e-8)) + grad_clip_norm = float(os.environ.get("GRAD_CLIP_NORM", 0.3)) + muon_weight_decay = float(os.environ.get("MUON_WEIGHT_DECAY", 0.04)) + + # ── LR warmup (actual learning rate ramp, separate from compile warmup) ── + lr_warmup_steps = int(os.environ.get("LR_WARMUP_STEPS", 100)) + + # ── EMA ── + ema_decay = float(os.environ.get("EMA_DECAY", 0.997)) # 0 = disabled, 0.997 = SOTA setting + + # ── SWA (Stochastic Weight Averaging) ── + swa_every = int(os.environ.get("SWA_EVERY", 50)) # 0 = disabled, 50 = SOTA setting + swa_threshold = 
float(os.environ.get("SWA_THRESHOLD", 0.2)) # only SWA when lr_scale < this + + # ── Compression ── + compression = os.environ.get("COMPRESSION", "lzma") # "zlib", "zstd", or "lzma" + + # ── QAT (Quantization-Aware Training) ── + qat_bits = int(os.environ.get("QAT_BITS", 6)) # 0 = disabled, 6 = int6 QAT + qat_start_fraction = float(os.environ.get("QAT_START_FRACTION", 0.15)) # when to start QAT + + # ── Mixed precision quantization ── + int5_layers = os.environ.get("INT5_LAYERS", "") # e.g. "2,3,4,5,6,7,8" + + sliding_window_stride = int(os.environ.get("SLIDING_WINDOW_STRIDE", 64)) + + # ── LN Scale ── + ln_scale = bool(int(os.environ.get("LN_SCALE", "1"))) # 1/sqrt(layer_idx+1) on norm outputs + + # ── Value Embedding (VE128) ── + ve_enabled = bool(int(os.environ.get("VE_ENABLED", "1"))) + ve_dim = int(os.environ.get("VE_DIM", 128)) + ve_layers = os.environ.get("VE_LAYERS", "8,9") # last 2 of 10 layers + + # ── TTT LoRA ── + + # ── Legal Score-First TTT ── + legal_ttt_enabled = bool(int(os.environ.get("LEGAL_TTT_ENABLED", "1"))) + legal_ttt_lr = float(os.environ.get("LEGAL_TTT_LR", 0.002)) + legal_ttt_epochs = int(os.environ.get("LEGAL_TTT_EPOCHS", 3)) + legal_ttt_chunk_tokens = int(os.environ.get("LEGAL_TTT_CHUNK_TOKENS", 32768)) + legal_ttt_freeze_blocks = int(os.environ.get("LEGAL_TTT_FREEZE_BLOCKS", 0)) + legal_ttt_momentum = float(os.environ.get("LEGAL_TTT_MOMENTUM", 0.9)) + legal_ttt_batch_seqs = int(os.environ.get("LEGAL_TTT_BATCH_SEQS", 32)) + legal_ttt_grad_clip = float(os.environ.get("LEGAL_TTT_GRAD_CLIP", 1.0)) + + @property + def num_effective_layers(self) -> int: + return self.num_unique_layers * self.num_recurrences + + def validate(self) -> None: + """Check all divisibility constraints.""" + d = self.model_dim + assert d % 3 == 0, f"model_dim={d} must be divisible by 3 for unified attention split" + comp = d // 3 + assert comp % self.num_heads == 0, ( + f"component_dim={comp} (model_dim/3) must be divisible by num_heads={self.num_heads}" + ) + head_dim = comp // self.num_heads + assert head_dim % 2 == 0, f"head_dim={head_dim} must be even for RoPE" + assert head_dim >= 16, f"head_dim={head_dim} must be >= 16 for useful RoPE (got {head_dim})" + assert self.logit_softcap > 0, f"logit_softcap must be positive" + logger.info(f"Architecture constraints validated: d={d}, comp={comp}, heads={self.num_heads}, " + f"head_dim={head_dim}, RoPE_pairs={head_dim//2}") + + +def zeropower_via_newtonschulz5(G: Tensor, steps: int = 5, eps: float = 1e-7) -> Tensor: + """Batched Newton-Schulz orthogonalization. 
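+    Approximates the nearest (semi-)orthogonal matrix to G, i.e. the UVᵀ polar factor of its SVD, via a quintic iteration run in bfloat16; Muon applies it to each momentum-averaged gradient bank.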
G: (B,M,N) or (M,N).""" + a, b, c = (3.4445, -4.7750, 2.0315) + was_2d = G.ndim == 2 + if was_2d: + G = G.unsqueeze(0) + X = G.bfloat16() + transposed = X.size(-2) > X.size(-1) + if transposed: + X = X.mT + X = X / (X.norm(dim=(-2, -1), keepdim=True) + eps) + for _ in range(steps): + A = X @ X.mT + B = b * A + c * (A @ A) + X = a * X + B @ X + if transposed: + X = X.mT + if was_2d: + X = X.squeeze(0) + return X + +class Muon(torch.optim.Optimizer): + def __init__(self, params, lr: float, momentum: float, backend_steps: int, + nesterov: bool = True, weight_decay: float = 0.0): + super().__init__(params, dict(lr=lr, momentum=momentum, backend_steps=backend_steps, + nesterov=nesterov, weight_decay=weight_decay)) + self._built = False + + def _build(self): + self._distributed = dist.is_available() and dist.is_initialized() + self._world_size = dist.get_world_size() if self._distributed else 1 + self._rank = dist.get_rank() if self._distributed else 0 + ws = self._world_size + + self._bank_meta = [] + for group in self.param_groups: + for p in group["params"]: + B = p.shape[0] + padded_B = ((B + ws - 1) // ws) * ws + shard_B = padded_B // ws + tail = p.shape[1:] + dev = p.device + self._bank_meta.append({ + 'p': p, + 'B': B, + 'padded_grad': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard_mom': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'full_update': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'scale': max(1, p.shape[-2] / p.shape[-1]) ** 0.5, + }) + self._bank_meta.sort(key=lambda m: -m['p'].numel()) + self._built = True + + def launch_reduce_scatters(self): + """Phase 1: launch async reduce-scatter for all banks.""" + if not self._built: + self._build() + if not self._distributed: + return + self._rs_futures = [] + for m in self._bank_meta: + p = m['p'] + if p.grad is None: + self._rs_futures.append(None) + continue + pg = m['padded_grad'] + pg[:m['B']].copy_(p.grad.bfloat16()) + if pg.shape[0] > m['B']: + pg[m['B']:].zero_() + fut = dist.reduce_scatter_tensor(m['shard'], pg, op=dist.ReduceOp.AVG, async_op=True) + self._rs_futures.append(fut) + + @torch.no_grad() + def step(self, closure=None): + """Phase 3: wait for RS, batched NS5, all-gather.""" + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if not self._built: + self._build() + + for group in self.param_groups: + lr = group["lr"] + momentum = group["momentum"] + backend_steps = group["backend_steps"] + nesterov = group["nesterov"] + wd = group.get("weight_decay", 0.0) + + prev_ag_handle = None + prev_m = None + + sharded = self._distributed and hasattr(self, '_rs_futures') + + for i, m in enumerate(self._bank_meta): + p = m['p'] + if p.grad is None: + continue + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + + if sharded and self._rs_futures[i] is not None: + self._rs_futures[i].wait() + g = m['shard'] + buf = m['shard_mom'] + else: + g = p.grad.bfloat16() + state = self.state[p] + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros_like(g) + buf = state["momentum_buffer"] + + buf.mul_(momentum).add_(g) + if nesterov: + update = g.add(buf, alpha=momentum) + else: + update = buf + + update = zeropower_via_newtonschulz5(update, steps=backend_steps) + + 
if sharded: + prev_ag_handle = dist.all_gather_into_tensor( + m['full_update'], update, async_op=True) + prev_m = m + else: + if wd > 0.0: + p.data.mul_(1.0 - lr * wd) + p.add_(update.to(dtype=p.dtype), alpha=-lr * m['scale']) + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + + if hasattr(self, '_rs_futures'): + del self._rs_futures + + return loss + + +def build_sentencepiece_luts(sp: spm.SentencePieceProcessor, vocab_size: int, device: torch.device): + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_np = np.zeros((table_size,), dtype=np.int16) + has_leading_space_np = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_np = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_np[token_id] = False + if sp.is_byte(token_id): + base_bytes_np[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("▁"): + has_leading_space_np[token_id] = True + piece = piece[1:] + base_bytes_np[token_id] = len(piece.encode("utf-8")) + return ( + torch.tensor(base_bytes_np, dtype=torch.int16, device=device), + torch.tensor(has_leading_space_np, dtype=torch.bool, device=device), + torch.tensor(is_boundary_token_np, dtype=torch.bool, device=device), + ) + +def load_validation_tokens(pattern: str, seq_len: int) -> Tensor: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + tokens = torch.cat([load_data_shard(file) for file in files]).contiguous() + usable = ((tokens.numel() - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split too short for seq_len={seq_len}") + return tokens[: usable + 1] + +def eval_val(args, model, rank, world_size, device, grad_accum_steps, val_tokens, + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut): + local_batch_tokens = args.val_batch_size // (world_size * grad_accum_steps) + local_batch_seqs = local_batch_tokens // args.train_seq_len + total_seqs = (val_tokens.numel() - 1) // args.train_seq_len + seq_start = (total_seqs * rank) // world_size + seq_end = (total_seqs * (rank + 1)) // world_size + val_loss_sum = torch.zeros((), device=device, dtype=torch.float64) + val_token_count = torch.zeros((), device=device, dtype=torch.float64) + val_byte_count = torch.zeros((), device=device, dtype=torch.float64) + model.eval() + with torch.inference_mode(): + for batch_seq_start in range(seq_start, seq_end, local_batch_seqs): + batch_seq_end = min(batch_seq_start + local_batch_seqs, seq_end) + raw_start = batch_seq_start * args.train_seq_len + raw_end = batch_seq_end * args.train_seq_len + 1 + local = val_tokens[raw_start:raw_end].to(device=device, dtype=torch.int64, non_blocking=True) + x = local[:-1].reshape(-1, args.train_seq_len) + y = local[1:].reshape(-1, args.train_seq_len) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + batch_loss = model(x, y).detach() + batch_token_count = float(y.numel()) + val_loss_sum += batch_loss.to(torch.float64) * batch_token_count + val_token_count += batch_token_count + prev_ids, tgt_ids = x.reshape(-1), y.reshape(-1) + token_bytes = base_bytes_lut[tgt_ids].to(dtype=torch.int16) + token_bytes += 
(has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids]).to(dtype=torch.int16) + val_byte_count += token_bytes.to(torch.float64).sum() + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(val_loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(val_token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(val_byte_count, op=dist.ReduceOp.SUM) + val_loss = val_loss_sum / val_token_count + bits_per_token = val_loss.item() / math.log(2.0) + tokens_per_byte = val_token_count.item() / val_byte_count.item() + model.train() + return float(val_loss.item()), float(bits_per_token * tokens_per_byte) + +CONTROL_TENSOR_NAME_PATTERNS = ("attn_scale", "mlp_scale", "resid_mix", "skip_weight", "seeking_gain", "smear", "ve_layer_scales", "ve_shared.scale") +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = CONTROL_TENSOR_NAME_PATTERNS +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = torch.float16 +INT8_PER_ROW_SCALE_DTYPE = torch.float16 +INT8_CLIP_Q = 99.99984 / 100.0 + +def tensor_nbytes(t: Tensor) -> int: + return int(t.numel()) * int(t.element_size()) + +def quantize_float_tensor(t: Tensor): + t32 = t.float() + if t32.ndim == 2: + clip_abs = torch.quantile(t32.abs(), INT8_CLIP_Q, dim=1) if t32.numel() else torch.empty((t32.shape[0],), dtype=torch.float32) + clipped = torch.maximum(torch.minimum(t32, clip_abs[:, None]), -clip_abs[:, None]) + scale = (clip_abs / 127.0).clamp_min(1.0 / 127.0) + q = torch.clamp(torch.round(clipped / scale[:, None]), -127, 127).to(torch.int8).contiguous() + return q, scale.to(dtype=INT8_PER_ROW_SCALE_DTYPE).contiguous() + clip_abs = float(torch.quantile(t32.abs().flatten(), INT8_CLIP_Q).item()) if t32.numel() else 0.0 + scale = torch.tensor(clip_abs / 127.0 if clip_abs > 0 else 1.0, dtype=torch.float32) + q = torch.clamp(torch.round(torch.clamp(t32, -clip_abs, clip_abs) / scale), -127, 127).to(torch.int8).contiguous() + return q, scale + +GPTQ_CLIP_PERCENTILES = [0.999, 0.9995, 0.9999, 0.99999, 1.0] + +def quantize_float_tensor_int6(t: Tensor): + return quantize_float_tensor_intN(t, max_val=31) + +def quantize_float_tensor_intN(t: Tensor, max_val: int = 31): + t32 = t.float() + if t32.ndim == 2: + best_q, best_scale, best_err = None, None, float('inf') + + for pct in GPTQ_CLIP_PERCENTILES: + if pct >= 1.0: + clip_abs = t32.abs().amax(dim=1).clamp_min(1e-8) + else: + clip_abs = torch.quantile(t32.abs(), pct, dim=1).clamp_min(1e-8) + scale = (clip_abs / max_val).clamp_min(1e-8).to(torch.float16) + clipped = t32.clamp(-clip_abs[:, None], clip_abs[:, None]) + q = torch.clamp(torch.round(clipped / scale.float()[:, None]), -max_val, max_val).to(torch.int8) + recon = q.float() * scale.float()[:, None] + err = (t32 - recon).pow(2).mean().item() + if err < best_err: + best_q, best_scale, best_err = q, scale, err + + return best_q.contiguous(), best_scale.contiguous() + abs_max = t32.abs().max().clamp_min(1e-8).item() + scale = torch.tensor(abs_max / max_val, dtype=torch.float16) + q = torch.clamp(torch.round(t32 / scale.float()), -max_val, max_val).to(torch.int8) + return q, scale + +# ── Unbank/rebank for quantization ── + +def _unbank_state_dict(sd, num_layers): + out = {} + for name, tensor in sd.items(): + if name == "unified_bank": + for i in range(num_layers): + w = tensor[i] # [d, d] + d = w.shape[0] + comp = d // 3 + out[f"blocks.{i}.attn.W_seeking.weight"] = w[:comp, :] + out[f"blocks.{i}.attn.W_offering.weight"] = w[comp:2*comp, :] + out[f"blocks.{i}.attn.W_content.weight"] = w[2*comp:, :] + elif name == "output_bank": + for i in range(num_layers): 
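+                # W_output is stored banked as [K, d, comp]; emit one [d, comp] matrix per
+                # block so quantization can choose int5/int6 per layer and prune per row.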
+ out[f"blocks.{i}.attn.W_output.weight"] = tensor[i] + elif name == "fc_bank": + for i in range(num_layers): + out[f"blocks.{i}.mlp.fc.weight"] = tensor[i] + elif name == "proj_bank": + for i in range(num_layers): + out[f"blocks.{i}.mlp.proj.weight"] = tensor[i] + else: + out[name] = tensor + return out + +def _rebank_state_dict(sd, num_layers, template_sd): + out = {} + consumed = set() + + unified_slices = [] + for i in range(num_layers): + sk = f"blocks.{i}.attn.W_seeking.weight" + ok = f"blocks.{i}.attn.W_offering.weight" + ck = f"blocks.{i}.attn.W_content.weight" + unified_slices.append(torch.cat([sd[sk], sd[ok], sd[ck]], dim=0)) + consumed.update([sk, ok, ck]) + out["unified_bank"] = torch.stack(unified_slices).to(dtype=template_sd["unified_bank"].dtype) + + for bank_name, key_template in [ + ("output_bank", "blocks.{i}.attn.W_output.weight"), + ("fc_bank", "blocks.{i}.mlp.fc.weight"), + ("proj_bank", "blocks.{i}.mlp.proj.weight"), + ]: + slices = [] + for i in range(num_layers): + k = key_template.format(i=i) + slices.append(sd[k]) + consumed.add(k) + out[bank_name] = torch.stack(slices).to(dtype=template_sd[bank_name].dtype) + + for name, tensor in sd.items(): + if name not in consumed: + out[name] = tensor + return out + +INT8_EMBED_PATTERNS = ("tok_emb.", "ve_shared.embed.") + +def quantize_state_dict_mixed(state_dict, int5_layers=None): + if int5_layers is None: + int5_layers = set() + result = {} + meta = {} + for name, tensor in state_dict.items(): + t = tensor.detach().to("cpu").contiguous() + if not t.is_floating_point(): + result[name] = t + meta[name] = "passthrough" + continue + if t.numel() <= INT8_KEEP_FLOAT_MAX_NUMEL: + if any(p in name for p in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + result[name] = t.float().contiguous() + meta[name] = "passthrough_ctrl" + else: + result[name] = t.to(torch.float16).contiguous() + meta[name] = "passthrough" + continue + is_embed = any(p in name for p in INT8_EMBED_PATTERNS) + if is_embed: + q, s = quantize_float_tensor(t) + result[name + ".q"] = q + result[name + ".scale"] = s + meta[name] = {"type": "int8"} + else: + layer_idx = -1 + if "blocks." 
in name:
+                try:
+                    layer_idx = int(name.split("blocks.")[1].split(".")[0])
+                except (ValueError, IndexError):
+                    pass
+            if layer_idx in int5_layers:
+                q, s = quantize_float_tensor_intN(t, max_val=15)
+                result[name + ".q"] = q
+                result[name + ".scale"] = s
+                meta[name] = {"type": "int5"}
+            else:
+                q, s = quantize_float_tensor_int6(t)
+                result[name + ".q"] = q
+                result[name + ".scale"] = s
+                meta[name] = {"type": "int6"}
+    return result, meta
+
+def dequantize_state_dict_mixed(result, meta, template_sd=None):
+    """Dequantize flat-key mixed int6/int8 state dict back to float tensors."""
+    out = {}
+    for name, info in meta.items():
+        if info in ("passthrough", "passthrough_ctrl"):
+            t = result[name]
+            if template_sd is not None and name in template_sd:
+                orig_dtype = template_sd[name].dtype
+                if t.dtype != orig_dtype:
+                    t = t.to(orig_dtype)
+            out[name] = t
+            continue
+        q = result[name + ".q"]
+        s = result[name + ".scale"]
+        if s.ndim > 0:
+            # per-row scale: broadcast [rows] across the remaining dims
+            deq = (q.float() * s.float().view(q.shape[0], *([1] * (q.ndim - 1))))
+        else:
+            deq = q.float() * float(s.item())
+        target_dtype = torch.bfloat16
+        if template_sd is not None and name in template_sd:
+            target_dtype = template_sd[name].dtype
+        out[name] = deq.to(target_dtype).contiguous()
+    return out
+
+
+def load_data_shard(file: Path) -> Tensor:
+    # Assumed fineweb .bin shard layout: 256×int32 header (magic, version, token count), then uint16 token ids.
+    header = np.fromfile(file, dtype="<i4", count=256)
+    num_tokens = int(header[2])
+    with file.open("rb") as f:
+        f.seek(256 * 4)
+        tokens = np.fromfile(f, dtype="<u2", count=num_tokens)
+    return torch.from_numpy(tokens.astype(np.int32))
+
+class TokenStream:
+    def __init__(self, pattern: str):
+        self.files = [Path(p) for p in sorted(glob.glob(pattern))]
+        if not self.files:
+            raise FileNotFoundError(f"No files found for pattern: {pattern}")
+        self.file_idx = 0
+        self.tokens = load_data_shard(self.files[self.file_idx])
+        self.pos = 0
+
+    def _advance_file(self) -> None:
+        self.file_idx = (self.file_idx + 1) % len(self.files)
+        self.tokens = load_data_shard(self.files[self.file_idx])
+        self.pos = 0
+
+    def take(self, n: int) -> Tensor:
+        chunks = []
+        remaining = n
+        while remaining > 0:
+            avail = self.tokens.numel() - self.pos
+            if avail <= 0:
+                self._advance_file()
+                continue
+            k = min(remaining, avail)
+            chunks.append(self.tokens[self.pos : self.pos + k])
+            self.pos += k
+            remaining -= k
+        return chunks[0] if len(chunks) == 1 else torch.cat(chunks)
+
+class DistributedTokenLoader:
+    def __init__(self, pattern: str, rank: int, world_size: int, device: torch.device):
+        self.rank, self.world_size, self.device = rank, world_size, device
+        self.stream = TokenStream(pattern)
+
+    def next_batch(self, global_tokens: int, seq_len: int, grad_accum_steps: int):
+        local_tokens = global_tokens // (self.world_size * grad_accum_steps)
+        per_rank_span = local_tokens + 1
+        chunk = self.stream.take(per_rank_span * self.world_size)
+        start = self.rank * per_rank_span
+        local = chunk[start : start + per_rank_span].to(dtype=torch.int64)
+        x = local[:-1].reshape(-1, seq_len)
+        y = local[1:].reshape(-1, seq_len)
+        return x.to(self.device, non_blocking=True), y.to(self.device, non_blocking=True)
+
+
+class RMSNorm(nn.Module):
+    def __init__(self, eps: float | None = None):
+        super().__init__()
+        self.eps = eps
+
+    def forward(self, x: Tensor) -> Tensor:
+        return F.rms_norm(x, (x.size(-1),), eps=self.eps)
+
+class CastedLinear(nn.Linear):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._is_mlp = False  # kept for compatibility
+
+    def forward(self, x: Tensor) -> Tensor:
+        w = self.weight
+        if self.training and _qat_active and w.numel() > INT8_KEEP_FLOAT_MAX_NUMEL:
+            w = _fake_quantize(w, _qat_bits)
+        bias = self.bias.to(x.dtype) if self.bias is not None else None
+        return F.linear(x, w.to(x.dtype), bias)
+
+# ── QAT globals (set during training) ──
+_qat_active = False
+_qat_bits = 6
+
+def _fake_quantize(w: Tensor, bits: int) -> Tensor:
+    max_val = (1 << (bits - 1)) - 1  # e.g. 
int6: max_val = 31 + with torch.no_grad(): + abs_max = w.abs().amax(dim=1, keepdim=True).clamp_min(1e-8) + scale = abs_max / max_val + w_q = (w / scale).round().clamp(-max_val, max_val) * scale + return w + (w_q - w).detach() + +def restore_low_dim_params_to_fp32(module: nn.Module) -> None: + with torch.no_grad(): + for name, param in module.named_parameters(): + if (param.ndim < 2 or any(p in name for p in CONTROL_TENSOR_NAME_PATTERNS)) and param.dtype != torch.float32: + param.data = param.data.float() + +class Rotary(nn.Module): + def __init__(self, dim: int, base: float = 10000.0): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self._seq_len_cached = 0 + self._cos_cached: Tensor | None = None + self._sin_cached: Tensor | None = None + + def forward(self, seq_len: int, device: torch.device, dtype: torch.dtype): + if self._cos_cached is None or self._seq_len_cached != seq_len or self._cos_cached.device != device: + t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype) + freqs = torch.outer(t, self.inv_freq.to(device)) + self._cos_cached = freqs.cos()[None, None, :, :] + self._sin_cached = freqs.sin()[None, None, :, :] + self._seq_len_cached = seq_len + return self._cos_cached.to(dtype=dtype), self._sin_cached.to(dtype=dtype) + +def apply_rotary_emb(x: Tensor, cos: Tensor, sin: Tensor) -> Tensor: + half = x.size(-1) // 2 + x1, x2 = x[..., :half], x[..., half:] + return torch.cat((x1 * cos + x2 * sin, x1 * (-sin) + x2 * cos), dim=-1) + +class SmearGate(nn.Module): + def __init__(self, dim: int): + super().__init__() + self.gate = nn.Parameter(torch.zeros(dim, dtype=torch.float32)) + + def forward(self, x: Tensor) -> Tensor: + g = torch.sigmoid(self.gate.to(dtype=x.dtype))[None, None, :] + x_prev = torch.cat([torch.zeros_like(x[:, :1]), x[:, :-1]], dim=1) + return (1 - g) * x + g * x_prev + +class ValueEmbedding(nn.Module): + def __init__(self, vocab_size: int, ve_dim: int, target_dim: int): + super().__init__() + self.embed = nn.Embedding(vocab_size, ve_dim) + nn.init.normal_(self.embed.weight, std=0.01) + self.proj = CastedLinear(ve_dim, target_dim, bias=False) if ve_dim != target_dim else None + if self.proj is not None: + nn.init.zeros_(self.proj.weight) + self.scale = nn.Parameter(torch.tensor(0.1, dtype=torch.float32)) + + def forward(self, token_ids: Tensor) -> Tensor: + h = self.embed(token_ids) + if self.proj is not None: + h = self.proj(h) + return h * self.scale.to(dtype=h.dtype) + +class UnifiedAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, rope_base: float, + seeking_gain_init: float, rope_fraction: float = 1.0): + super().__init__() + assert dim % 3 == 0, f"dim={dim} must be divisible by 3" + self.dim = dim + self.num_heads = num_heads + self.component_dim = dim // 3 + self.head_dim = self.component_dim // num_heads + assert self.component_dim % num_heads == 0 + + self.rope_dim = int(self.head_dim * rope_fraction) + self.rope_dim = max(self.rope_dim - (self.rope_dim % 2), 2) + self.pass_dim = self.head_dim - self.rope_dim + + self.seeking_gain = nn.Parameter( + torch.full((num_heads,), seeking_gain_init, dtype=torch.float32) + ) + self.rotary = Rotary(self.rope_dim, base=rope_base) + + def forward(self, x: Tensor, unified_w: Tensor, output_w: Tensor, unified_delta=None, v_embed=None) -> Tensor: + bsz, seqlen, _ = x.shape + + unified = F.linear(x, unified_w.to(x.dtype)) + if unified_delta is not None: + unified = unified + 
unified_delta
+
+        seeking, offering, content = unified.split(self.component_dim, dim=-1)
+
+        if v_embed is not None:
+            content = content + v_embed
+
+        def to_heads(t):
+            return t.reshape(bsz, seqlen, self.num_heads, self.head_dim).transpose(1, 2)
+
+        seeking = to_heads(seeking)
+        offering = to_heads(offering)
+        content = to_heads(content)
+
+        seeking = F.rms_norm(seeking, (seeking.size(-1),))
+        offering = F.rms_norm(offering, (offering.size(-1),))
+
+        cos, sin = self.rotary(seqlen, x.device, seeking.dtype)
+        if self.pass_dim > 0:
+            s_rope, s_pass = seeking[..., :self.rope_dim], seeking[..., self.rope_dim:]
+            o_rope, o_pass = offering[..., :self.rope_dim], offering[..., self.rope_dim:]
+            s_rope = apply_rotary_emb(s_rope, cos, sin)
+            o_rope = apply_rotary_emb(o_rope, cos, sin)
+            seeking = torch.cat([s_rope, s_pass], dim=-1)
+            offering = torch.cat([o_rope, o_pass], dim=-1)
+        else:
+            seeking = apply_rotary_emb(seeking, cos, sin)
+            offering = apply_rotary_emb(offering, cos, sin)
+
+        seeking = seeking * self.seeking_gain.to(dtype=seeking.dtype)[None, :, None, None]
+
+        # FA3 expects (batch, seq, heads, head_dim); transpose back from (batch, heads, seq, head_dim).
+        sq = seeking.transpose(1, 2)
+        of = offering.transpose(1, 2)
+        ct = content.transpose(1, 2)
+        dtype = sq.dtype
+        if dtype not in (torch.float16, torch.bfloat16):
+            sq, of, ct = sq.to(torch.bfloat16), of.to(torch.bfloat16), ct.to(torch.bfloat16)
+        # FA3 requires head_dim to be a multiple of 8; zero-pad (e.g. 44 -> 48) and slice off after.
+        hd = sq.size(-1)
+        pad_n = (8 - hd % 8) % 8
+        if pad_n > 0:
+            sq = F.pad(sq, (0, pad_n))
+            of = F.pad(of, (0, pad_n))
+            ct = F.pad(ct, (0, pad_n))
+        out = _flash_attn_func(sq, of, ct, causal=True)
+        y = out[0] if isinstance(out, tuple) else out
+        if pad_n > 0:
+            y = y[..., :hd]
+        if y.dtype != dtype:
+            y = y.to(dtype)
+        # FA3 output is already (batch, seq, heads, head_dim); flatten heads directly.
+        y = y.contiguous().reshape(bsz, seqlen, self.component_dim)
+        return F.linear(y, output_w.to(x.dtype))
+
+class SquaredReLUMLP(nn.Module):
+    """LeakyReLU(0.5)² MLP — weights passed from banks."""
+    def __init__(self, dim: int, mlp_mult: int):
+        super().__init__()
+
+    def forward(self, x: Tensor, fc_w: Tensor, proj_w: Tensor) -> Tensor:
+        return F.linear(
+            F.leaky_relu(F.linear(x, fc_w.to(x.dtype)), negative_slope=0.5).square(),
+            proj_w.to(x.dtype)
+        )
+
+class Block(nn.Module):
+    """Single transformer block with unified attention + MLP. 
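+    The residual stream mixes the running state with x0 (the embedded input) via resid_mix; attention and MLP branch outputs are scaled per-channel by attn_scale / mlp_scale.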
Weights from banks.""" + def __init__(self, dim: int, num_heads: int, mlp_mult: int, rope_base: float, + seeking_gain_init: float, rope_fraction: float = 1.0, + layer_idx: int = 0, ln_scale: bool = False): + super().__init__() + self.attn_norm = RMSNorm() + self.mlp_norm = RMSNorm() + self.attn = UnifiedAttention(dim, num_heads, rope_base, seeking_gain_init, rope_fraction) + self.mlp = SquaredReLUMLP(dim, mlp_mult) + self.attn_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.mlp_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.resid_mix = nn.Parameter(torch.stack((torch.ones(dim), torch.zeros(dim))).float()) + self.ln_scale_factor = 1.0 / math.sqrt(layer_idx + 1) if ln_scale else 1.0 + + def forward(self, x: Tensor, x0: Tensor, unified_w: Tensor, output_w: Tensor, + fc_w: Tensor, proj_w: Tensor, unified_delta_fn=None, v_embed=None) -> Tensor: + mix = self.resid_mix.to(dtype=x.dtype) + x = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + n = self.attn_norm(x) * self.ln_scale_factor + ud = unified_delta_fn(n) if unified_delta_fn is not None else None + x = x + self.attn_scale.to(dtype=x.dtype)[None, None, :] * self.attn(n, unified_w, output_w, ud, v_embed=v_embed) + x = x + self.mlp_scale.to(dtype=x.dtype)[None, None, :] * self.mlp(self.mlp_norm(x) * self.ln_scale_factor, fc_w, proj_w) + return x + +class YoctoGPT(nn.Module): + def __init__(self, vocab_size: int, model_dim: int, num_heads: int, + num_unique_layers: int, num_recurrences: int, mlp_mult: int, + tie_embeddings: bool, tied_embed_init_std: float, + logit_softcap: float, rope_base: float, seeking_gain_init: float, + rope_fraction: float = 1.0, + ln_scale: bool = True, + ve_enabled: bool = True, ve_dim: int = 128, ve_layers: str = "8,9", + int5_layers: str = ""): + super().__init__() + self.tie_embeddings = tie_embeddings + self.logit_softcap = logit_softcap + self.num_unique_layers = num_unique_layers + self.num_recurrences = num_recurrences + self.int5_layer_set = set(int(x) for x in int5_layers.split(",") if x.strip()) + effective = num_unique_layers * num_recurrences + + comp_dim = model_dim // 3 + mlp_dim = mlp_mult * model_dim + + self.tok_emb = nn.Embedding(vocab_size, model_dim) + self.bigram = None + self.smear = SmearGate(model_dim) + + K = num_unique_layers + self.unified_bank = nn.Parameter(torch.empty(K, model_dim, model_dim)) # W_unified: d→d + self.output_bank = nn.Parameter(torch.empty(K, model_dim, comp_dim)) # W_output: comp→d (F.linear expects [out, in]) + self.fc_bank = nn.Parameter(torch.empty(K, mlp_dim, model_dim)) # MLP fc: d→mlp_dim + self.proj_bank = nn.Parameter(torch.empty(K, model_dim, mlp_dim)) # MLP proj: mlp_dim→d + + self.blocks = nn.ModuleList([ + Block(model_dim, num_heads, mlp_mult, rope_base, seeking_gain_init, rope_fraction, + layer_idx=k, ln_scale=ln_scale) + for k in range(num_unique_layers) + ]) + + self.num_encoder_layers = effective // 2 + self.num_decoder_layers = effective - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = nn.Parameter(torch.ones(self.num_skip_weights, model_dim, dtype=torch.float32)) + + self.ve_layer_indices = [int(x) for x in ve_layers.split(",") if x.strip()] if ve_enabled else [] + if self.ve_layer_indices: + self.ve_shared = ValueEmbedding(vocab_size, ve_dim, comp_dim) + self.ve_layer_scales = nn.ParameterList( + [nn.Parameter(torch.ones(1, dtype=torch.float32)) for _ in self.ve_layer_indices] + ) + else: + self.ve_shared = None + 
self.ve_layer_scales = nn.ParameterList() + + self.final_norm = RMSNorm() + self.lm_head = None if tie_embeddings else CastedLinear(model_dim, vocab_size, bias=False) + if self.lm_head is not None: + self.lm_head._zero_init = True + self._init_weights(tied_embed_init_std) + + def _init_weights(self, std: float) -> None: + if self.tie_embeddings: + nn.init.normal_(self.tok_emb.weight, mean=0.0, std=std) + K = self.num_unique_layers + proj_scale = 1.0 / math.sqrt(2 * K * self.num_recurrences) + for i in range(K): + nn.init.orthogonal_(self.unified_bank.data[i], gain=1.0) + nn.init.zeros_(self.output_bank.data[i]) + self.output_bank.data[i].mul_(proj_scale) + nn.init.orthogonal_(self.fc_bank.data[i], gain=1.0) + nn.init.zeros_(self.proj_bank.data[i]) + self.proj_bank.data[i].mul_(proj_scale) + for module in self.modules(): + if isinstance(module, nn.Linear) and getattr(module, "_zero_init", False): + nn.init.zeros_(module.weight) + + def _qat_weight(self, w: Tensor, layer_idx: int = -1) -> Tensor: + if self.training and _qat_active: + bits = 5 if layer_idx in self.int5_layer_set else _qat_bits + return _fake_quantize(w, bits) + return w + + def _get_ve(self, layer_idx: int, input_ids: Tensor, ve_cache: dict) -> Tensor | None: + """Get value embedding for a specific layer using shared table + per-layer scale.""" + if self.ve_shared is None or layer_idx not in self.ve_layer_indices: + return None + if 've' not in ve_cache: + ve_cache['ve'] = self.ve_shared(input_ids) + ve_idx = self.ve_layer_indices.index(layer_idx) + return ve_cache['ve'] * self.ve_layer_scales[ve_idx].to(dtype=ve_cache['ve'].dtype) + + def forward(self, input_ids: Tensor, target_ids: Tensor, lora=None) -> Tensor: + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_cache: dict = {} + + eff_layer_idx = 0 + for _r in range(self.num_recurrences): + for k in range(self.num_unique_layers): + is_encoder = eff_layer_idx < self.num_encoder_layers + + if not is_encoder and skips: + dec_idx = eff_layer_idx - self.num_encoder_layers + if dec_idx < self.num_skip_weights: + x = x + self.skip_weights[dec_idx].to(dtype=x.dtype)[None, None, :] * skips.pop() + + ud_fn = lora.unified_loras[k] if (lora and lora.unified_loras is not None) else None + ve = self._get_ve(k, input_ids, ve_cache) + x = self.blocks[k](x, x0, + self._qat_weight(self.unified_bank[k], k), + self._qat_weight(self.output_bank[k], k), + self._qat_weight(self.fc_bank[k], k), + self._qat_weight(self.proj_bank[k], k), + ud_fn, v_embed=ve) + + if is_encoder: + skips.append(x) + + eff_layer_idx += 1 + + x = self.final_norm(x) + if self.tie_embeddings: + logits = F.linear(x, self.tok_emb.weight) + else: + logits = self.lm_head(x) + logits = logits + (lora.lm_head_lora(x) if lora else 0) + logits = self.logit_softcap * torch.tanh(logits / self.logit_softcap) + + if lora: + bsz, sl, V = logits.shape + return F.cross_entropy( + logits.float().reshape(-1, V), target_ids.reshape(-1), reduction="none" + ).reshape(bsz, sl) + return F.cross_entropy(logits.float().reshape(-1, logits.size(-1)), target_ids.reshape(-1), reduction="mean") + + def forward_logits(self, input_ids: Tensor) -> Tensor: + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_cache: dict = {} + + eff_layer_idx = 0 + for _r in 
range(self.num_recurrences): + for k in range(self.num_unique_layers): + is_encoder = eff_layer_idx < self.num_encoder_layers + + if not is_encoder and skips: + dec_idx = eff_layer_idx - self.num_encoder_layers + if dec_idx < self.num_skip_weights: + x = x + self.skip_weights[dec_idx].to(dtype=x.dtype)[None, None, :] * skips.pop() + + ve = self._get_ve(k, input_ids, ve_cache) + x = self.blocks[k](x, x0, + self.unified_bank[k], self.output_bank[k], + self.fc_bank[k], self.proj_bank[k], + v_embed=ve) + + if is_encoder: + skips.append(x) + + eff_layer_idx += 1 + + x = self.final_norm(x) + if self.tie_embeddings: + logits = F.linear(x, self.tok_emb.weight) + else: + logits = self.lm_head(x) + return self.logit_softcap * torch.tanh(logits / self.logit_softcap) + +def eval_val_legal_ttt(args, base_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, + is_boundary_token_lut, log0=print): + seq_len = args.train_seq_len + stride = args.sliding_window_stride + total_tokens = val_tokens.numel() - 1 + ttt_chunk = args.legal_ttt_chunk_tokens + + window_starts = [ws for ws in range(0, total_tokens, stride) + if min(ws + seq_len, total_tokens) - ws >= stride or ws == 0] + + num_chunks = (total_tokens + ttt_chunk - 1) // ttt_chunk + chunk_windows: list[list[int]] = [[] for _ in range(num_chunks)] + for ws in window_starts: + end = min(ws + seq_len, total_tokens) + wlen = end - ws + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_start = ws + s + ci = min(scored_start // ttt_chunk, num_chunks - 1) + chunk_windows[ci].append(ws) + + log0(f"legal_ttt:start chunks={num_chunks} chunk_tokens={ttt_chunk} " + f"total_windows={len(window_starts)} stride={stride} " + f"lr={args.legal_ttt_lr} epochs={args.legal_ttt_epochs} " + f"freeze_blocks={args.legal_ttt_freeze_blocks}") + + loss_sum = torch.zeros((), device=device, dtype=torch.float64) + token_count = torch.zeros((), device=device, dtype=torch.float64) + byte_count = torch.zeros((), device=device, dtype=torch.float64) + + frozen_block_ids = set(range(min(args.legal_ttt_freeze_blocks, len(base_model.blocks)))) + ttt_params = [] + for name, p in base_model.named_parameters(): + freeze = False + for bi in frozen_block_ids: + if f"blocks.{bi}." 
in name: + freeze = True + break + if freeze: + p.requires_grad_(False) + else: + p.requires_grad_(True) + ttt_params.append(p) + + log0(f"legal_ttt:params unfrozen={sum(p.numel() for p in ttt_params)} " + f"frozen={sum(p.numel() for p in base_model.parameters() if not p.requires_grad)}") + + optimizer = torch.optim.SGD(ttt_params, lr=args.legal_ttt_lr, momentum=args.legal_ttt_momentum) + batch_seqs = args.legal_ttt_batch_seqs + t0 = time.perf_counter() + + for ci in range(num_chunks): + windows = chunk_windows[ci] + if not windows: + continue + + my_s = (len(windows) * rank) // world_size + my_e = (len(windows) * (rank + 1)) // world_size + my_windows = windows[my_s:my_e] + + base_model.eval() + with torch.no_grad(): + for bi in range(0, len(my_windows), batch_seqs): + batch_ws = my_windows[bi:bi + batch_seqs] + bsz = len(batch_ws) + x_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + y_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + wlens: list[int] = [] + for i, ws in enumerate(batch_ws): + end = min(ws + seq_len, total_tokens) + wlen = end - ws + wlens.append(wlen) + chunk_tok = val_tokens[ws:end + 1].to(dtype=torch.int64, device=device) + x_batch[i, :wlen] = chunk_tok[:-1] + y_batch[i, :wlen] = chunk_tok[1:] + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + logits = base_model.forward_logits(x_batch) + nll = F.cross_entropy( + logits.reshape(-1, logits.size(-1)).float(), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + for i, ws in enumerate(batch_ws): + wlen = wlens[i] + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_nll = nll[i, s:wlen].to(torch.float64) + loss_sum += scored_nll.sum() + token_count += float(wlen - s) + tgt, prev = y_batch[i, s:wlen], x_batch[i, s:wlen] + tb = base_bytes_lut[tgt].to(torch.float64) + tb += (has_leading_space_lut[tgt] & ~is_boundary_token_lut[prev]).to(torch.float64) + byte_count += tb.sum() + + is_last_chunk = (ci == num_chunks - 1) + if not is_last_chunk and args.legal_ttt_epochs > 0: + base_model.train() + for block in base_model.blocks: + block.attn.rotary._cos_cached = None + block.attn.rotary._sin_cached = None + chunk_start = ci * ttt_chunk + chunk_end = min((ci + 1) * ttt_chunk, total_tokens) + chunk_seqs = (chunk_end - chunk_start) // seq_len + if chunk_seqs > 0: + cos_lr = args.legal_ttt_lr * 0.5 * (1.0 + math.cos(math.pi * ci / max(num_chunks - 1, 1))) + for pg in optimizer.param_groups: + pg['lr'] = cos_lr + my_seq_s = (chunk_seqs * rank) // world_size + my_seq_e = (chunk_seqs * (rank + 1)) // world_size + my_chunk_seqs = my_seq_e - my_seq_s + for _ep in range(args.legal_ttt_epochs): + for bs in range(0, my_chunk_seqs, batch_seqs): + be = min(bs + batch_seqs, my_chunk_seqs) + actual_bs = my_seq_s + bs + start_tok = chunk_start + actual_bs * seq_len + end_tok = chunk_start + (my_seq_s + be) * seq_len + 1 + if end_tok > val_tokens.numel(): + continue + local = val_tokens[start_tok:end_tok].to(device=device, dtype=torch.int64) + x = local[:-1].reshape(-1, seq_len) + y = local[1:].reshape(-1, seq_len) + optimizer.zero_grad(set_to_none=True) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + loss = base_model(x, y) + loss.backward() + if world_size > 1: + for p in ttt_params: + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + torch.nn.utils.clip_grad_norm_(ttt_params, args.legal_ttt_grad_clip) + optimizer.step() + + if rank == 0 and (ci % 10 == 0 or ci == num_chunks - 1): + elapsed = time.perf_counter() - t0 + rl = 
loss_sum.item() / max(token_count.item(), 1) + rbpb = rl / math.log(2.0) * (token_count.item() / max(byte_count.item(), 1)) if token_count.item() > 0 else 0.0 + log0(f" legal_ttt_chunk [{ci+1}/{num_chunks}] bpb={rbpb:.6f} time={elapsed:.1f}s") + + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(byte_count, op=dist.ReduceOp.SUM) + + val_loss = (loss_sum / token_count).item() + val_bpb = val_loss / math.log(2.0) * (token_count.item() / byte_count.item()) + + for p in base_model.parameters(): + p.requires_grad_(True) + base_model.eval() + + log0(f"legal_ttt:done val_loss={val_loss:.6f} val_bpb={val_bpb:.6f} " + f"elapsed={time.perf_counter() - t0:.1f}s") + return val_loss, val_bpb + +def prune_to_fit(result, meta, code_bytes, target_bytes=16_000_000, compress="lzma"): + """Selectively zero ±1 quantized values to fit artifact in budget.""" + buf = io.BytesIO() + torch.save({"w": result, "m": meta}, buf) + raw = buf.getvalue() + if compress == "lzma": + blob = lzma.compress(raw, preset=6) + else: + blob = zlib.compress(raw, level=9) + if len(blob) + code_bytes <= target_bytes: + return result, len(blob) + + candidates = [] + for name, info in meta.items(): + if isinstance(info, dict) and info.get("type") in ("int6", "int5"): + q = result[name + ".q"] + s = result[name + ".scale"] + for row in range(q.shape[0]): + mask = (q[row].abs() == 1) + if mask.any(): + scale_sq = float(s[row].float() ** 2) if s.ndim > 0 else float(s.float() ** 2) + count = int(mask.sum().item()) + candidates.append((scale_sq, name, row, count)) + + candidates.sort(key=lambda x: x[0]) + + batch_size = max(1, len(candidates) // 20) + for i in range(0, len(candidates), batch_size): + batch = candidates[i:i + batch_size] + for _, name, row, _ in batch: + q = result[name + ".q"] + mask = (q[row].abs() == 1) + q[row][mask] = 0 + + buf = io.BytesIO() + torch.save({"w": result, "m": meta}, buf) + raw = buf.getvalue() + if compress == "lzma": + blob = lzma.compress(raw, preset=6) + else: + blob = zlib.compress(raw, level=9) + if len(blob) + code_bytes <= target_bytes: + return result, len(blob) + + return result, len(blob) + + +def main() -> None: + + try: + code = Path(__file__).read_text(encoding="utf-8") + args = Hyperparameters() + args.validate() + + # ── Distributed + CUDA ── + distributed = "RANK" in os.environ and "WORLD_SIZE" in os.environ + rank = int(os.environ.get("RANK", "0")) + world_size = int(os.environ.get("WORLD_SIZE", "1")) + local_rank = int(os.environ.get("LOCAL_RANK", "0")) + if 8 % world_size != 0: + raise ValueError(f"WORLD_SIZE={world_size} must divide 8") + grad_accum_steps = 8 // world_size + grad_scale = 1.0 / grad_accum_steps + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required") + device = torch.device("cuda", local_rank) + torch.cuda.set_device(device) + if distributed: + dist.init_process_group(backend="nccl", device_id=device) + dist.barrier() + master_process = rank == 0 + + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + from torch.backends.cuda import enable_cudnn_sdp, enable_flash_sdp, enable_math_sdp, enable_mem_efficient_sdp + enable_cudnn_sdp(False) + enable_flash_sdp(True) + enable_mem_efficient_sdp(False) + enable_math_sdp(False) + + logfile = None + if master_process: + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{args.run_id}.txt" + logger.info(f"Log file: {logfile}") + + def log0(msg: str, 
console: bool = True) -> None: + if not master_process: + return + if console: + logger.info(msg) + if logfile is not None: + with open(logfile, "a", encoding="utf-8") as f: + print(msg, file=f) + + log0(code, console=False) + log0("=" * 100, console=False) + + # ── Tokenizer + Validation ── + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError(f"VOCAB_SIZE={args.vocab_size} != tokenizer vocab_size={int(sp.vocab_size())}") + val_tokens = load_validation_tokens(args.val_files, args.train_seq_len) + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts(sp, args.vocab_size, device) + + # ── Model ── + base_model = YoctoGPT( + vocab_size=args.vocab_size, + model_dim=args.model_dim, + num_heads=args.num_heads, + num_unique_layers=args.num_unique_layers, + num_recurrences=args.num_recurrences, + mlp_mult=args.mlp_mult, + tie_embeddings=args.tie_embeddings, + tied_embed_init_std=args.tied_embed_init_std, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + seeking_gain_init=args.seeking_gain_init, + rope_fraction=args.rope_fraction, + ln_scale=args.ln_scale, + ve_enabled=args.ve_enabled, + ve_dim=args.ve_dim, + ve_layers=args.ve_layers, + int5_layers=args.int5_layers, + ).to(device).bfloat16() + + base_model.unified_bank.data = base_model.unified_bank.data.float() + base_model.output_bank.data = base_model.output_bank.data.float() + base_model.fc_bank.data = base_model.fc_bank.data.float() + base_model.proj_bank.data = base_model.proj_bank.data.float() + + for module in base_model.modules(): + if isinstance(module, CastedLinear): + module.float() + if isinstance(module, Rotary): + module.inv_freq.data = module.inv_freq.data.float() + restore_low_dim_params_to_fp32(base_model) + + if master_process: + log_architecture(base_model, args) + + try: + _test_mod = torch.compile(lambda q, k, v: _flash_attn_func(q, k, v, causal=True), dynamic=False) + _tq = torch.randn(1, 8, 1, 48, dtype=torch.bfloat16, device=device) + with torch.amp.autocast('cuda', dtype=torch.bfloat16): + _test_mod(_tq, _tq, _tq) + log0("torch.compile + FA3: COMPATIBLE") + compiled_model = torch.compile(base_model, dynamic=False) + model = compiled_model + except Exception as e: + log0(f"torch.compile + FA3: INCOMPATIBLE ({type(e).__name__}), running uncompiled") + model = base_model + + log0("attention_backend:fa3") + + # ── Optimizer: banks → Muon, rest → Adam/AdamW ── + matrix_params = [ + base_model.unified_bank, base_model.output_bank, + base_model.fc_bank, base_model.proj_bank, + ] + + block_named_params = list(base_model.blocks.named_parameters()) + scalar_params = [p for name, p in block_named_params + if p.ndim < 2 or any(pat in name for pat in CONTROL_TENSOR_NAME_PATTERNS)] + if base_model.skip_weights.numel() > 0: + scalar_params.append(base_model.skip_weights) + scalar_params.append(base_model.smear.gate) + if base_model.ve_shared is not None: + scalar_params.append(base_model.ve_shared.scale) + for s in base_model.ve_layer_scales: + scalar_params.append(s) + if base_model.ve_shared.proj is not None: + scalar_params.append(base_model.ve_shared.proj.weight) + + token_lr = args.tied_embed_lr if args.tie_embeddings else args.embed_lr + tok_param_groups = [{"params": [base_model.tok_emb.weight], "lr": token_lr, "base_lr": token_lr}] + if base_model.ve_shared is not None: + 
tok_param_groups.append({"params": [base_model.ve_shared.embed.weight], "lr": token_lr, "base_lr": token_lr}) + optimizer_tok = torch.optim.AdamW(tok_param_groups, + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizer_muon = Muon(matrix_params, lr=args.matrix_lr, momentum=args.muon_momentum, + backend_steps=args.muon_backend_steps, weight_decay=args.muon_weight_decay) + for group in optimizer_muon.param_groups: + group["base_lr"] = args.matrix_lr + optimizer_scalar = torch.optim.AdamW([{"params": scalar_params, "lr": args.scalar_lr, "base_lr": args.scalar_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizers = [optimizer_tok, optimizer_muon, optimizer_scalar] + + replicated_params = [base_model.tok_emb.weight] + scalar_params + if base_model.ve_shared is not None: + replicated_params.append(base_model.ve_shared.embed.weight) + + if base_model.lm_head is not None: + optimizer_head = torch.optim.Adam([{"params": [base_model.lm_head.weight], "lr": args.head_lr, "base_lr": args.head_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, fused=True) + optimizers.insert(1, optimizer_head) + replicated_params.append(base_model.lm_head.weight) + if base_model.bigram is not None: + bigram_params = list(base_model.bigram.parameters()) + optimizer_bigram = torch.optim.AdamW([{"params": bigram_params, "lr": token_lr, "base_lr": token_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizers.append(optimizer_bigram) + replicated_params.extend(bigram_params) + + n_params = sum(p.numel() for p in base_model.parameters()) + log0(f"model_params:{n_params} effective_depth:{args.num_effective_layers}") + if base_model.int5_layer_set: + log0(f"mixed_precision: int5_layers={sorted(base_model.int5_layer_set)} int6_layers={sorted(set(range(args.num_unique_layers)) - base_model.int5_layer_set)}") + log0(f"world_size:{world_size} grad_accum_steps:{grad_accum_steps}") + + # ── Data loader + warmup ── + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + + def zero_grad_all(): + for opt in optimizers: + opt.zero_grad(set_to_none=True) + + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + + # ── EMA + SWA shadow weights ── + ema_state = None + swa_params = None + swa_count = 0 + if args.ema_decay > 0: + ema_state = {name: t.detach().float().clone() for name, t in base_model.state_dict().items()} + log0(f"EMA enabled: decay={args.ema_decay}") + if args.swa_every > 0: + swa_params = {name: t.detach().cpu().clone() for name, t in base_model.state_dict().items()} + log0(f"SWA enabled: every {args.swa_every} steps when lr_scale < {args.swa_threshold}") + + def update_ema_swa(step, lr_scale): + nonlocal swa_count + with torch.no_grad(): + if ema_state is not None: + for name, t in base_model.state_dict().items(): + ema_state[name].mul_(args.ema_decay).add_(t.detach().float(), alpha=1.0 - args.ema_decay) + + if swa_params is not None and step > 0 and step % args.swa_every == 0: + if lr_scale < args.swa_threshold: + if swa_count == 0: + for name, t in base_model.state_dict().items(): + swa_params[name].copy_(t.detach().cpu()) + swa_count = 1 + log0(f"SWA started at step {step} (lr_scale={lr_scale:.4f})") + else: + for name, t in base_model.state_dict().items(): + swa_params[name] += t.detach().cpu() + swa_count += 1 + + def get_best_weights(): + 
"""Return best averaged weights. EMA preferred (per PR#401).""" + if ema_state is not None: + log0(f"Using EMA weights (decay={args.ema_decay})") + current_state = base_model.state_dict() + return {name: t.to(dtype=current_state[name].dtype) for name, t in ema_state.items()} + if swa_params is not None and swa_count >= 2: + log0(f"Using SWA weights ({swa_count} checkpoints)") + current_state = base_model.state_dict() + return {name: (t / swa_count).to(dtype=current_state[name].dtype) + for name, t in swa_params.items()} + return None + + def lr_mul(step, elapsed_ms): + if args.lr_warmup_steps > 0 and step < args.lr_warmup_steps: + return (step + 1) / args.lr_warmup_steps + + if args.warmdown_iters <= 0: + return 1.0 + if max_wallclock_ms is None: + warmdown_start = max(args.iterations - args.warmdown_iters, 0) + return max((args.iterations - step) / max(args.warmdown_iters, 1), 0.0) if warmdown_start <= step < args.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = args.warmdown_iters * step_ms + remaining_ms = max(max_wallclock_ms - elapsed_ms, 0.0) + return remaining_ms / max(warmdown_ms, 1e-9) if remaining_ms <= warmdown_ms else 1.0 + + if args.warmup_steps > 0: + initial_model_state = {n: t.detach().cpu().clone() for n, t in base_model.state_dict().items()} + initial_optimizer_states = [copy.deepcopy(opt.state_dict()) for opt in optimizers] + model.train() + for ws in range(args.warmup_steps): + zero_grad_all() + for ms in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + wl = model(x, y) + (wl * grad_scale).backward() + if distributed: + for p in base_model.parameters(): + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + zero_grad_all() + if ws + 1 == args.warmup_steps or (ws + 1) % 10 == 0: + log0(f"warmup_step:{ws+1}/{args.warmup_steps}") + base_model.load_state_dict(initial_model_state, strict=True) + for opt, state in zip(optimizers, initial_optimizer_states, strict=True): + opt.load_state_dict(state) + zero_grad_all() + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + + # ── Main training loop ── + training_time_ms = 0.0 + stop_after_step = None + torch.cuda.synchronize() + t0 = time.perf_counter() + step = 0 + + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + + should_validate = last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0) + if should_validate: + torch.cuda.synchronize() + training_time_ms += 1000.0 * (time.perf_counter() - t0) + val_loss, val_bpb = eval_val(args, model, rank, world_size, device, grad_accum_steps, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut) + log0(f"step:{step}/{args.iterations} val_loss:{val_loss:.4f} val_bpb:{val_bpb:.4f} " + f"train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms / max(step, 1):.2f}ms") + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if stop_after_step is not None and step < args.iterations: + log0(f"stopping_early: wallclock_cap train_time:{training_time_ms:.0f}ms step:{step}/{args.iterations}") + break + + elapsed_ms = training_time_ms + 1000.0 * (time.perf_counter() - t0) + scale = lr_mul(step, elapsed_ms) + + # ── QAT activation check ── + global _qat_active, _qat_bits + if args.qat_bits > 0 and not _qat_active: + if 
max_wallclock_ms is not None and max_wallclock_ms > 0: + frac = elapsed_ms / max_wallclock_ms + else: + frac = step / max(args.iterations, 1) + if frac >= args.qat_start_fraction: + _qat_active = True + _qat_bits = args.qat_bits + log0(f"QAT enabled: int{args.qat_bits} at step {step} (fraction={frac:.2f})") + + zero_grad_all() + train_loss = torch.zeros((), device=device) + + for micro_step in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + loss = model(x, y) + train_loss += loss.detach() + (loss * grad_scale).backward() + train_loss /= grad_accum_steps + + frac = min(step / args.muon_momentum_warmup_steps, 1.0) if args.muon_momentum_warmup_steps > 0 else 1.0 + muon_momentum = (1 - frac) * args.muon_momentum_warmup_start + frac * args.muon_momentum + for group in optimizer_muon.param_groups: + group["momentum"] = muon_momentum + + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["base_lr"] * scale + + if args.grad_clip_norm > 0: + torch.nn.utils.clip_grad_norm_(base_model.parameters(), args.grad_clip_norm) + + optimizer_muon.launch_reduce_scatters() + if distributed: + for p in replicated_params: + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + if opt is not optimizer_muon: + opt.step() + optimizer_muon.step() + + update_ema_swa(step, scale) + zero_grad_all() + + step += 1 + approx_time = training_time_ms + 1000.0 * (time.perf_counter() - t0) + if args.train_log_every > 0 and (step <= 10 or step % args.train_log_every == 0): + log0(f"step:{step}/{args.iterations} train_loss:{train_loss.item():.4f} " + f"train_time:{approx_time:.0f}ms step_avg:{approx_time / step:.2f}ms") + + reached_cap = max_wallclock_ms is not None and approx_time >= max_wallclock_ms + if distributed and max_wallclock_ms is not None: + rc = torch.tensor(int(reached_cap), device=device) + dist.all_reduce(rc, op=dist.ReduceOp.MAX) + reached_cap = bool(rc.item()) + if stop_after_step is None and reached_cap: + stop_after_step = step + + log0(f"peak memory: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB") + + # ── Load best averaged weights (EMA > SWA > raw) ── + best_weights = get_best_weights() + if best_weights is not None: + base_model.load_state_dict(best_weights, strict=True) + + # ── Serialization ── + if master_process: + torch.save(base_model.state_dict(), "final_model.pt") + model_bytes = os.path.getsize("final_model.pt") + code_bytes = len(code.encode("utf-8")) + log0(f"Raw model: {model_bytes} bytes, code: {code_bytes} bytes") + + # ── Mixed int6/int8 quantization + roundtrip (if QAT was used) ── + if args.qat_bits == 6: + if master_process: + base_model.load_state_dict(torch.load("final_model.pt", map_location="cpu"), strict=True) + sd_cpu = {k: v.detach().cpu() for k, v in base_model.state_dict().items()} + unbanked_sd = _unbank_state_dict(sd_cpu, args.num_unique_layers) + int5_set = set(int(x) for x in args.int5_layers.split(",") if x.strip()) + mixed_result, mixed_meta = quantize_state_dict_mixed(unbanked_sd, int5_layers=int5_set) + code_bytes = len(code.encode("utf-8")) + mixed_result, _ = prune_to_fit(mixed_result, mixed_meta, code_bytes, + target_bytes=16_000_000, compress=args.compression) + mixed_buf = io.BytesIO() + torch.save({"w": mixed_result, "m": mixed_meta}, mixed_buf) + mixed_raw = mixed_buf.getvalue() + if args.compression == "lzma": + mixed_blob = 
lzma.compress(mixed_raw, preset=6) + mixed_label = "lzma-6" + elif args.compression == "zstd": + try: + import zstandard as zstd_mod + mixed_blob = zstd_mod.ZstdCompressor(level=22).compress(mixed_raw) + mixed_label = "zstd-22" + except ImportError: + mixed_blob = zlib.compress(mixed_raw, level=9) + mixed_label = "zlib-9" + else: + mixed_blob = zlib.compress(mixed_raw, level=9) + mixed_label = "zlib-9" + if master_process: + with open("final_model.mixed.ptz", "wb") as f: + f.write(mixed_blob) + mixed_bytes = os.path.getsize("final_model.mixed.ptz") + code_bytes = len(code.encode("utf-8")) + log0(f"mixed_int6_int8+{mixed_label}: {mixed_bytes} bytes, total: {mixed_bytes + code_bytes} bytes") + if mixed_bytes + code_bytes > 16_000_000: + logger.warning(f"OVER BUDGET: {mixed_bytes + code_bytes} > 16,000,000") + else: + log0(f"FITS: {mixed_bytes + code_bytes} <= 16,000,000") + if distributed: + dist.barrier() + with open("final_model.mixed.ptz", "rb") as f: + mixed_qblob = f.read() + if args.compression == "lzma": + mixed_decompressed = lzma.decompress(mixed_qblob) + elif args.compression == "zstd": + try: + import zstandard as zstd_mod + mixed_decompressed = zstd_mod.ZstdDecompressor().decompress(mixed_qblob) + except ImportError: + mixed_decompressed = zlib.decompress(mixed_qblob) + else: + mixed_decompressed = zlib.decompress(mixed_qblob) + quant_state = torch.load(io.BytesIO(mixed_decompressed), map_location="cpu") + deq_unbanked = dequantize_state_dict_mixed(quant_state["w"], quant_state["m"], unbanked_sd) + deq_sd = _rebank_state_dict(deq_unbanked, args.num_unique_layers, sd_cpu) + base_model.load_state_dict(deq_sd, strict=True) + torch.cuda.synchronize() + qm_val_loss, qm_val_bpb = eval_val(args, model, rank, world_size, device, grad_accum_steps, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut) + log0(f"final_mixed_{mixed_label}_roundtrip val_loss:{qm_val_loss:.4f} val_bpb:{qm_val_bpb:.4f}") + log0(f"final_mixed_{mixed_label}_roundtrip_exact val_loss:{qm_val_loss:.8f} val_bpb:{qm_val_bpb:.8f}") + + # ── Legal Score-First TTT eval ── + if args.legal_ttt_enabled: + best_weights = get_best_weights() + if best_weights is not None: + base_model.load_state_dict(best_weights, strict=True) + torch.cuda.synchronize() + t_legal = time.perf_counter() + legal_loss, legal_bpb = eval_val_legal_ttt( + args, base_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, + is_boundary_token_lut, log0=log0) + log0(f"final_legal_ttt val_loss:{legal_loss:.4f} val_bpb:{legal_bpb:.4f} " + f"eval_time:{1000.0 * (time.perf_counter() - t_legal):.0f}ms") + + if distributed: + dist.destroy_process_group() + + except Exception: + logger.error(f"FATAL ERROR:\n{traceback.format_exc()}") + raise + +if __name__ == "__main__": + main() +==================================================================================================== +torch.compile + FA3: COMPATIBLE +attention_backend:fa3 +model_params:23209295 effective_depth:11 +world_size:8 grad_accum_steps:1 +EMA enabled: decay=0.997 +SWA enabled: every 50 steps when lr_scale < 0.2 +warmup_step:10/20 +warmup_step:20/20 +step:0/72000 val_loss:6.9291 val_bpb:4.1038 train_time:0ms step_avg:0.02ms +step:1/72000 train_loss:6.9301 train_time:99ms step_avg:98.71ms +step:2/72000 train_loss:6.8909 train_time:120ms step_avg:59.95ms +step:3/72000 train_loss:6.7813 train_time:171ms step_avg:57.13ms +step:4/72000 train_loss:6.5785 train_time:219ms step_avg:54.68ms +step:5/72000 train_loss:6.2913 train_time:267ms 
step_avg:53.43ms +step:6/72000 train_loss:6.1285 train_time:315ms step_avg:52.49ms +step:7/72000 train_loss:5.8954 train_time:364ms step_avg:52.07ms +step:8/72000 train_loss:5.8447 train_time:413ms step_avg:51.58ms +step:9/72000 train_loss:5.7837 train_time:462ms step_avg:51.34ms +step:10/72000 train_loss:5.7434 train_time:509ms step_avg:50.95ms +step:200/72000 train_loss:2.9130 train_time:10266ms step_avg:51.33ms +step:400/72000 train_loss:2.3502 train_time:20364ms step_avg:50.91ms +step:600/72000 train_loss:2.5323 train_time:30564ms step_avg:50.94ms +step:800/72000 train_loss:2.2647 train_time:40768ms step_avg:50.96ms +step:1000/72000 train_loss:2.3641 train_time:51000ms step_avg:51.00ms +step:1000/72000 val_loss:2.3199 val_bpb:1.3740 train_time:51016ms step_avg:51.02ms +step:1200/72000 train_loss:2.3810 train_time:61243ms step_avg:51.04ms +step:1400/72000 train_loss:2.4197 train_time:71490ms step_avg:51.06ms +step:1600/72000 train_loss:2.0862 train_time:81756ms step_avg:51.10ms +step:1800/72000 train_loss:2.1874 train_time:92021ms step_avg:51.12ms +step:2000/72000 train_loss:2.2255 train_time:102281ms step_avg:51.14ms +step:2000/72000 val_loss:2.2090 val_bpb:1.3083 train_time:102298ms step_avg:51.15ms +step:2200/72000 train_loss:2.0449 train_time:112525ms step_avg:51.15ms +step:2400/72000 train_loss:2.1713 train_time:122774ms step_avg:51.16ms +step:2600/72000 train_loss:2.3880 train_time:133037ms step_avg:51.17ms +step:2800/72000 train_loss:2.2069 train_time:143287ms step_avg:51.17ms +step:3000/72000 train_loss:2.1913 train_time:153532ms step_avg:51.18ms +step:3000/72000 val_loss:2.1627 val_bpb:1.2809 train_time:153550ms step_avg:51.18ms +step:3200/72000 train_loss:2.1602 train_time:163780ms step_avg:51.18ms +step:3400/72000 train_loss:2.1384 train_time:174027ms step_avg:51.18ms +step:3600/72000 train_loss:2.0791 train_time:184289ms step_avg:51.19ms +step:3800/72000 train_loss:2.1869 train_time:194523ms step_avg:51.19ms +step:4000/72000 train_loss:2.1527 train_time:204746ms step_avg:51.19ms +step:4000/72000 val_loss:2.1430 val_bpb:1.2692 train_time:204763ms step_avg:51.19ms +step:4200/72000 train_loss:2.1415 train_time:215522ms step_avg:51.31ms +step:4400/72000 train_loss:2.0893 train_time:225760ms step_avg:51.31ms +step:4600/72000 train_loss:1.9514 train_time:236005ms step_avg:51.31ms +step:4800/72000 train_loss:2.2349 train_time:246233ms step_avg:51.30ms +step:5000/72000 train_loss:1.9998 train_time:256476ms step_avg:51.30ms +step:5000/72000 val_loss:2.1322 val_bpb:1.2628 train_time:256492ms step_avg:51.30ms +step:5200/72000 train_loss:2.1570 train_time:266714ms step_avg:51.29ms +step:5400/72000 train_loss:2.1701 train_time:276943ms step_avg:51.29ms +step:5600/72000 train_loss:2.1635 train_time:287188ms step_avg:51.28ms +step:5800/72000 train_loss:2.1194 train_time:297438ms step_avg:51.28ms +step:6000/72000 train_loss:2.2011 train_time:307665ms step_avg:51.28ms +step:6000/72000 val_loss:2.1256 val_bpb:1.2589 train_time:307683ms step_avg:51.28ms +step:6200/72000 train_loss:2.0717 train_time:317901ms step_avg:51.27ms +step:6400/72000 train_loss:2.1548 train_time:328126ms step_avg:51.27ms +step:6600/72000 train_loss:2.1086 train_time:338451ms step_avg:51.28ms +step:6800/72000 train_loss:2.1713 train_time:348681ms step_avg:51.28ms +step:7000/72000 train_loss:2.2172 train_time:358907ms step_avg:51.27ms +step:7000/72000 val_loss:2.1195 val_bpb:1.2553 train_time:358924ms step_avg:51.27ms +step:7200/72000 train_loss:2.1921 train_time:369131ms step_avg:51.27ms +step:7400/72000 
train_loss:2.1103 train_time:379368ms step_avg:51.27ms +step:7600/72000 train_loss:1.9878 train_time:389594ms step_avg:51.26ms +step:7800/72000 train_loss:2.1365 train_time:399819ms step_avg:51.26ms +step:8000/72000 train_loss:2.1085 train_time:410054ms step_avg:51.26ms +step:8000/72000 val_loss:2.1102 val_bpb:1.2498 train_time:410071ms step_avg:51.26ms +step:8200/72000 train_loss:2.1726 train_time:420292ms step_avg:51.26ms +step:8400/72000 train_loss:2.1214 train_time:431054ms step_avg:51.32ms +step:8600/72000 train_loss:2.1341 train_time:441276ms step_avg:51.31ms +step:8800/72000 train_loss:2.0985 train_time:451506ms step_avg:51.31ms +step:9000/72000 train_loss:2.0138 train_time:461729ms step_avg:51.30ms +step:9000/72000 val_loss:2.1095 val_bpb:1.2494 train_time:461746ms step_avg:51.31ms +step:9200/72000 train_loss:2.0818 train_time:471967ms step_avg:51.30ms +step:9400/72000 train_loss:2.1340 train_time:482141ms step_avg:51.29ms +step:9600/72000 train_loss:2.1463 train_time:492365ms step_avg:51.29ms +step:9800/72000 train_loss:2.0530 train_time:502592ms step_avg:51.28ms +step:10000/72000 train_loss:2.1075 train_time:512827ms step_avg:51.28ms +step:10000/72000 val_loss:2.1063 val_bpb:1.2475 train_time:512843ms step_avg:51.28ms +step:10200/72000 train_loss:2.0670 train_time:523072ms step_avg:51.28ms +step:10400/72000 train_loss:2.0950 train_time:533311ms step_avg:51.28ms +step:10600/72000 train_loss:1.9655 train_time:543554ms step_avg:51.28ms +step:10800/72000 train_loss:2.1834 train_time:553797ms step_avg:51.28ms +step:11000/72000 train_loss:2.1054 train_time:564036ms step_avg:51.28ms +step:11000/72000 val_loss:2.1018 val_bpb:1.2448 train_time:564053ms step_avg:51.28ms +step:11200/72000 train_loss:2.0747 train_time:574263ms step_avg:51.27ms +step:11400/72000 train_loss:2.0552 train_time:584413ms step_avg:51.26ms +step:11600/72000 train_loss:2.0540 train_time:594667ms step_avg:51.26ms +step:11800/72000 train_loss:2.0955 train_time:604894ms step_avg:51.26ms +step:12000/72000 train_loss:2.0585 train_time:615136ms step_avg:51.26ms +step:12000/72000 val_loss:2.0974 val_bpb:1.2422 train_time:615151ms step_avg:51.26ms +step:12200/72000 train_loss:2.1938 train_time:625381ms step_avg:51.26ms +step:12400/72000 train_loss:1.8476 train_time:636144ms step_avg:51.30ms +step:12600/72000 train_loss:2.0922 train_time:646384ms step_avg:51.30ms +step:12800/72000 train_loss:2.1207 train_time:656621ms step_avg:51.30ms +step:13000/72000 train_loss:2.1774 train_time:666850ms step_avg:51.30ms +step:13000/72000 val_loss:2.0999 val_bpb:1.2437 train_time:666865ms step_avg:51.30ms +step:13200/72000 train_loss:2.2001 train_time:676997ms step_avg:51.29ms +step:13400/72000 train_loss:2.0841 train_time:687219ms step_avg:51.29ms +step:13600/72000 train_loss:1.9367 train_time:697445ms step_avg:51.28ms +step:13800/72000 train_loss:2.0331 train_time:707673ms step_avg:51.28ms +step:14000/72000 train_loss:2.0915 train_time:717904ms step_avg:51.28ms +step:14000/72000 val_loss:2.0939 val_bpb:1.2401 train_time:717921ms step_avg:51.28ms +step:14200/72000 train_loss:2.1792 train_time:728133ms step_avg:51.28ms +step:14400/72000 train_loss:2.0859 train_time:738375ms step_avg:51.28ms +step:14600/72000 train_loss:2.1298 train_time:748598ms step_avg:51.27ms +step:14800/72000 train_loss:1.9234 train_time:758835ms step_avg:51.27ms +step:15000/72000 train_loss:2.0392 train_time:769069ms step_avg:51.27ms +step:15000/72000 val_loss:2.0893 val_bpb:1.2374 train_time:769086ms step_avg:51.27ms +step:15200/72000 train_loss:2.1473 
train_time:779260ms step_avg:51.27ms +step:15400/72000 train_loss:2.0392 train_time:789486ms step_avg:51.27ms +step:15600/72000 train_loss:2.0731 train_time:799723ms step_avg:51.26ms +step:15800/72000 train_loss:1.9102 train_time:809962ms step_avg:51.26ms +step:16000/72000 train_loss:2.1377 train_time:820195ms step_avg:51.26ms +step:16000/72000 val_loss:2.0887 val_bpb:1.2370 train_time:820212ms step_avg:51.26ms +step:16200/72000 train_loss:2.0053 train_time:830425ms step_avg:51.26ms +step:16400/72000 train_loss:2.0333 train_time:840654ms step_avg:51.26ms +step:16600/72000 train_loss:1.9734 train_time:851415ms step_avg:51.29ms +step:16800/72000 train_loss:2.1963 train_time:861628ms step_avg:51.29ms +step:17000/72000 train_loss:2.1149 train_time:871892ms step_avg:51.29ms +step:17000/72000 val_loss:2.0879 val_bpb:1.2366 train_time:871908ms step_avg:51.29ms +step:17200/72000 train_loss:2.1003 train_time:882119ms step_avg:51.29ms +step:17400/72000 train_loss:2.0006 train_time:892350ms step_avg:51.28ms +step:17600/72000 train_loss:2.0810 train_time:902583ms step_avg:51.28ms +step:17800/72000 train_loss:2.1592 train_time:912866ms step_avg:51.28ms +step:18000/72000 train_loss:2.0908 train_time:923159ms step_avg:51.29ms +step:18000/72000 val_loss:2.0899 val_bpb:1.2377 train_time:923176ms step_avg:51.29ms +step:18200/72000 train_loss:2.3062 train_time:933393ms step_avg:51.29ms +step:18400/72000 train_loss:2.0642 train_time:943634ms step_avg:51.28ms +step:18600/72000 train_loss:2.0982 train_time:953855ms step_avg:51.28ms +step:18800/72000 train_loss:2.1622 train_time:964069ms step_avg:51.28ms +step:19000/72000 train_loss:2.0871 train_time:974296ms step_avg:51.28ms +step:19000/72000 val_loss:2.0833 val_bpb:1.2338 train_time:974314ms step_avg:51.28ms +step:19200/72000 train_loss:1.9413 train_time:984531ms step_avg:51.28ms +step:19400/72000 train_loss:2.1603 train_time:994779ms step_avg:51.28ms +step:19600/72000 train_loss:2.2274 train_time:1005010ms step_avg:51.28ms +step:19800/72000 train_loss:1.9208 train_time:1015236ms step_avg:51.27ms +step:20000/72000 train_loss:2.1222 train_time:1025456ms step_avg:51.27ms +step:20000/72000 val_loss:2.0828 val_bpb:1.2336 train_time:1025473ms step_avg:51.27ms +step:20200/72000 train_loss:2.1113 train_time:1035691ms step_avg:51.27ms +step:20400/72000 train_loss:2.0980 train_time:1045918ms step_avg:51.27ms +step:20600/72000 train_loss:2.1160 train_time:1056697ms step_avg:51.30ms +step:20800/72000 train_loss:2.0888 train_time:1066926ms step_avg:51.29ms +step:21000/72000 train_loss:2.1749 train_time:1077151ms step_avg:51.29ms +step:21000/72000 val_loss:2.0821 val_bpb:1.2331 train_time:1077169ms step_avg:51.29ms +step:21200/72000 train_loss:1.9974 train_time:1087398ms step_avg:51.29ms +step:21400/72000 train_loss:2.0053 train_time:1097626ms step_avg:51.29ms +step:21600/72000 train_loss:2.0929 train_time:1107864ms step_avg:51.29ms +step:21800/72000 train_loss:2.0483 train_time:1118090ms step_avg:51.29ms +step:22000/72000 train_loss:2.0644 train_time:1128327ms step_avg:51.29ms +step:22000/72000 val_loss:2.0816 val_bpb:1.2329 train_time:1128344ms step_avg:51.29ms +step:22200/72000 train_loss:2.0815 train_time:1138555ms step_avg:51.29ms +step:22400/72000 train_loss:2.1257 train_time:1148791ms step_avg:51.29ms +step:22600/72000 train_loss:2.0398 train_time:1159016ms step_avg:51.28ms +step:22800/72000 train_loss:1.9927 train_time:1169240ms step_avg:51.28ms +step:23000/72000 train_loss:2.1127 train_time:1179463ms step_avg:51.28ms +step:23000/72000 val_loss:2.0774 
val_bpb:1.2304 train_time:1179479ms step_avg:51.28ms +step:23200/72000 train_loss:2.1274 train_time:1189679ms step_avg:51.28ms +step:23400/72000 train_loss:1.9742 train_time:1199901ms step_avg:51.28ms +step:23600/72000 train_loss:2.0889 train_time:1210130ms step_avg:51.28ms +step:23800/72000 train_loss:1.9700 train_time:1220369ms step_avg:51.28ms +step:24000/72000 train_loss:2.0669 train_time:1230597ms step_avg:51.27ms +step:24000/72000 val_loss:2.0802 val_bpb:1.2320 train_time:1230614ms step_avg:51.28ms +step:24200/72000 train_loss:1.8765 train_time:1240832ms step_avg:51.27ms +step:24400/72000 train_loss:2.0771 train_time:1251061ms step_avg:51.27ms +step:24600/72000 train_loss:2.0164 train_time:1261284ms step_avg:51.27ms +step:24800/72000 train_loss:1.9685 train_time:1272075ms step_avg:51.29ms +step:25000/72000 train_loss:2.1660 train_time:1282295ms step_avg:51.29ms +step:25000/72000 val_loss:2.0763 val_bpb:1.2297 train_time:1282312ms step_avg:51.29ms +step:25200/72000 train_loss:2.0621 train_time:1292527ms step_avg:51.29ms +step:25400/72000 train_loss:2.0346 train_time:1302757ms step_avg:51.29ms +step:25600/72000 train_loss:2.1133 train_time:1312989ms step_avg:51.29ms +step:25800/72000 train_loss:2.0660 train_time:1323217ms step_avg:51.29ms +step:26000/72000 train_loss:1.9650 train_time:1333361ms step_avg:51.28ms +step:26000/72000 val_loss:2.0796 val_bpb:1.2317 train_time:1333377ms step_avg:51.28ms +step:26200/72000 train_loss:2.0725 train_time:1343578ms step_avg:51.28ms +step:26400/72000 train_loss:2.0474 train_time:1353805ms step_avg:51.28ms +step:26600/72000 train_loss:2.1506 train_time:1364038ms step_avg:51.28ms +step:26800/72000 train_loss:2.0340 train_time:1374264ms step_avg:51.28ms +step:27000/72000 train_loss:2.2535 train_time:1384500ms step_avg:51.28ms +step:27000/72000 val_loss:2.0776 val_bpb:1.2305 train_time:1384516ms step_avg:51.28ms +step:27200/72000 train_loss:2.0125 train_time:1394740ms step_avg:51.28ms +step:27400/72000 train_loss:2.1895 train_time:1404961ms step_avg:51.28ms +step:27600/72000 train_loss:2.0242 train_time:1415209ms step_avg:51.28ms +step:27800/72000 train_loss:2.0743 train_time:1425449ms step_avg:51.28ms +step:28000/72000 train_loss:2.0931 train_time:1435618ms step_avg:51.27ms +step:28000/72000 val_loss:2.0775 val_bpb:1.2304 train_time:1435636ms step_avg:51.27ms +step:28200/72000 train_loss:2.0783 train_time:1445840ms step_avg:51.27ms +step:28400/72000 train_loss:2.1770 train_time:1456079ms step_avg:51.27ms +step:28600/72000 train_loss:2.0408 train_time:1466293ms step_avg:51.27ms +step:28800/72000 train_loss:2.2056 train_time:1476525ms step_avg:51.27ms +step:29000/72000 train_loss:2.1820 train_time:1487287ms step_avg:51.29ms +step:29000/72000 val_loss:2.0740 val_bpb:1.2284 train_time:1487304ms step_avg:51.29ms +step:29200/72000 train_loss:1.7752 train_time:1497610ms step_avg:51.29ms +step:29400/72000 train_loss:2.1523 train_time:1507842ms step_avg:51.29ms +step:29600/72000 train_loss:2.2317 train_time:1518073ms step_avg:51.29ms +step:29800/72000 train_loss:1.9495 train_time:1528297ms step_avg:51.29ms +step:30000/72000 train_loss:2.0438 train_time:1538525ms step_avg:51.28ms +step:30000/72000 val_loss:2.0737 val_bpb:1.2281 train_time:1538542ms step_avg:51.28ms +step:30200/72000 train_loss:2.0186 train_time:1548750ms step_avg:51.28ms +step:30400/72000 train_loss:2.1727 train_time:1558979ms step_avg:51.28ms +step:30600/72000 train_loss:2.1148 train_time:1569209ms step_avg:51.28ms +step:30800/72000 train_loss:2.0529 train_time:1579450ms step_avg:51.28ms 
+step:31000/72000 train_loss:1.8855 train_time:1589689ms step_avg:51.28ms +step:31000/72000 val_loss:2.0751 val_bpb:1.2290 train_time:1589706ms step_avg:51.28ms +step:31200/72000 train_loss:2.0988 train_time:1599925ms step_avg:51.28ms +step:31400/72000 train_loss:2.1187 train_time:1610150ms step_avg:51.28ms +step:31600/72000 train_loss:1.9860 train_time:1620388ms step_avg:51.28ms +step:31800/72000 train_loss:1.9365 train_time:1630629ms step_avg:51.28ms +step:32000/72000 train_loss:2.0308 train_time:1640861ms step_avg:51.28ms +step:32000/72000 val_loss:2.0708 val_bpb:1.2264 train_time:1640878ms step_avg:51.28ms +step:32200/72000 train_loss:2.1771 train_time:1651086ms step_avg:51.28ms +step:32400/72000 train_loss:1.9261 train_time:1661314ms step_avg:51.28ms +step:32600/72000 train_loss:2.1868 train_time:1671547ms step_avg:51.27ms +step:32800/72000 train_loss:1.9773 train_time:1681785ms step_avg:51.27ms +step:33000/72000 train_loss:2.0616 train_time:1692578ms step_avg:51.29ms +step:33000/72000 val_loss:2.0709 val_bpb:1.2265 train_time:1692595ms step_avg:51.29ms +step:33200/72000 train_loss:2.0523 train_time:1702816ms step_avg:51.29ms +step:33400/72000 train_loss:2.1157 train_time:1713049ms step_avg:51.29ms +step:33600/72000 train_loss:2.0987 train_time:1723307ms step_avg:51.29ms +step:33800/72000 train_loss:2.0270 train_time:1733534ms step_avg:51.29ms +step:34000/72000 train_loss:1.9683 train_time:1743763ms step_avg:51.29ms +step:34000/72000 val_loss:2.0751 val_bpb:1.2290 train_time:1743780ms step_avg:51.29ms +step:34200/72000 train_loss:2.1309 train_time:1753991ms step_avg:51.29ms +step:34400/72000 train_loss:2.0566 train_time:1764220ms step_avg:51.29ms +step:34600/72000 train_loss:2.0684 train_time:1774448ms step_avg:51.28ms +step:34800/72000 train_loss:2.0117 train_time:1784681ms step_avg:51.28ms +step:35000/72000 train_loss:2.1387 train_time:1794923ms step_avg:51.28ms +step:35000/72000 val_loss:2.0725 val_bpb:1.2274 train_time:1794940ms step_avg:51.28ms +step:35200/72000 train_loss:2.0703 train_time:1805168ms step_avg:51.28ms +step:35400/72000 train_loss:2.0225 train_time:1815405ms step_avg:51.28ms +step:35600/72000 train_loss:2.2220 train_time:1825676ms step_avg:51.28ms +step:35800/72000 train_loss:2.0356 train_time:1835907ms step_avg:51.28ms +step:36000/72000 train_loss:1.9178 train_time:1846143ms step_avg:51.28ms +step:36000/72000 val_loss:2.0762 val_bpb:1.2296 train_time:1846160ms step_avg:51.28ms +step:36200/72000 train_loss:2.0563 train_time:1856374ms step_avg:51.28ms +step:36400/72000 train_loss:2.0205 train_time:1866612ms step_avg:51.28ms +step:36600/72000 train_loss:2.0367 train_time:1876834ms step_avg:51.28ms +step:36800/72000 train_loss:2.1934 train_time:1887077ms step_avg:51.28ms +step:37000/72000 train_loss:2.1111 train_time:1897305ms step_avg:51.28ms +step:37000/72000 val_loss:2.0691 val_bpb:1.2254 train_time:1897322ms step_avg:51.28ms +step:37200/72000 train_loss:2.0544 train_time:1908093ms step_avg:51.29ms +step:37400/72000 train_loss:1.9746 train_time:1918357ms step_avg:51.29ms +step:37600/72000 train_loss:2.1749 train_time:1928596ms step_avg:51.29ms +step:37800/72000 train_loss:2.0712 train_time:1938831ms step_avg:51.29ms +step:38000/72000 train_loss:1.9475 train_time:1949067ms step_avg:51.29ms +step:38000/72000 val_loss:2.0717 val_bpb:1.2270 train_time:1949085ms step_avg:51.29ms +step:38200/72000 train_loss:2.1202 train_time:1959309ms step_avg:51.29ms +step:38400/72000 train_loss:2.0193 train_time:1969533ms step_avg:51.29ms +step:38600/72000 train_loss:2.1295 
train_time:1979744ms step_avg:51.29ms +step:38800/72000 train_loss:2.0360 train_time:1989979ms step_avg:51.29ms +step:39000/72000 train_loss:2.2114 train_time:2000204ms step_avg:51.29ms +step:39000/72000 val_loss:2.0708 val_bpb:1.2264 train_time:2000235ms step_avg:51.29ms +step:39200/72000 train_loss:2.0337 train_time:2010428ms step_avg:51.29ms +step:39400/72000 train_loss:2.0167 train_time:2020668ms step_avg:51.29ms +step:39600/72000 train_loss:1.9631 train_time:2030910ms step_avg:51.29ms +step:39800/72000 train_loss:2.0608 train_time:2041132ms step_avg:51.28ms +step:40000/72000 train_loss:2.2996 train_time:2051359ms step_avg:51.28ms +step:40000/72000 val_loss:2.0686 val_bpb:1.2252 train_time:2051390ms step_avg:51.28ms +step:40200/72000 train_loss:2.1465 train_time:2061590ms step_avg:51.28ms +step:40400/72000 train_loss:2.0460 train_time:2071813ms step_avg:51.28ms +step:40600/72000 train_loss:2.0543 train_time:2082140ms step_avg:51.28ms +step:40800/72000 train_loss:2.1093 train_time:2092369ms step_avg:51.28ms +step:41000/72000 train_loss:2.3494 train_time:2102605ms step_avg:51.28ms +step:41000/72000 val_loss:2.0697 val_bpb:1.2258 train_time:2102622ms step_avg:51.28ms +step:41200/72000 train_loss:1.9795 train_time:2113400ms step_avg:51.30ms +step:41400/72000 train_loss:2.1849 train_time:2123615ms step_avg:51.30ms +step:41600/72000 train_loss:2.1090 train_time:2133838ms step_avg:51.29ms +step:41800/72000 train_loss:2.1509 train_time:2144060ms step_avg:51.29ms +step:42000/72000 train_loss:1.9434 train_time:2154295ms step_avg:51.29ms +step:42000/72000 val_loss:2.0683 val_bpb:1.2250 train_time:2154313ms step_avg:51.29ms +step:42200/72000 train_loss:2.0407 train_time:2164526ms step_avg:51.29ms +step:42400/72000 train_loss:1.9709 train_time:2174755ms step_avg:51.29ms +step:42600/72000 train_loss:2.0850 train_time:2184991ms step_avg:51.29ms +step:42800/72000 train_loss:2.1334 train_time:2195227ms step_avg:51.29ms +step:43000/72000 train_loss:2.1111 train_time:2205461ms step_avg:51.29ms +step:43000/72000 val_loss:2.0681 val_bpb:1.2248 train_time:2205478ms step_avg:51.29ms +step:43200/72000 train_loss:2.1120 train_time:2215688ms step_avg:51.29ms +step:43400/72000 train_loss:2.0785 train_time:2225928ms step_avg:51.29ms +step:43600/72000 train_loss:2.0955 train_time:2236163ms step_avg:51.29ms +step:43800/72000 train_loss:2.0502 train_time:2246404ms step_avg:51.29ms +step:44000/72000 train_loss:2.0457 train_time:2256624ms step_avg:51.29ms +step:44000/72000 val_loss:2.0685 val_bpb:1.2251 train_time:2256642ms step_avg:51.29ms +step:44200/72000 train_loss:2.1561 train_time:2266869ms step_avg:51.29ms +step:44400/72000 train_loss:2.0469 train_time:2277107ms step_avg:51.29ms +step:44600/72000 train_loss:2.1175 train_time:2287340ms step_avg:51.29ms +step:44800/72000 train_loss:2.1012 train_time:2297584ms step_avg:51.29ms +step:45000/72000 train_loss:2.0584 train_time:2307807ms step_avg:51.28ms +step:45000/72000 val_loss:2.0659 val_bpb:1.2235 train_time:2307824ms step_avg:51.28ms +step:45200/72000 train_loss:2.0828 train_time:2318028ms step_avg:51.28ms +step:45400/72000 train_loss:2.0867 train_time:2328787ms step_avg:51.29ms +step:45600/72000 train_loss:2.1150 train_time:2339013ms step_avg:51.29ms +step:45800/72000 train_loss:2.0445 train_time:2349249ms step_avg:51.29ms +step:46000/72000 train_loss:2.1620 train_time:2359477ms step_avg:51.29ms +step:46000/72000 val_loss:2.0660 val_bpb:1.2236 train_time:2359493ms step_avg:51.29ms +step:46200/72000 train_loss:2.0913 train_time:2369702ms step_avg:51.29ms 
+step:46400/72000 train_loss:1.8694 train_time:2379932ms step_avg:51.29ms +step:46600/72000 train_loss:2.0238 train_time:2390179ms step_avg:51.29ms +step:46800/72000 train_loss:2.0452 train_time:2400420ms step_avg:51.29ms +step:47000/72000 train_loss:2.0776 train_time:2410645ms step_avg:51.29ms +step:47000/72000 val_loss:2.0662 val_bpb:1.2237 train_time:2410662ms step_avg:51.29ms +step:47200/72000 train_loss:2.0618 train_time:2420878ms step_avg:51.29ms +step:47400/72000 train_loss:2.0669 train_time:2431113ms step_avg:51.29ms +step:47600/72000 train_loss:2.0720 train_time:2441360ms step_avg:51.29ms +step:47800/72000 train_loss:1.9890 train_time:2451596ms step_avg:51.29ms +step:48000/72000 train_loss:2.1159 train_time:2461818ms step_avg:51.29ms +step:48000/72000 val_loss:2.0660 val_bpb:1.2236 train_time:2461836ms step_avg:51.29ms +step:48200/72000 train_loss:2.0098 train_time:2472046ms step_avg:51.29ms +step:48400/72000 train_loss:2.1096 train_time:2482279ms step_avg:51.29ms +step:48600/72000 train_loss:1.9855 train_time:2492504ms step_avg:51.29ms +step:48800/72000 train_loss:2.0885 train_time:2502758ms step_avg:51.29ms +step:49000/72000 train_loss:2.1198 train_time:2512994ms step_avg:51.29ms +step:49000/72000 val_loss:2.0665 val_bpb:1.2239 train_time:2513012ms step_avg:51.29ms +step:49200/72000 train_loss:1.9449 train_time:2523241ms step_avg:51.29ms +step:49400/72000 train_loss:2.1030 train_time:2534009ms step_avg:51.30ms +step:49600/72000 train_loss:2.0114 train_time:2544234ms step_avg:51.30ms +step:49800/72000 train_loss:2.0528 train_time:2554467ms step_avg:51.29ms +step:50000/72000 train_loss:2.1773 train_time:2564696ms step_avg:51.29ms +step:50000/72000 val_loss:2.0645 val_bpb:1.2227 train_time:2564714ms step_avg:51.29ms +step:50200/72000 train_loss:2.1177 train_time:2574917ms step_avg:51.29ms +step:50400/72000 train_loss:2.0521 train_time:2585149ms step_avg:51.29ms +step:50600/72000 train_loss:1.9720 train_time:2595381ms step_avg:51.29ms +step:50800/72000 train_loss:1.9981 train_time:2605633ms step_avg:51.29ms +step:51000/72000 train_loss:2.0117 train_time:2615858ms step_avg:51.29ms +step:51000/72000 val_loss:2.0656 val_bpb:1.2234 train_time:2615875ms step_avg:51.29ms +step:51200/72000 train_loss:2.0681 train_time:2626089ms step_avg:51.29ms +step:51400/72000 train_loss:1.8416 train_time:2636317ms step_avg:51.29ms +step:51600/72000 train_loss:2.0878 train_time:2646540ms step_avg:51.29ms +step:51800/72000 train_loss:2.0548 train_time:2656837ms step_avg:51.29ms +step:52000/72000 train_loss:2.1178 train_time:2667100ms step_avg:51.29ms +step:52000/72000 val_loss:2.0641 val_bpb:1.2225 train_time:2667117ms step_avg:51.29ms +step:52200/72000 train_loss:2.0802 train_time:2677302ms step_avg:51.29ms +step:52400/72000 train_loss:2.0924 train_time:2687525ms step_avg:51.29ms +step:52600/72000 train_loss:2.2835 train_time:2697760ms step_avg:51.29ms +step:52800/72000 train_loss:2.0712 train_time:2707987ms step_avg:51.29ms +step:53000/72000 train_loss:2.0008 train_time:2718211ms step_avg:51.29ms +step:53000/72000 val_loss:2.0633 val_bpb:1.2220 train_time:2718228ms step_avg:51.29ms +step:53200/72000 train_loss:2.1766 train_time:2728431ms step_avg:51.29ms +step:53400/72000 train_loss:2.1325 train_time:2738669ms step_avg:51.29ms +step:53600/72000 train_loss:1.9960 train_time:2749456ms step_avg:51.30ms +step:53800/72000 train_loss:1.9852 train_time:2759679ms step_avg:51.30ms +step:54000/72000 train_loss:2.0944 train_time:2769950ms step_avg:51.30ms +step:54000/72000 val_loss:2.0648 val_bpb:1.2229 
train_time:2769965ms step_avg:51.30ms +step:54200/72000 train_loss:2.1909 train_time:2780180ms step_avg:51.29ms +step:54400/72000 train_loss:2.0835 train_time:2790427ms step_avg:51.29ms +step:54600/72000 train_loss:2.0303 train_time:2800720ms step_avg:51.30ms +step:54800/72000 train_loss:2.0697 train_time:2810858ms step_avg:51.29ms +step:55000/72000 train_loss:1.9834 train_time:2820927ms step_avg:51.29ms +step:55000/72000 val_loss:2.0638 val_bpb:1.2223 train_time:2820943ms step_avg:51.29ms +step:55200/72000 train_loss:1.9774 train_time:2830944ms step_avg:51.29ms +step:55400/72000 train_loss:2.0194 train_time:2841174ms step_avg:51.28ms +step:55600/72000 train_loss:2.1661 train_time:2851384ms step_avg:51.28ms +step:55800/72000 train_loss:2.0408 train_time:2861599ms step_avg:51.28ms +step:56000/72000 train_loss:2.1326 train_time:2871670ms step_avg:51.28ms +step:56000/72000 val_loss:2.0635 val_bpb:1.2221 train_time:2871687ms step_avg:51.28ms +step:56200/72000 train_loss:2.0486 train_time:2881894ms step_avg:51.28ms +step:56400/72000 train_loss:1.9551 train_time:2892071ms step_avg:51.28ms +step:56600/72000 train_loss:2.0817 train_time:2902304ms step_avg:51.28ms +step:56800/72000 train_loss:2.0047 train_time:2912476ms step_avg:51.28ms +step:57000/72000 train_loss:2.1542 train_time:2922692ms step_avg:51.28ms +step:57000/72000 val_loss:2.0646 val_bpb:1.2228 train_time:2922708ms step_avg:51.28ms +step:57200/72000 train_loss:2.0547 train_time:2932853ms step_avg:51.27ms +step:57400/72000 train_loss:2.0055 train_time:2943078ms step_avg:51.27ms +step:57600/72000 train_loss:2.0707 train_time:2953244ms step_avg:51.27ms +step:57800/72000 train_loss:1.9558 train_time:2963976ms step_avg:51.28ms +step:58000/72000 train_loss:2.2197 train_time:2974192ms step_avg:51.28ms +step:58000/72000 val_loss:2.0624 val_bpb:1.2214 train_time:2974211ms step_avg:51.28ms +step:58200/72000 train_loss:2.0192 train_time:2984375ms step_avg:51.28ms +step:58400/72000 train_loss:2.2202 train_time:2994598ms step_avg:51.28ms +step:58600/72000 train_loss:2.1401 train_time:3004780ms step_avg:51.28ms +step:58800/72000 train_loss:2.0051 train_time:3014992ms step_avg:51.28ms +step:59000/72000 train_loss:2.0624 train_time:3025161ms step_avg:51.27ms +step:59000/72000 val_loss:2.0609 val_bpb:1.2206 train_time:3025178ms step_avg:51.27ms +step:59200/72000 train_loss:2.1312 train_time:3035375ms step_avg:51.27ms +step:59400/72000 train_loss:1.9950 train_time:3045552ms step_avg:51.27ms +step:59600/72000 train_loss:2.0621 train_time:3055765ms step_avg:51.27ms +QAT enabled: int6 at step 59687 (fraction=0.85) +step:59800/72000 train_loss:2.0130 train_time:3095300ms step_avg:51.76ms +step:60000/72000 train_loss:1.9704 train_time:3105507ms step_avg:51.76ms +step:60000/72000 val_loss:2.0657 val_bpb:1.2234 train_time:3105524ms step_avg:51.76ms +step:60200/72000 train_loss:2.0932 train_time:3115721ms step_avg:51.76ms +step:60400/72000 train_loss:2.2138 train_time:3125953ms step_avg:51.75ms +step:60600/72000 train_loss:2.0538 train_time:3136172ms step_avg:51.75ms +step:60800/72000 train_loss:1.9733 train_time:3146412ms step_avg:51.75ms +step:61000/72000 train_loss:2.0384 train_time:3156648ms step_avg:51.75ms +step:61000/72000 val_loss:2.0519 val_bpb:1.2153 train_time:3156665ms step_avg:51.75ms +step:61200/72000 train_loss:2.0811 train_time:3166895ms step_avg:51.75ms +step:61400/72000 train_loss:1.6426 train_time:3177132ms step_avg:51.74ms +step:61600/72000 train_loss:1.9674 train_time:3187373ms step_avg:51.74ms +step:61800/72000 train_loss:1.9971 
train_time:3198150ms step_avg:51.75ms +step:62000/72000 train_loss:2.0852 train_time:3208331ms step_avg:51.75ms +step:62000/72000 val_loss:2.0462 val_bpb:1.2119 train_time:3208347ms step_avg:51.75ms +step:62200/72000 train_loss:2.0235 train_time:3218561ms step_avg:51.75ms +step:62400/72000 train_loss:2.0406 train_time:3228797ms step_avg:51.74ms +step:62600/72000 train_loss:1.9868 train_time:3239101ms step_avg:51.74ms +step:62800/72000 train_loss:2.0986 train_time:3249367ms step_avg:51.74ms +step:63000/72000 train_loss:2.0683 train_time:3259592ms step_avg:51.74ms +step:63000/72000 val_loss:2.0313 val_bpb:1.2031 train_time:3259609ms step_avg:51.74ms +step:63200/72000 train_loss:2.0388 train_time:3269832ms step_avg:51.74ms +step:63400/72000 train_loss:2.0397 train_time:3280090ms step_avg:51.74ms +step:63600/72000 train_loss:2.2187 train_time:3290329ms step_avg:51.73ms +step:63800/72000 train_loss:2.0583 train_time:3300551ms step_avg:51.73ms +step:64000/72000 train_loss:1.9764 train_time:3310781ms step_avg:51.73ms +step:64000/72000 val_loss:2.0184 val_bpb:1.1954 train_time:3310798ms step_avg:51.73ms +step:64200/72000 train_loss:2.0308 train_time:3321027ms step_avg:51.73ms +step:64400/72000 train_loss:2.1771 train_time:3331257ms step_avg:51.73ms +step:64600/72000 train_loss:1.7909 train_time:3341479ms step_avg:51.73ms +step:64800/72000 train_loss:2.0397 train_time:3351705ms step_avg:51.72ms +step:65000/72000 train_loss:2.0475 train_time:3361934ms step_avg:51.72ms +step:65000/72000 val_loss:2.0057 val_bpb:1.1879 train_time:3361951ms step_avg:51.72ms +step:65200/72000 train_loss:2.0607 train_time:3372166ms step_avg:51.72ms +step:65400/72000 train_loss:1.8401 train_time:3382368ms step_avg:51.72ms +step:65600/72000 train_loss:1.9693 train_time:3392591ms step_avg:51.72ms +step:65800/72000 train_loss:2.0459 train_time:3402811ms step_avg:51.71ms +step:66000/72000 train_loss:1.7444 train_time:3413558ms step_avg:51.72ms +step:66000/72000 val_loss:1.9930 val_bpb:1.1804 train_time:3413574ms step_avg:51.72ms +step:66200/72000 train_loss:2.1094 train_time:3423781ms step_avg:51.72ms +step:66400/72000 train_loss:3.1983 train_time:3434013ms step_avg:51.72ms +step:66600/72000 train_loss:1.8124 train_time:3444237ms step_avg:51.72ms +step:66800/72000 train_loss:2.0178 train_time:3454455ms step_avg:51.71ms +step:67000/72000 train_loss:1.9531 train_time:3464669ms step_avg:51.71ms +step:67000/72000 val_loss:1.9731 val_bpb:1.1686 train_time:3464687ms step_avg:51.71ms +step:67200/72000 train_loss:2.1858 train_time:3474836ms step_avg:51.71ms +step:67400/72000 train_loss:1.9383 train_time:3485084ms step_avg:51.71ms +step:67600/72000 train_loss:1.9598 train_time:3495301ms step_avg:51.71ms +SWA started at step 67650 (lr_scale=0.1978) +step:67800/72000 train_loss:1.8535 train_time:3505810ms step_avg:51.71ms +step:68000/72000 train_loss:2.0452 train_time:3516289ms step_avg:51.71ms +step:68000/72000 val_loss:1.9504 val_bpb:1.1552 train_time:3516306ms step_avg:51.71ms +step:68200/72000 train_loss:1.9447 train_time:3526766ms step_avg:51.71ms +step:68400/72000 train_loss:1.9049 train_time:3537246ms step_avg:51.71ms +step:68600/72000 train_loss:1.7141 train_time:3547747ms step_avg:51.72ms +step:68800/72000 train_loss:1.8434 train_time:3558216ms step_avg:51.72ms +step:69000/72000 train_loss:1.9028 train_time:3568677ms step_avg:51.72ms +step:69000/72000 val_loss:1.9224 val_bpb:1.1386 train_time:3568693ms step_avg:51.72ms +step:69200/72000 train_loss:1.9864 train_time:3579096ms step_avg:51.72ms +step:69400/72000 train_loss:1.9165 
train_time:3589559ms step_avg:51.72ms +step:69599/72000 val_loss:1.9092 val_bpb:1.1308 train_time:3600015ms step_avg:51.73ms +stopping_early: wallclock_cap train_time:3600015ms step:69599/72000 +peak memory: 12578 MiB +Using EMA weights (decay=0.997) +Raw model: 91514419 bytes, code: 75347 bytes +mixed_int6_int8+lzma-6: 15571256 bytes, total: 15646603 bytes +FITS: 15646603 <= 16,000,000 +final_mixed_lzma-6_roundtrip val_loss:1.9142 val_bpb:1.1337 +final_mixed_lzma-6_roundtrip_exact val_loss:1.91416881 val_bpb:1.13367859 +Using EMA weights (decay=0.997) +legal_ttt:start chunks=1893 chunk_tokens=32768 total_windows=969088 stride=64 lr=0.002 epochs=3 freeze_blocks=0 +legal_ttt:params unfrozen=23209295 frozen=0 + legal_ttt_chunk [1/1893] bpb=1.139360 time=0.5s + legal_ttt_chunk [11/1893] bpb=1.131764 time=3.2s + legal_ttt_chunk [21/1893] bpb=1.117343 time=5.9s + legal_ttt_chunk [31/1893] bpb=1.117041 time=8.5s + legal_ttt_chunk [41/1893] bpb=1.104332 time=11.2s + legal_ttt_chunk [51/1893] bpb=1.098363 time=13.8s + legal_ttt_chunk [61/1893] bpb=1.104675 time=16.5s + legal_ttt_chunk [71/1893] bpb=1.103676 time=19.1s + legal_ttt_chunk [81/1893] bpb=1.103165 time=21.7s + legal_ttt_chunk [91/1893] bpb=1.103843 time=24.4s + legal_ttt_chunk [101/1893] bpb=1.107296 time=27.1s + legal_ttt_chunk [111/1893] bpb=1.109467 time=29.7s + legal_ttt_chunk [121/1893] bpb=1.103013 time=32.3s + legal_ttt_chunk [131/1893] bpb=1.103039 time=35.0s + legal_ttt_chunk [141/1893] bpb=1.108576 time=37.7s + legal_ttt_chunk [151/1893] bpb=1.110609 time=40.3s + legal_ttt_chunk [161/1893] bpb=1.110201 time=43.0s + legal_ttt_chunk [171/1893] bpb=1.114460 time=45.6s + legal_ttt_chunk [181/1893] bpb=1.116786 time=48.2s + legal_ttt_chunk [191/1893] bpb=1.124054 time=50.9s + legal_ttt_chunk [201/1893] bpb=1.122916 time=53.5s + legal_ttt_chunk [211/1893] bpb=1.120673 time=56.2s + legal_ttt_chunk [221/1893] bpb=1.122197 time=58.8s + legal_ttt_chunk [231/1893] bpb=1.120764 time=61.4s + legal_ttt_chunk [241/1893] bpb=1.121171 time=64.1s + legal_ttt_chunk [251/1893] bpb=1.120821 time=66.7s + legal_ttt_chunk [261/1893] bpb=1.117911 time=69.4s + legal_ttt_chunk [271/1893] bpb=1.116824 time=72.1s + legal_ttt_chunk [281/1893] bpb=1.118114 time=74.7s + legal_ttt_chunk [291/1893] bpb=1.119897 time=77.4s + legal_ttt_chunk [301/1893] bpb=1.120580 time=80.0s + legal_ttt_chunk [311/1893] bpb=1.122739 time=82.6s + legal_ttt_chunk [321/1893] bpb=1.124657 time=85.3s + legal_ttt_chunk [331/1893] bpb=1.124595 time=87.9s + legal_ttt_chunk [341/1893] bpb=1.123630 time=90.6s + legal_ttt_chunk [351/1893] bpb=1.125810 time=93.2s + legal_ttt_chunk [361/1893] bpb=1.126056 time=95.9s + legal_ttt_chunk [371/1893] bpb=1.125452 time=98.5s + legal_ttt_chunk [381/1893] bpb=1.125686 time=101.1s + legal_ttt_chunk [391/1893] bpb=1.125548 time=103.8s + legal_ttt_chunk [401/1893] bpb=1.123487 time=106.4s + legal_ttt_chunk [411/1893] bpb=1.122281 time=109.1s + legal_ttt_chunk [421/1893] bpb=1.121358 time=111.8s + legal_ttt_chunk [431/1893] bpb=1.121226 time=114.4s + legal_ttt_chunk [441/1893] bpb=1.121625 time=117.0s + legal_ttt_chunk [451/1893] bpb=1.121931 time=119.7s + legal_ttt_chunk [461/1893] bpb=1.120835 time=122.3s + legal_ttt_chunk [471/1893] bpb=1.121475 time=124.9s + legal_ttt_chunk [481/1893] bpb=1.121108 time=127.6s + legal_ttt_chunk [491/1893] bpb=1.120123 time=130.2s + legal_ttt_chunk [501/1893] bpb=1.119694 time=132.9s + legal_ttt_chunk [511/1893] bpb=1.119029 time=135.5s + legal_ttt_chunk [521/1893] bpb=1.116691 time=138.1s + legal_ttt_chunk [531/1893] 
bpb=1.117837 time=140.9s + legal_ttt_chunk [541/1893] bpb=1.118212 time=143.5s + legal_ttt_chunk [551/1893] bpb=1.117181 time=146.1s + legal_ttt_chunk [561/1893] bpb=1.117730 time=148.8s + legal_ttt_chunk [571/1893] bpb=1.116723 time=151.4s + legal_ttt_chunk [581/1893] bpb=1.115977 time=154.0s + legal_ttt_chunk [591/1893] bpb=1.115370 time=156.7s + legal_ttt_chunk [601/1893] bpb=1.115932 time=159.3s + legal_ttt_chunk [611/1893] bpb=1.115902 time=161.9s + legal_ttt_chunk [621/1893] bpb=1.115717 time=164.6s + legal_ttt_chunk [631/1893] bpb=1.116421 time=167.2s + legal_ttt_chunk [641/1893] bpb=1.116194 time=169.9s + legal_ttt_chunk [651/1893] bpb=1.116373 time=172.5s + legal_ttt_chunk [661/1893] bpb=1.115893 time=175.2s + legal_ttt_chunk [671/1893] bpb=1.116248 time=177.8s + legal_ttt_chunk [681/1893] bpb=1.116941 time=180.5s + legal_ttt_chunk [691/1893] bpb=1.118010 time=183.1s + legal_ttt_chunk [701/1893] bpb=1.117479 time=185.8s + legal_ttt_chunk [711/1893] bpb=1.117475 time=188.4s + legal_ttt_chunk [721/1893] bpb=1.117186 time=191.2s + legal_ttt_chunk [731/1893] bpb=1.117220 time=193.8s + legal_ttt_chunk [741/1893] bpb=1.117284 time=196.5s + legal_ttt_chunk [751/1893] bpb=1.117194 time=199.1s + legal_ttt_chunk [761/1893] bpb=1.117069 time=201.7s + legal_ttt_chunk [771/1893] bpb=1.116770 time=204.4s + legal_ttt_chunk [781/1893] bpb=1.117487 time=207.0s + legal_ttt_chunk [791/1893] bpb=1.117090 time=209.7s + legal_ttt_chunk [801/1893] bpb=1.117402 time=212.3s + legal_ttt_chunk [811/1893] bpb=1.117155 time=215.0s + legal_ttt_chunk [821/1893] bpb=1.116948 time=217.6s + legal_ttt_chunk [831/1893] bpb=1.116790 time=220.2s + legal_ttt_chunk [841/1893] bpb=1.116112 time=222.9s + legal_ttt_chunk [851/1893] bpb=1.115828 time=225.5s + legal_ttt_chunk [861/1893] bpb=1.115579 time=228.1s + legal_ttt_chunk [871/1893] bpb=1.115835 time=230.8s + legal_ttt_chunk [881/1893] bpb=1.115977 time=233.4s + legal_ttt_chunk [891/1893] bpb=1.115589 time=236.1s + legal_ttt_chunk [901/1893] bpb=1.115362 time=238.7s + legal_ttt_chunk [911/1893] bpb=1.115516 time=241.3s + legal_ttt_chunk [921/1893] bpb=1.116002 time=244.0s + legal_ttt_chunk [931/1893] bpb=1.116020 time=246.6s + legal_ttt_chunk [941/1893] bpb=1.115705 time=249.3s + legal_ttt_chunk [951/1893] bpb=1.116088 time=251.9s + legal_ttt_chunk [961/1893] bpb=1.116147 time=254.6s + legal_ttt_chunk [971/1893] bpb=1.117011 time=257.2s + legal_ttt_chunk [981/1893] bpb=1.117072 time=259.8s + legal_ttt_chunk [991/1893] bpb=1.117111 time=262.5s + legal_ttt_chunk [1001/1893] bpb=1.117063 time=265.1s + legal_ttt_chunk [1011/1893] bpb=1.116825 time=267.8s + legal_ttt_chunk [1021/1893] bpb=1.117143 time=270.4s + legal_ttt_chunk [1031/1893] bpb=1.117571 time=273.1s + legal_ttt_chunk [1041/1893] bpb=1.117240 time=275.7s + legal_ttt_chunk [1051/1893] bpb=1.116993 time=278.4s + legal_ttt_chunk [1061/1893] bpb=1.117066 time=281.1s + legal_ttt_chunk [1071/1893] bpb=1.117656 time=283.7s + legal_ttt_chunk [1081/1893] bpb=1.117911 time=286.3s + legal_ttt_chunk [1091/1893] bpb=1.118698 time=288.9s + legal_ttt_chunk [1101/1893] bpb=1.118701 time=291.6s + legal_ttt_chunk [1111/1893] bpb=1.118585 time=294.2s + legal_ttt_chunk [1121/1893] bpb=1.118401 time=296.8s + legal_ttt_chunk [1131/1893] bpb=1.118329 time=299.5s + legal_ttt_chunk [1141/1893] bpb=1.118022 time=302.1s + legal_ttt_chunk [1151/1893] bpb=1.118058 time=304.7s + legal_ttt_chunk [1161/1893] bpb=1.117696 time=307.4s + legal_ttt_chunk [1171/1893] bpb=1.118034 time=310.0s + legal_ttt_chunk [1181/1893] bpb=1.117308 time=312.8s 
+ legal_ttt_chunk [1191/1893] bpb=1.117191 time=315.4s + legal_ttt_chunk [1201/1893] bpb=1.117620 time=318.1s + legal_ttt_chunk [1211/1893] bpb=1.117145 time=320.7s + legal_ttt_chunk [1221/1893] bpb=1.116855 time=323.3s + legal_ttt_chunk [1231/1893] bpb=1.116614 time=326.0s + legal_ttt_chunk [1241/1893] bpb=1.116277 time=328.6s + legal_ttt_chunk [1251/1893] bpb=1.115706 time=331.2s + legal_ttt_chunk [1261/1893] bpb=1.115697 time=333.9s + legal_ttt_chunk [1271/1893] bpb=1.115335 time=336.5s + legal_ttt_chunk [1281/1893] bpb=1.115197 time=339.1s + legal_ttt_chunk [1291/1893] bpb=1.114965 time=341.8s + legal_ttt_chunk [1301/1893] bpb=1.114380 time=344.4s + legal_ttt_chunk [1311/1893] bpb=1.114001 time=347.1s + legal_ttt_chunk [1321/1893] bpb=1.113690 time=349.7s + legal_ttt_chunk [1331/1893] bpb=1.113643 time=352.4s + legal_ttt_chunk [1341/1893] bpb=1.113520 time=355.0s + legal_ttt_chunk [1351/1893] bpb=1.113465 time=357.6s + legal_ttt_chunk [1361/1893] bpb=1.113520 time=360.2s + legal_ttt_chunk [1371/1893] bpb=1.113381 time=362.9s + legal_ttt_chunk [1381/1893] bpb=1.113368 time=365.5s + legal_ttt_chunk [1391/1893] bpb=1.113005 time=368.1s + legal_ttt_chunk [1401/1893] bpb=1.112951 time=370.8s + legal_ttt_chunk [1411/1893] bpb=1.113080 time=373.4s + legal_ttt_chunk [1421/1893] bpb=1.113307 time=376.0s + legal_ttt_chunk [1431/1893] bpb=1.113027 time=378.7s + legal_ttt_chunk [1441/1893] bpb=1.113555 time=381.4s + legal_ttt_chunk [1451/1893] bpb=1.113891 time=384.0s + legal_ttt_chunk [1461/1893] bpb=1.113449 time=386.7s + legal_ttt_chunk [1471/1893] bpb=1.114504 time=389.3s + legal_ttt_chunk [1481/1893] bpb=1.114067 time=392.0s + legal_ttt_chunk [1491/1893] bpb=1.113875 time=394.6s + legal_ttt_chunk [1501/1893] bpb=1.113784 time=397.2s + legal_ttt_chunk [1511/1893] bpb=1.113783 time=399.9s + legal_ttt_chunk [1521/1893] bpb=1.113843 time=402.5s + legal_ttt_chunk [1531/1893] bpb=1.113328 time=405.1s + legal_ttt_chunk [1541/1893] bpb=1.113208 time=407.8s + legal_ttt_chunk [1551/1893] bpb=1.113505 time=410.4s + legal_ttt_chunk [1561/1893] bpb=1.113507 time=413.1s + legal_ttt_chunk [1571/1893] bpb=1.113361 time=415.8s + legal_ttt_chunk [1581/1893] bpb=1.113486 time=418.4s + legal_ttt_chunk [1591/1893] bpb=1.113333 time=421.0s + legal_ttt_chunk [1601/1893] bpb=1.113512 time=423.7s + legal_ttt_chunk [1611/1893] bpb=1.113453 time=426.3s + legal_ttt_chunk [1621/1893] bpb=1.113077 time=428.9s + legal_ttt_chunk [1631/1893] bpb=1.113365 time=431.6s + legal_ttt_chunk [1641/1893] bpb=1.113366 time=434.2s + legal_ttt_chunk [1651/1893] bpb=1.113320 time=436.8s + legal_ttt_chunk [1661/1893] bpb=1.113194 time=439.5s + legal_ttt_chunk [1671/1893] bpb=1.113654 time=442.1s + legal_ttt_chunk [1681/1893] bpb=1.113790 time=444.7s + legal_ttt_chunk [1691/1893] bpb=1.113631 time=447.4s + legal_ttt_chunk [1701/1893] bpb=1.113780 time=450.1s + legal_ttt_chunk [1711/1893] bpb=1.113771 time=452.7s + legal_ttt_chunk [1721/1893] bpb=1.113784 time=455.4s + legal_ttt_chunk [1731/1893] bpb=1.113666 time=458.0s + legal_ttt_chunk [1741/1893] bpb=1.113453 time=460.6s + legal_ttt_chunk [1751/1893] bpb=1.113297 time=463.3s + legal_ttt_chunk [1761/1893] bpb=1.113450 time=465.9s + legal_ttt_chunk [1771/1893] bpb=1.113352 time=468.5s + legal_ttt_chunk [1781/1893] bpb=1.113373 time=471.2s + legal_ttt_chunk [1791/1893] bpb=1.112983 time=473.8s + legal_ttt_chunk [1801/1893] bpb=1.112869 time=476.5s + legal_ttt_chunk [1811/1893] bpb=1.112773 time=479.1s + legal_ttt_chunk [1821/1893] bpb=1.112826 time=481.7s + legal_ttt_chunk [1831/1893] 
bpb=1.112232 time=484.5s + legal_ttt_chunk [1841/1893] bpb=1.112198 time=487.1s + legal_ttt_chunk [1851/1893] bpb=1.111987 time=489.7s + legal_ttt_chunk [1861/1893] bpb=1.111629 time=492.4s + legal_ttt_chunk [1871/1893] bpb=1.111626 time=495.0s + legal_ttt_chunk [1881/1893] bpb=1.111200 time=497.7s + legal_ttt_chunk [1891/1893] bpb=1.110975 time=500.3s + legal_ttt_chunk [1893/1893] bpb=1.111018 time=500.7s +legal_ttt:done val_loss=1.872189 val_bpb=1.108819 elapsed=500.7s +final_legal_ttt val_loss:1.8722 val_bpb:1.1088 eval_time:501213ms diff --git a/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train_gpt.py b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train_gpt.py new file mode 100644 index 0000000000..8b5c3dbc6c --- /dev/null +++ b/records/track_non_record_16mb/2026-04-01_UnifiedAttention_FA3_1hour/train_gpt.py @@ -0,0 +1,1644 @@ +from __future__ import annotations + +import copy +import glob +import io +import math +import os +import random +import subprocess +import sys +import time +import traceback +import uuid +import zlib +import lzma +from pathlib import Path + +import numpy as np +import sentencepiece as spm +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch import Tensor, nn +from torch.nn.parallel import DistributedDataParallel as DDP + +try: + from flash_attn_interface import flash_attn_func as _flash_attn_func +except ImportError: + raise ImportError( + "Flash Attention 3 (Hopper) is required. Install with:\n" + " pip install flash_attn_3 --find-links https://windreamer.github.io/flash-attention3-wheels/cu128_torch280\n" + "Or see requirements.txt for details." + ) + + +import logging + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s | %(levelname)-5s | %(message)s", + datefmt="%H:%M:%S", +) +logger = logging.getLogger("yocto-golf") + +def log_architecture(model, args): + n = sum(p.numel() for p in model.parameters()) + logger.info(f"YOCTO d={args.model_dim} K={args.num_unique_layers} heads={args.num_heads} params={n:,}") + +class Hyperparameters: + data_path = os.environ.get("DATA_PATH", "./data/datasets/fineweb10B_sp1024") + train_files = os.path.join(data_path, "fineweb_train_*.bin") + val_files = os.path.join(data_path, "fineweb_val_*.bin") + tokenizer_path = os.environ.get("TOKENIZER_PATH", "./data/tokenizers/fineweb_1024_bpe.model") + run_id = os.environ.get("RUN_ID", str(uuid.uuid4())) + seed = int(os.environ.get("SEED", 1337)) + + val_batch_size = int(os.environ.get("VAL_BATCH_SIZE", 524_288)) + val_loss_every = int(os.environ.get("VAL_LOSS_EVERY", 1000)) + train_log_every = int(os.environ.get("TRAIN_LOG_EVERY", 200)) + + iterations = int(os.environ.get("ITERATIONS", 20000)) + warmdown_iters = int(os.environ.get("WARMDOWN_ITERS", 3500)) + warmup_steps = int(os.environ.get("WARMUP_STEPS", 20)) + train_batch_tokens = int(os.environ.get("TRAIN_BATCH_TOKENS", 524_288)) + train_seq_len = int(os.environ.get("TRAIN_SEQ_LEN", 2048)) + max_wallclock_seconds = float(os.environ.get("MAX_WALLCLOCK_SECONDS", 600.0)) + + # ── Yocto architecture ── + vocab_size = int(os.environ.get("VOCAB_SIZE", 1024)) + model_dim = int(os.environ.get("MODEL_DIM", 552)) + num_heads = int(os.environ.get("NUM_HEADS", 4)) + num_unique_layers = int(os.environ.get("NUM_UNIQUE_LAYERS", 10)) + num_recurrences = int(os.environ.get("NUM_RECURRENCES", 1)) + mlp_mult = int(os.environ.get("MLP_MULT", 3)) + use_swiglu = bool(int(os.environ.get("USE_SWIGLU", "1"))) + tie_embeddings = 
bool(int(os.environ.get("TIE_EMBEDDINGS", "1")))
+    rope_base = float(os.environ.get("ROPE_BASE", 10000.0))
+    logit_softcap = float(os.environ.get("LOGIT_SOFTCAP", 30.0))
+    seeking_gain_init = float(os.environ.get("SEEKING_GAIN_INIT", 1.5))
+    rope_fraction = float(os.environ.get("ROPE_FRACTION", 1.0))  # fraction of head dims rotated: 1.0 = full RoPE, 0.5 = partial RoPE on half the dims
+
+    # ── Optimizer ──
+    embed_lr = float(os.environ.get("EMBED_LR", 0.6))
+    head_lr = float(os.environ.get("HEAD_LR", 0.008))
+    tied_embed_lr = float(os.environ.get("TIED_EMBED_LR", 0.035))
+    tied_embed_init_std = float(os.environ.get("TIED_EMBED_INIT_STD", 0.005))
+    matrix_lr = float(os.environ.get("MATRIX_LR", 0.025))
+    scalar_lr = float(os.environ.get("SCALAR_LR", 0.025))
+    muon_momentum = float(os.environ.get("MUON_MOMENTUM", 0.99))
+    muon_backend_steps = int(os.environ.get("MUON_BACKEND_STEPS", 5))
+    muon_momentum_warmup_start = float(os.environ.get("MUON_MOMENTUM_WARMUP_START", 0.92))
+    muon_momentum_warmup_steps = int(os.environ.get("MUON_MOMENTUM_WARMUP_STEPS", 1500))
+    beta1 = float(os.environ.get("BETA1", 0.9))
+    beta2 = float(os.environ.get("BETA2", 0.95))
+    adam_eps = float(os.environ.get("ADAM_EPS", 1e-8))
+    grad_clip_norm = float(os.environ.get("GRAD_CLIP_NORM", 0.3))
+    muon_weight_decay = float(os.environ.get("MUON_WEIGHT_DECAY", 0.04))
+
+    # ── LR warmup (actual learning-rate ramp, separate from compile warmup) ──
+    lr_warmup_steps = int(os.environ.get("LR_WARMUP_STEPS", 100))
+
+    # ── EMA ──
+    ema_decay = float(os.environ.get("EMA_DECAY", 0.997))  # 0 = disabled, 0.997 = SOTA setting
+
+    # ── SWA (Stochastic Weight Averaging) ──
+    swa_every = int(os.environ.get("SWA_EVERY", 50))  # 0 = disabled, 50 = SOTA setting
+    swa_threshold = float(os.environ.get("SWA_THRESHOLD", 0.2))  # only accumulate SWA when lr_scale < this
+
+    # ── Compression ──
+    compression = os.environ.get("COMPRESSION", "lzma")  # "zlib", "zstd", or "lzma"
+
+    # ── QAT (Quantization-Aware Training) ──
+    qat_bits = int(os.environ.get("QAT_BITS", 6))  # 0 = disabled, 6 = int6 QAT
+    qat_start_fraction = float(os.environ.get("QAT_START_FRACTION", 0.15))  # when to start QAT
+
+    # ── Mixed precision quantization ──
+    int5_layers = os.environ.get("INT5_LAYERS", "")  # e.g. "2,3,4,5,6,7,8"
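+
+    # With a wall-clock budget, qat_start_fraction is applied to elapsed time
+    # rather than steps (see the QAT activation check in the training loop):
+    # 0.85 * 3600 s = 3060 s of training, which at ~51 ms/step lands near step
+    # 59,600; cf. "QAT enabled: int6 at step 59687 (fraction=0.85)" in train.log.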
"2,3,4,5,6,7,8" + + sliding_window_stride = int(os.environ.get("SLIDING_WINDOW_STRIDE", 64)) + + # ── LN Scale ── + ln_scale = bool(int(os.environ.get("LN_SCALE", "1"))) # 1/sqrt(layer_idx+1) on norm outputs + + # ── Value Embedding (VE128) ── + ve_enabled = bool(int(os.environ.get("VE_ENABLED", "1"))) + ve_dim = int(os.environ.get("VE_DIM", 128)) + ve_layers = os.environ.get("VE_LAYERS", "8,9") # last 2 of 10 layers + + # ── TTT LoRA ── + + # ── Legal Score-First TTT ── + legal_ttt_enabled = bool(int(os.environ.get("LEGAL_TTT_ENABLED", "1"))) + legal_ttt_lr = float(os.environ.get("LEGAL_TTT_LR", 0.002)) + legal_ttt_epochs = int(os.environ.get("LEGAL_TTT_EPOCHS", 3)) + legal_ttt_chunk_tokens = int(os.environ.get("LEGAL_TTT_CHUNK_TOKENS", 32768)) + legal_ttt_freeze_blocks = int(os.environ.get("LEGAL_TTT_FREEZE_BLOCKS", 0)) + legal_ttt_momentum = float(os.environ.get("LEGAL_TTT_MOMENTUM", 0.9)) + legal_ttt_batch_seqs = int(os.environ.get("LEGAL_TTT_BATCH_SEQS", 32)) + legal_ttt_grad_clip = float(os.environ.get("LEGAL_TTT_GRAD_CLIP", 1.0)) + + @property + def num_effective_layers(self) -> int: + return self.num_unique_layers * self.num_recurrences + + def validate(self) -> None: + """Check all divisibility constraints.""" + d = self.model_dim + assert d % 3 == 0, f"model_dim={d} must be divisible by 3 for unified attention split" + comp = d // 3 + assert comp % self.num_heads == 0, ( + f"component_dim={comp} (model_dim/3) must be divisible by num_heads={self.num_heads}" + ) + head_dim = comp // self.num_heads + assert head_dim % 2 == 0, f"head_dim={head_dim} must be even for RoPE" + assert head_dim >= 16, f"head_dim={head_dim} must be >= 16 for useful RoPE (got {head_dim})" + assert self.logit_softcap > 0, f"logit_softcap must be positive" + logger.info(f"Architecture constraints validated: d={d}, comp={comp}, heads={self.num_heads}, " + f"head_dim={head_dim}, RoPE_pairs={head_dim//2}") + + +def zeropower_via_newtonschulz5(G: Tensor, steps: int = 5, eps: float = 1e-7) -> Tensor: + """Batched Newton-Schulz orthogonalization. 
G: (B,M,N) or (M,N).""" + a, b, c = (3.4445, -4.7750, 2.0315) + was_2d = G.ndim == 2 + if was_2d: + G = G.unsqueeze(0) + X = G.bfloat16() + transposed = X.size(-2) > X.size(-1) + if transposed: + X = X.mT + X = X / (X.norm(dim=(-2, -1), keepdim=True) + eps) + for _ in range(steps): + A = X @ X.mT + B = b * A + c * (A @ A) + X = a * X + B @ X + if transposed: + X = X.mT + if was_2d: + X = X.squeeze(0) + return X + +class Muon(torch.optim.Optimizer): + def __init__(self, params, lr: float, momentum: float, backend_steps: int, + nesterov: bool = True, weight_decay: float = 0.0): + super().__init__(params, dict(lr=lr, momentum=momentum, backend_steps=backend_steps, + nesterov=nesterov, weight_decay=weight_decay)) + self._built = False + + def _build(self): + self._distributed = dist.is_available() and dist.is_initialized() + self._world_size = dist.get_world_size() if self._distributed else 1 + self._rank = dist.get_rank() if self._distributed else 0 + ws = self._world_size + + self._bank_meta = [] + for group in self.param_groups: + for p in group["params"]: + B = p.shape[0] + padded_B = ((B + ws - 1) // ws) * ws + shard_B = padded_B // ws + tail = p.shape[1:] + dev = p.device + self._bank_meta.append({ + 'p': p, + 'B': B, + 'padded_grad': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'shard_mom': torch.zeros(shard_B, *tail, device=dev, dtype=torch.bfloat16), + 'full_update': torch.zeros(padded_B, *tail, device=dev, dtype=torch.bfloat16), + 'scale': max(1, p.shape[-2] / p.shape[-1]) ** 0.5, + }) + self._bank_meta.sort(key=lambda m: -m['p'].numel()) + self._built = True + + def launch_reduce_scatters(self): + """Phase 1: launch async reduce-scatter for all banks.""" + if not self._built: + self._build() + if not self._distributed: + return + self._rs_futures = [] + for m in self._bank_meta: + p = m['p'] + if p.grad is None: + self._rs_futures.append(None) + continue + pg = m['padded_grad'] + pg[:m['B']].copy_(p.grad.bfloat16()) + if pg.shape[0] > m['B']: + pg[m['B']:].zero_() + fut = dist.reduce_scatter_tensor(m['shard'], pg, op=dist.ReduceOp.AVG, async_op=True) + self._rs_futures.append(fut) + + @torch.no_grad() + def step(self, closure=None): + """Phase 3: wait for RS, batched NS5, all-gather.""" + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if not self._built: + self._build() + + for group in self.param_groups: + lr = group["lr"] + momentum = group["momentum"] + backend_steps = group["backend_steps"] + nesterov = group["nesterov"] + wd = group.get("weight_decay", 0.0) + + prev_ag_handle = None + prev_m = None + + sharded = self._distributed and hasattr(self, '_rs_futures') + + for i, m in enumerate(self._bank_meta): + p = m['p'] + if p.grad is None: + continue + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + + if sharded and self._rs_futures[i] is not None: + self._rs_futures[i].wait() + g = m['shard'] + buf = m['shard_mom'] + else: + g = p.grad.bfloat16() + state = self.state[p] + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros_like(g) + buf = state["momentum_buffer"] + + buf.mul_(momentum).add_(g) + if nesterov: + update = g.add(buf, alpha=momentum) + else: + update = buf + + update = zeropower_via_newtonschulz5(update, steps=backend_steps) + + 
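                # `update` now holds the Newton-Schulz-orthogonalized momentum for this
+                # bank (or bank shard): the quintic iteration X <- aX + (bA + cA^2)X with
+                # A = XX^T pushes every singular value of the normalized momentum toward
+                # 1, so the step has roughly uniform spectral scale, and
+                # m['scale'] = max(1, rows/cols)**0.5 compensates rectangular banks.
+                # Illustrative sketch only, never executed in the training path (the
+                # 1584x528 shape is just this run's fc bank slice, MLP_MULT*528 x 528):
+                #   O = zeropower_via_newtonschulz5(torch.randn(1584, 528), steps=5)
+                #   torch.linalg.svdvals(O.float())  # values cluster near 1.0
+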
if sharded: + prev_ag_handle = dist.all_gather_into_tensor( + m['full_update'], update, async_op=True) + prev_m = m + else: + if wd > 0.0: + p.data.mul_(1.0 - lr * wd) + p.add_(update.to(dtype=p.dtype), alpha=-lr * m['scale']) + + if prev_ag_handle is not None: + prev_ag_handle.wait() + pp = prev_m['p'] + upd = prev_m['full_update'][:prev_m['B']] + if wd > 0.0: + pp.data.mul_(1.0 - lr * wd) + pp.add_(upd.to(dtype=pp.dtype), alpha=-lr * prev_m['scale']) + + if hasattr(self, '_rs_futures'): + del self._rs_futures + + return loss + + +def build_sentencepiece_luts(sp: spm.SentencePieceProcessor, vocab_size: int, device: torch.device): + sp_vocab_size = int(sp.vocab_size()) + table_size = max(sp_vocab_size, vocab_size) + base_bytes_np = np.zeros((table_size,), dtype=np.int16) + has_leading_space_np = np.zeros((table_size,), dtype=np.bool_) + is_boundary_token_np = np.ones((table_size,), dtype=np.bool_) + for token_id in range(sp_vocab_size): + if sp.is_control(token_id) or sp.is_unknown(token_id) or sp.is_unused(token_id): + continue + is_boundary_token_np[token_id] = False + if sp.is_byte(token_id): + base_bytes_np[token_id] = 1 + continue + piece = sp.id_to_piece(token_id) + if piece.startswith("▁"): + has_leading_space_np[token_id] = True + piece = piece[1:] + base_bytes_np[token_id] = len(piece.encode("utf-8")) + return ( + torch.tensor(base_bytes_np, dtype=torch.int16, device=device), + torch.tensor(has_leading_space_np, dtype=torch.bool, device=device), + torch.tensor(is_boundary_token_np, dtype=torch.bool, device=device), + ) + +def load_validation_tokens(pattern: str, seq_len: int) -> Tensor: + files = [Path(p) for p in sorted(glob.glob(pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {pattern}") + tokens = torch.cat([load_data_shard(file) for file in files]).contiguous() + usable = ((tokens.numel() - 1) // seq_len) * seq_len + if usable <= 0: + raise ValueError(f"Validation split too short for seq_len={seq_len}") + return tokens[: usable + 1] + +def eval_val(args, model, rank, world_size, device, grad_accum_steps, val_tokens, + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut): + local_batch_tokens = args.val_batch_size // (world_size * grad_accum_steps) + local_batch_seqs = local_batch_tokens // args.train_seq_len + total_seqs = (val_tokens.numel() - 1) // args.train_seq_len + seq_start = (total_seqs * rank) // world_size + seq_end = (total_seqs * (rank + 1)) // world_size + val_loss_sum = torch.zeros((), device=device, dtype=torch.float64) + val_token_count = torch.zeros((), device=device, dtype=torch.float64) + val_byte_count = torch.zeros((), device=device, dtype=torch.float64) + model.eval() + with torch.inference_mode(): + for batch_seq_start in range(seq_start, seq_end, local_batch_seqs): + batch_seq_end = min(batch_seq_start + local_batch_seqs, seq_end) + raw_start = batch_seq_start * args.train_seq_len + raw_end = batch_seq_end * args.train_seq_len + 1 + local = val_tokens[raw_start:raw_end].to(device=device, dtype=torch.int64, non_blocking=True) + x = local[:-1].reshape(-1, args.train_seq_len) + y = local[1:].reshape(-1, args.train_seq_len) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True): + batch_loss = model(x, y).detach() + batch_token_count = float(y.numel()) + val_loss_sum += batch_loss.to(torch.float64) * batch_token_count + val_token_count += batch_token_count + prev_ids, tgt_ids = x.reshape(-1), y.reshape(-1) + token_bytes = base_bytes_lut[tgt_ids].to(dtype=torch.int16) + token_bytes += 
(has_leading_space_lut[tgt_ids] & ~is_boundary_token_lut[prev_ids]).to(dtype=torch.int16) + val_byte_count += token_bytes.to(torch.float64).sum() + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(val_loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(val_token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(val_byte_count, op=dist.ReduceOp.SUM) + val_loss = val_loss_sum / val_token_count + bits_per_token = val_loss.item() / math.log(2.0) + tokens_per_byte = val_token_count.item() / val_byte_count.item() + model.train() + return float(val_loss.item()), float(bits_per_token * tokens_per_byte) + +CONTROL_TENSOR_NAME_PATTERNS = ("attn_scale", "mlp_scale", "resid_mix", "skip_weight", "seeking_gain", "smear", "ve_layer_scales", "ve_shared.scale") +INT8_KEEP_FLOAT_FP32_NAME_PATTERNS = CONTROL_TENSOR_NAME_PATTERNS +INT8_KEEP_FLOAT_MAX_NUMEL = 65_536 +INT8_KEEP_FLOAT_STORE_DTYPE = torch.float16 +INT8_PER_ROW_SCALE_DTYPE = torch.float16 +INT8_CLIP_Q = 99.99984 / 100.0 + +def tensor_nbytes(t: Tensor) -> int: + return int(t.numel()) * int(t.element_size()) + +def quantize_float_tensor(t: Tensor): + t32 = t.float() + if t32.ndim == 2: + clip_abs = torch.quantile(t32.abs(), INT8_CLIP_Q, dim=1) if t32.numel() else torch.empty((t32.shape[0],), dtype=torch.float32) + clipped = torch.maximum(torch.minimum(t32, clip_abs[:, None]), -clip_abs[:, None]) + scale = (clip_abs / 127.0).clamp_min(1.0 / 127.0) + q = torch.clamp(torch.round(clipped / scale[:, None]), -127, 127).to(torch.int8).contiguous() + return q, scale.to(dtype=INT8_PER_ROW_SCALE_DTYPE).contiguous() + clip_abs = float(torch.quantile(t32.abs().flatten(), INT8_CLIP_Q).item()) if t32.numel() else 0.0 + scale = torch.tensor(clip_abs / 127.0 if clip_abs > 0 else 1.0, dtype=torch.float32) + q = torch.clamp(torch.round(torch.clamp(t32, -clip_abs, clip_abs) / scale), -127, 127).to(torch.int8).contiguous() + return q, scale + +GPTQ_CLIP_PERCENTILES = [0.999, 0.9995, 0.9999, 0.99999, 1.0] + +def quantize_float_tensor_int6(t: Tensor): + return quantize_float_tensor_intN(t, max_val=31) + +def quantize_float_tensor_intN(t: Tensor, max_val: int = 31): + t32 = t.float() + if t32.ndim == 2: + best_q, best_scale, best_err = None, None, float('inf') + + for pct in GPTQ_CLIP_PERCENTILES: + if pct >= 1.0: + clip_abs = t32.abs().amax(dim=1).clamp_min(1e-8) + else: + clip_abs = torch.quantile(t32.abs(), pct, dim=1).clamp_min(1e-8) + scale = (clip_abs / max_val).clamp_min(1e-8).to(torch.float16) + clipped = t32.clamp(-clip_abs[:, None], clip_abs[:, None]) + q = torch.clamp(torch.round(clipped / scale.float()[:, None]), -max_val, max_val).to(torch.int8) + recon = q.float() * scale.float()[:, None] + err = (t32 - recon).pow(2).mean().item() + if err < best_err: + best_q, best_scale, best_err = q, scale, err + + return best_q.contiguous(), best_scale.contiguous() + abs_max = t32.abs().max().clamp_min(1e-8).item() + scale = torch.tensor(abs_max / max_val, dtype=torch.float16) + q = torch.clamp(torch.round(t32 / scale.float()), -max_val, max_val).to(torch.int8) + return q, scale + +# ── Unbank/rebank for quantization ── + +def _unbank_state_dict(sd, num_layers): + out = {} + for name, tensor in sd.items(): + if name == "unified_bank": + for i in range(num_layers): + w = tensor[i] # [d, d] + d = w.shape[0] + comp = d // 3 + out[f"blocks.{i}.attn.W_seeking.weight"] = w[:comp, :] + out[f"blocks.{i}.attn.W_offering.weight"] = w[comp:2*comp, :] + out[f"blocks.{i}.attn.W_content.weight"] = w[2*comp:, :] + elif name == "output_bank": + for i in range(num_layers): 
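+                # Flat per-layer keys (blocks.{i}. ...) are what let the quantizer below
+                # pick int5 vs int6 per layer and keep one fp16 scale per output row;
+                # each output_bank slice is already the [out, in] = [d, comp] matrix
+                # that F.linear expects, so it maps onto a single weight key.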
+ out[f"blocks.{i}.attn.W_output.weight"] = tensor[i] + elif name == "fc_bank": + for i in range(num_layers): + out[f"blocks.{i}.mlp.fc.weight"] = tensor[i] + elif name == "proj_bank": + for i in range(num_layers): + out[f"blocks.{i}.mlp.proj.weight"] = tensor[i] + else: + out[name] = tensor + return out + +def _rebank_state_dict(sd, num_layers, template_sd): + out = {} + consumed = set() + + unified_slices = [] + for i in range(num_layers): + sk = f"blocks.{i}.attn.W_seeking.weight" + ok = f"blocks.{i}.attn.W_offering.weight" + ck = f"blocks.{i}.attn.W_content.weight" + unified_slices.append(torch.cat([sd[sk], sd[ok], sd[ck]], dim=0)) + consumed.update([sk, ok, ck]) + out["unified_bank"] = torch.stack(unified_slices).to(dtype=template_sd["unified_bank"].dtype) + + for bank_name, key_template in [ + ("output_bank", "blocks.{i}.attn.W_output.weight"), + ("fc_bank", "blocks.{i}.mlp.fc.weight"), + ("proj_bank", "blocks.{i}.mlp.proj.weight"), + ]: + slices = [] + for i in range(num_layers): + k = key_template.format(i=i) + slices.append(sd[k]) + consumed.add(k) + out[bank_name] = torch.stack(slices).to(dtype=template_sd[bank_name].dtype) + + for name, tensor in sd.items(): + if name not in consumed: + out[name] = tensor + return out + +INT8_EMBED_PATTERNS = ("tok_emb.", "ve_shared.embed.") + +def quantize_state_dict_mixed(state_dict, int5_layers=None): + if int5_layers is None: + int5_layers = set() + result = {} + meta = {} + for name, tensor in state_dict.items(): + t = tensor.detach().to("cpu").contiguous() + if not t.is_floating_point(): + result[name] = t + meta[name] = "passthrough" + continue + if t.numel() <= INT8_KEEP_FLOAT_MAX_NUMEL: + if any(p in name for p in INT8_KEEP_FLOAT_FP32_NAME_PATTERNS): + result[name] = t.float().contiguous() + meta[name] = "passthrough_ctrl" + else: + result[name] = t.to(torch.float16).contiguous() + meta[name] = "passthrough" + continue + is_embed = any(p in name for p in INT8_EMBED_PATTERNS) + if is_embed: + q, s = quantize_float_tensor(t) + result[name + ".q"] = q + result[name + ".scale"] = s + meta[name] = {"type": "int8"} + else: + layer_idx = -1 + if "blocks." 
in name:
+                try:
+                    layer_idx = int(name.split("blocks.")[1].split(".")[0])
+                except (ValueError, IndexError):
+                    pass
+            if layer_idx in int5_layers:
+                q, s = quantize_float_tensor_intN(t, max_val=15)
+                result[name + ".q"] = q
+                result[name + ".scale"] = s
+                meta[name] = {"type": "int5"}
+            else:
+                q, s = quantize_float_tensor_int6(t)
+                result[name + ".q"] = q
+                result[name + ".scale"] = s
+                meta[name] = {"type": "int6"}
+    return result, meta
+
+def dequantize_state_dict_mixed(result, meta, template_sd=None):
+    """Dequantize flat-key mixed int6/int8 state dict back to float tensors."""
+    out = {}
+    for name, info in meta.items():
+        if info in ("passthrough", "passthrough_ctrl"):
+            t = result[name]
+            if template_sd is not None and name in template_sd:
+                orig_dtype = template_sd[name].dtype
+                if t.dtype != orig_dtype:
+                    t = t.to(orig_dtype)
+            out[name] = t
+            continue
+        q = result[name + ".q"]
+        s = result[name + ".scale"]
+        if s.ndim > 0:
+            deq = (q.float() * s.float().view(q.shape[0], *([1] * (q.ndim - 1))))
+        else:
+            deq = q.float() * float(s.item())
+        target_dtype = torch.bfloat16
+        if template_sd is not None and name in template_sd:
+            target_dtype = template_sd[name].dtype
+        out[name] = deq.to(target_dtype).contiguous()
+    return out
+
+
+def load_data_shard(file: Path) -> Tensor:
+    # .bin shard layout (standard fineweb convention, assumed here): 256 int32
+    # header words with header[2] = token count, followed by little-endian
+    # uint16 token ids. int16 storage is safe since vocab_size <= 1024.
+    header = np.fromfile(file, dtype="<i4", count=256)
+    num_tokens = int(header[2])
+    tokens = np.fromfile(file, dtype="<u2", count=num_tokens, offset=256 * 4)
+    return torch.from_numpy(tokens.astype(np.int16))
+
+class TokenStream:
+    def __init__(self, pattern: str):
+        self.files = [Path(p) for p in sorted(glob.glob(pattern))]
+        if not self.files:
+            raise FileNotFoundError(f"No files found for pattern: {pattern}")
+        self.file_idx = 0
+        self.tokens = load_data_shard(self.files[0])
+        self.pos = 0
+
+    def _advance_file(self) -> None:
+        self.file_idx = (self.file_idx + 1) % len(self.files)
+        self.tokens = load_data_shard(self.files[self.file_idx])
+        self.pos = 0
+
+    def take(self, n: int) -> Tensor:
+        chunks = []
+        remaining = n
+        while remaining > 0:
+            avail = self.tokens.numel() - self.pos
+            if avail <= 0:
+                self._advance_file()
+                continue
+            k = min(remaining, avail)
+            chunks.append(self.tokens[self.pos : self.pos + k])
+            self.pos += k
+            remaining -= k
+        return chunks[0] if len(chunks) == 1 else torch.cat(chunks)
+
+class DistributedTokenLoader:
+    def __init__(self, pattern: str, rank: int, world_size: int, device: torch.device):
+        self.rank, self.world_size, self.device = rank, world_size, device
+        self.stream = TokenStream(pattern)
+
+    def next_batch(self, global_tokens: int, seq_len: int, grad_accum_steps: int):
+        local_tokens = global_tokens // (self.world_size * grad_accum_steps)
+        per_rank_span = local_tokens + 1
+        chunk = self.stream.take(per_rank_span * self.world_size)
+        start = self.rank * per_rank_span
+        local = chunk[start : start + per_rank_span].to(dtype=torch.int64)
+        x = local[:-1].reshape(-1, seq_len)
+        y = local[1:].reshape(-1, seq_len)
+        return x.to(self.device, non_blocking=True), y.to(self.device, non_blocking=True)
+
+
+class RMSNorm(nn.Module):
+    def __init__(self, eps: float | None = None):
+        super().__init__()
+        self.eps = eps
+
+    def forward(self, x: Tensor) -> Tensor:
+        return F.rms_norm(x, (x.size(-1),), eps=self.eps)
+
+class CastedLinear(nn.Linear):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._is_mlp = False  # kept for compatibility
+
+    def forward(self, x: Tensor) -> Tensor:
+        w = self.weight
+        if self.training and _qat_active and w.numel() > INT8_KEEP_FLOAT_MAX_NUMEL:
+            w = _fake_quantize(w, _qat_bits)
+        bias = self.bias.to(x.dtype) if self.bias is not None else None
+        return F.linear(x, w.to(x.dtype), bias)
+
+# ── QAT globals (set during training) ──
+_qat_active = False
+_qat_bits = 6
+
+def _fake_quantize(w: Tensor, bits: int) -> Tensor:
+    max_val = (1 << (bits - 1)) - 1  # e.g.
int6: max_val = 31 + with torch.no_grad(): + abs_max = w.abs().amax(dim=1, keepdim=True).clamp_min(1e-8) + scale = abs_max / max_val + w_q = (w / scale).round().clamp(-max_val, max_val) * scale + return w + (w_q - w).detach() + +def restore_low_dim_params_to_fp32(module: nn.Module) -> None: + with torch.no_grad(): + for name, param in module.named_parameters(): + if (param.ndim < 2 or any(p in name for p in CONTROL_TENSOR_NAME_PATTERNS)) and param.dtype != torch.float32: + param.data = param.data.float() + +class Rotary(nn.Module): + def __init__(self, dim: int, base: float = 10000.0): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self._seq_len_cached = 0 + self._cos_cached: Tensor | None = None + self._sin_cached: Tensor | None = None + + def forward(self, seq_len: int, device: torch.device, dtype: torch.dtype): + if self._cos_cached is None or self._seq_len_cached != seq_len or self._cos_cached.device != device: + t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype) + freqs = torch.outer(t, self.inv_freq.to(device)) + self._cos_cached = freqs.cos()[None, None, :, :] + self._sin_cached = freqs.sin()[None, None, :, :] + self._seq_len_cached = seq_len + return self._cos_cached.to(dtype=dtype), self._sin_cached.to(dtype=dtype) + +def apply_rotary_emb(x: Tensor, cos: Tensor, sin: Tensor) -> Tensor: + half = x.size(-1) // 2 + x1, x2 = x[..., :half], x[..., half:] + return torch.cat((x1 * cos + x2 * sin, x1 * (-sin) + x2 * cos), dim=-1) + +class SmearGate(nn.Module): + def __init__(self, dim: int): + super().__init__() + self.gate = nn.Parameter(torch.zeros(dim, dtype=torch.float32)) + + def forward(self, x: Tensor) -> Tensor: + g = torch.sigmoid(self.gate.to(dtype=x.dtype))[None, None, :] + x_prev = torch.cat([torch.zeros_like(x[:, :1]), x[:, :-1]], dim=1) + return (1 - g) * x + g * x_prev + +class ValueEmbedding(nn.Module): + def __init__(self, vocab_size: int, ve_dim: int, target_dim: int): + super().__init__() + self.embed = nn.Embedding(vocab_size, ve_dim) + nn.init.normal_(self.embed.weight, std=0.01) + self.proj = CastedLinear(ve_dim, target_dim, bias=False) if ve_dim != target_dim else None + if self.proj is not None: + nn.init.zeros_(self.proj.weight) + self.scale = nn.Parameter(torch.tensor(0.1, dtype=torch.float32)) + + def forward(self, token_ids: Tensor) -> Tensor: + h = self.embed(token_ids) + if self.proj is not None: + h = self.proj(h) + return h * self.scale.to(dtype=h.dtype) + +class UnifiedAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, rope_base: float, + seeking_gain_init: float, rope_fraction: float = 1.0): + super().__init__() + assert dim % 3 == 0, f"dim={dim} must be divisible by 3" + self.dim = dim + self.num_heads = num_heads + self.component_dim = dim // 3 + self.head_dim = self.component_dim // num_heads + assert self.component_dim % num_heads == 0 + + self.rope_dim = int(self.head_dim * rope_fraction) + self.rope_dim = max(self.rope_dim - (self.rope_dim % 2), 2) + self.pass_dim = self.head_dim - self.rope_dim + + self.seeking_gain = nn.Parameter( + torch.full((num_heads,), seeking_gain_init, dtype=torch.float32) + ) + self.rotary = Rotary(self.rope_dim, base=rope_base) + + def forward(self, x: Tensor, unified_w: Tensor, output_w: Tensor, unified_delta=None, v_embed=None) -> Tensor: + bsz, seqlen, _ = x.shape + + unified = F.linear(x, unified_w.to(x.dtype)) + if unified_delta is not None: + unified = unified + 
unified_delta
+
+        seeking, offering, content = unified.split(self.component_dim, dim=-1)
+
+        if v_embed is not None:
+            content = content + v_embed
+
+        def to_heads(t):
+            return t.reshape(bsz, seqlen, self.num_heads, self.head_dim).transpose(1, 2)
+
+        seeking = to_heads(seeking)
+        offering = to_heads(offering)
+        content = to_heads(content)
+
+        seeking = F.rms_norm(seeking, (seeking.size(-1),))
+        offering = F.rms_norm(offering, (offering.size(-1),))
+
+        cos, sin = self.rotary(seqlen, x.device, seeking.dtype)
+        if self.pass_dim > 0:
+            s_rope, s_pass = seeking[..., :self.rope_dim], seeking[..., self.rope_dim:]
+            o_rope, o_pass = offering[..., :self.rope_dim], offering[..., self.rope_dim:]
+            s_rope = apply_rotary_emb(s_rope, cos, sin)
+            o_rope = apply_rotary_emb(o_rope, cos, sin)
+            seeking = torch.cat([s_rope, s_pass], dim=-1)
+            offering = torch.cat([o_rope, o_pass], dim=-1)
+        else:
+            seeking = apply_rotary_emb(seeking, cos, sin)
+            offering = apply_rotary_emb(offering, cos, sin)
+
+        seeking = seeking * self.seeking_gain.to(dtype=seeking.dtype)[None, :, None, None]
+
+        sq = seeking.transpose(1, 2)
+        of = offering.transpose(1, 2)
+        ct = content.transpose(1, 2)
+        dtype = sq.dtype
+        if dtype not in (torch.float16, torch.bfloat16):
+            sq, of, ct = sq.to(torch.bfloat16), of.to(torch.bfloat16), ct.to(torch.bfloat16)
+        hd = sq.size(-1)
+        pad_n = (8 - hd % 8) % 8
+        if pad_n > 0:
+            sq = F.pad(sq, (0, pad_n))
+            of = F.pad(of, (0, pad_n))
+            ct = F.pad(ct, (0, pad_n))
+        out = _flash_attn_func(sq, of, ct, causal=True)
+        y = out[0] if isinstance(out, tuple) else out
+        if pad_n > 0:
+            y = y[..., :hd]
+        if y.dtype != dtype:
+            y = y.to(dtype)
+        # FA3 output is already (batch, seqlen, heads, head_dim): flatten the heads
+        # directly rather than taking a redundant transpose/transpose round trip.
+        y = y.contiguous().reshape(bsz, seqlen, self.component_dim)
+        return F.linear(y, output_w.to(x.dtype))
+
+class SquaredReLUMLP(nn.Module):
+    """LeakyReLU(0.5)² MLP — weights passed from banks."""
+    def __init__(self, dim: int, mlp_mult: int):
+        super().__init__()
+
+    def forward(self, x: Tensor, fc_w: Tensor, proj_w: Tensor) -> Tensor:
+        return F.linear(
+            F.leaky_relu(F.linear(x, fc_w.to(x.dtype)), negative_slope=0.5).square(),
+            proj_w.to(x.dtype)
+        )
+
+class Block(nn.Module):
+    """Single transformer block with unified attention + MLP.
Weights from banks.""" + def __init__(self, dim: int, num_heads: int, mlp_mult: int, rope_base: float, + seeking_gain_init: float, rope_fraction: float = 1.0, + layer_idx: int = 0, ln_scale: bool = False): + super().__init__() + self.attn_norm = RMSNorm() + self.mlp_norm = RMSNorm() + self.attn = UnifiedAttention(dim, num_heads, rope_base, seeking_gain_init, rope_fraction) + self.mlp = SquaredReLUMLP(dim, mlp_mult) + self.attn_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.mlp_scale = nn.Parameter(torch.ones(dim, dtype=torch.float32)) + self.resid_mix = nn.Parameter(torch.stack((torch.ones(dim), torch.zeros(dim))).float()) + self.ln_scale_factor = 1.0 / math.sqrt(layer_idx + 1) if ln_scale else 1.0 + + def forward(self, x: Tensor, x0: Tensor, unified_w: Tensor, output_w: Tensor, + fc_w: Tensor, proj_w: Tensor, unified_delta_fn=None, v_embed=None) -> Tensor: + mix = self.resid_mix.to(dtype=x.dtype) + x = mix[0][None, None, :] * x + mix[1][None, None, :] * x0 + n = self.attn_norm(x) * self.ln_scale_factor + ud = unified_delta_fn(n) if unified_delta_fn is not None else None + x = x + self.attn_scale.to(dtype=x.dtype)[None, None, :] * self.attn(n, unified_w, output_w, ud, v_embed=v_embed) + x = x + self.mlp_scale.to(dtype=x.dtype)[None, None, :] * self.mlp(self.mlp_norm(x) * self.ln_scale_factor, fc_w, proj_w) + return x + +class YoctoGPT(nn.Module): + def __init__(self, vocab_size: int, model_dim: int, num_heads: int, + num_unique_layers: int, num_recurrences: int, mlp_mult: int, + tie_embeddings: bool, tied_embed_init_std: float, + logit_softcap: float, rope_base: float, seeking_gain_init: float, + rope_fraction: float = 1.0, + ln_scale: bool = True, + ve_enabled: bool = True, ve_dim: int = 128, ve_layers: str = "8,9", + int5_layers: str = ""): + super().__init__() + self.tie_embeddings = tie_embeddings + self.logit_softcap = logit_softcap + self.num_unique_layers = num_unique_layers + self.num_recurrences = num_recurrences + self.int5_layer_set = set(int(x) for x in int5_layers.split(",") if x.strip()) + effective = num_unique_layers * num_recurrences + + comp_dim = model_dim // 3 + mlp_dim = mlp_mult * model_dim + + self.tok_emb = nn.Embedding(vocab_size, model_dim) + self.bigram = None + self.smear = SmearGate(model_dim) + + K = num_unique_layers + self.unified_bank = nn.Parameter(torch.empty(K, model_dim, model_dim)) # W_unified: d→d + self.output_bank = nn.Parameter(torch.empty(K, model_dim, comp_dim)) # W_output: comp→d (F.linear expects [out, in]) + self.fc_bank = nn.Parameter(torch.empty(K, mlp_dim, model_dim)) # MLP fc: d→mlp_dim + self.proj_bank = nn.Parameter(torch.empty(K, model_dim, mlp_dim)) # MLP proj: mlp_dim→d + + self.blocks = nn.ModuleList([ + Block(model_dim, num_heads, mlp_mult, rope_base, seeking_gain_init, rope_fraction, + layer_idx=k, ln_scale=ln_scale) + for k in range(num_unique_layers) + ]) + + self.num_encoder_layers = effective // 2 + self.num_decoder_layers = effective - self.num_encoder_layers + self.num_skip_weights = min(self.num_encoder_layers, self.num_decoder_layers) + self.skip_weights = nn.Parameter(torch.ones(self.num_skip_weights, model_dim, dtype=torch.float32)) + + self.ve_layer_indices = [int(x) for x in ve_layers.split(",") if x.strip()] if ve_enabled else [] + if self.ve_layer_indices: + self.ve_shared = ValueEmbedding(vocab_size, ve_dim, comp_dim) + self.ve_layer_scales = nn.ParameterList( + [nn.Parameter(torch.ones(1, dtype=torch.float32)) for _ in self.ve_layer_indices] + ) + else: + self.ve_shared = None + 
self.ve_layer_scales = nn.ParameterList() + + self.final_norm = RMSNorm() + self.lm_head = None if tie_embeddings else CastedLinear(model_dim, vocab_size, bias=False) + if self.lm_head is not None: + self.lm_head._zero_init = True + self._init_weights(tied_embed_init_std) + + def _init_weights(self, std: float) -> None: + if self.tie_embeddings: + nn.init.normal_(self.tok_emb.weight, mean=0.0, std=std) + K = self.num_unique_layers + proj_scale = 1.0 / math.sqrt(2 * K * self.num_recurrences) + for i in range(K): + nn.init.orthogonal_(self.unified_bank.data[i], gain=1.0) + nn.init.zeros_(self.output_bank.data[i]) + self.output_bank.data[i].mul_(proj_scale) + nn.init.orthogonal_(self.fc_bank.data[i], gain=1.0) + nn.init.zeros_(self.proj_bank.data[i]) + self.proj_bank.data[i].mul_(proj_scale) + for module in self.modules(): + if isinstance(module, nn.Linear) and getattr(module, "_zero_init", False): + nn.init.zeros_(module.weight) + + def _qat_weight(self, w: Tensor, layer_idx: int = -1) -> Tensor: + if self.training and _qat_active: + bits = 5 if layer_idx in self.int5_layer_set else _qat_bits + return _fake_quantize(w, bits) + return w + + def _get_ve(self, layer_idx: int, input_ids: Tensor, ve_cache: dict) -> Tensor | None: + """Get value embedding for a specific layer using shared table + per-layer scale.""" + if self.ve_shared is None or layer_idx not in self.ve_layer_indices: + return None + if 've' not in ve_cache: + ve_cache['ve'] = self.ve_shared(input_ids) + ve_idx = self.ve_layer_indices.index(layer_idx) + return ve_cache['ve'] * self.ve_layer_scales[ve_idx].to(dtype=ve_cache['ve'].dtype) + + def forward(self, input_ids: Tensor, target_ids: Tensor, lora=None) -> Tensor: + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_cache: dict = {} + + eff_layer_idx = 0 + for _r in range(self.num_recurrences): + for k in range(self.num_unique_layers): + is_encoder = eff_layer_idx < self.num_encoder_layers + + if not is_encoder and skips: + dec_idx = eff_layer_idx - self.num_encoder_layers + if dec_idx < self.num_skip_weights: + x = x + self.skip_weights[dec_idx].to(dtype=x.dtype)[None, None, :] * skips.pop() + + ud_fn = lora.unified_loras[k] if (lora and lora.unified_loras is not None) else None + ve = self._get_ve(k, input_ids, ve_cache) + x = self.blocks[k](x, x0, + self._qat_weight(self.unified_bank[k], k), + self._qat_weight(self.output_bank[k], k), + self._qat_weight(self.fc_bank[k], k), + self._qat_weight(self.proj_bank[k], k), + ud_fn, v_embed=ve) + + if is_encoder: + skips.append(x) + + eff_layer_idx += 1 + + x = self.final_norm(x) + if self.tie_embeddings: + logits = F.linear(x, self.tok_emb.weight) + else: + logits = self.lm_head(x) + logits = logits + (lora.lm_head_lora(x) if lora else 0) + logits = self.logit_softcap * torch.tanh(logits / self.logit_softcap) + + if lora: + bsz, sl, V = logits.shape + return F.cross_entropy( + logits.float().reshape(-1, V), target_ids.reshape(-1), reduction="none" + ).reshape(bsz, sl) + return F.cross_entropy(logits.float().reshape(-1, logits.size(-1)), target_ids.reshape(-1), reduction="mean") + + def forward_logits(self, input_ids: Tensor) -> Tensor: + x = self.tok_emb(input_ids) + if self.bigram is not None: + x = x + self.bigram(input_ids) + x = F.rms_norm(x, (x.size(-1),)) + x = self.smear(x) + x0 = x + skips: list[Tensor] = [] + ve_cache: dict = {} + + eff_layer_idx = 0 + for _r in 
range(self.num_recurrences): + for k in range(self.num_unique_layers): + is_encoder = eff_layer_idx < self.num_encoder_layers + + if not is_encoder and skips: + dec_idx = eff_layer_idx - self.num_encoder_layers + if dec_idx < self.num_skip_weights: + x = x + self.skip_weights[dec_idx].to(dtype=x.dtype)[None, None, :] * skips.pop() + + ve = self._get_ve(k, input_ids, ve_cache) + x = self.blocks[k](x, x0, + self.unified_bank[k], self.output_bank[k], + self.fc_bank[k], self.proj_bank[k], + v_embed=ve) + + if is_encoder: + skips.append(x) + + eff_layer_idx += 1 + + x = self.final_norm(x) + if self.tie_embeddings: + logits = F.linear(x, self.tok_emb.weight) + else: + logits = self.lm_head(x) + return self.logit_softcap * torch.tanh(logits / self.logit_softcap) + +def eval_val_legal_ttt(args, base_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, + is_boundary_token_lut, log0=print): + seq_len = args.train_seq_len + stride = args.sliding_window_stride + total_tokens = val_tokens.numel() - 1 + ttt_chunk = args.legal_ttt_chunk_tokens + + window_starts = [ws for ws in range(0, total_tokens, stride) + if min(ws + seq_len, total_tokens) - ws >= stride or ws == 0] + + num_chunks = (total_tokens + ttt_chunk - 1) // ttt_chunk + chunk_windows: list[list[int]] = [[] for _ in range(num_chunks)] + for ws in window_starts: + end = min(ws + seq_len, total_tokens) + wlen = end - ws + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_start = ws + s + ci = min(scored_start // ttt_chunk, num_chunks - 1) + chunk_windows[ci].append(ws) + + log0(f"legal_ttt:start chunks={num_chunks} chunk_tokens={ttt_chunk} " + f"total_windows={len(window_starts)} stride={stride} " + f"lr={args.legal_ttt_lr} epochs={args.legal_ttt_epochs} " + f"freeze_blocks={args.legal_ttt_freeze_blocks}") + + loss_sum = torch.zeros((), device=device, dtype=torch.float64) + token_count = torch.zeros((), device=device, dtype=torch.float64) + byte_count = torch.zeros((), device=device, dtype=torch.float64) + + frozen_block_ids = set(range(min(args.legal_ttt_freeze_blocks, len(base_model.blocks)))) + ttt_params = [] + for name, p in base_model.named_parameters(): + freeze = False + for bi in frozen_block_ids: + if f"blocks.{bi}." 
in name: + freeze = True + break + if freeze: + p.requires_grad_(False) + else: + p.requires_grad_(True) + ttt_params.append(p) + + log0(f"legal_ttt:params unfrozen={sum(p.numel() for p in ttt_params)} " + f"frozen={sum(p.numel() for p in base_model.parameters() if not p.requires_grad)}") + + optimizer = torch.optim.SGD(ttt_params, lr=args.legal_ttt_lr, momentum=args.legal_ttt_momentum) + batch_seqs = args.legal_ttt_batch_seqs + t0 = time.perf_counter() + + for ci in range(num_chunks): + windows = chunk_windows[ci] + if not windows: + continue + + my_s = (len(windows) * rank) // world_size + my_e = (len(windows) * (rank + 1)) // world_size + my_windows = windows[my_s:my_e] + + base_model.eval() + with torch.no_grad(): + for bi in range(0, len(my_windows), batch_seqs): + batch_ws = my_windows[bi:bi + batch_seqs] + bsz = len(batch_ws) + x_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + y_batch = torch.zeros(bsz, seq_len, dtype=torch.int64, device=device) + wlens: list[int] = [] + for i, ws in enumerate(batch_ws): + end = min(ws + seq_len, total_tokens) + wlen = end - ws + wlens.append(wlen) + chunk_tok = val_tokens[ws:end + 1].to(dtype=torch.int64, device=device) + x_batch[i, :wlen] = chunk_tok[:-1] + y_batch[i, :wlen] = chunk_tok[1:] + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + logits = base_model.forward_logits(x_batch) + nll = F.cross_entropy( + logits.reshape(-1, logits.size(-1)).float(), + y_batch.reshape(-1), reduction="none", + ).reshape(bsz, seq_len) + for i, ws in enumerate(batch_ws): + wlen = wlens[i] + s = 0 if ws == 0 else max(wlen - stride, 0) + scored_nll = nll[i, s:wlen].to(torch.float64) + loss_sum += scored_nll.sum() + token_count += float(wlen - s) + tgt, prev = y_batch[i, s:wlen], x_batch[i, s:wlen] + tb = base_bytes_lut[tgt].to(torch.float64) + tb += (has_leading_space_lut[tgt] & ~is_boundary_token_lut[prev]).to(torch.float64) + byte_count += tb.sum() + + is_last_chunk = (ci == num_chunks - 1) + if not is_last_chunk and args.legal_ttt_epochs > 0: + base_model.train() + for block in base_model.blocks: + block.attn.rotary._cos_cached = None + block.attn.rotary._sin_cached = None + chunk_start = ci * ttt_chunk + chunk_end = min((ci + 1) * ttt_chunk, total_tokens) + chunk_seqs = (chunk_end - chunk_start) // seq_len + if chunk_seqs > 0: + cos_lr = args.legal_ttt_lr * 0.5 * (1.0 + math.cos(math.pi * ci / max(num_chunks - 1, 1))) + for pg in optimizer.param_groups: + pg['lr'] = cos_lr + my_seq_s = (chunk_seqs * rank) // world_size + my_seq_e = (chunk_seqs * (rank + 1)) // world_size + my_chunk_seqs = my_seq_e - my_seq_s + for _ep in range(args.legal_ttt_epochs): + for bs in range(0, my_chunk_seqs, batch_seqs): + be = min(bs + batch_seqs, my_chunk_seqs) + actual_bs = my_seq_s + bs + start_tok = chunk_start + actual_bs * seq_len + end_tok = chunk_start + (my_seq_s + be) * seq_len + 1 + if end_tok > val_tokens.numel(): + continue + local = val_tokens[start_tok:end_tok].to(device=device, dtype=torch.int64) + x = local[:-1].reshape(-1, seq_len) + y = local[1:].reshape(-1, seq_len) + optimizer.zero_grad(set_to_none=True) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + loss = base_model(x, y) + loss.backward() + if world_size > 1: + for p in ttt_params: + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + torch.nn.utils.clip_grad_norm_(ttt_params, args.legal_ttt_grad_clip) + optimizer.step() + + if rank == 0 and (ci % 10 == 0 or ci == num_chunks - 1): + elapsed = time.perf_counter() - t0 + rl = 
loss_sum.item() / max(token_count.item(), 1) + rbpb = rl / math.log(2.0) * (token_count.item() / max(byte_count.item(), 1)) if token_count.item() > 0 else 0.0 + log0(f" legal_ttt_chunk [{ci+1}/{num_chunks}] bpb={rbpb:.6f} time={elapsed:.1f}s") + + if dist.is_available() and dist.is_initialized(): + dist.all_reduce(loss_sum, op=dist.ReduceOp.SUM) + dist.all_reduce(token_count, op=dist.ReduceOp.SUM) + dist.all_reduce(byte_count, op=dist.ReduceOp.SUM) + + val_loss = (loss_sum / token_count).item() + val_bpb = val_loss / math.log(2.0) * (token_count.item() / byte_count.item()) + + for p in base_model.parameters(): + p.requires_grad_(True) + base_model.eval() + + log0(f"legal_ttt:done val_loss={val_loss:.6f} val_bpb={val_bpb:.6f} " + f"elapsed={time.perf_counter() - t0:.1f}s") + return val_loss, val_bpb + +def prune_to_fit(result, meta, code_bytes, target_bytes=16_000_000, compress="lzma"): + """Selectively zero ±1 quantized values to fit artifact in budget.""" + buf = io.BytesIO() + torch.save({"w": result, "m": meta}, buf) + raw = buf.getvalue() + if compress == "lzma": + blob = lzma.compress(raw, preset=6) + else: + blob = zlib.compress(raw, level=9) + if len(blob) + code_bytes <= target_bytes: + return result, len(blob) + + candidates = [] + for name, info in meta.items(): + if isinstance(info, dict) and info.get("type") in ("int6", "int5"): + q = result[name + ".q"] + s = result[name + ".scale"] + for row in range(q.shape[0]): + mask = (q[row].abs() == 1) + if mask.any(): + scale_sq = float(s[row].float() ** 2) if s.ndim > 0 else float(s.float() ** 2) + count = int(mask.sum().item()) + candidates.append((scale_sq, name, row, count)) + + candidates.sort(key=lambda x: x[0]) + + batch_size = max(1, len(candidates) // 20) + for i in range(0, len(candidates), batch_size): + batch = candidates[i:i + batch_size] + for _, name, row, _ in batch: + q = result[name + ".q"] + mask = (q[row].abs() == 1) + q[row][mask] = 0 + + buf = io.BytesIO() + torch.save({"w": result, "m": meta}, buf) + raw = buf.getvalue() + if compress == "lzma": + blob = lzma.compress(raw, preset=6) + else: + blob = zlib.compress(raw, level=9) + if len(blob) + code_bytes <= target_bytes: + return result, len(blob) + + return result, len(blob) + + +def main() -> None: + + try: + code = Path(__file__).read_text(encoding="utf-8") + args = Hyperparameters() + args.validate() + + # ── Distributed + CUDA ── + distributed = "RANK" in os.environ and "WORLD_SIZE" in os.environ + rank = int(os.environ.get("RANK", "0")) + world_size = int(os.environ.get("WORLD_SIZE", "1")) + local_rank = int(os.environ.get("LOCAL_RANK", "0")) + if 8 % world_size != 0: + raise ValueError(f"WORLD_SIZE={world_size} must divide 8") + grad_accum_steps = 8 // world_size + grad_scale = 1.0 / grad_accum_steps + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required") + device = torch.device("cuda", local_rank) + torch.cuda.set_device(device) + if distributed: + dist.init_process_group(backend="nccl", device_id=device) + dist.barrier() + master_process = rank == 0 + + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + from torch.backends.cuda import enable_cudnn_sdp, enable_flash_sdp, enable_math_sdp, enable_mem_efficient_sdp + enable_cudnn_sdp(False) + enable_flash_sdp(True) + enable_mem_efficient_sdp(False) + enable_math_sdp(False) + + logfile = None + if master_process: + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{args.run_id}.txt" + logger.info(f"Log file: {logfile}") + + def log0(msg: str, 
console: bool = True) -> None: + if not master_process: + return + if console: + logger.info(msg) + if logfile is not None: + with open(logfile, "a", encoding="utf-8") as f: + print(msg, file=f) + + log0(code, console=False) + log0("=" * 100, console=False) + + # ── Tokenizer + Validation ── + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + + sp = spm.SentencePieceProcessor(model_file=args.tokenizer_path) + if int(sp.vocab_size()) != args.vocab_size: + raise ValueError(f"VOCAB_SIZE={args.vocab_size} != tokenizer vocab_size={int(sp.vocab_size())}") + val_tokens = load_validation_tokens(args.val_files, args.train_seq_len) + base_bytes_lut, has_leading_space_lut, is_boundary_token_lut = build_sentencepiece_luts(sp, args.vocab_size, device) + + # ── Model ── + base_model = YoctoGPT( + vocab_size=args.vocab_size, + model_dim=args.model_dim, + num_heads=args.num_heads, + num_unique_layers=args.num_unique_layers, + num_recurrences=args.num_recurrences, + mlp_mult=args.mlp_mult, + tie_embeddings=args.tie_embeddings, + tied_embed_init_std=args.tied_embed_init_std, + logit_softcap=args.logit_softcap, + rope_base=args.rope_base, + seeking_gain_init=args.seeking_gain_init, + rope_fraction=args.rope_fraction, + ln_scale=args.ln_scale, + ve_enabled=args.ve_enabled, + ve_dim=args.ve_dim, + ve_layers=args.ve_layers, + int5_layers=args.int5_layers, + ).to(device).bfloat16() + + base_model.unified_bank.data = base_model.unified_bank.data.float() + base_model.output_bank.data = base_model.output_bank.data.float() + base_model.fc_bank.data = base_model.fc_bank.data.float() + base_model.proj_bank.data = base_model.proj_bank.data.float() + + for module in base_model.modules(): + if isinstance(module, CastedLinear): + module.float() + if isinstance(module, Rotary): + module.inv_freq.data = module.inv_freq.data.float() + restore_low_dim_params_to_fp32(base_model) + + if master_process: + log_architecture(base_model, args) + + try: + _test_mod = torch.compile(lambda q, k, v: _flash_attn_func(q, k, v, causal=True), dynamic=False) + _tq = torch.randn(1, 8, 1, 48, dtype=torch.bfloat16, device=device) + with torch.amp.autocast('cuda', dtype=torch.bfloat16): + _test_mod(_tq, _tq, _tq) + log0("torch.compile + FA3: COMPATIBLE") + compiled_model = torch.compile(base_model, dynamic=False) + model = compiled_model + except Exception as e: + log0(f"torch.compile + FA3: INCOMPATIBLE ({type(e).__name__}), running uncompiled") + model = base_model + + log0("attention_backend:fa3") + + # ── Optimizer: banks → Muon, rest → Adam/AdamW ── + matrix_params = [ + base_model.unified_bank, base_model.output_bank, + base_model.fc_bank, base_model.proj_bank, + ] + + block_named_params = list(base_model.blocks.named_parameters()) + scalar_params = [p for name, p in block_named_params + if p.ndim < 2 or any(pat in name for pat in CONTROL_TENSOR_NAME_PATTERNS)] + if base_model.skip_weights.numel() > 0: + scalar_params.append(base_model.skip_weights) + scalar_params.append(base_model.smear.gate) + if base_model.ve_shared is not None: + scalar_params.append(base_model.ve_shared.scale) + for s in base_model.ve_layer_scales: + scalar_params.append(s) + if base_model.ve_shared.proj is not None: + scalar_params.append(base_model.ve_shared.proj.weight) + + token_lr = args.tied_embed_lr if args.tie_embeddings else args.embed_lr + tok_param_groups = [{"params": [base_model.tok_emb.weight], "lr": token_lr, "base_lr": token_lr}] + if base_model.ve_shared is not None: + 
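            # The shared VE table is a vocab-indexed embedding like tok_emb, so it
+            # joins the AdamW embedding group at token_lr (= tied_embed_lr when
+            # TIE_EMBEDDINGS=1, as in this run) instead of the Muon matrix group,
+            # which is reserved for the dense 2-D weight banks.
+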
tok_param_groups.append({"params": [base_model.ve_shared.embed.weight], "lr": token_lr, "base_lr": token_lr}) + optimizer_tok = torch.optim.AdamW(tok_param_groups, + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizer_muon = Muon(matrix_params, lr=args.matrix_lr, momentum=args.muon_momentum, + backend_steps=args.muon_backend_steps, weight_decay=args.muon_weight_decay) + for group in optimizer_muon.param_groups: + group["base_lr"] = args.matrix_lr + optimizer_scalar = torch.optim.AdamW([{"params": scalar_params, "lr": args.scalar_lr, "base_lr": args.scalar_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizers = [optimizer_tok, optimizer_muon, optimizer_scalar] + + replicated_params = [base_model.tok_emb.weight] + scalar_params + if base_model.ve_shared is not None: + replicated_params.append(base_model.ve_shared.embed.weight) + + if base_model.lm_head is not None: + optimizer_head = torch.optim.Adam([{"params": [base_model.lm_head.weight], "lr": args.head_lr, "base_lr": args.head_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, fused=True) + optimizers.insert(1, optimizer_head) + replicated_params.append(base_model.lm_head.weight) + if base_model.bigram is not None: + bigram_params = list(base_model.bigram.parameters()) + optimizer_bigram = torch.optim.AdamW([{"params": bigram_params, "lr": token_lr, "base_lr": token_lr}], + betas=(args.beta1, args.beta2), eps=args.adam_eps, + weight_decay=args.muon_weight_decay, fused=True) + optimizers.append(optimizer_bigram) + replicated_params.extend(bigram_params) + + n_params = sum(p.numel() for p in base_model.parameters()) + log0(f"model_params:{n_params} effective_depth:{args.num_effective_layers}") + if base_model.int5_layer_set: + log0(f"mixed_precision: int5_layers={sorted(base_model.int5_layer_set)} int6_layers={sorted(set(range(args.num_unique_layers)) - base_model.int5_layer_set)}") + log0(f"world_size:{world_size} grad_accum_steps:{grad_accum_steps}") + + # ── Data loader + warmup ── + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + + def zero_grad_all(): + for opt in optimizers: + opt.zero_grad(set_to_none=True) + + max_wallclock_ms = 1000.0 * args.max_wallclock_seconds if args.max_wallclock_seconds > 0 else None + + # ── EMA + SWA shadow weights ── + ema_state = None + swa_params = None + swa_count = 0 + if args.ema_decay > 0: + ema_state = {name: t.detach().float().clone() for name, t in base_model.state_dict().items()} + log0(f"EMA enabled: decay={args.ema_decay}") + if args.swa_every > 0: + swa_params = {name: t.detach().cpu().clone() for name, t in base_model.state_dict().items()} + log0(f"SWA enabled: every {args.swa_every} steps when lr_scale < {args.swa_threshold}") + + def update_ema_swa(step, lr_scale): + nonlocal swa_count + with torch.no_grad(): + if ema_state is not None: + for name, t in base_model.state_dict().items(): + ema_state[name].mul_(args.ema_decay).add_(t.detach().float(), alpha=1.0 - args.ema_decay) + + if swa_params is not None and step > 0 and step % args.swa_every == 0: + if lr_scale < args.swa_threshold: + if swa_count == 0: + for name, t in base_model.state_dict().items(): + swa_params[name].copy_(t.detach().cpu()) + swa_count = 1 + log0(f"SWA started at step {step} (lr_scale={lr_scale:.4f})") + else: + for name, t in base_model.state_dict().items(): + swa_params[name] += t.detach().cpu() + swa_count += 1 + + def get_best_weights(): + 
"""Return best averaged weights. EMA preferred (per PR#401).""" + if ema_state is not None: + log0(f"Using EMA weights (decay={args.ema_decay})") + current_state = base_model.state_dict() + return {name: t.to(dtype=current_state[name].dtype) for name, t in ema_state.items()} + if swa_params is not None and swa_count >= 2: + log0(f"Using SWA weights ({swa_count} checkpoints)") + current_state = base_model.state_dict() + return {name: (t / swa_count).to(dtype=current_state[name].dtype) + for name, t in swa_params.items()} + return None + + def lr_mul(step, elapsed_ms): + if args.lr_warmup_steps > 0 and step < args.lr_warmup_steps: + return (step + 1) / args.lr_warmup_steps + + if args.warmdown_iters <= 0: + return 1.0 + if max_wallclock_ms is None: + warmdown_start = max(args.iterations - args.warmdown_iters, 0) + return max((args.iterations - step) / max(args.warmdown_iters, 1), 0.0) if warmdown_start <= step < args.iterations else 1.0 + step_ms = elapsed_ms / max(step, 1) + warmdown_ms = args.warmdown_iters * step_ms + remaining_ms = max(max_wallclock_ms - elapsed_ms, 0.0) + return remaining_ms / max(warmdown_ms, 1e-9) if remaining_ms <= warmdown_ms else 1.0 + + if args.warmup_steps > 0: + initial_model_state = {n: t.detach().cpu().clone() for n, t in base_model.state_dict().items()} + initial_optimizer_states = [copy.deepcopy(opt.state_dict()) for opt in optimizers] + model.train() + for ws in range(args.warmup_steps): + zero_grad_all() + for ms in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + wl = model(x, y) + (wl * grad_scale).backward() + if distributed: + for p in base_model.parameters(): + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + zero_grad_all() + if ws + 1 == args.warmup_steps or (ws + 1) % 10 == 0: + log0(f"warmup_step:{ws+1}/{args.warmup_steps}") + base_model.load_state_dict(initial_model_state, strict=True) + for opt, state in zip(optimizers, initial_optimizer_states, strict=True): + opt.load_state_dict(state) + zero_grad_all() + train_loader = DistributedTokenLoader(args.train_files, rank, world_size, device) + + # ── Main training loop ── + training_time_ms = 0.0 + stop_after_step = None + torch.cuda.synchronize() + t0 = time.perf_counter() + step = 0 + + while True: + last_step = step == args.iterations or (stop_after_step is not None and step >= stop_after_step) + + should_validate = last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0) + if should_validate: + torch.cuda.synchronize() + training_time_ms += 1000.0 * (time.perf_counter() - t0) + val_loss, val_bpb = eval_val(args, model, rank, world_size, device, grad_accum_steps, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut) + log0(f"step:{step}/{args.iterations} val_loss:{val_loss:.4f} val_bpb:{val_bpb:.4f} " + f"train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms / max(step, 1):.2f}ms") + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if stop_after_step is not None and step < args.iterations: + log0(f"stopping_early: wallclock_cap train_time:{training_time_ms:.0f}ms step:{step}/{args.iterations}") + break + + elapsed_ms = training_time_ms + 1000.0 * (time.perf_counter() - t0) + scale = lr_mul(step, elapsed_ms) + + # ── QAT activation check ── + global _qat_active, _qat_bits + if args.qat_bits > 0 and not _qat_active: + if 
max_wallclock_ms is not None and max_wallclock_ms > 0: + frac = elapsed_ms / max_wallclock_ms + else: + frac = step / max(args.iterations, 1) + if frac >= args.qat_start_fraction: + _qat_active = True + _qat_bits = args.qat_bits + log0(f"QAT enabled: int{args.qat_bits} at step {step} (fraction={frac:.2f})") + + zero_grad_all() + train_loss = torch.zeros((), device=device) + + for micro_step in range(grad_accum_steps): + x, y = train_loader.next_batch(args.train_batch_tokens, args.train_seq_len, grad_accum_steps) + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + loss = model(x, y) + train_loss += loss.detach() + (loss * grad_scale).backward() + train_loss /= grad_accum_steps + + frac = min(step / args.muon_momentum_warmup_steps, 1.0) if args.muon_momentum_warmup_steps > 0 else 1.0 + muon_momentum = (1 - frac) * args.muon_momentum_warmup_start + frac * args.muon_momentum + for group in optimizer_muon.param_groups: + group["momentum"] = muon_momentum + + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["base_lr"] * scale + + if args.grad_clip_norm > 0: + torch.nn.utils.clip_grad_norm_(base_model.parameters(), args.grad_clip_norm) + + optimizer_muon.launch_reduce_scatters() + if distributed: + for p in replicated_params: + if p.grad is not None: + dist.all_reduce(p.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + if opt is not optimizer_muon: + opt.step() + optimizer_muon.step() + + update_ema_swa(step, scale) + zero_grad_all() + + step += 1 + approx_time = training_time_ms + 1000.0 * (time.perf_counter() - t0) + if args.train_log_every > 0 and (step <= 10 or step % args.train_log_every == 0): + log0(f"step:{step}/{args.iterations} train_loss:{train_loss.item():.4f} " + f"train_time:{approx_time:.0f}ms step_avg:{approx_time / step:.2f}ms") + + reached_cap = max_wallclock_ms is not None and approx_time >= max_wallclock_ms + if distributed and max_wallclock_ms is not None: + rc = torch.tensor(int(reached_cap), device=device) + dist.all_reduce(rc, op=dist.ReduceOp.MAX) + reached_cap = bool(rc.item()) + if stop_after_step is None and reached_cap: + stop_after_step = step + + log0(f"peak memory: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB") + + # ── Load best averaged weights (EMA > SWA > raw) ── + best_weights = get_best_weights() + if best_weights is not None: + base_model.load_state_dict(best_weights, strict=True) + + # ── Serialization ── + if master_process: + torch.save(base_model.state_dict(), "final_model.pt") + model_bytes = os.path.getsize("final_model.pt") + code_bytes = len(code.encode("utf-8")) + log0(f"Raw model: {model_bytes} bytes, code: {code_bytes} bytes") + + # ── Mixed int6/int8 quantization + roundtrip (if QAT was used) ── + if args.qat_bits == 6: + if master_process: + base_model.load_state_dict(torch.load("final_model.pt", map_location="cpu"), strict=True) + sd_cpu = {k: v.detach().cpu() for k, v in base_model.state_dict().items()} + unbanked_sd = _unbank_state_dict(sd_cpu, args.num_unique_layers) + int5_set = set(int(x) for x in args.int5_layers.split(",") if x.strip()) + mixed_result, mixed_meta = quantize_state_dict_mixed(unbanked_sd, int5_layers=int5_set) + code_bytes = len(code.encode("utf-8")) + mixed_result, _ = prune_to_fit(mixed_result, mixed_meta, code_bytes, + target_bytes=16_000_000, compress=args.compression) + mixed_buf = io.BytesIO() + torch.save({"w": mixed_result, "m": mixed_meta}, mixed_buf) + mixed_raw = mixed_buf.getvalue() + if args.compression == "lzma": + mixed_blob = 
lzma.compress(mixed_raw, preset=6) + mixed_label = "lzma-6" + elif args.compression == "zstd": + try: + import zstandard as zstd_mod + mixed_blob = zstd_mod.ZstdCompressor(level=22).compress(mixed_raw) + mixed_label = "zstd-22" + except ImportError: + mixed_blob = zlib.compress(mixed_raw, level=9) + mixed_label = "zlib-9" + else: + mixed_blob = zlib.compress(mixed_raw, level=9) + mixed_label = "zlib-9" + if master_process: + with open("final_model.mixed.ptz", "wb") as f: + f.write(mixed_blob) + mixed_bytes = os.path.getsize("final_model.mixed.ptz") + code_bytes = len(code.encode("utf-8")) + log0(f"mixed_int6_int8+{mixed_label}: {mixed_bytes} bytes, total: {mixed_bytes + code_bytes} bytes") + if mixed_bytes + code_bytes > 16_000_000: + logger.warning(f"OVER BUDGET: {mixed_bytes + code_bytes} > 16,000,000") + else: + log0(f"FITS: {mixed_bytes + code_bytes} <= 16,000,000") + if distributed: + dist.barrier() + with open("final_model.mixed.ptz", "rb") as f: + mixed_qblob = f.read() + if args.compression == "lzma": + mixed_decompressed = lzma.decompress(mixed_qblob) + elif args.compression == "zstd": + try: + import zstandard as zstd_mod + mixed_decompressed = zstd_mod.ZstdDecompressor().decompress(mixed_qblob) + except ImportError: + mixed_decompressed = zlib.decompress(mixed_qblob) + else: + mixed_decompressed = zlib.decompress(mixed_qblob) + quant_state = torch.load(io.BytesIO(mixed_decompressed), map_location="cpu") + deq_unbanked = dequantize_state_dict_mixed(quant_state["w"], quant_state["m"], unbanked_sd) + deq_sd = _rebank_state_dict(deq_unbanked, args.num_unique_layers, sd_cpu) + base_model.load_state_dict(deq_sd, strict=True) + torch.cuda.synchronize() + qm_val_loss, qm_val_bpb = eval_val(args, model, rank, world_size, device, grad_accum_steps, + val_tokens, base_bytes_lut, has_leading_space_lut, is_boundary_token_lut) + log0(f"final_mixed_{mixed_label}_roundtrip val_loss:{qm_val_loss:.4f} val_bpb:{qm_val_bpb:.4f}") + log0(f"final_mixed_{mixed_label}_roundtrip_exact val_loss:{qm_val_loss:.8f} val_bpb:{qm_val_bpb:.8f}") + + # ── Legal Score-First TTT eval ── + if args.legal_ttt_enabled: + best_weights = get_best_weights() + if best_weights is not None: + base_model.load_state_dict(best_weights, strict=True) + torch.cuda.synchronize() + t_legal = time.perf_counter() + legal_loss, legal_bpb = eval_val_legal_ttt( + args, base_model, rank, world_size, device, + val_tokens, base_bytes_lut, has_leading_space_lut, + is_boundary_token_lut, log0=log0) + log0(f"final_legal_ttt val_loss:{legal_loss:.4f} val_bpb:{legal_bpb:.4f} " + f"eval_time:{1000.0 * (time.perf_counter() - t_legal):.0f}ms") + + if distributed: + dist.destroy_process_group() + + except Exception: + logger.error(f"FATAL ERROR:\n{traceback.format_exc()}") + raise + +if __name__ == "__main__": + main() \ No newline at end of file