{
"problem": "04_kahan_softmax",
"harness": "opencode",
"model": "openrouter-pinned/qwen/qwen3.6-27b",
"reasoning_effort": "",
"has_solution": false,
"correct": false,
"peak_fraction": null,
"elapsed_seconds": 698,
"harness_exit_code": 0,
"session_complete": true,
"usage": {"input_tokens": 67664, "output_tokens": 666, "cache_read_tokens": 0, "cache_creation_tokens": 0, "reasoning_tokens": 267, "total_cost_usd": null}
}
{
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax</path>
<type>directory</type>
<entries>
benchmark.py
check.py
problem.yaml
PROMPT.txt
reference.py
shapes.py
sota.py
(7 entries)
</entries>
{
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/reference.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/reference.py</path>
<type>file</type>
<content>
1: """Naive softmax over the last dim, computed in fp64 for ground-truth.
2:
3: The reference deliberately runs in float64 so that fp16 / fp32 accumulation
4: drift in agent solutions is exposed by the tight tolerance in problem.yaml.
5: The agent's job is to produce an fp32 softmax whose values match this
6: double-precision reference within atol=rtol=1e-5 — this requires either
7: fp32 accumulation or compensated (Kahan) summation when vocab is large.
8: """
9: import torch
10: import torch.nn as nn
11:
12: OP_TYPE = "softmax"
13: SUPPORTED_PRECISIONS = ["fp32"]
14: HARDWARE_REQUIRED = ["RTX_PRO_6000", "H100", "B200"]
15:
16:
17: class Model(nn.Module):
18: """y = softmax(x, dim=-1) computed in fp64 then returned as fp32.
19:
20: No learned parameters — softmax is parameter-free. We still expose an
21: empty state_dict so the harness's strict load_state_dict matches.
22: """
23:
24: def __init__(self, batch: int, vocab: int):
25: super().__init__()
26: self.batch = batch
27: self.vocab = vocab
28:
29: def forward(self, x: torch.Tensor) -> torch.Tensor:
30: # Promote to fp64 for the ground-truth pathway. Even with double
31: # precision we still subtract the row-max for stability.
32: x64 = x.to(torch.float64)
33: m = x64.amax(dim=-1, keepdim=True)
34: e = torch.exp
... (592 more chars)
</content>
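The docstring above names compensated (Kahan) summation as the standard fix. A minimal scalar sketch of the recurrence (illustrative only, not repo code):

def kahan_sum(values):
    # Compensated summation: carry each add's rounding error forward.
    total = 0.0
    c = 0.0                    # compensation: low-order bits lost so far
    for v in values:
        y = v - c              # fold the previously lost error back in
        t = total + y          # low-order bits of y may be rounded away here
        c = (t - total) - y    # algebraically zero; in floats, the lost part
        total = t
    return total

A passing kernel would apply the same per-row recurrence, in fp32, inside the exp-sum reduction.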
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/check.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/check.py</path>
<type>file</type>
<content>
1: """Correctness runner for Kahan-corrected softmax.
2:
3: Runs solution.Model vs reference.Model across all shapes in shapes.py, 3
4: seeds each, with the tight (1e-5) fp32 tolerance from problem.yaml. Also
5: rejects forbidden ops via grep.
6: """
7: import re
8: import sys
9: from pathlib import Path
10:
11: import torch
12: import yaml
13:
14: # Make the repo's src/ importable
15: REPO_ROOT = Path(__file__).resolve().parents[2]
16: sys.path.insert(0, str(REPO_ROOT))
17:
18: from src.eval.correctness import check_correctness # noqa: E402
19:
20:
21: def _make_inputs(batch: int, vocab: int, extreme: bool, seed: int) -> torch.Tensor:
22: g = torch.Generator().manual_seed(seed)
23: if extreme:
24: # Adversarial: most logits are mild but a handful per row are huge.
25:         # If the kernel skips the row-max subtraction before exp, any fp16
26:         # pathway overflows (exp(30) ~ 1.1e13, far beyond fp16's 65504 max)
27:         # and produces NaN/Inf. If it accumulates in fp16, the long tail of
28:         # small exp() values is lost beneath the tolerance threshold.
29: x = torch.randn(batch, vocab, generator=g) * 2.0
30: # Spike: 4 very large positive logits per row.
31: idx = torch.randint(0, vocab, (batch, 4), generator=g)
32: x.scatter_(1, idx, 30.0)
33: else:
34: x = torch.randn(batch, vocab, generator=g) * 4.0
35: return x.to(torc
... (3464 more chars)
</content>
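For intuition on the extreme rows: the 30.0 spikes stay within fp32 range, but the same pattern with larger logits overflows fp32 exp past ln(FLT_MAX) ~ 88.7 unless the row max is subtracted first. A tiny illustration (not repo code):

import torch

x = torch.tensor([88.0, 89.0, 90.0])   # fp32 exp overflows past ~88.72
print(torch.exp(x))                     # ~[1.65e+38, inf, inf]
stable = torch.exp(x - x.amax())        # subtract the row max first
print(stable / stable.sum())            # finite probabilities summing to 1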
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/benchmark.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/benchmark.py</path>
<type>file</type>
<content>
1: """Roofline benchmark for Kahan-corrected softmax.
2:
3: For each shape: times eager reference, compiled reference, SOTA (if
4: available), and the agent's solution. Reports achieved TFLOPS, GB/s, and
5: peak_fraction. Softmax is memory-bound, so the score is GB/s / peak_dram.
6:
7: Output lines the harness picks up:
8: shape=<idx> variant=<name> tflops=<N> gbps=<N> ms=<N>
9: peak_fraction: <N> (geomean over shapes of solution's peak_fraction)
10: """
11: import sys
12: from math import exp, log
13: from pathlib import Path
14:
15: import torch
16: import yaml
17:
18: REPO_ROOT = Path(__file__).resolve().parents[2]
19: sys.path.insert(0, str(REPO_ROOT))
20:
21: from src.eval.roofline import compute_gbps, compute_tflops, peak_fraction # noqa: E402
22: from src.eval.timing import time_fn # noqa: E402
23: from src.hardware import get as get_hw # noqa: E402
24:
25:
26: def _eval_formula(expr: str, vars: dict) -> float:
27: return float(eval(expr, {"__builtins__": {}}, vars))
28:
29:
30: def _make_inputs(batch: int, vocab: int, extreme: bool) -> torch.Tensor:
31: if extreme:
32: x = torch.randn(batch, vocab) * 2.0
33: idx = torch.randint(0, vocab, (batch, 4))
34: x.scatter_(1, idx, 30.0)
35: else:
36: x = torch.randn(batch, vocab) * 4.0
37: return x.to(torch.float32)
38:
39:
40: def main():
41:
... (3706 more chars)
</content>
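The scoring arithmetic the docstring describes reduces to a few lines. A hedged sketch (the repo's compute_gbps / peak_fraction signatures are not shown above, so these stand-ins are hypothetical):

def achieved_gbps(batch: int, vocab: int, ms: float) -> float:
    # bytes_formula from problem.yaml: read x once + write y once, both fp32
    nbytes = batch * vocab * 4 + batch * vocab * 4
    return nbytes / (ms * 1e-3) / 1e9

# Example: batch=4, vocab=262144 moves ~8.39 MB; at 0.05 ms that is
# ~168 GB/s, and the score is 168 / peak_dram_gbps for the GPU.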
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/problem.yaml"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/problem.yaml</path>
<type>file</type>
<content>
1: name: 04_kahan_softmax
2: display_name: "Kahan-corrected Softmax"
3: precision: fp32
4: regime: memory # softmax is bandwidth-bound: 2 passes over the input tensor
5:
6: # Softmax FLOPs: per-element exp + 2 reductions + divide. Roughly 5 flops/elt.
7: flops_formula: "5 * batch * vocab"
8:
9: # Bytes moved: read x once, write y once. Both fp32.
10: bytes_formula: "batch * vocab * 4 + batch * vocab * 4"
11:
12: hardware: [RTX_PRO_6000]
13: peak_tflops_key: fp32
14: peak_bandwidth_key: dram
15:
16: # TIGHTER than default (fp32 default is 1e-4). This problem exists
17: # specifically to test whether the agent uses compensated summation, so
18: # we squeeze the tolerance to 1e-5 — naive fp16 sum across 256K elements
19: # drifts past this; fp32 accumulation passes; Kahan/fp32 always passes.
20: tolerance:
21: "torch.float32": {"atol": 1.0e-5, "rtol": 1.0e-5}
22:
23: # Forbidden ops — block the obvious "just call the library" cheats. The
24: # agent must implement softmax itself with explicit (compensated)
25: # summation logic.
26: forbidden:
27: - "torch.nn.functional.softmax"
28: - "torch.softmax"
29: - "F.softmax"
30: - "liger_kernel.softmax"
31: - "liger_kernel.transformers.softmax"
32: - ".softmax("
33:
34: sota:
35: name: "Liger-Kernel Softmax (Triton)"
36: url: "https://github.com/linkedin/Liger-Kernel"
37: function: "liger_
... (225 more chars)
</content>
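The tolerance note is easy to verify in miniature (illustrative, not repo code): a run of sub-ulp terms that naive fp32 addition drops entirely already costs the whole 1e-5 budget.

import torch

tiny = torch.tensor(1e-8, dtype=torch.float32)
naive = torch.tensor(1.0, dtype=torch.float32)
for _ in range(1000):
    naive = naive + tiny   # 1e-8 < ulp(1.0)/2 ~ 6e-8: every add rounds away
print(naive.item())        # 1.0 exactly; the true sum is 1.00001

The relative error is 1e-5, the full atol/rtol budget, from only 1000 lost terms; the Kahan recurrence sketched after reference.py recovers it.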
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/shapes.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/shapes.py</path>
<type>file</type>
<content>
1: """Shape sweep for Kahan-corrected softmax.
2:
3: The point of this problem is numerical accuracy on long reductions. Shapes
4: mix typical LLM vocab sizes with deliberately adversarial regimes:
5:
6: - small vocab (sanity check; naive fp32 should pass)
7: - Llama3 vocab 128K (real-world, where fp16 accumulation starts to drift)
8: - 256K (DeepSeek-V3 / Gemma-3 class vocab; naive fp16 sum DOES drift past
9: the 1e-5 tolerance — this row is what proves Kahan was needed)
10: - extreme-logit edge case (large positive logits stress max-subtract +
11: summation; if the implementation accidentally exps before subtracting
12: max, this row overflows)
13:
14: The 'extreme' flag is read by check.py to switch input generation to a
15: distribution that produces a few very large logits per row.
16: """
17:
18: SHAPES = [
19: {"batch": 32, "vocab": 4096, "extreme": False}, # sanity
20: {"batch": 16, "vocab": 32768, "extreme": False}, # GPT-2 class
21: {"batch": 8, "vocab": 131072, "extreme": False}, # Llama3 vocab
22: {"batch": 4, "vocab": 262144, "extreme": False}, # 256K — Kahan needed
23: {"batch": 8, "vocab": 131072, "extreme": True}, # extreme logits edge
24: ]
(End of file - total 24 lines)
</content>
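A plain-PyTorch sketch of a compliant solution's structure, assuming chunked Kahan accumulation over the vocab dim (illustrative only; a real submission would fuse this into a CUDA/Triton kernel):

import torch

def kahan_softmax(x: torch.Tensor, chunk: int = 8192) -> torch.Tensor:
    # Pass 1: row max for numerical stability.
    m = x.amax(dim=-1, keepdim=True)
    e = torch.exp(x - m)
    # Pass 2: Kahan-compensated denominator, one chunk of columns at a time.
    s = torch.zeros_like(m)    # running sum per row
    c = torch.zeros_like(m)    # compensation per row
    for start in range(0, x.shape[-1], chunk):
        part = e[:, start:start + chunk].sum(dim=-1, keepdim=True)
        y = part - c
        t = s + y
        c = (t - s) - y
        s = t
    return e / s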
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/sota.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/problems/04_kahan_softmax/sota.py</path>
<type>file</type>
<content>
1: """SOTA reference for last-dim softmax.
2:
3: Preference order:
4: 1. liger-kernel's Triton softmax (LigerSoftmaxFunction) — fused, fp32-acc
5: 2. torch.compile(torch.softmax) — Inductor generates a Triton kernel that
6: is competitive on bandwidth-bound reductions
7:
8: Agents are FORBIDDEN from importing either of these in solution.py (see
9: problem.yaml.forbidden). This file is only the benchmark's reference line.
10: """
11: from __future__ import annotations
12:
13: import torch
14:
15: _compiled_softmax = None
16:
17:
18: def _liger_softmax(x: torch.Tensor) -> torch.Tensor | None:
19: try:
20: from liger_kernel.ops.softmax import LigerSoftmaxFunction
21: return LigerSoftmaxFunction.apply(x)
22: except Exception:
23: return None
24:
25:
26: def _compiled(x: torch.Tensor) -> torch.Tensor:
27: global _compiled_softmax
28: if _compiled_softmax is None:
29: _compiled_softmax = torch.compile(
30: lambda t: torch.softmax(t, dim=-1),
31: mode="reduce-overhead",
32: )
33: return _compiled_softmax(x)
34:
35:
36: def sota_forward(x: torch.Tensor) -> torch.Tensor:
37: """Best-available softmax reference. x: (batch, vocab) fp32."""
38: out = _liger_softmax(x)
39: if out is not None:
40: return out
41: return _compiled(x)
42:
43:
44: def is_a
... (128 more chars)
</content>
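A hypothetical smoke test of the fallback path (assuming this file imports as sota and a CUDA device is present; the check only confirms rows normalize):

import torch
from sota import sota_forward   # hypothetical import path

x = torch.randn(8, 131072, device="cuda", dtype=torch.float32)
y = sota_forward(x)
row_sums = y.sum(dim=-1)
assert torch.allclose(row_sums, torch.ones_like(row_sums), atol=1e-4)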
"command": "nvidia-smi --query-gpu=name,memory.total,compute_cap --format=csv,noheader 2>/dev/null | head -1",
"description": "Check GPU info"
}
NVIDIA RTX PRO 6000 Blackwell Workstation Edition, 97887 MiB, 12.0
{
"command": "python3 -c \"import triton; print(triton.__version__)\" 2>&1",
"description": "Check triton version"
}
3.6.0
{
"command": "python3 -c \"import torch; print(torch.version.cuda, torch.cuda.get_device_properties(0).major, torch.cuda.get_device_properties(0).minor)\" 2>&1",
"description": "Check torch CUDA version"
}
13.0 12 0
{
"pattern": "problems/*/solution.py"
No files found
{
"filePath": "/home/infatoshi/cuda/KernelBench-Hard/src/eval/correctness.py"
}
<path>/home/infatoshi/cuda/KernelBench-Hard/src/eval/correctness.py</path>
<type>file</type>
<content>
1: """Per-dtype correctness tolerance.
2:
3: Stanford's KernelBench uses 1e-4 atol/rtol for fp32 and 1e-2 for fp16/bf16.
4: We extend to fp8 (0.1, permissive) and int (exact).
5: """
6: from __future__ import annotations
7:
8: import torch
9:
10: DEFAULT_TOLERANCE = {
11: torch.float32: {"atol": 1e-4, "rtol": 1e-4},
12: torch.float16: {"atol": 1e-2, "rtol": 1e-2},
13: torch.bfloat16: {"atol": 1e-2, "rtol": 1e-2},
14: torch.float8_e4m3fn: {"atol": 1e-1, "rtol": 1e-1},
15: torch.float8_e5m2: {"atol": 1e-1, "rtol": 1e-1},
16: torch.int8: {"atol": 0, "rtol": 0},
17: torch.int32: {"atol": 0, "rtol": 0},
18: torch.int64: {"atol": 0, "rtol": 0},
19: }
20:
21:
22: def _coerce_float(x):
23: """Accept int/float/str scientific-notation as float; pass through dicts."""
24: if isinstance(x, (int, float)):
25: return float(x)
26: if isinstance(x, str):
27: try:
28: return float(x)
29: except ValueError:
30: return x
31: return x
32:
33:
34: def tolerance_for_dtype(dtype: torch.dtype, override: dict | None = None) -> dict:
35: """Lookup atol/rtol for a given dtype, with optional per-problem override.
36:
37: The override dict is keyed by str(dtype). Values may be:
38: - a single number (or a string parseable as float, e.g. "5e-2") -> used
39: as both atol and rtol
4
... (2580 more chars)
</content>
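Going by the docstring above, a hypothetical call showing how problem.yaml's tighter tolerance would flow through the override path (return shape assumed from the DEFAULT_TOLERANCE entries):

import torch
from src.eval.correctness import tolerance_for_dtype

override = {"torch.float32": {"atol": 1e-5, "rtol": 1e-5}}  # as problem.yaml supplies it
tol = tolerance_for_dtype(torch.float32, override)
# expected: {"atol": 1e-5, "rtol": 1e-5}, replacing the 1e-4 fp32 default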