| id | instruction | input | output | text | module | python_ver | category | hypernix_version | source |
|---|---|---|---|---|---|---|---|---|---|
2a126d6d-0442-426b-86aa-4e9ad716778d | Download the hyper-nix.1 model snapshot using the short name. | from hypernix.download import download_model
# Uses KNOWN_MODELS short-name resolution; downloads to the default
# HuggingFace cache directory on first call, returns the local path.
local_dir: str = download_model("hyper-nix.1")
print(f"Snapshot cached at: {local_dir}")
| ### Instruction:
Download the hyper-nix.1 model snapshot using the short name.
### Response:
from hypernix.download import download_model
# Uses KNOWN_MODELS short-name resolution; downloads to the default
# HuggingFace cache directory on first call, returns the local path.
local_dir: str = download_model("hyper-nix.... | hypernix.download | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
07bd6ac0-c7bd-4913-b234-183141ab4f26 | Download a gated HuggingFace model snapshot with an access token. | import os
from hypernix.download import download_model
hf_token: str = os.environ["HF_TOKEN"]  # read from env; never hardcode
local_dir: str = download_model(
"meta-llama/Meta-Llama-3.1-8B-Instruct",
token=hf_token,
)
print(f"Downloaded to: {local_dir}")
| ### Instruction:
Download a gated HuggingFace model snapshot with an access token.
### Response:
import os
from hypernix.download import download_model
hf_token: str = os.environ["HF_TOKEN"]  # read from env; never hardcode
local_dir: str = download_model(
"meta-llama/Meta-Llama-3.1-8B-Instruct",
token=hf_to... | hypernix.download | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d7fef277-1a64-4a8b-815d-bf6b2fe37208 | Download a model using a full HuggingFace repo ID and a custom cache directory. | from pathlib import Path
from hypernix.download import download_model
cache_dir = Path("./model_cache")
cache_dir.mkdir(parents=True, exist_ok=True)
local_dir = download_model(
"ray0rf1re/hyper-nix.1",
cache_dir=str(cache_dir),
)
print(f"Model ready at {local_dir}")
| ### Instruction:
Download a model using a full HuggingFace repo ID and a custom cache directory.
### Response:
from pathlib import Path
from hypernix.download import download_model
cache_dir = Path("./model_cache")
cache_dir.mkdir(parents=True, exist_ok=True)
local_dir = download_model(
"ray0rf1re/hyper-nix.1",
... | hypernix.download | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
9df87132-a4c9-4f87-ba1a-e656b8a39c84 | Preheat a CodeOven from the 'nix2.5' short name and complete a Python function stub. | from hypernix.old_oven import preheat
# preheat() resolves short names via KNOWN_MODELS and downloads on demand.
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
prompt = "def fibonacci(n: int) -> int:\n "
completion = oven.complete(prompt, max_new_tokens=128, temperature=0.1)
print(completion)
| ### Instruction:
Preheat a CodeOven from the 'nix2.5' short name and complete a Python function stub.
### Response:
from hypernix.old_oven import preheat
# preheat() resolves short names via KNOWN_MODELS and downloads on demand.
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
prompt = "def fibonacci(... | hypernix.old_oven | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
96c3de92-7637-448b-96b1-b10cee30ddb6 | Use CodeOven.chat() for a single-turn conversation with a system prompt. | from hypernix.old_oven import preheat
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
messages = [
{"role": "system", "content": "You are a concise Python tutor."},
{"role": "user", "content": "Explain list comprehensions in one sentence."},
]
reply = oven.chat(messages, max_new_tokens=64)
p... | ### Instruction:
Use CodeOven.chat() for a single-turn conversation with a system prompt.
### Response:
from hypernix.old_oven import preheat
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
messages = [
{"role": "system", "content": "You are a concise Python tutor."},
{"role": "user", "cont... | hypernix.old_oven | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
27c43964-6ea4-412d-974e-8fe4b9f2d6ee | Use new_oven() to spin up a fresh parametric HyperNix model and save a PyTorch checkpoint. | from hypernix.old_oven import new_oven
# Spin up a fresh ~92M-parameter HyperNix 1.5 model (no pretrained weights).
oven = new_oven(
arch="hypernix",
device="cpu",
hidden_size=512,
num_layers=8,
num_heads=8,
)
print(f"Parameters: {sum(p.numel() for p in oven.model.parameters()):,}")
# Persist the mo... | ### Instruction:
Use new_oven() to spin up a fresh parametric HyperNix model and save a PyTorch checkpoint.
### Response:
from hypernix.old_oven import new_oven
# Spin up a fresh ~92M-parameter HyperNix 1.5 model (no pretrained weights).
oven = new_oven(
arch="hypernix",
device="cpu",
hidden_size=512,
n... | hypernix.old_oven | 3.11 | advanced | 0.61.2 | hypernix-dataset-generator | |
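The output cell above is cut off at the checkpoint-saving step. A minimal sketch of how it plausibly continues, assuming only that `oven.model` is a regular `torch.nn.Module` (consistent with the `parameters()` call above); the file name is illustrative:

```python
import torch

# Persist the freshly initialized weights as an ordinary PyTorch checkpoint.
torch.save(oven.model.state_dict(), "hypernix_fresh.pt")
print("Checkpoint written to hypernix_fresh.pt")
```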
d0001ce7-7e2c-4737-856d-692d88e9341a | Use CodeOven.fill() for fill-in-the-middle code completion. | from hypernix.old_oven import preheat
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
prefix = "def greet(name: str) -> str:\n return "
suffix = "\n\nresult = greet('world')\nprint(result)"
middle = oven.fill(prefix, suffix, max_new_tokens=32)
print(f"Filled: {middle!r}")
| ### Instruction:
Use CodeOven.fill() for fill-in-the-middle code completion.
### Response:
from hypernix.old_oven import preheat
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
prefix = "def greet(name: str) -> str:\n return "
suffix = "\n\nresult = greet('world')\nprint(result)"
middle = oven.fil... | hypernix.old_oven | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d150d248-7f88-448a-a56e-267fab31574c | Freeze the embedding layer of a model to prevent it from being updated during fine-tuning. | from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
# Freeze every parameter whose name matches the pattern.
old_fridge.freeze(oven.model, patterns=("embed_tokens",))
stats = old_fridge.parameter_stats(oven.model)
print(f"Trainable para... | ### Instruction:
Freeze the embedding layer of a model to prevent it from being updated during fine-tuning.
### Response:
from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
# Freeze every parameter whose name matches the pattern.
old_... | hypernix.old_fridge | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
7dc38710-49b4-480a-a0e9-ae7e58accf64 | Offload a model to CPU to free GPU VRAM, then move it back for inference. | from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cuda", dtype="float16")
# Free VRAM between inference calls.
old_fridge.offload_to_cpu(oven.model)
print("Model offloaded to CPU.")
# Bring it back when needed.
oven.model.to("cuda")
out = oven.complete("He... | ### Instruction:
Offload a model to CPU to free GPU VRAM, then move it back for inference.
### Response:
from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cuda", dtype="float16")
# Free VRAM between inference calls.
old_fridge.offload_to_cpu(oven.model)
pr... | hypernix.old_fridge | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
893451da-32bf-4628-86f1-e2e2d2c567f4 | Unfreeze previously frozen layers before the second stage of training. | from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
old_fridge.freeze(oven.model, patterns=("embed_tokens",))
print("Stage 1 β embeddings frozen.")
# ... first stage training loop ...
old_fridge.unfreeze(oven.model, patterns=("embed_t... | ### Instruction:
Unfreeze previously frozen layers before the second stage of training.
### Response:
from hypernix.old_oven import preheat
from hypernix import old_fridge
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
old_fridge.freeze(oven.model, patterns=("embed_tokens",))
print("Stage 1 β embedd... | hypernix.old_fridge | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d7861780-ead4-401e-9e60-7f5c5512148d | Clear the GPU CUDA cache to reclaim fragmented VRAM between batches. | from hypernix import old_fridge
# Safe to call even when no GPU is present; no-ops on CPU.
freed = old_fridge.chill_cache()
print(f"Cache cleared: {freed}")
| ### Instruction:
Clear the GPU CUDA cache to reclaim fragmented VRAM between batches.
### Response:
from hypernix import old_fridge
# Safe to call even when no GPU is present; no-ops on CPU.
freed = old_fridge.chill_cache()
print(f"Cache cleared: {freed}")
| hypernix.old_fridge | 3.13 | basic | 0.61.2 | hypernix-dataset-generator | |
b5bbc064-7f7a-426f-b665-f923e0b600c6 | Use auto_freezer() to pick the right VRAM strategy for the current GPU automatically. | from hypernix import freezer
# Detects compute capability and installed VRAM; returns OldFreezer or
# NewFreezer accordingly.
fz = freezer.auto_freezer()
print(type(fz).__name__) # e.g. "OldFreezer" on an 8-GB card
print(fz)
| ### Instruction:
Use auto_freezer() to pick the right VRAM strategy for the current GPU automatically.
### Response:
from hypernix import freezer
# Detects compute capability and installed VRAM; returns OldFreezer or
# NewFreezer accordingly.
fz = freezer.auto_freezer()
print(type(fz).__name__) # e.g. "OldFreezer" o... | hypernix.freezer | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
450014e2-2b71-4d3f-a6bf-f42ba2810986 | Wrap training in a FlashFreezer to recover from GPU OOM errors automatically. | from hypernix import freezer, old_oven
base = freezer.auto_freezer()
fz = freezer.flash_freezer(base=base, slow=True)
oven = old_oven.preheat(repo_id="nix2.5", device="cuda", dtype="float16")
def train_step() -> None:
# Your training loop here. If an OOM occurs, FlashFreezer
# halves current_batch_size an... | ### Instruction:
Wrap training in a FlashFreezer to recover from GPU OOM errors automatically.
### Response:
from hypernix import freezer, old_oven
base = freezer.auto_freezer()
fz = freezer.flash_freezer(base=base, slow=True)
oven = old_oven.preheat(repo_id="nix2.5", device="cuda", dtype="float16")
def train_ste... | hypernix.freezer | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
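The row breaks off inside the `train_step()` comment, so the part that actually drives the step through FlashFreezer is missing. A sketch under loud assumptions: the `fz.run(...)` entry point and the `current_batch_size` attribute are inferred from the truncated comment, not confirmed hypernix API:

```python
# Hypothetical driver: on CUDA OOM, FlashFreezer is described as halving
# its current batch size and retrying, so a guarded loop might look like:
for step in range(100):
    fz.run(train_step)  # assumed wrapper method; not a confirmed signature
```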
78eda2ca-3dde-498b-be9c-0cc9336b96e3 | Look up a GPU preset to see its VRAM, compute capability, and recommended batch size. | from hypernix.freezer import gpu_preset, GPU_PRESETS
spec = gpu_preset("rtx-3080-ti")
print(f"Name: {spec.name}")
print(f"VRAM: {spec.vram_gb} GB")
print(f"Compute cap: {spec.compute_capability}")
print(f"Recommended batch: {spec.recommended_batch_size}")
print(f"Total GPU presets: {len... | ### Instruction:
Look up a GPU preset to see its VRAM, compute capability, and recommended batch size.
### Response:
from hypernix.freezer import gpu_preset, GPU_PRESETS
spec = gpu_preset("rtx-3080-ti")
print(f"Name: {spec.name}")
print(f"VRAM: {spec.vram_gb} GB")
print(f"Compute cap: ... | hypernix.freezer | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
b27c6b8f-f795-4983-83df-bfc35832e02c | Check Pascal GPU constraints and retrieve safe dtype and mode hints. | from hypernix.freezer import is_pascal, pascal_safe_dtype, pascal_mode_hints
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if is_pascal(device):
dtype = pascal_safe_dtype(device) # always fp16 on Pascal
hints = pascal_mode_hints()
print(f"Pascal detected β using dtype... | ### Instruction:
Check Pascal GPU constraints and retrieve safe dtype and mode hints.
### Response:
from hypernix.freezer import is_pascal, pascal_safe_dtype, pascal_mode_hints
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if is_pascal(device):
dtype = pascal_safe_dtype(devic... | hypernix.freezer | 3.13 | advanced | 0.61.2 | hypernix-dataset-generator | |
c83d2685-2fab-4938-93e4-f191b9ad7111 | Create a GasAlarm from a GPU preset and estimate the number of training steps that fit in a two-hour budget. | from hypernix.smoke_alarm import GasAlarm
alarm = GasAlarm(gpu_preset="rtx-3080-ti", time_budget_seconds=7200)
steps = alarm.recommended_steps()
print(f"Recommended steps in 2 hours: {steps}")
print(f"ETA per step: {alarm.estimate_step_seconds():.3f} s")
| ### Instruction:
Create a GasAlarm from a GPU preset and estimate the number of training steps that fit in a two-hour budget.
### Response:
from hypernix.smoke_alarm import GasAlarm
alarm = GasAlarm(gpu_preset="rtx-3080-ti", time_budget_seconds=7200)
steps = alarm.recommended_steps()
print(f"Recommended steps in 2 ho... | hypernix.smoke_alarm | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
9e63bc8f-7d82-4d7f-b50a-a54928c45635 | Use AutoAlarm to automatically select the best alarm tier for the current hardware. | from hypernix.smoke_alarm import AutoAlarm
alarm = AutoAlarm() # inspects GPU/CPU and picks Rads, Gas, or Modern
print(f"Selected tier: {type(alarm).__name__}")
print(f"Recommended steps: {alarm.recommended_steps()}")
# Mid-run check: call inside your training loop.
for step in range(10):
alarm.check(step=step, ... | ### Instruction:
Use AutoAlarm to automatically select the best alarm tier for the current hardware.
### Response:
from hypernix.smoke_alarm import AutoAlarm
alarm = AutoAlarm() # inspects GPU/CPU and picks Rads, Gas, or Modern
print(f"Selected tier: {type(alarm).__name__}")
print(f"Recommended steps: {alarm.recomme... | hypernix.smoke_alarm | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
94c566e3-de6f-4020-9ceb-983a1693f45a | Build a GasAlarm from a CPU preset with custom cadence knobs and a step cap. | from hypernix.smoke_alarm import GasAlarm
alarm = GasAlarm(
cpu_preset="i7-12700h",
time_budget_seconds=3600,
max_steps=2000,
log_every=50,
save_every=200,
eval_every=500,
)
print(f"Steps (capped): {alarm.recommended_steps()}")
print(f"Phase: {alarm.phase if hasattr(alarm, 'phase') e... | ### Instruction:
Build a GasAlarm from a CPU preset with custom cadence knobs and a step cap.
### Response:
from hypernix.smoke_alarm import GasAlarm
alarm = GasAlarm(
cpu_preset="i7-12700h",
time_budget_seconds=3600,
max_steps=2000,
log_every=50,
save_every=200,
eval_every=500,
)
print(f"Step... | hypernix.smoke_alarm | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
558888c0-552a-4f69-9cb9-0a1253317eee | Use RadsAlarm as the lightest-weight option when hardware information is not available. | from hypernix.smoke_alarm import RadsAlarm
# RadsAlarm uses fixed constants; no hardware probing.
alarm = RadsAlarm(time_budget_seconds=1800)
print(f"Recommended steps: {alarm.recommended_steps()}")
print(f"Est. step time: {alarm.estimate_step_seconds():.2f} s")
| ### Instruction:
Use RadsAlarm as the lightest-weight option when hardware information is not available.
### Response:
from hypernix.smoke_alarm import RadsAlarm
# RadsAlarm uses fixed constants; no hardware probing.
alarm = RadsAlarm(time_budget_seconds=1800)
print(f"Recommended steps: {alarm.recommended_steps()}")... | hypernix.smoke_alarm | 3.13 | basic | 0.61.2 | hypernix-dataset-generator | |
f7eb29a2-8e61-4422-a6a1-993479bd4a29 | Use GrillPan to deduplicate and filter a raw text corpus, then write the result with Sink. | from pathlib import Path
from hypernix import pans
from hypernix.sink import Sink
# Create a tiny sample corpus for the demo.
raw = Path("raw_corpus.txt")
raw.write_text("\n".join([
"Hello world",
" hello world ", # duplicate after strip
"Hi", # too short (< 8 chars)
"The quick brown... | ### Instruction:
Use GrillPan to deduplicate and filter a raw text corpus, then write the result with Sink.
### Response:
from pathlib import Path
from hypernix import pans
from hypernix.sink import Sink
# Create a tiny sample corpus for the demo.
raw = Path("raw_corpus.txt")
raw.write_text("\n".join([
"Hello wor... | hypernix.pans | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d7accf24-fa51-4b75-aa62-60988730b752 | Use Skillet to wrap lines with instruct-style tags for fine-tuning. | from pathlib import Path
from hypernix import pans
raw = Path("prompts.txt")
raw.write_text("Explain recursion.\nWhat is a closure?\n")
for formatted_line in pans.Skillet(str(raw), mode="instruct"):
print(repr(formatted_line))
| ### Instruction:
Use Skillet to wrap lines with instruct-style tags for fine-tuning.
### Response:
from pathlib import Path
from hypernix import pans
raw = Path("prompts.txt")
raw.write_text("Explain recursion.\nWhat is a closure?\n")
for formatted_line in pans.Skillet(str(raw), mode="instruct"):
print(repr(form... | hypernix.pans | 3.11 | basic | 0.61.2 | hypernix-dataset-generator | |
0b3e30af-f024-43b8-9890-74c054b713e8 | Use Wok for shuffle-augmented preprocessing with reverse-order augmentation. | from pathlib import Path
from hypernix import pans
corpus = Path("corpus.txt")
corpus.write_text("\n".join(f"Sentence {i}." for i in range(20)))
processed = list(pans.Wok(str(corpus), seed=42, reverse_ratio=0.1))
print(f"Lines out: {len(processed)}")
print("First 3:", processed[:3])
| ### Instruction:
Use Wok for shuffle-augmented preprocessing with reverse-order augmentation.
### Response:
from pathlib import Path
from hypernix import pans
corpus = Path("corpus.txt")
corpus.write_text("\n".join(f"Sentence {i}." for i in range(20)))
processed = list(pans.Wok(str(corpus), seed=42, reverse_ratio=0.... | hypernix.pans | 3.13 | advanced | 0.61.2 | hypernix-dataset-generator | |
6a42468e-9d3f-4e56-b7b5-8660862b8488 | Pick a pan by name at runtime using pick_pan(). | from pathlib import Path
from hypernix import pans
corpus = Path("data.txt")
corpus.write_text("Line one.\nLine two.\nLine three.\n")
tier_name = "sauce-pan" # could come from a config file
pan = pans.pick_pan(tier_name, source=str(corpus))
for line in pan:
print(repr(line))
| ### Instruction:
Pick a pan by name at runtime using pick_pan().
### Response:
from pathlib import Path
from hypernix import pans
corpus = Path("data.txt")
corpus.write_text("Line one.\nLine two.\nLine three.\n")
tier_name = "sauce-pan" # could come from a config file
pan = pans.pick_pan(tier_name, source=str(corp... | hypernix.pans | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
6569bed5-09df-4e9c-8a92-07a5ea372ed1 | Use zap() for a standard 64-token completion without managing an oven object. | from hypernix.microwave import zap
# zap() preheats the oven, runs completion, and discards the oven.
output: str = zap("nix2.5", "def add(a, b):", device="cpu")
print(output)
| ### Instruction:
Use zap() for a standard 64-token completion without managing an oven object.
### Response:
from hypernix.microwave import zap
# zap() preheats the oven, runs completion, and discards the oven.
output: str = zap("nix2.5", "def add(a, b):", device="cpu")
print(output)
| hypernix.microwave | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
eecb4054-319d-4818-95d5-d8922de109b7 | Use high_zap() to draft a long answer (512 tokens) to a technical question. | from hypernix.microwave import high_zap
answer = high_zap(
"nix2.5",
"Explain Rotary Position Embeddings (RoPE) in detail.",
device="cpu",
temperature=0.7,
)
print(answer)
| ### Instruction:
Use high_zap() to draft a long answer (512 tokens) to a technical question.
### Response:
from hypernix.microwave import high_zap
answer = high_zap(
"nix2.5",
"Explain Rotary Position Embeddings (RoPE) in detail.",
device="cpu",
temperature=0.7,
)
print(answer)
| hypernix.microwave | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
a2a9b32a-5c04-4757-ad77-5067462c2ee7 | Use defrost() to reuse the same oven across multiple calls without reloading weights. | from hypernix.microwave import defrost, reheat, zap
# defrost() preheats and returns the oven; weights are loaded once.
oven = defrost("nix2.5", device="cpu")
first_out = zap("nix2.5", "The capital of France is", device="cpu")
second_out = reheat(oven, prior_output=first_out, max_new_tokens=32)
print("First: ", fir... | ### Instruction:
Use defrost() to reuse the same oven across multiple calls without reloading weights.
### Response:
from hypernix.microwave import defrost, reheat, zap
# defrost() preheats and returns the oven; weights are loaded once.
oven = defrost("nix2.5", device="cpu")
first_out = zap("nix2.5", "The capital o... | hypernix.microwave | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
0db85fe2-47fe-46c7-a81f-f0ab55718ff1 | Use chat_zap() for a one-turn chat interaction with a system prompt. | from hypernix.microwave import chat_zap
reply = chat_zap(
"nix2.5",
"What is the difference between a list and a tuple in Python?",
system="Answer in exactly two sentences.",
device="cpu",
)
print(reply)
| ### Instruction:
Use chat_zap() for a one-turn chat interaction with a system prompt.
### Response:
from hypernix.microwave import chat_zap
reply = chat_zap(
"nix2.5",
"What is the difference between a list and a tuple in Python?",
system="Answer in exactly two sentences.",
device="cpu",
)
print(reply... | hypernix.microwave | 3.13 | basic | 0.61.2 | hypernix-dataset-generator | |
e7a0be14-dc94-4ebb-96e3-985423da80c8 | Create a PressureCooker optimizer with warmup, plateau, and cosine cooldown for a training loop. | import torch
import torch.nn as nn
from hypernix.pressure_cooker import pressure_cooker
model = nn.Linear(128, 64)
opt = pressure_cooker(
model.parameters(),
peak_lr=3e-4,
warmup_steps=100,
plateau_steps=500,
cooldown_steps=100,
weight_decay=0.1,
)
# Minimal training loop.
for step in rang... | ### Instruction:
Create a PressureCooker optimizer with warmup, plateau, and cosine cooldown for a training loop.
### Response:
import torch
import torch.nn as nn
from hypernix.pressure_cooker import pressure_cooker
model = nn.Linear(128, 64)
opt = pressure_cooker(
model.parameters(),
peak_lr=3e-4,
wa... | hypernix.pressure_cooker | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
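The "minimal training loop" in this row is truncated at `for step in rang...`. A completion sketch using only standard PyTorch plus the `opt` object built above; it assumes PressureCooker follows the usual `zero_grad()`/`step()` optimizer interface, and the random batch and squared-output loss are purely illustrative:

```python
import torch

for step in range(700):  # 100 warmup + 500 plateau + 100 cooldown
    batch = torch.randn(16, 128)
    loss = model(batch).pow(2).mean()  # toy objective for the demo
    opt.zero_grad()
    loss.backward()
    opt.step()
```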
5695c0c5-7aac-4c41-8dbe-4386fbbe4595 | Enable Lookahead on the PressureCooker optimizer for stabilized training on a narrow network. | import torch
import torch.nn as nn
from hypernix.pressure_cooker import pressure_cooker
model = nn.Linear(64, 32)
opt = pressure_cooker(
model.parameters(),
peak_lr=1e-3,
warmup_steps=50,
plateau_steps=200,
cooldown_steps=50,
lookahead_k=5, # Slow-weight seal every 5 inner steps.
lo... | ### Instruction:
Enable Lookahead on the PressureCooker optimizer for stabilized training on a narrow network.
### Response:
import torch
import torch.nn as nn
from hypernix.pressure_cooker import pressure_cooker
model = nn.Linear(64, 32)
opt = pressure_cooker(
model.parameters(),
peak_lr=1e-3,
warmup_s... | hypernix.pressure_cooker | 3.11 | advanced | 0.61.2 | hypernix-dataset-generator | |
4d9ae796-62cd-4f20-96fc-5f8eece56462 | Use universal_cooker() to auto-select the right device-tuned tier. | import torch
import torch.nn as nn
from hypernix.pressure_cooker import universal_cooker
model = nn.Sequential(nn.Linear(256, 128), nn.ReLU(), nn.Linear(128, 10))
opt = universal_cooker(model.parameters(), prefer_speed=True)
print(f"Selected: {type(opt).__name__}") # ElectricCooker on CPU
# Train for a few steps.
... | ### Instruction:
Use universal_cooker() to auto-select the right device-tuned tier.
### Response:
import torch
import torch.nn as nn
from hypernix.pressure_cooker import universal_cooker
model = nn.Sequential(nn.Linear(256, 128), nn.ReLU(), nn.Linear(128, 10))
opt = universal_cooker(model.parameters(), prefer_speed=... | hypernix.pressure_cooker | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d9a40fee-62c3-437f-8147-27cb8841d75d | List all recommended quantization types from the QUANT_CATALOG. | from hypernix.quantize import quant_recommended, quant_by_category
print("Recommended quants:")
for spec in quant_recommended():
print(f" {spec.name:<12} {spec.bits_per_weight:.2f} bpw {spec.notes}")
print("\nK-quant family (sorted by bpw):")
for spec in quant_by_category("k"):
print(f" {spec.name:<10} {... | ### Instruction:
List all recommended quantization types from the QUANT_CATALOG.
### Response:
from hypernix.quantize import quant_recommended, quant_by_category
print("Recommended quants:")
for spec in quant_recommended():
print(f" {spec.name:<12} {spec.bits_per_weight:.2f} bpw {spec.notes}")
print("\nK-quan... | hypernix.quantize | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
bd53158f-cd0d-4281-aa68-e59ee320bd8d | Estimate which quantization type fits a target file-size budget. | from hypernix.quantize import quant_for_size, quant_estimate_size
# Suppose the fp16 GGUF is 4 GB.
fp16_bytes = 4 * 1024 ** 3
target_bytes = 2 * 1024 ** 3 # want to fit on a 2 GB download limit
best = quant_for_size(target_bytes, fp16_bytes)
print(f"Best fit: {best.name} ({best.bits_per_weight:.2f} bpw)")
estim... | ### Instruction:
Estimate which quantization type fits a target file-size budget.
### Response:
from hypernix.quantize import quant_for_size, quant_estimate_size
# Suppose the fp16 GGUF is 4 GB.
fp16_bytes = 4 * 1024 ** 3
target_bytes = 2 * 1024 ** 3 # want to fit on a 2 GB download limit
best = quant_for_size(t... | hypernix.quantize | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
bbba8ce2-a3c2-456a-9aa9-7a1bb1749e8e | Resolve a quantization alias and look up its QuantSpec. | from hypernix.quantize import quant_resolve_spec
for alias in ("q4km", "q4-k-m", "Q4_K_M", "q6"):
spec = quant_resolve_spec(alias)
print(f"{alias!r:12s} β {spec.name:<10} {spec.bits_per_weight:.2f} bpw "
f"category={spec.category}")
| ### Instruction:
Resolve a quantization alias and look up its QuantSpec.
### Response:
from hypernix.quantize import quant_resolve_spec
for alias in ("q4km", "q4-k-m", "Q4_K_M", "q6"):
spec = quant_resolve_spec(alias)
print(f"{alias!r:12s} β {spec.name:<10} {spec.bits_per_weight:.2f} bpw "
f"categ... | hypernix.quantize | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
a3db8949-490d-479e-aa0e-9ebd37960b83 | Run a complete preheat → train → GGUF pipeline with instant_pot in a single call. | from pathlib import Path
from hypernix import instant_pot
# Create a tiny training corpus for the demo.
corpus = Path("demo_corpus.txt")
corpus.write_text("\n".join(
[f"Training sentence number {i} for the demo." for i in range(200)]
))
recipe = {
"repo_id": "nix2.5",
"dataset": str(corpus),... | ### Instruction:
Run a complete preheat → train → GGUF pipeline with instant_pot in a single call.
### Response:
from pathlib import Path
from hypernix import instant_pot
# Create a tiny training corpus for the demo.
corpus = Path("demo_corpus.txt")
corpus.write_text("\n".join(
[f"Training sentence number {i} for... | hypernix.instant_pot | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
31acd715-8c1f-4c8a-aaf4-8c94254f7cdc | Load an instant_pot recipe from a JSON file and run it with a CLI step override. | # recipe.json content:
# {
# "repo_id": "nix2.5",
# "dataset": "./corpus.txt",
# "out_dir": "./out",
# "steps": 100
# }
#
# Shell usage:
# hypernix brew recipe.json --set steps=500
# Python equivalent of --set overrides:
import json
from pathlib import Path
from hypernix import instant_pot
recipe_path = Pat... | ### Instruction:
Load an instant_pot recipe from a JSON file and run it with a CLI step override.
### Response:
# recipe.json content:
# {
# "repo_id": "nix2.5",
# "dataset": "./corpus.txt",
# "out_dir": "./out",
# "steps": 100
# }
#
# Shell usage:
# hypernix brew recipe.json --set steps=500
# Python equiva... | hypernix.instant_pot | 3.11 | advanced | 0.61.2 | hypernix-dataset-generator | |
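The Python equivalent of the `--set` override is cut off at `recipe_path = Pat...`. A sketch of the likely continuation, reusing the `json`/`Path` imports visible in the row and the same assumed `brew` entry point as above:

```python
recipe_path = Path("recipe.json")
recipe = json.loads(recipe_path.read_text())
recipe["steps"] = 500  # Python-side equivalent of --set steps=500
instant_pot.brew(recipe)  # assumed entry point; see the note above
```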
2f99efba-ed06-4d41-be22-584858ca6060 | Write lines to a Sink with deduplication enabled. | from pathlib import Path
from hypernix.sink import Sink
out = Path("output.txt")
s = Sink(str(out), dedupe=True)
lines = ["apple", "banana", "apple", "cherry", "banana", "date"]
for line in lines:
wrote = s.write(line)
print(f"{'written' if wrote else 'skipped':8s} {line!r}")
s.close()
print("\nFinal file... | ### Instruction:
Write lines to a Sink with deduplication enabled.
### Response:
from pathlib import Path
from hypernix.sink import Sink
out = Path("output.txt")
s = Sink(str(out), dedupe=True)
lines = ["apple", "banana", "apple", "cherry", "banana", "date"]
for line in lines:
wrote = s.write(line)
print(f... | hypernix.sink | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
364137d0-9f05-4c74-9cdd-fe56b32c99d3 | Use Sink as a context manager and write structured JSON events. | from hypernix.sink import Sink
with Sink("events.jsonl") as s:
for i in range(5):
s.write_json({"step": i, "loss": 3.0 - i * 0.1})
import json
from pathlib import Path
for line in Path("events.jsonl").read_text().splitlines():
print(json.loads(line))
| ### Instruction:
Use Sink as a context manager and write structured JSON events.
### Response:
from hypernix.sink import Sink
with Sink("events.jsonl") as s:
for i in range(5):
s.write_json({"step": i, "loss": 3.0 - i * 0.1})
import json
from pathlib import Path
for line in Path("events.jsonl").read_text... | hypernix.sink | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
ebdd7855-60b8-44e5-b11d-0c054fa4ed78 | Use Sink with file rotation to split output into 1 KB chunks. | from pathlib import Path, glob
from hypernix.sink import Sink
s = Sink("rotated.txt", rotate_bytes=1024)
for i in range(200):
s.write(f"Line {i:04d}: " + "x" * 20)
s.close()
# Each shard is <= 1 KB; the sink names them rotated.txt, rotated.txt.1, ...
import glob as _glob
shards = sorted(_glob.glob("rotated.txt*")... | ### Instruction:
Use Sink with file rotation to split output into 1 KB chunks.
### Response:
from pathlib import Path
from hypernix.sink import Sink
s = Sink("rotated.txt", rotate_bytes=1024)
for i in range(200):
s.write(f"Line {i:04d}: " + "x" * 20)
s.close()
# Each shard is <= 1 KB; the sink names them r... | hypernix.sink | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
be9561df-e3da-4eb0-9fdf-2b91ab56ab1e | Load a training log into a Table and display the worst-loss steps. | from pathlib import Path
from hypernix.table import Table
# Simulate a training log.
log = Path("train.log")
log.write_text(
"\n".join(
f"step {i}/{100} loss={3.0 - i * 0.02:.3f} lr=3e-4"
for i in range(1, 101)
)
)
t = Table.from_training_log(str(log))
# Show the 5 steps with the highest loss... | ### Instruction:
Load a training log into a Table and display the worst-loss steps.
### Response:
from pathlib import Path
from hypernix.table import Table
# Simulate a training log.
log = Path("train.log")
log.write_text(
"\n".join(
f"step {i}/{100} loss={3.0 - i * 0.02:.3f} lr=3e-4"
for i in ran... | hypernix.table | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
02f80763-b3ca-40e2-a01d-06d57f3458af | Filter and project a judge corpus table to show only BAD examples. | from pathlib import Path
from hypernix.table import Table
# Simulate a judge corpus (output of mediocre_fridge.synthesize_judge_corpus).
corpus = Path("judge.txt")
corpus.write_text(
"\n".join([
'{"prompt": "2+2", "response": "5", "label": "BAD"}',
'{"prompt": "capital of France", "response": "Pari... | ### Instruction:
Filter and project a judge corpus table to show only BAD examples.
### Response:
from pathlib import Path
from hypernix.table import Table
# Simulate a judge corpus (output of mediocre_fridge.synthesize_judge_corpus).
corpus = Path("judge.txt")
corpus.write_text(
"\n".join([
'{"prompt": "... | hypernix.table | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
dbfbf91b-d5ee-45ec-ae33-91e91b261ca9 | Mix two corpora with weighted sampling using CountertopBlender. | from pathlib import Path
from hypernix.blender import CountertopBlender
from hypernix.sink import Sink
Path("high_quality.txt").write_text(
"\n".join(f"HQ line {i}" for i in range(50))
)
Path("scraped.txt").write_text(
"\n".join(f"Scraped line {i}" for i in range(200))
)
# 70% curated, 30% scraped.
blender = ... | ### Instruction:
Mix two corpora with weighted sampling using CountertopBlender.
### Response:
from pathlib import Path
from hypernix.blender import CountertopBlender
from hypernix.sink import Sink
Path("high_quality.txt").write_text(
"\n".join(f"HQ line {i}" for i in range(50))
)
Path("scraped.txt").write_text(
... | hypernix.blender | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
db32a682-1bf6-4b31-9742-b13bbbb2b4fe | Round-robin interleave multiple data sources with PersonalBlender. | from pathlib import Path
from hypernix.blender import PersonalBlender
from hypernix.sink import Sink
for name, content in [("a.txt", "A1\nA2\nA3\n"),
("b.txt", "B1\nB2\nB3\n"),
("c.txt", "C1\nC2\nC3\n")]:
Path(name).write_text(content)
Sink("interleaved.txt").pour(
... | ### Instruction:
Round-robin interleave multiple data sources with PersonalBlender.
### Response:
from pathlib import Path
from hypernix.blender import PersonalBlender
from hypernix.sink import Sink
for name, content in [("a.txt", "A1\nA2\nA3\n"),
("b.txt", "B1\nB2\nB3\n"),
... | hypernix.blender | 3.13 | basic | 0.61.2 | hypernix-dataset-generator | |
73d8f29a-eb55-4414-bc57-d11ffc013b11 | Chunk a large document into fixed-length character slices with overlap for context windows. | from pathlib import Path
from hypernix.food_processor import SliceBlade
from hypernix.sink import Sink
doc = Path("large_doc.txt")
doc.write_text(" ".join(f"word{i}" for i in range(1000)))
Sink("chunks.txt").pour(
SliceBlade(str(doc), slice_chars=256, overlap_chars=32)
)
chunks = Path("chunks.txt").read_text().s... | ### Instruction:
Chunk a large document into fixed-length character slices with overlap for context windows.
### Response:
from pathlib import Path
from hypernix.food_processor import SliceBlade
from hypernix.sink import Sink
doc = Path("large_doc.txt")
doc.write_text(" ".join(f"word{i}" for i in range(1000)))
Sink(... | hypernix.food_processor | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
ee5b567d-f122-4f2e-be77-21ce2075339b | Use ShredBlade for a sliding-window tokenized split suited for language model training. | from pathlib import Path
from hypernix.food_processor import ShredBlade
text = Path("story.txt")
text.write_text(
"The cat sat on the mat. The mat was red. The cat was black. "
* 20
)
windows = list(ShredBlade(str(text), window_tokens=16, stride_tokens=8))
print(f"Windows: {len(windows)}")
print(f"Sample: {w... | ### Instruction:
Use ShredBlade for a sliding-window tokenized split suited for language model training.
### Response:
from pathlib import Path
from hypernix.food_processor import ShredBlade
text = Path("story.txt")
text.write_text(
"The cat sat on the mat. The mat was red. The cat was black. "
* 20
)
window... | hypernix.food_processor | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
b8ba7dd8-93f1-49e5-9807-098444db888e | Format a file of Q/A line pairs as prompt-response training records using TwoSliceToaster. | from pathlib import Path
from hypernix.toaster import TwoSliceToaster
pairs = Path("qa_pairs.txt")
pairs.write_text(
"What is 2+2?\n4\n"
"What is the capital of France?\nParis\n"
"Name a primary color.\nRed\n"
)
for record in TwoSliceToaster(
source=str(pairs), prompt_tag="Q: ", response_tag="A: "
):
... | ### Instruction:
Format a file of Q/A line pairs as prompt-response training records using TwoSliceToaster.
### Response:
from pathlib import Path
from hypernix.toaster import TwoSliceToaster
pairs = Path("qa_pairs.txt")
pairs.write_text(
"What is 2+2?\n4\n"
"What is the capital of France?\nParis\n"
"Name... | hypernix.toaster | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
e59609d5-036c-4c61-a434-e9daf7bcf720 | Wrap whole documents with XML-style header/footer using ToasterOven. | from pathlib import Path
from hypernix.toaster import ToasterOven
docs = Path("documents.txt")
docs.write_text(
"First document sentence one. First document sentence two.\n\n"
"Second document sentence.\n\n"
"Third document.\n"
)
for wrapped in ToasterOven(
source=str(docs),
header="<DOCUMENT>",
... | ### Instruction:
Wrap whole documents with XML-style header/footer using ToasterOven.
### Response:
from pathlib import Path
from hypernix.toaster import ToasterOven
docs = Path("documents.txt")
docs.write_text(
"First document sentence one. First document sentence two.\n\n"
"Second document sentence.\n\n"
... | hypernix.toaster | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
82fb9df1-2e0c-4934-bc2e-836a2bc9a807 | Train a model with GoodSmoker to get warmup and cosine cooldown built in. | from pathlib import Path
from hypernix.smoker import good_smoker
from hypernix.old_oven import preheat
corpus = Path("corpus.txt")
corpus.write_text("\n".join(f"Example sentence {i}." for i in range(500)))
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
smoker = good_smoker(
oven=oven,
steps=... | ### Instruction:
Train a model with GoodSmoker to get warmup and cosine cooldown built in.
### Response:
from pathlib import Path
from hypernix.smoker import good_smoker
from hypernix.old_oven import preheat
corpus = Path("corpus.txt")
corpus.write_text("\n".join(f"Example sentence {i}." for i in range(500)))
oven =... | hypernix.smoker | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
9e5feb6b-3534-4507-8de8-240cf1be091b | Use HighQualitySmoker with a curriculum that grows the context length progressively. | from pathlib import Path
from hypernix.smoker import high_quality_smoker
from hypernix.old_oven import preheat
corpus = Path("big_corpus.txt")
corpus.write_text("\n".join(f"Sentence {i}: " + "word " * 50 for i in range(300)))
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
smoker = high_quality_smoke... | ### Instruction:
Use HighQualitySmoker with a curriculum that grows the context length progressively.
### Response:
from pathlib import Path
from hypernix.smoker import high_quality_smoker
from hypernix.old_oven import preheat
corpus = Path("big_corpus.txt")
corpus.write_text("\n".join(f"Sentence {i}: " + "word " * 5... | hypernix.smoker | 3.11 | advanced | 0.61.2 | hypernix-dataset-generator | |
2939934b-68b0-44f0-9340-f062869f66e4 | Run a nightly training job on a schedule using CoffeeMaker. | from hypernix.coffee_maker import coffee_maker
call_count = 0
def nightly_train() -> None:
global call_count
call_count += 1
print(f"Training cycle {call_count} complete.")
maker = coffee_maker(nightly_train, interval_seconds=1) # 1s for demo
maker.run(cycles=3)
maker.summary()
| ### Instruction:
Run a nightly training job on a schedule using CoffeeMaker.
### Response:
from hypernix.coffee_maker import coffee_maker
call_count = 0
def nightly_train() -> None:
global call_count
call_count += 1
print(f"Training cycle {call_count} complete.")
maker = coffee_maker(nightly_train, inte... | hypernix.coffee_maker | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
53c2b959-66b9-46ff-aa83-8d534648afd6 | Use ColdBrewMaker to checkpoint a long multi-phase run so it can resume after a crash. | from pathlib import Path
from hypernix.coffee_maker import cold_brew
ckpt_path = "./run_checkpoint.json"
def phase_fn(state: dict, phase: int) -> dict:
print(f"Running phase {phase}...")
state[f"phase_{phase}_done"] = True
return state
cb = cold_brew(phase_fn, phases=4, checkpoint_path=ckpt_path)
final_s... | ### Instruction:
Use ColdBrewMaker to checkpoint a long multi-phase run so it can resume after a crash.
### Response:
from pathlib import Path
from hypernix.coffee_maker import cold_brew
ckpt_path = "./run_checkpoint.json"
def phase_fn(state: dict, phase: int) -> dict:
print(f"Running phase {phase}...")
stat... | hypernix.coffee_maker | 3.13 | advanced | 0.61.2 | hypernix-dataset-generator | |
11ee6594-9f6f-482c-9820-20708825cbdc | Use PercolatorMaker to iteratively refine a draft via critique-and-revision cycles. | from hypernix.coffee_maker import percolator
revisions: list[str] = []
def draft_then_revise(prior: str) -> str:
# In production this would call an LLM.
revised = prior.upper() if len(prior) < 50 else prior
revisions.append(revised)
return revised
final = percolator(
draft_then_revise,
seed_i... | ### Instruction:
Use PercolatorMaker to iteratively refine a draft via critique-and-revision cycles.
### Response:
from hypernix.coffee_maker import percolator
revisions: list[str] = []
def draft_then_revise(prior: str) -> str:
# In production this would call an LLM.
revised = prior.upper() if len(prior) < 5... | hypernix.coffee_maker | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
99f0e32f-4a4f-4dee-baac-d7474230f01c | Evaluate a model against a prompt battery using DoubleShot (two samples, scorer picks the better one). | from hypernix.old_oven import preheat
from hypernix.espresso_maker import double_shot
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
def keyword_scorer(prompt: str, output: str, reference: str) -> float:
"""Score by keyword overlap."""
ref_words = set(reference.lower().split())
out_words ... | ### Instruction:
Evaluate a model against a prompt battery using DoubleShot (two samples, scorer picks the better one).
### Response:
from hypernix.old_oven import preheat
from hypernix.espresso_maker import double_shot
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
def keyword_scorer(prompt: str, o... | hypernix.espresso_maker | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
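The snippet ends just after `keyword_scorer` is defined, so the actual `double_shot` call is missing. A usage sketch in which the argument names (`prompt`, `reference`, `scorer`) are assumptions inferred from the scorer's `(prompt, output, reference)` signature, not a documented call:

```python
battery = [
    ("What is the capital of France?", "Paris is the capital of France."),
    ("What is 2+2?", "4"),
]
for prompt, reference in battery:
    # Assumed signature: samples twice, returns the higher-scoring output.
    best = double_shot(oven, prompt, reference=reference, scorer=keyword_scorer)
    print(f"{prompt!r} -> {best!r}")
```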
3b67e30b-a96c-494e-983a-ff6d31af3d38 | Synthesize a judge-training corpus and then plot its label distribution. | from pathlib import Path
from hypernix import mediocre_fridge, new_fridge
# Generate 256 labelled pairs (no GPU needed).
dataset = mediocre_fridge.synthesize_judge_corpus(
n=256,
out_path="judge_corpus.txt",
)
print(f"Generated {len(dataset)} examples")
# Plot the score distribution (Matplotlib installed lazi... | ### Instruction:
Synthesize a judge-training corpus and then plot its label distribution.
### Response:
from pathlib import Path
from hypernix import mediocre_fridge, new_fridge
# Generate 256 labelled pairs (no GPU needed).
dataset = mediocre_fridge.synthesize_judge_corpus(
n=256,
out_path="judge_corpus.txt"... | hypernix.mediocre_fridge | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
88844d85-9bc8-4270-9679-eb2322163915 | Parse a training log and plot the loss curve. | from pathlib import Path
from hypernix import new_fridge
# Simulate a training log.
log_text = "\n".join(
f"step {i}/500 loss={3.0 * 0.995**i:.4f} lr=3e-4"
for i in range(1, 501)
)
Path("train.log").write_text(log_text)
records = new_fridge.parse_training_log(log_text)
print(f"Parsed {len(records)} steps, fin... | ### Instruction:
Parse a training log and plot the loss curve.
### Response:
from pathlib import Path
from hypernix import new_fridge
# Simulate a training log.
log_text = "\n".join(
f"step {i}/500 loss={3.0 * 0.995**i:.4f} lr=3e-4"
for i in range(1, 501)
)
Path("train.log").write_text(log_text)
records = ne... | hypernix.new_fridge | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
d778ed6a-a442-4690-9922-bedfe9965dff | Use new_range to label model responses with a zero-dependency first-fail rubric. | from hypernix.new_range import new_range
rubric = new_range()
examples = [
("What is 2+2?", ""), # empty
("Solve x+1=3.", "I cannot do math."), # refusal
("What is sqrt(9)?", "three"), # math without digit
("Hi", "Hello!"), # repetition-free pass... | ### Instruction:
Use new_range to label model responses with a zero-dependency first-fail rubric.
### Response:
from hypernix.new_range import new_range
rubric = new_range()
examples = [
("What is 2+2?", ""), # empty
("Solve x+1=3.", "I cannot do math."), # refusal
("What is sqrt(9)?... | hypernix.new_range | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
0bb02c66-c89d-4444-a8ef-997fc7ee84d0 | Use old_range for a weighted-mean scored rubric with keyword and reference checks. | from hypernix.old_range import old_range
rubric = old_range(
keywords=["Paris", "France", "capital"],
stopwords={"the", "a", "is", "of"},
)
pairs = [
("What is the capital of France?", "Paris is the capital of France.", "Paris France"),
("What is the capital of France?", "I don't know.", "Paris France... | ### Instruction:
Use old_range for a weighted-mean scored rubric with keyword and reference checks.
### Response:
from hypernix.old_range import old_range
rubric = old_range(
keywords=["Paris", "France", "capital"],
stopwords={"the", "a", "is", "of"},
)
pairs = [
("What is the capital of France?", "Paris... | hypernix.old_range | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
a13b0bb3-86e9-4847-9306-79aecd03284a | Use Lunchbox to build a consistent-schema eval dataset and save it as JSONL. | from hypernix.lunchbox import Lunchbox
lb = Lunchbox.for_eval() # preloads EVAL_SCHEMA columns
lb.add(
id="001",
category="math",
difficulty="easy",
tier=1,
prompt="2+2",
reference="4",
model_response="4",
keyword_score=1.0,
latency_s=0.12,
variant="zero_shot",
pipeline_m... | ### Instruction:
Use Lunchbox to build a consistent-schema eval dataset and save it as JSONL.
### Response:
from hypernix.lunchbox import Lunchbox
lb = Lunchbox.for_eval() # preloads EVAL_SCHEMA columns
lb.add(
id="001",
category="math",
difficulty="easy",
tier=1,
prompt="2+2",
reference="4... | hypernix.lunchbox | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
ef6b5cbb-f478-4c2c-8882-6bdc42d09a27 | Resolve the correct chat template for hyper-Nix.2 and format a multi-turn conversation. | from hypernix.cookbook import COOKBOOK, for_model
tmpl = for_model("ray0rf1re/hyper-Nix.2")
print(f"Template: {tmpl.name}")
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the Pythagorean theorem?"},
{"role": "assistant", "content": ... | ### Instruction:
Resolve the correct chat template for hyper-Nix.2 and format a multi-turn conversation.
### Response:
from hypernix.cookbook import COOKBOOK, for_model
tmpl = for_model("ray0rf1re/hyper-Nix.2")
print(f"Template: {tmpl.name}")
messages = [
{"role": "system", "content": "You are a helpful assis... | hypernix.cookbook | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
9450a423-de6c-4593-a53e-a5487f62046f | Run a multi-turn chat session with a Countertop, streaming tokens through Bell and cleaning replies with Flour. | from hypernix.old_oven import preheat
from hypernix.countertop import Countertop
from hypernix.bell import stdout_bell
from hypernix.flour import Flour
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
bell = stdout_bell()
flt = Flour.smart_default(template="chatml")
chat = Countertop(oven, system="You... | ### Instruction:
Run a multi-turn chat session with a Countertop, streaming tokens through Bell and cleaning replies with Flour.
### Response:
from hypernix.old_oven import preheat
from hypernix.countertop import Countertop
from hypernix.bell import stdout_bell
from hypernix.flour import Flour
oven = preheat(repo_id=... | hypernix.countertop | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
82c10469-0ade-47f2-a4ca-672727926804 | Use Menu to find a persona by fuzzy name and apply it to a Countertop session. | from hypernix.old_oven import preheat
from hypernix.countertop import Countertop
from hypernix.menu import Menu
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
persona = Menu.find("code") # fuzzy match β "code-helper"
chat = Countertop(oven, persona=persona)
print(f"Active persona: {persona}")
re... | ### Instruction:
Use Menu to find a persona by fuzzy name and apply it to a Countertop session.
### Response:
from hypernix.old_oven import preheat
from hypernix.countertop import Countertop
from hypernix.menu import Menu
oven = preheat(repo_id="nix2.5", device="cpu", dtype="float32")
persona = Menu.find("code") ... | hypernix.menu | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
cd4b9ab9-221a-4cd7-9eeb-2d0576232804 | Average three checkpoints with SWA (Stochastic Weight Averaging) and write the blended snapshot. | import torch
import torch.nn as nn
from pathlib import Path
from hypernix.whisk import whisk_to_snapshot
# Create three dummy checkpoints.
model = nn.Linear(64, 32)
ckpts: list[dict] = []
for i in range(3):
# Slightly perturb weights to simulate different checkpoints.
with torch.no_grad():
for p in mod... | ### Instruction:
Average three checkpoints with SWA (Stochastic Weight Averaging) and write the blended snapshot.
### Response:
import torch
import torch.nn as nn
from pathlib import Path
from hypernix.whisk import whisk_to_snapshot
# Create three dummy checkpoints.
model = nn.Linear(64, 32)
ckpts: list[dict] = []
fo... | hypernix.whisk | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
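Both the perturbation loop and the `whisk_to_snapshot` call in this row are cut off. A sketch of a plausible completion: the checkpoint perturbation is plain PyTorch, while the `whisk_to_snapshot(ckpts, out_path=...)` signature is assumed from the import and the instruction text:

```python
for i in range(3):
    with torch.no_grad():
        for p in model.parameters():
            p.add_(torch.randn_like(p) * 0.01)  # simulate checkpoint drift
    ckpts.append({k: v.clone() for k, v in model.state_dict().items()})

# Average the three checkpoints (SWA) and write the blended snapshot.
whisk_to_snapshot(ckpts, out_path="swa_blend.pt")  # assumed signature
```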
dc1ed4d2-6e93-414f-b037-38e8b4152590 | Split a text corpus into train/val/test files deterministically. | from pathlib import Path
from hypernix.cutting_board import CuttingBoard
corpus = Path("full_dataset.txt")
corpus.write_text("\n".join(f"Example {i}." for i in range(1000)))
board = CuttingBoard(
train_ratio=0.8,
val_ratio=0.1,
test_ratio=0.1,
seed=42,
shuffle=True,
)
board.slice_to_files(str(corp... | ### Instruction:
Split a text corpus into train/val/test files deterministically.
### Response:
from pathlib import Path
from hypernix.cutting_board import CuttingBoard
corpus = Path("full_dataset.txt")
corpus.write_text("\n".join(f"Example {i}." for i in range(1000)))
board = CuttingBoard(
train_ratio=0.8,
... | hypernix.cutting_board | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
034761c6-2db3-4db0-955b-e7bed9c997d0 | Use an Apron context manager to run a sampling step without leaking RNG state to the caller. | import random
import torch
from hypernix.apron import apron
# Caller's RNG state.
random.seed(0)
torch.manual_seed(0)
before = random.random()
with apron(seed=99):
# Everything inside is seeded with 99 and isolated.
print("Inside:", random.random(), torch.randn(1).item())
# After exiting, the caller's RNG is... | ### Instruction:
Use an Apron context manager to run a sampling step without leaking RNG state to the caller.
### Response:
import random
import torch
from hypernix.apron import apron
# Caller's RNG state.
random.seed(0)
torch.manual_seed(0)
before = random.random()
with apron(seed=99):
# Everything inside is se... | hypernix.apron | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
252c48a9-d5af-43db-a231-ad7ed930da55 | Set up UPS mode to checkpoint when severe weather is detected, using offline mode for testing. | from pathlib import Path
from hypernix.ups import UPS
checkpoints: list[str] = []
def my_snapshot() -> None:
path = f"emergency_ckpt_{len(checkpoints)}.pt"
checkpoints.append(path)
print(f"Emergency checkpoint saved: {path}")
ups = UPS(
snapshot_fn=my_snapshot,
check_interval_seconds=60,
offl... | ### Instruction:
Set up UPS mode to checkpoint when severe weather is detected, using offline mode for testing.
### Response:
from pathlib import Path
from hypernix.ups import UPS
checkpoints: list[str] = []
def my_snapshot() -> None:
path = f"emergency_ckpt_{len(checkpoints)}.pt"
checkpoints.append(path)
... | hypernix.ups | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
e36b4038-b68e-4127-b02c-d2cfa5c643de | Wrap user messages with ThinkingInjector for a model that uses <think>...</think> reasoning. | from hypernix import injection
# Module-level shortcut β no instantiation needed.
prompt = "Solve: x^2 - 5x + 6 = 0"
wrapped_text = injection.thinking(prompt)
print("Wrapped text:")
print(wrapped_text)
# Wrap a messages list directly.
messages = [{"role": "user", "content": prompt}]
inj = injection.ThinkingInjector()... | ### Instruction:
Wrap user messages with ThinkingInjector for a model that uses <think>...</think> reasoning.
### Response:
from hypernix import injection
# Module-level shortcut β no instantiation needed.
prompt = "Solve: x^2 - 5x + 6 = 0"
wrapped_text = injection.thinking(prompt)
print("Wrapped text:")
print(wrappe... | hypernix.injection | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
ba94bbe1-58ee-473c-b839-cf1b641c8c14 | Run a plasma benchmark to calibrate a smoke_alarm ETA estimate. | from hypernix.plasma import plasma
from hypernix.smoke_alarm import GasAlarm
# Benchmark takes ~2 s on CPU; use the result to calibrate the alarm.
result = plasma(device="cpu")
print(f"Median step time: {result.step_ms:.1f} ms")
print(f"Throughput: {result.tokens_per_sec:.0f} tok/s")
print(f"Calibration factor... | ### Instruction:
Run a plasma benchmark to calibrate a smoke_alarm ETA estimate.
### Response:
from hypernix.plasma import plasma
from hypernix.smoke_alarm import GasAlarm
# Benchmark takes ~2 s on CPU; use the result to calibrate the alarm.
result = plasma(device="cpu")
print(f"Median step time: {result.step_ms:.1f... | hypernix.plasma | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
55b1a900-2669-4b94-8927-593a527453c3 | Render a single dashboard frame from a training log using the hypernix.tv API (new 4-panel layout from v0.61.2). | from pathlib import Path
from hypernix import tv
# Simulate a training log that tvtop would tail.
log = Path("train.log")
log.write_text(
"\n".join(
f"step {i}/500 loss={3.0 * 0.995**i:.4f} lr=3e-4"
for i in range(1, 200)
)
)
# latest_frame() parses the log and returns an ANSI string ready to ... | ### Instruction:
Render a single dashboard frame from a training log using the hypernix.tv API (new 4-panel layout from v0.61.2).
### Response:
from pathlib import Path
from hypernix import tv
# Simulate a training log that tvtop would tail.
log = Path("train.log")
log.write_text(
"\n".join(
f"step {i}/50... | hypernix.tv | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
ce87dfff-741d-49b2-9523-1df8afb4801b | Launch the tvtop dashboard CLI programmatically against a training log directory. | # CLI usage (run in your shell while training is active):
# tvtop # auto-discovers training logs under cwd
# tvtop --log ./trained/train.log # explicit log
# tvtop --ascii # non-UTF terminal fallback
# tvtop --interval 2 # refresh every 2 seconds
# Python equiv... | ### Instruction:
Launch the tvtop dashboard CLI programmatically against a training log directory.
### Response:
# CLI usage (run in your shell while training is active):
# tvtop # auto-discovers training logs under cwd
# tvtop --log ./trained/train.log # explicit log
# tvtop --ascii ... | hypernix.tv | 3.11 | cli | 0.61.2 | hypernix-dataset-generator | |
1f2eb819-d3e1-4fa1-9e1e-91f3315ee0c1 | Compact old training checkpoints to zip archives, keeping the 3 most recent. | from pathlib import Path
from hypernix.compactor import Compactor
# Create fake checkpoint directories.
root = Path("./checkpoints")
root.mkdir(exist_ok=True)
for i in range(7):
d = root / f"ckpt-{i:04d}"
d.mkdir(exist_ok=True)
(d / "model.safetensors").write_bytes(b"fake weights " * 100)
# Plan without t... | ### Instruction:
Compact old training checkpoints to zip archives, keeping the 3 most recent.
### Response:
from pathlib import Path
from hypernix.compactor import Compactor
# Create fake checkpoint directories.
root = Path("./checkpoints")
root.mkdir(exist_ok=True)
for i in range(7):
d = root / f"ckpt-{i:04d}"
... | hypernix.compactor | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
7b2dfb0e-8132-445d-a74f-610ecd6c436d | Use Cheesecloth to remove near-duplicate lines from a noisy dataset. | from pathlib import Path
from hypernix.strainer import Cheesecloth
noisy = Path("noisy.txt")
noisy.write_text(
"The quick brown fox jumps over the lazy dog.\n"
"The quick brown fox jumped over the lazy dog.\n" # near-dup
"Machine learning is transforming technology.\n"
"Machine learning is transformin... | ### Instruction:
Use Cheesecloth to remove near-duplicate lines from a noisy dataset.
### Response:
from pathlib import Path
from hypernix.strainer import Cheesecloth
noisy = Path("noisy.txt")
noisy.write_text(
"The quick brown fox jumps over the lazy dog.\n"
"The quick brown fox jumped over the lazy dog.\n" ... | hypernix.strainer | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
e2671496-ca50-4a56-9051-28496d46a71b | Use IntervalTimer to throttle checkpoint saves inside a training loop. | import time
from hypernix.timer import IntervalTimer
save_timer = IntervalTimer(interval_seconds=0.1) # 100 ms for demo
saved_steps: list[int] = []
for step in range(50):
time.sleep(0.02) # simulate 20 ms per step
if save_timer.should_fire():
saved_steps.append(step)
# In production: torc... | ### Instruction:
Use IntervalTimer to throttle checkpoint saves inside a training loop.
### Response:
import time
from hypernix.timer import IntervalTimer
save_timer = IntervalTimer(interval_seconds=0.1) # 100 ms for demo
saved_steps: list[int] = []
for step in range(50):
time.sleep(0.02) # simulate 20 ms p... | hypernix.timer | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
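The loop is cut off inside its production note (likely a torch.save() call). The probable tail is just a report of which steps fired; with 20 ms steps against a 100 ms interval, roughly every fifth step should save:

# Likely tail (assumed): report the throttled save points.
print(f"Checkpoints saved at steps: {saved_steps}")
# Expected: roughly every 5th step (100 ms interval / 20 ms per step).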
8981ea07-4df3-49b6-8a83-19f9b266ab3b | Run HandWash to delete __pycache__ and log files from a training run directory. | from pathlib import Path
from hypernix.dishwasher import HandWash
run_dir = Path("./my_run")
run_dir.mkdir(exist_ok=True)
(run_dir / "train.log").write_text("log content")
cache = run_dir / "__pycache__"
cache.mkdir(exist_ok=True)
(cache / "compiled.pyc").write_bytes(b"\x00" * 100)
washer = HandWash(root=str(run_dir)... | ### Instruction:
Run HandWash to delete __pycache__ and log files from a training run directory.
### Response:
from pathlib import Path
from hypernix.dishwasher import HandWash
run_dir = Path("./my_run")
run_dir.mkdir(exist_ok=True)
(run_dir / "train.log").write_text("log content")
cache = run_dir / "__pycache__"
cac... | hypernix.dishwasher | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
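The HandWash constructor call is truncated mid-argument. A hedged sketch of the rest; the patterns argument and the wash() method are hypothetical, only HandWash(root=...) is visible above:

# Hypothetical API: patterns and wash() are assumptions.
washer = HandWash(root=str(run_dir), patterns=["__pycache__", "*.log"])
removed = washer.wash()
print(f"Removed {removed} paths under {run_dir}")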
fdeb4508-bf1d-4631-b061-4000b88329af | Read current CPU and GPU temperatures with InstantThermometer. | from hypernix.thermometer import InstantThermometer
therm = InstantThermometer()
reading = therm.read()
print(f"CPU temperature: {reading.cpu_celsius:.1f} Β°C")
if reading.gpu_celsius is not None:
print(f"GPU temperature: {reading.gpu_celsius:.1f} Β°C")
else:
print("GPU temperature: not available")
| ### Instruction:
Read current CPU and GPU temperatures with InstantThermometer.
### Response:
from hypernix.thermometer import InstantThermometer
therm = InstantThermometer()
reading = therm.read()
print(f"CPU temperature: {reading.cpu_celsius:.1f} Β°C")
if reading.gpu_celsius is not None:
print(f"GPU temperature... | hypernix.thermometer | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
d46cb970-58e2-405d-bc36-6ca28507464f | Apply LightFry noise to a model, train a step, then restore pristine weights. | import torch
import torch.nn as nn
from hypernix.deep_fryer import LightFry
model = nn.Linear(64, 32)
fryer = LightFry(seed=42)
fryer.save_pristine(model)
print("Weights before fry:", model.weight[0, :4].tolist())
fryer.fry(model)
print("Weights after fry: ", model.weight[0, :4].tolist())
fryer.un_fry(model)
print(... | ### Instruction:
Apply LightFry noise to a model, train a step, then restore pristine weights.
### Response:
import torch
import torch.nn as nn
from hypernix.deep_fryer import LightFry
model = nn.Linear(64, 32)
fryer = LightFry(seed=42)
fryer.save_pristine(model)
print("Weights before fry:", model.weight[0, :4].toli... | hypernix.deep_fryer | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
0745e482-e79a-4a08-8def-571bf06c38fc | Wrap a training step in CakePan to auto-skip NaN/Inf losses and roll back on failure. | import torch
import torch.nn as nn
from hypernix.cake_pan import CakePan, BakeOff
model = nn.Linear(32, 16)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
pan = CakePan(model, watch_gradients=True)
def step_fn(batch: torch.Tensor) -> torch.Tensor:
opt.zero_grad()
loss = model(batch).sum()
los... | ### Instruction:
Wrap a training step in CakePan to auto-skip NaN/Inf losses and roll back on failure.
### Response:
import torch
import torch.nn as nn
from hypernix.cake_pan import CakePan, BakeOff
model = nn.Linear(32, 16)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
pan = CakePan(model, watch_gradie... | hypernix.cake_pan | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
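step_fn is cut off mid-line (most likely at loss.backward()). A sketch of how the wrapped step is plausibly driven; the bake() method and the BakeOff-on-failure semantics are hypothetical, only the names are imported above:

# Hypothetical API: bake() and the BakeOff exception are assumptions.
try:
    loss = pan.bake(step_fn, torch.randn(8, 32))  # NaN/Inf steps skipped
    print(f"step ok, loss={loss.item():.4f}")
except BakeOff:
    print("unrecoverable step: weights rolled back")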
44949d56-1b25-43e1-a7dd-68e01c68ae8a | Run a healthcheck and print the full diagnostic info dictionary. | from hypernix import utils
ok = utils.healthcheck()
print(f"Environment healthy: {ok}")
info = utils.diagnostic_info()
print(f"PyTorch version: {info.get('torch_version', 'not installed')}")
print(f"CUDA available: {info.get('cuda_available', False)}")
print(f"Known models: {info.get('known_models_count', 0)}"... | ### Instruction:
Run a healthcheck and print the full diagnostic info dictionary.
### Response:
from hypernix import utils
ok = utils.healthcheck()
print(f"Environment healthy: {ok}")
info = utils.diagnostic_info()
print(f"PyTorch version: {info.get('torch_version', 'not installed')}")
print(f"CUDA available: {in... | hypernix.utils | 3.12 | basic | 0.61.2 | hypernix-dataset-generator | |
0d5c8c10-afbc-41dc-a65a-86e4a94e1ce3 | List all known model short names and check binary availability. | from hypernix import utils
models = utils.list_models()
print(f"Total known models: {len(models)}")
print("First 10:", models[:10])
for binary in ("llama-quantize", "nvidia-smi", "nvcc"):
found = utils.has_binary(binary)
print(f" {binary}: {'β' if found else 'β'}")
| ### Instruction:
List all known model short names and check binary availability.
### Response:
from hypernix import utils
models = utils.list_models()
print(f"Total known models: {len(models)}")
print("First 10:", models[:10])
for binary in ("llama-quantize", "nvidia-smi", "nvcc"):
found = utils.has_binary(binar... | hypernix.utils | 3.11 | basic | 0.61.2 | hypernix-dataset-generator | |
7b00db17-fd55-40a5-a0d2-931a5e022a35 | Use torch_compat to safely create RMSNorm on both modern and legacy (torch 1.13) installations. | from hypernix.torch_compat import RMSNorm, scaled_dot_product_attention
import torch
# RMSNorm: available natively in torch >= 2.4; shim on older versions.
norm = RMSNorm(normalized_shape=128)
x = torch.randn(2, 10, 128)
out = norm(x)
print(f"RMSNorm output shape: {out.shape}")
# scaled_dot_product_attention ... | ### Instruction:
Use torch_compat to safely create RMSNorm on both modern and legacy (torch 1.13) installations.
### Response:
from hypernix.torch_compat import RMSNorm, scaled_dot_product_attention
import torch
# RMSNorm: available natively in torch >= 2.4; shim on older versions.
norm = RMSNorm(normalized_shape=1... | hypernix.torch_compat | 3.11 | intermediate | 0.61.2 | hypernix-dataset-generator | |
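The cell is truncated before its scaled_dot_product_attention half. Assuming the compat export mirrors the stock torch.nn.functional signature (which the shim presumably wraps), that half likely looks like:

# Assumes the shim matches stock SDPA: (batch, heads, seq, head_dim).
q = torch.randn(2, 4, 10, 32)
k = torch.randn(2, 4, 10, 32)
v = torch.randn(2, 4, 10, 32)
attn = scaled_dot_product_attention(q, k, v)
print(f"SDPA output shape: {attn.shape}")  # torch.Size([2, 4, 10, 32])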
aee19c7a-9f16-44ca-95b8-74c69b5b3710 | Apply gentle character-substitution augmentation with FromTheBag. | from pathlib import Path
from hypernix.salt_shaker import FromTheBag
from hypernix.sink import Sink
corpus = Path("clean.txt")
corpus.write_text("\n".join([
"The quick brown fox.",
"Machine learning is fun.",
"Python is a great language.",
]))
# Augment at 5% character substitution rate.
shaker = FromTheB... | ### Instruction:
Apply gentle character-substitution augmentation with FromTheBag.
### Response:
from pathlib import Path
from hypernix.salt_shaker import FromTheBag
from hypernix.sink import Sink
corpus = Path("clean.txt")
corpus.write_text("\n".join([
"The quick brown fox.",
"Machine learning is fun.",
... | hypernix.salt_shaker | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
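The cell truncates at the FromTheBag constructor. A hedged finish: the rate/seed kwargs mirror the pepper-shaker row below, and shake() over lines is assumed; the Sink import above suggests the original also wrote results out, omitted here since its signature is not visible:

# Hypothetical API: rate, seed, and shake() are assumptions.
shaker = FromTheBag(rate=0.05, seed=42)
lines = corpus.read_text().splitlines()
for aug in shaker.shake(lines):
    print(aug)  # ~5% of characters substituted per line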
dabf4c5d-a48c-4145-82a4-133b6bb27da2 | Inject negations into sentences using TallHandmade pepper shaker. | from hypernix.pepper_shaker import TallHandmade
shaker = TallHandmade(rate=0.3, negator="NOT", seed=42)
lines = [
"This model produces accurate results.",
"The training converged successfully.",
"The loss decreased each epoch.",
]
augmented = list(shaker.shake(lines))
for orig, aug in zip(lines, augment... | ### Instruction:
Inject negations into sentences using TallHandmade pepper shaker.
### Response:
from hypernix.pepper_shaker import TallHandmade
shaker = TallHandmade(rate=0.3, negator="NOT", seed=42)
lines = [
"This model produces accurate results.",
"The training converged successfully.",
"The loss d... | hypernix.pepper_shaker | 3.13 | intermediate | 0.61.2 | hypernix-dataset-generator | |
e8cbed0d-2f9f-4436-b1bb-1b291e56578f | Create a RecipeBook, add a custom recipe, and execute it with an override. | from hypernix.recipe_book import RecipeBook
rb = RecipeBook()
rb.add("my_finetune", {
"kind": "instant_pot",
"repo_id": "nix2.5",
"dataset": "./corpus.txt",
"out_dir": "./out",
"steps": 500,
"batch_size": 1,
"context_length": 512,
"device": ... | ### Instruction:
Create a RecipeBook, add a custom recipe, and execute it with an override.
### Response:
from hypernix.recipe_book import RecipeBook
rb = RecipeBook()
rb.add("my_finetune", {
"kind": "instant_pot",
"repo_id": "nix2.5",
"dataset": "./corpus.txt",
"out_dir": ... | hypernix.recipe_book | 3.12 | intermediate | 0.61.2 | hypernix-dataset-generator | |
7f24d95b-7799-4ab3-83ea-b656a299be15 | Convert a safetensors snapshot to fp16 GGUF using hypernix.convert. | from hypernix import convert
# snapshot_dir must contain model.safetensors (or shards) + config.json.
convert.convert_to_gguf(
snapshot_dir="./my_model_snapshot",
output_dir="./gguf_output",
dtype="fp16", # "fp32" or "fp16"
)
print("GGUF written to ./gguf_output/")
| ### Instruction:
Convert a safetensors snapshot to fp16 GGUF using hypernix.convert.
### Response:
from hypernix import convert
# snapshot_dir must contain model.safetensors (or shards) + config.json.
convert.convert_to_gguf(
snapshot_dir="./my_model_snapshot",
output_dir="./gguf_output",
dtype="fp16", ... | hypernix.convert | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
e53bce19-edeb-44b6-a9ed-b2aba581532b | Upload GGUF files to a HuggingFace repo using hypernix.upload. | import os
from hypernix import upload
hf_token = os.environ["HF_TOKEN"] # never hardcode
upload.upload_gguf(
gguf_dir="./gguf_output",
repo_id="my_org/my_model-gguf",
token=hf_token,
)
print("Upload complete.")
| ### Instruction:
Upload GGUF files to a HuggingFace repo using hypernix.upload.
### Response:
import os
from hypernix import upload
hf_token = os.environ["HF_TOKEN"] # never hardcode
upload.upload_gguf(
gguf_dir="./gguf_output",
repo_id="my_org/my_model-gguf",
token=hf_token,
)
print("Upload complete.")... | hypernix.upload | 3.12 | advanced | 0.61.2 | hypernix-dataset-generator | |
879b3d26-9949-4cb6-b605-15846e23d143 | Full pipeline: preprocess corpus β train with GoodSmoker β checkpoint averaging β evaluate with espresso_maker. | from pathlib import Path
import torch
import torch.nn as nn
from hypernix import pans
from hypernix.sink import Sink
from hypernix.old_oven import preheat
from hypernix.smoker import good_smoker
from hypernix.whisk import whisk_to_snapshot
from hypernix.espresso_maker import single_shot
# 1. Preprocess.
raw = Path("r... | ### Instruction:
Full pipeline: preprocess corpus β train with GoodSmoker β checkpoint averaging β evaluate with espresso_maker.
### Response:
from pathlib import Path
import torch
import torch.nn as nn
from hypernix import pans
from hypernix.sink import Sink
from hypernix.old_oven import preheat
from hypernix.smoker... | hypernix.old_oven | 3.12 | integration | 0.61.2 | hypernix-dataset-generator | |
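Only the imports and the first preprocessing line survive in this integration row. A skeleton of the five advertised stages; every signature below is an assumption, only the imported names are grounded, and the truncated input filename is replaced by a placeholder:

# All signatures below are assumptions; only the names are imported above.
raw = Path("raw.txt")                  # placeholder; original name truncated
clean_path = Sink(str(raw)).drain("clean.txt")          # 1. preprocess
oven = preheat(repo_id="nix2.5", device="cpu")          # 2. load base model
good_smoker(oven, dataset=clean_path, out_dir="./out")  # 3. train
snap = whisk_to_snapshot("./out", keep_last=3)          # 4. average ckpts
print(single_shot(snap))                                # 5. evaluate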
00922a2d-d22d-4c88-aa69-08125340e6c1 | End-to-end CLI walkthrough: download β train β quantize β upload (all subcommands). | #!/usr/bin/env bash
# This script demonstrates the full hypernix CLI pipeline.
# Run in a shell, not Python; shown here as a Python docstring for reference.
"""
# 1. Sanity-check the environment.
hypernix doctor --fix
# 2. Download the hyper-nix.1 snapshot.
hypernix download --repo-id hyper-nix.1 --output-dir ./model
... | ### Instruction:
End-to-end CLI walkthrough: download β train β quantize β upload (all subcommands).
### Response:
#!/usr/bin/env bash
# This script demonstrates the full hypernix CLI pipeline.
# Run in a shell, not Python; shown here as a Python docstring for reference.
"""
# 1. Sanity-check the environment.
hypernix... | hypernix.upload | 3.12 | cli | 0.61.2 | hypernix-dataset-generator |
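The shell walkthrough is cut off after the download step. A plausible remainder; the subcommand names follow the title's download-train-quantize-upload arc and the Python APIs earlier in this table, but every flag below is an assumption:

# 3. Train (subcommand and flags assumed):
#    hypernix train --repo-id ./model --dataset ./corpus.txt --out-dir ./trained
# 4. Convert/quantize to GGUF (assumed; mirrors convert_to_gguf above):
#    hypernix convert --snapshot-dir ./trained --output-dir ./gguf --dtype fp16
# 5. Upload (assumed; mirrors upload_gguf above):
#    hypernix upload --gguf-dir ./gguf --repo-id my_org/my_model-gguf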