Repositories / more_nnsight.git

more_nnsight.git

Clone (read-only): git clone http://git.guha-anderson.com/git/more_nnsight.git

Branch

Add steering_search with Bayesian optimization and sentiment tests

Introduces SteeringSearchOutput (baseline_responses + trials) so callers
get unsteered and steered responses alongside scores. SteeringTrialResult
now carries per-trial responses. GPT-2 integration test verifies that
steering neg-pos vector shifts "The weather today is" from positive to
negative sentiment using a ternary keyword-based score.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Author
Arjun Guha <a.guha@northeastern.edu>
Date
2026-04-02 06:30:46 -0400
Commit
e49c4eea30482c547a65d4e86b7805901c73e741
pyproject.toml
index fb80caa..0aa60c9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,6 +10,7 @@ requires-python = ">=3.12"
 dependencies = [
     "lark>=1.2",
     "nnsight>=0.6.3",
+    "scikit-optimize>=0.10.2",
     "torch>=2.10.0",
     "transformers>=4",
 ]
src/more_nnsight/__init__.py
index 5f149ac..7b476de 100644
--- a/src/more_nnsight/__init__.py
+++ b/src/more_nnsight/__init__.py
@@ -1,6 +1,22 @@
 from .saved_activation import SavedActivation, save_activations, updates
+from .steering_search import (
+    SteeringSearchConfig,
+    SteeringSearchOutput,
+    SteeringTrialResult,
+    best_trial,
+    steering_search,
+)
 
-__all__ = ["SavedActivation", "save_activations", "updates"]
+__all__ = [
+    "SavedActivation",
+    "save_activations",
+    "updates",
+    "SteeringSearchConfig",
+    "SteeringSearchOutput",
+    "SteeringTrialResult",
+    "best_trial",
+    "steering_search",
+]
 
 
 def main() -> None:
src/more_nnsight/steering_search.py
new file mode 100644
index 0000000..4ed3ace
--- /dev/null
+++ b/src/more_nnsight/steering_search.py
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass, asdict
+from pathlib import Path
+from typing import Any, Callable
+
+import torch
+from skopt import gp_minimize
+from skopt.space import Integer, Real
+
+from .saved_activation import SavedActivation, save_activations, updates
+
+
+PENALTY = 1e6
+
+
+@dataclass(frozen=True, slots=True)
+class SteeringSearchConfig:
+    """Configuration for a Bayesian-optimized steering vector search."""
+
+    candidate_paths: list[str]
+    max_simultaneous_paths: int
+    alpha_range: tuple[float, float]
+    n_calls: int
+    n_initial_points: int = 10
+    seed: int = 0
+
+
+@dataclass(frozen=True, slots=True)
+class SteeringTrialResult:
+    """One trial from the search."""
+
+    trial_index: int
+    selected_paths: list[str]
+    alpha: float
+    score: float
+    raw_point: list[int | float]
+    responses: list[str]
+
+
+@dataclass(frozen=True, slots=True)
+class SteeringSearchOutput:
+    """Full output from a steering search run."""
+
+    baseline_responses: list[str]
+    trials: list[SteeringTrialResult]
+
+
+def best_trial(results: list[SteeringTrialResult]) -> SteeringTrialResult:
+    """Return the trial with the lowest score."""
+    return min(results, key=lambda r: r.score)
+
+
+def _decode_point(
+    point: list[Any],
+    n_candidates: int,
+    candidate_paths: list[str],
+) -> tuple[list[str], float]:
+    """Decode a raw skopt point into (selected_paths, alpha).
+
+    Path slot indices equal to n_candidates are sentinels meaning "unused".
+    Indices are sorted and deduplicated.
+    """
+    *path_indices, alpha = point
+    unique_indices = sorted(set(idx for idx in path_indices if idx < n_candidates))
+    selected = [candidate_paths[i] for i in unique_indices]
+    return selected, float(alpha)
+
+
+def _append_jsonl(path: Path, result: SteeringTrialResult) -> None:
+    with open(path, "a") as f:
+        f.write(json.dumps(asdict(result)) + "\n")
+
+
+@torch.no_grad()
+def _compute_steering_vectors(
+    model: Any,
+    candidate_paths: list[str],
+    positive_prompts: list[str],
+    negative_prompts: list[str],
+) -> dict[str, torch.Tensor]:
+    """Compute mean-difference steering vectors for each candidate path.
+
+    For each path: vector = mean(positive_activations) - mean(negative_activations).
+    Processes each prompt individually.
+    """
+    sums: dict[str, dict[str, Any]] = {p: {} for p in candidate_paths}
+
+    for prompts, label in [(positive_prompts, "pos"), (negative_prompts, "neg")]:
+        for prompt in prompts:
+            with model.trace() as tracer:
+                with tracer.invoke(prompt):
+                    saved = save_activations(model, candidate_paths)
+            for path in candidate_paths:
+                tensor = (
+                    torch.as_tensor(saved.get(path))
+                    .detach()
+                    .float()
+                    .cpu()
+                    .reshape(1, -1)
+                )
+                bucket = sums[path]
+                if label not in bucket:
+                    bucket[label] = tensor.clone()
+                    bucket[f"{label}_n"] = 1
+                else:
+                    bucket[label] = bucket[label] + tensor
+                    bucket[f"{label}_n"] += 1
+
+    vectors: dict[str, torch.Tensor] = {}
+    for path, bucket in sums.items():
+        pos_mean = bucket["pos"] / bucket["pos_n"]
+        neg_mean = bucket["neg"] / bucket["neg_n"]
+        vectors[path] = pos_mean - neg_mean
+    return vectors
+
+
+@torch.no_grad()
+def _generate_batch(
+    model: Any,
+    prompts: list[str],
+    max_new_tokens: int,
+) -> list[str]:
+    """Generate responses for a batch of prompts without steering."""
+    tokenizer = model.tokenizer
+
+    orig_padding_side = getattr(tokenizer, "padding_side", "right")
+    orig_pad_token = tokenizer.pad_token
+    tokenizer.padding_side = "left"
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+    try:
+        encoded = tokenizer(
+            prompts, return_tensors="pt", padding=True, add_special_tokens=False
+        )
+        total_len = int(encoded["input_ids"].shape[1])
+
+        with model.generate(max_new_tokens=max_new_tokens, do_sample=False) as tracer:
+            with tracer.invoke(prompts):
+                output_ids = model.generator.output.save()
+
+        responses: list[str] = []
+        for i in range(len(prompts)):
+            response = tokenizer.decode(
+                output_ids[i][total_len:], skip_special_tokens=True
+            )
+            responses.append(response)
+        return responses
+    finally:
+        tokenizer.padding_side = orig_padding_side
+        tokenizer.pad_token = orig_pad_token
+
+
+@torch.no_grad()
+def _generate_steered_batch(
+    model: Any,
+    prompts: list[str],
+    direction: SavedActivation,
+    alpha: float,
+    max_new_tokens: int,
+) -> list[str]:
+    """Generate responses for a batch of prompts with steering applied."""
+    tokenizer = model.tokenizer
+
+    orig_padding_side = getattr(tokenizer, "padding_side", "right")
+    orig_pad_token = tokenizer.pad_token
+    tokenizer.padding_side = "left"
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+    try:
+        encoded = tokenizer(
+            prompts, return_tensors="pt", padding=True, add_special_tokens=False
+        )
+        total_len = int(encoded["input_ids"].shape[1])
+
+        with model.generate(max_new_tokens=max_new_tokens, do_sample=False) as tracer:
+            with tracer.invoke(prompts):
+                for key, current_value, update in updates(model, direction.keys()):
+                    steer = direction.get(key).to(
+                        device=current_value.device,
+                        dtype=current_value.dtype,
+                    )
+                    update(current_value + alpha * steer)
+                output_ids = model.generator.output.save()
+
+        responses: list[str] = []
+        for i in range(len(prompts)):
+            response = tokenizer.decode(
+                output_ids[i][total_len:], skip_special_tokens=True
+            )
+            responses.append(response)
+        return responses
+    finally:
+        tokenizer.padding_side = orig_padding_side
+        tokenizer.pad_token = orig_pad_token
+
+
+@torch.no_grad()
+def _generate_steered(
+    model: Any,
+    prompt: str,
+    direction: SavedActivation,
+    alpha: float,
+    max_new_tokens: int,
+) -> str:
+    """Generate a response with steering applied, return the response text."""
+    return _generate_steered_batch(model, [prompt], direction, alpha, max_new_tokens)[0]
+
+
+@torch.no_grad()
+def steering_search(
+    config: SteeringSearchConfig,
+    model: Any,
+    positive_prompts: list[str],
+    negative_prompts: list[str],
+    eval_prompts: list[str],
+    score: Callable[[str, str], float | None],
+    max_new_tokens: int,
+    ineligible_score: float,
+    output_path: Path | None = None,
+    batch_size: int = 1,
+) -> SteeringSearchOutput:
+    """Run Bayesian-optimized search over steering path subsets and alpha values.
+
+    Args:
+        config: Search configuration (candidate paths, budget, etc.).
+        model: An nnsight LanguageModel (already loaded).
+        positive_prompts: Prompts for the "positive" class (used to compute
+            steering vectors).
+        negative_prompts: Prompts for the "negative" class (used to compute
+            steering vectors).
+        eval_prompts: Prompts to generate steered responses on each trial.
+        score: Scoring function. Takes (prompt, response) and returns a float
+            where **lower is better**, or ``None`` if the result is ineligible.
+        max_new_tokens: Maximum tokens to generate per prompt.
+        ineligible_score: Score to assign when steering makes a previously
+            eligible prompt ineligible (e.g. program stops passing tests).
+        output_path: If provided, write trial results as JSONL (appended
+            incrementally so partial results survive crashes).
+        batch_size: Number of prompts to generate in parallel per batch.
+
+    Returns:
+        SteeringSearchOutput with baseline_responses and all trial results.
+    """
+    if not config.candidate_paths:
+        raise ValueError("candidate_paths must not be empty")
+
+    n_candidates = len(config.candidate_paths)
+    m = config.max_simultaneous_paths
+
+    # Compute steering vectors once up front.
+    vectors = _compute_steering_vectors(
+        model, config.candidate_paths, positive_prompts, negative_prompts
+    )
+
+    # Pre-filter: generate baselines (no steering) and score.
+    # Prompts where the baseline score is None are permanently excluded.
+    eligible_prompts: list[str] = []
+    baseline_responses: list[str] = []
+    print(f"Pre-filtering {len(eval_prompts)} eval prompts...")
+    for i in range(0, len(eval_prompts), batch_size):
+        batch = eval_prompts[i : i + batch_size]
+        responses = _generate_batch(model, batch, max_new_tokens)
+        for prompt, response in zip(batch, responses):
+            baseline_score = score(prompt, response)
+            if baseline_score is not None:
+                eligible_prompts.append(prompt)
+                baseline_responses.append(response)
+    print(f"Eligible prompts: {len(eligible_prompts)} / {len(eval_prompts)}")
+
+    if not eligible_prompts:
+        raise ValueError("No eligible eval prompts after baseline filtering")
+
+    # Ensure the output directory exists and truncate any previous output file.
+    if output_path is not None:
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+        output_path.write_text("")
+
+    # Build search space.
+    dimensions = [
+        Integer(0, n_candidates, name=f"path_{i}") for i in range(m)
+    ] + [
+        Real(config.alpha_range[0], config.alpha_range[1], name="alpha"),
+    ]
+
+    trials: list[SteeringTrialResult] = []
+
+    def objective(point: list[Any]) -> float:
+        selected_paths, alpha = _decode_point(point, n_candidates, config.candidate_paths)
+
+        if not selected_paths:
+            trial_score = PENALTY
+            trial_responses: list[str] = []
+        else:
+            direction = SavedActivation.from_pairs(
+                *[(p, vectors[p]) for p in selected_paths]
+            )
+            trial_scores: list[float] = []
+            trial_responses = []
+            for i in range(0, len(eligible_prompts), batch_size):
+                batch_prompts = eligible_prompts[i : i + batch_size]
+                batch_responses = _generate_steered_batch(
+                    model, batch_prompts, direction, alpha, max_new_tokens
+                )
+                trial_responses.extend(batch_responses)
+                for prompt, response in zip(batch_prompts, batch_responses):
+                    s = score(prompt, response)
+                    trial_scores.append(s if s is not None else ineligible_score)
+            trial_score = sum(trial_scores) / len(trial_scores)
+
+        result = SteeringTrialResult(
+            trial_index=len(trials),
+            selected_paths=selected_paths,
+            alpha=alpha,
+            score=trial_score,
+            raw_point=[int(x) if isinstance(x, (int,)) else float(x) for x in point],
+            responses=trial_responses,
+        )
+        trials.append(result)
+        if output_path is not None:
+            _append_jsonl(output_path, result)
+        return trial_score
+
+    gp_minimize(
+        func=objective,
+        dimensions=dimensions,
+        n_calls=config.n_calls,
+        n_initial_points=config.n_initial_points,
+        random_state=config.seed,
+    )
+
+    return SteeringSearchOutput(baseline_responses=baseline_responses, trials=trials)
tests/test_steering_search.py
new file mode 100644
index 0000000..f26bf58
--- /dev/null
+++ b/tests/test_steering_search.py
@@ -0,0 +1,315 @@
+"""Unit tests for steering_search — CPU-only, no model required."""
+
+from __future__ import annotations
+
+import json
+import sys
+
+import pytest
+import torch
+
+from more_nnsight.steering_search import (
+    PENALTY,
+    SavedActivation,
+    SteeringSearchConfig,
+    SteeringSearchOutput,
+    SteeringTrialResult,
+    _decode_point,
+    best_trial,
+    steering_search,
+)
+
+_ss = sys.modules["more_nnsight.steering_search"]
+
+
+# ---------------------------------------------------------------------------
+# _decode_point
+# ---------------------------------------------------------------------------
+
+
+def test_decode_deduplicates_and_removes_sentinel():
+    candidates = ["path_a", "path_b", "path_c"]
+    selected, alpha = _decode_point([2, 0, 2, 1.5], n_candidates=3, candidate_paths=candidates)
+    assert selected == ["path_a", "path_c"]
+    assert alpha == 1.5
+
+
+def test_decode_all_sentinel_returns_empty():
+    candidates = ["path_a", "path_b"]
+    selected, alpha = _decode_point([2, 2, 0.5], n_candidates=2, candidate_paths=candidates)
+    assert selected == []
+    assert alpha == 0.5
+
+
+# ---------------------------------------------------------------------------
+# best_trial
+# ---------------------------------------------------------------------------
+
+
+def test_best_trial_returns_minimum_score():
+    trials = [
+        SteeringTrialResult(0, ["a"], 1.0, 5.0, [0, 1.0], ["r0"]),
+        SteeringTrialResult(1, ["b"], 2.0, 1.0, [1, 2.0], ["r1"]),
+        SteeringTrialResult(2, ["a", "b"], 1.5, 3.0, [0, 1, 1.5], ["r2"]),
+    ]
+    assert best_trial(trials).trial_index == 1
+
+
+# ---------------------------------------------------------------------------
+# steering_search (with mocked internals)
+# ---------------------------------------------------------------------------
+
+
+class _FakeDirection:
+    """Mimics SavedActivation enough for the objective function."""
+
+    def __init__(self, paths):
+        self._paths = list(paths)
+
+    def keys(self):
+        return list(self._paths)
+
+    def get(self, key):
+        return torch.zeros(1, 8)
+
+
+def _patch_internals(monkeypatch, fake_vectors, generate_fn=None):
+    """Patch _compute_steering_vectors, SavedActivation.from_pairs,
+    _generate_batch (baseline), and _generate_steered_batch."""
+    monkeypatch.setattr(_ss, "_compute_steering_vectors", lambda *a, **kw: fake_vectors)
+
+    def fake_from_pairs(*pairs):
+        return _FakeDirection([p for p, _ in pairs])
+
+    monkeypatch.setattr(SavedActivation, "from_pairs", staticmethod(fake_from_pairs))
+
+    # Mock baseline generation (for pre-filter).
+    def baseline_batch_fn(model, prompts, max_new_tokens):
+        return ["baseline response"] * len(prompts)
+
+    monkeypatch.setattr(_ss, "_generate_batch", baseline_batch_fn)
+
+    # Mock steered generation.
+    if generate_fn is not None:
+        def batch_fn(model, prompts, direction, alpha, max_new_tokens):
+            return [generate_fn(model, p, direction, alpha, max_new_tokens) for p in prompts]
+    else:
+        def batch_fn(model, prompts, direction, alpha, max_new_tokens):
+            return ["fake response"] * len(prompts)
+
+    monkeypatch.setattr(_ss, "_generate_steered_batch", batch_fn)
+
+
+def test_empty_candidates_raises():
+    config = SteeringSearchConfig(
+        candidate_paths=[],
+        max_simultaneous_paths=2,
+        alpha_range=(0.1, 5.0),
+        n_calls=5,
+    )
+    with pytest.raises(ValueError, match="candidate_paths must not be empty"):
+        steering_search(
+            config=config,
+            model=None,
+            positive_prompts=[],
+            negative_prompts=[],
+            eval_prompts=[],
+            score=lambda p, r: 0.0,
+            max_new_tokens=10,
+            ineligible_score=1.0,
+        )
+
+
+def test_search_returns_correct_number_of_results(tmp_path, monkeypatch):
+    fake_vectors = {"p0": torch.randn(1, 8), "p1": torch.randn(1, 8)}
+    _patch_internals(monkeypatch, fake_vectors)
+
+    config = SteeringSearchConfig(
+        candidate_paths=["p0", "p1"],
+        max_simultaneous_paths=2,
+        alpha_range=(0.1, 3.0),
+        n_calls=15,
+        n_initial_points=5,
+    )
+    output = tmp_path / "trials.jsonl"
+    out = steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos1"],
+        negative_prompts=["neg1"],
+        eval_prompts=["eval1", "eval2"],
+        score=lambda p, r: 1.0,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+        output_path=output,
+    )
+    assert isinstance(out, SteeringSearchOutput)
+    assert len(out.trials) == 15
+    assert len(out.baseline_responses) == 2
+
+
+def test_search_finds_known_optimum(monkeypatch):
+    candidates = [f"path_{i}" for i in range(4)]
+    fake_vectors = {p: torch.randn(1, 8) for p in candidates}
+
+    def fake_generate(model, prompt, direction, alpha, max_new_tokens):
+        return ",".join(direction.keys())
+
+    _patch_internals(monkeypatch, fake_vectors, generate_fn=fake_generate)
+
+    def score_fn(prompt, response):
+        paths = response.split(",") if response else []
+        return 0.0 if "path_2" in paths else 1.0
+
+    config = SteeringSearchConfig(
+        candidate_paths=candidates,
+        max_simultaneous_paths=2,
+        alpha_range=(0.5, 5.0),
+        n_calls=30,
+        n_initial_points=10,
+    )
+    out = steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos"],
+        negative_prompts=["neg"],
+        eval_prompts=["eval"],
+        score=score_fn,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+    )
+    best = best_trial(out.trials)
+    assert best.score == 0.0
+    assert "path_2" in best.selected_paths
+
+
+def test_jsonl_output_written_incrementally(tmp_path, monkeypatch):
+    fake_vectors = {"p0": torch.randn(1, 8)}
+    _patch_internals(monkeypatch, fake_vectors)
+
+    config = SteeringSearchConfig(
+        candidate_paths=["p0"],
+        max_simultaneous_paths=1,
+        alpha_range=(0.1, 3.0),
+        n_calls=8,
+        n_initial_points=4,
+    )
+    output = tmp_path / "out.jsonl"
+    steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos"],
+        negative_prompts=["neg"],
+        eval_prompts=["eval"],
+        score=lambda p, r: 1.0,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+        output_path=output,
+    )
+    lines = output.read_text().strip().split("\n")
+    assert len(lines) == 8
+    for line in lines:
+        record = json.loads(line)
+        assert "trial_index" in record
+        assert "selected_paths" in record
+        assert "alpha" in record
+        assert "score" in record
+        assert "raw_point" in record
+        assert "responses" in record
+
+
+def test_empty_selection_gets_penalty(monkeypatch):
+    fake_vectors = {"p0": torch.randn(1, 8)}
+    _patch_internals(monkeypatch, fake_vectors)
+
+    config = SteeringSearchConfig(
+        candidate_paths=["p0"],
+        max_simultaneous_paths=2,
+        alpha_range=(0.1, 3.0),
+        n_calls=10,
+        n_initial_points=5,
+    )
+    out = steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos"],
+        negative_prompts=["neg"],
+        eval_prompts=["eval"],
+        score=lambda p, r: 0.0,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+    )
+    penalty_trials = [r for r in out.trials if not r.selected_paths]
+    for t in penalty_trials:
+        assert t.score == PENALTY
+
+
+def test_ineligible_baseline_prompts_excluded(monkeypatch):
+    """Prompts that score None on baseline are excluded from evaluation."""
+    fake_vectors = {"p0": torch.randn(1, 8)}
+    _patch_internals(monkeypatch, fake_vectors)
+
+    # 3 eval prompts; "bad" one returns None on baseline, others return 0.5
+    def score_fn(prompt, response):
+        if prompt == "bad_prompt":
+            return None
+        return 0.5
+
+    config = SteeringSearchConfig(
+        candidate_paths=["p0"],
+        max_simultaneous_paths=1,
+        alpha_range=(0.1, 3.0),
+        n_calls=5,
+        n_initial_points=3,
+    )
+    out = steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos"],
+        negative_prompts=["neg"],
+        eval_prompts=["good1", "bad_prompt", "good2"],
+        score=score_fn,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+    )
+    # All non-penalty trials should have score 0.5 (only good prompts evaluated)
+    for t in out.trials:
+        if t.selected_paths:
+            assert t.score == 0.5
+
+
+def test_steered_none_uses_ineligible_score(monkeypatch):
+    """When steering makes an eligible prompt return None, ineligible_score is used."""
+    fake_vectors = {"p0": torch.randn(1, 8)}
+    _patch_internals(monkeypatch, fake_vectors)
+
+    call_count = [0]
+
+    def score_fn(prompt, response):
+        call_count[0] += 1
+        # Baseline call returns 0.5 (eligible), steered calls return None
+        if response == "baseline response":
+            return 0.5
+        return None
+
+    config = SteeringSearchConfig(
+        candidate_paths=["p0"],
+        max_simultaneous_paths=1,
+        alpha_range=(0.1, 3.0),
+        n_calls=3,
+        n_initial_points=3,
+    )
+    out = steering_search(
+        config=config,
+        model=None,
+        positive_prompts=["pos"],
+        negative_prompts=["neg"],
+        eval_prompts=["eval"],
+        score=score_fn,
+        max_new_tokens=10,
+        ineligible_score=1.0,
+    )
+    # All non-penalty trials should use ineligible_score since steered returns None
+    for t in out.trials:
+        if t.selected_paths:
+            assert t.score == 1.0
tests/test_steering_search_gpt2.py
new file mode 100644
index 0000000..ecc3d34
--- /dev/null
+++ b/tests/test_steering_search_gpt2.py
@@ -0,0 +1,112 @@
+"""Integration tests for steering_search with GPT-2."""
+
+from __future__ import annotations
+
+import pytest
+import torch
+from nnsight import LanguageModel
+
+from more_nnsight import SteeringSearchConfig, SteeringSearchOutput, best_trial, steering_search
+
+# Derived by running a forward pass on GPT-2 with "The weather today is" and
+# manually classifying the top-50 next tokens by sentiment.
+_POSITIVE_KEYWORDS = [
+    "good", "perfect", "nice", "beautiful", "sunny", "great", "fine",
+    "calm", "amazing", "excellent", "warm", "wonderful", "pleasant", "lovely",
+]
+_NEGATIVE_KEYWORDS = [
+    "bad", "cold", "freezing", "cloudy", "terrible", "awful",
+    "gloomy", "miserable", "horrible",
+]
+
+
+@pytest.fixture(scope="module")
+def gpt2():
+    torch.cuda.memory.set_per_process_memory_fraction(0.8)
+    return LanguageModel("openai-community/gpt2", device_map="auto", dispatch=True)
+
+
+def _sentiment_score(response: str) -> float:
+    """Binary score: -1 if only negative keywords, +1 if only positive, 0 if neither/both."""
+    text = response.lower()
+    has_pos = any(kw in text for kw in _POSITIVE_KEYWORDS)
+    has_neg = any(kw in text for kw in _NEGATIVE_KEYWORDS)
+    if has_neg and not has_pos:
+        return -1.0
+    if has_pos and not has_neg:
+        return 1.0
+    return 0.0
+
+
+
+def test_steering_changes_sentiment(gpt2, tmp_path):
+    """Steering negative - positive should push output toward negative sentiment."""
+    candidate_paths = [
+        "model.transformer.h[10].output[-1]",
+        "model.transformer.h[11].output[-1]",
+    ]
+    # Swapped: steering vector = mean(neg_activations) - mean(pos_activations)
+    positive_prompts = [
+        "I hate this! It's terrible",
+        "This is awful and horrible",
+        "I'm so sad about this",
+    ]
+    negative_prompts = [
+        "I love this! It's fantastic",
+        "This is wonderful and amazing",
+        "I'm so happy about this",
+    ]
+    eval_prompt = "The weather today is"
+
+    def score(prompt: str, response: str) -> float:
+        return _sentiment_score(response)
+
+    config = SteeringSearchConfig(
+        candidate_paths=candidate_paths,
+        max_simultaneous_paths=2,
+        alpha_range=(0.5, 5.0),
+        n_calls=8,
+        n_initial_points=4,
+        seed=42,
+    )
+    output = tmp_path / "trials.jsonl"
+    out = steering_search(
+        config=config,
+        model=gpt2,
+        positive_prompts=positive_prompts,
+        negative_prompts=negative_prompts,
+        eval_prompts=[eval_prompt],
+        score=score,
+        max_new_tokens=4,
+        ineligible_score=1.0,
+        output_path=output,
+    )
+
+    assert isinstance(out, SteeringSearchOutput)
+    assert len(out.trials) == 8
+    assert output.exists()
+
+    # Baseline response should contain a positive keyword (GPT-2 default).
+    assert len(out.baseline_responses) == 1
+    baseline_response = out.baseline_responses[0]
+    baseline_score = _sentiment_score(baseline_response)
+    print(f"\nBaseline: {repr(baseline_response)}  score={baseline_score}")
+    assert any(kw in baseline_response.lower() for kw in _POSITIVE_KEYWORDS), (
+        f"Expected positive keyword in baseline: {repr(baseline_response)}"
+    )
+
+    # Best steered trial should score better (more negative) than baseline.
+    best = best_trial(out.trials)
+    print(f"Best steered score={best.score}  paths={best.selected_paths}  alpha={best.alpha:.2f}")
+    assert len(best.selected_paths) > 0
+    assert best.score < baseline_score, (
+        f"Steering did not make sentiment more negative: baseline={baseline_score}, best={best.score}"
+    )
+
+    # Best trial's responses should contain a negative keyword.
+    assert len(best.responses) == 1
+    best_response = best.responses[0]
+    print(f"Best steered response: {repr(best_response)}")
+    assert any(kw in best_response.lower() for kw in _NEGATIVE_KEYWORDS), (
+        f"Expected negative keyword in steered response: {repr(best_response)}"
+    )
uv.lock
index d2531d0..560796f 100644
--- a/uv.lock
+++ b/uv.lock
@@ -460,6 +460,15 @@ wheels = [
 ]
 
 [[package]]
+name = "joblib"
+version = "1.5.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/41/f2/d34e8b3a08a9cc79a50b2208a93dce981fe615b64d5a4d4abee421d898df/joblib-1.5.3.tar.gz", hash = "sha256:8561a3269e6801106863fd0d6d84bb737be9e7631e33aaed3fb9ce5953688da3", size = 331603, upload-time = "2025-12-15T08:41:46.427Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl", hash = "sha256:5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713", size = 309071, upload-time = "2025-12-15T08:41:44.973Z" },
+]
+
+[[package]]
 name = "lark"
 version = "1.3.1"
 source = { registry = "https://pypi.org/simple" }
@@ -571,6 +580,7 @@ source = { editable = "." }
 dependencies = [
     { name = "lark" },
     { name = "nnsight" },
+    { name = "scikit-optimize" },
     { name = "torch" },
     { name = "transformers" },
 ]
@@ -584,6 +594,7 @@ dev = [
 requires-dist = [
     { name = "lark", specifier = ">=1.2" },
     { name = "nnsight", specifier = ">=0.6.3" },
+    { name = "scikit-optimize", specifier = ">=0.10.2" },
     { name = "torch", specifier = ">=2.10.0" },
     { name = "transformers", specifier = ">=4" },
 ]
@@ -955,6 +966,18 @@ wheels = [
 ]
 
 [[package]]
+name = "pyaml"
+version = "26.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/38/fb/2b9590512a9d7763620d87171c7531d5295678ce96e57393614b91da8998/pyaml-26.2.1.tar.gz", hash = "sha256:489dd82997235d4cfcf76a6287fce2f075487d77a6567c271e8d790583690c68", size = 30653, upload-time = "2026-02-06T13:49:30.769Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5d/f3/1f8651f23101e6fae41d0d504414c9722b0140bf0fc6acf87ac52e18aa41/pyaml-26.2.1-py3-none-any.whl", hash = "sha256:6261c2f0a2f33245286c794ad6ec234be33a73d2b05427079fd343e2812a87cf", size = 27211, upload-time = "2026-02-06T13:49:29.652Z" },
+]
+
+[[package]]
 name = "pydantic"
 version = "2.12.5"
 source = { registry = "https://pypi.org/simple" }
@@ -1281,6 +1304,128 @@ wheels = [
 ]
 
 [[package]]
+name = "scikit-learn"
+version = "1.8.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "joblib" },
+    { name = "numpy" },
+    { name = "scipy" },
+    { name = "threadpoolctl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0e/d4/40988bf3b8e34feec1d0e6a051446b1f66225f8529b9309becaeef62b6c4/scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd", size = 7335585, upload-time = "2025-12-10T07:08:53.618Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/90/74/e6a7cc4b820e95cc38cf36cd74d5aa2b42e8ffc2d21fe5a9a9c45c1c7630/scikit_learn-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5fb63362b5a7ddab88e52b6dbb47dac3fd7dafeee740dc6c8d8a446ddedade8e", size = 8548242, upload-time = "2025-12-10T07:07:51.568Z" },
+    { url = "https://files.pythonhosted.org/packages/49/d8/9be608c6024d021041c7f0b3928d4749a706f4e2c3832bbede4fb4f58c95/scikit_learn-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:5025ce924beccb28298246e589c691fe1b8c1c96507e6d27d12c5fadd85bfd76", size = 8079075, upload-time = "2025-12-10T07:07:53.697Z" },
+    { url = "https://files.pythonhosted.org/packages/dd/47/f187b4636ff80cc63f21cd40b7b2d177134acaa10f6bb73746130ee8c2e5/scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4496bb2cf7a43ce1a2d7524a79e40bc5da45cf598dbf9545b7e8316ccba47bb4", size = 8660492, upload-time = "2025-12-10T07:07:55.574Z" },
+    { url = "https://files.pythonhosted.org/packages/97/74/b7a304feb2b49df9fafa9382d4d09061a96ee9a9449a7cbea7988dda0828/scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0bcfe4d0d14aec44921545fd2af2338c7471de9cb701f1da4c9d85906ab847a", size = 8931904, upload-time = "2025-12-10T07:07:57.666Z" },
+    { url = "https://files.pythonhosted.org/packages/9f/c4/0ab22726a04ede56f689476b760f98f8f46607caecff993017ac1b64aa5d/scikit_learn-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:35c007dedb2ffe38fe3ee7d201ebac4a2deccd2408e8621d53067733e3c74809", size = 8019359, upload-time = "2025-12-10T07:07:59.838Z" },
+    { url = "https://files.pythonhosted.org/packages/24/90/344a67811cfd561d7335c1b96ca21455e7e472d281c3c279c4d3f2300236/scikit_learn-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:8c497fff237d7b4e07e9ef1a640887fa4fb765647f86fbe00f969ff6280ce2bb", size = 7641898, upload-time = "2025-12-10T07:08:01.36Z" },
+    { url = "https://files.pythonhosted.org/packages/03/aa/e22e0768512ce9255eba34775be2e85c2048da73da1193e841707f8f039c/scikit_learn-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0d6ae97234d5d7079dc0040990a6f7aeb97cb7fa7e8945f1999a429b23569e0a", size = 8513770, upload-time = "2025-12-10T07:08:03.251Z" },
+    { url = "https://files.pythonhosted.org/packages/58/37/31b83b2594105f61a381fc74ca19e8780ee923be2d496fcd8d2e1147bd99/scikit_learn-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:edec98c5e7c128328124a029bceb09eda2d526997780fef8d65e9a69eead963e", size = 8044458, upload-time = "2025-12-10T07:08:05.336Z" },
+    { url = "https://files.pythonhosted.org/packages/2d/5a/3f1caed8765f33eabb723596666da4ebbf43d11e96550fb18bdec42b467b/scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74b66d8689d52ed04c271e1329f0c61635bcaf5b926db9b12d58914cdc01fe57", size = 8610341, upload-time = "2025-12-10T07:08:07.732Z" },
+    { url = "https://files.pythonhosted.org/packages/38/cf/06896db3f71c75902a8e9943b444a56e727418f6b4b4a90c98c934f51ed4/scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8fdf95767f989b0cfedb85f7ed8ca215d4be728031f56ff5a519ee1e3276dc2e", size = 8900022, upload-time = "2025-12-10T07:08:09.862Z" },
+    { url = "https://files.pythonhosted.org/packages/1c/f9/9b7563caf3ec8873e17a31401858efab6b39a882daf6c1bfa88879c0aa11/scikit_learn-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:2de443b9373b3b615aec1bb57f9baa6bb3a9bd093f1269ba95c17d870422b271", size = 7989409, upload-time = "2025-12-10T07:08:12.028Z" },
+    { url = "https://files.pythonhosted.org/packages/49/bd/1f4001503650e72c4f6009ac0c4413cb17d2d601cef6f71c0453da2732fc/scikit_learn-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:eddde82a035681427cbedded4e6eff5e57fa59216c2e3e90b10b19ab1d0a65c3", size = 7619760, upload-time = "2025-12-10T07:08:13.688Z" },
+    { url = "https://files.pythonhosted.org/packages/d2/7d/a630359fc9dcc95496588c8d8e3245cc8fd81980251079bc09c70d41d951/scikit_learn-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7cc267b6108f0a1499a734167282c00c4ebf61328566b55ef262d48e9849c735", size = 8826045, upload-time = "2025-12-10T07:08:15.215Z" },
+    { url = "https://files.pythonhosted.org/packages/cc/56/a0c86f6930cfcd1c7054a2bc417e26960bb88d32444fe7f71d5c2cfae891/scikit_learn-1.8.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:fe1c011a640a9f0791146011dfd3c7d9669785f9fed2b2a5f9e207536cf5c2fd", size = 8420324, upload-time = "2025-12-10T07:08:17.561Z" },
+    { url = "https://files.pythonhosted.org/packages/46/1e/05962ea1cebc1cf3876667ecb14c283ef755bf409993c5946ade3b77e303/scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72358cce49465d140cc4e7792015bb1f0296a9742d5622c67e31399b75468b9e", size = 8680651, upload-time = "2025-12-10T07:08:19.952Z" },
+    { url = "https://files.pythonhosted.org/packages/fe/56/a85473cd75f200c9759e3a5f0bcab2d116c92a8a02ee08ccd73b870f8bb4/scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:80832434a6cc114f5219211eec13dcbc16c2bac0e31ef64c6d346cde3cf054cb", size = 8925045, upload-time = "2025-12-10T07:08:22.11Z" },
+    { url = "https://files.pythonhosted.org/packages/cc/b7/64d8cfa896c64435ae57f4917a548d7ac7a44762ff9802f75a79b77cb633/scikit_learn-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ee787491dbfe082d9c3013f01f5991658b0f38aa8177e4cd4bf434c58f551702", size = 8507994, upload-time = "2025-12-10T07:08:23.943Z" },
+    { url = "https://files.pythonhosted.org/packages/5e/37/e192ea709551799379958b4c4771ec507347027bb7c942662c7fbeba31cb/scikit_learn-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf97c10a3f5a7543f9b88cbf488d33d175e9146115a451ae34568597ba33dcde", size = 7869518, upload-time = "2025-12-10T07:08:25.71Z" },
+    { url = "https://files.pythonhosted.org/packages/24/05/1af2c186174cc92dcab2233f327336058c077d38f6fe2aceb08e6ab4d509/scikit_learn-1.8.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c22a2da7a198c28dd1a6e1136f19c830beab7fdca5b3e5c8bba8394f8a5c45b3", size = 8528667, upload-time = "2025-12-10T07:08:27.541Z" },
+    { url = "https://files.pythonhosted.org/packages/a8/25/01c0af38fe969473fb292bba9dc2b8f9b451f3112ff242c647fee3d0dfe7/scikit_learn-1.8.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:6b595b07a03069a2b1740dc08c2299993850ea81cce4fe19b2421e0c970de6b7", size = 8066524, upload-time = "2025-12-10T07:08:29.822Z" },
+    { url = "https://files.pythonhosted.org/packages/be/ce/a0623350aa0b68647333940ee46fe45086c6060ec604874e38e9ab7d8e6c/scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:29ffc74089f3d5e87dfca4c2c8450f88bdc61b0fc6ed5d267f3988f19a1309f6", size = 8657133, upload-time = "2025-12-10T07:08:31.865Z" },
+    { url = "https://files.pythonhosted.org/packages/b8/cb/861b41341d6f1245e6ca80b1c1a8c4dfce43255b03df034429089ca2a2c5/scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fb65db5d7531bccf3a4f6bec3462223bea71384e2cda41da0f10b7c292b9e7c4", size = 8923223, upload-time = "2025-12-10T07:08:34.166Z" },
+    { url = "https://files.pythonhosted.org/packages/76/18/a8def8f91b18cd1ba6e05dbe02540168cb24d47e8dcf69e8d00b7da42a08/scikit_learn-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:56079a99c20d230e873ea40753102102734c5953366972a71d5cb39a32bc40c6", size = 8096518, upload-time = "2025-12-10T07:08:36.339Z" },
+    { url = "https://files.pythonhosted.org/packages/d1/77/482076a678458307f0deb44e29891d6022617b2a64c840c725495bee343f/scikit_learn-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:3bad7565bc9cf37ce19a7c0d107742b320c1285df7aab1a6e2d28780df167242", size = 7754546, upload-time = "2025-12-10T07:08:38.128Z" },
+    { url = "https://files.pythonhosted.org/packages/2d/d1/ef294ca754826daa043b2a104e59960abfab4cf653891037d19dd5b6f3cf/scikit_learn-1.8.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:4511be56637e46c25721e83d1a9cea9614e7badc7040c4d573d75fbe257d6fd7", size = 8848305, upload-time = "2025-12-10T07:08:41.013Z" },
+    { url = "https://files.pythonhosted.org/packages/5b/e2/b1f8b05138ee813b8e1a4149f2f0d289547e60851fd1bb268886915adbda/scikit_learn-1.8.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:a69525355a641bf8ef136a7fa447672fb54fe8d60cab5538d9eb7c6438543fb9", size = 8432257, upload-time = "2025-12-10T07:08:42.873Z" },
+    { url = "https://files.pythonhosted.org/packages/26/11/c32b2138a85dcb0c99f6afd13a70a951bfdff8a6ab42d8160522542fb647/scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c2656924ec73e5939c76ac4c8b026fc203b83d8900362eb2599d8aee80e4880f", size = 8678673, upload-time = "2025-12-10T07:08:45.362Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/57/51f2384575bdec454f4fe4e7a919d696c9ebce914590abf3e52d47607ab8/scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15fc3b5d19cc2be65404786857f2e13c70c83dd4782676dd6814e3b89dc8f5b9", size = 8922467, upload-time = "2025-12-10T07:08:47.408Z" },
+    { url = "https://files.pythonhosted.org/packages/35/4d/748c9e2872637a57981a04adc038dacaa16ba8ca887b23e34953f0b3f742/scikit_learn-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:00d6f1d66fbcf4eba6e356e1420d33cc06c70a45bb1363cd6f6a8e4ebbbdece2", size = 8774395, upload-time = "2025-12-10T07:08:49.337Z" },
+    { url = "https://files.pythonhosted.org/packages/60/22/d7b2ebe4704a5e50790ba089d5c2ae308ab6bb852719e6c3bd4f04c3a363/scikit_learn-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f28dd15c6bb0b66ba09728cf09fd8736c304be29409bd8445a080c1280619e8c", size = 8002647, upload-time = "2025-12-10T07:08:51.601Z" },
+]
+
+[[package]]
+name = "scikit-optimize"
+version = "0.10.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "joblib" },
+    { name = "numpy" },
+    { name = "packaging" },
+    { name = "pyaml" },
+    { name = "scikit-learn" },
+    { name = "scipy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b3/95/1b433b9eb9eb653fb97fd525552fd027886e3812d7d20d843994263340aa/scikit_optimize-0.10.2.tar.gz", hash = "sha256:00a3d91bf9015e292b6e7aaefe7e6cb95e8d25ce19adafd2cd88849e1a0b0da0", size = 86202, upload-time = "2024-06-04T19:12:56.879Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/65/cd/15c9ebea645cc9860aa71fe0474f4be981f10ed8e19e1fb0ef1027d4966e/scikit_optimize-0.10.2-py2.py3-none-any.whl", hash = "sha256:45bc7e879b086133984721f2f6735a86c085073f6c481c2ec665b5c67b44d723", size = 107794, upload-time = "2024-06-04T19:12:54.592Z" },
+]
+
+[[package]]
+name = "scipy"
+version = "1.17.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "numpy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/7a/97/5a3609c4f8d58b039179648e62dd220f89864f56f7357f5d4f45c29eb2cc/scipy-1.17.1.tar.gz", hash = "sha256:95d8e012d8cb8816c226aef832200b1d45109ed4464303e997c5b13122b297c0", size = 30573822, upload-time = "2026-02-23T00:26:24.851Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/35/48/b992b488d6f299dbe3f11a20b24d3dda3d46f1a635ede1c46b5b17a7b163/scipy-1.17.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:35c3a56d2ef83efc372eaec584314bd0ef2e2f0d2adb21c55e6ad5b344c0dcb8", size = 31610954, upload-time = "2026-02-23T00:17:49.855Z" },
+    { url = "https://files.pythonhosted.org/packages/b2/02/cf107b01494c19dc100f1d0b7ac3cc08666e96ba2d64db7626066cee895e/scipy-1.17.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:fcb310ddb270a06114bb64bbe53c94926b943f5b7f0842194d585c65eb4edd76", size = 28172662, upload-time = "2026-02-23T00:18:01.64Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/a9/599c28631bad314d219cf9ffd40e985b24d603fc8a2f4ccc5ae8419a535b/scipy-1.17.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cc90d2e9c7e5c7f1a482c9875007c095c3194b1cfedca3c2f3291cdc2bc7c086", size = 20344366, upload-time = "2026-02-23T00:18:12.015Z" },
+    { url = "https://files.pythonhosted.org/packages/35/f5/906eda513271c8deb5af284e5ef0206d17a96239af79f9fa0aebfe0e36b4/scipy-1.17.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:c80be5ede8f3f8eded4eff73cc99a25c388ce98e555b17d31da05287015ffa5b", size = 22704017, upload-time = "2026-02-23T00:18:21.502Z" },
+    { url = "https://files.pythonhosted.org/packages/da/34/16f10e3042d2f1d6b66e0428308ab52224b6a23049cb2f5c1756f713815f/scipy-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e19ebea31758fac5893a2ac360fedd00116cbb7628e650842a6691ba7ca28a21", size = 32927842, upload-time = "2026-02-23T00:18:35.367Z" },
+    { url = "https://files.pythonhosted.org/packages/01/8e/1e35281b8ab6d5d72ebe9911edcdffa3f36b04ed9d51dec6dd140396e220/scipy-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02ae3b274fde71c5e92ac4d54bc06c42d80e399fec704383dcd99b301df37458", size = 35235890, upload-time = "2026-02-23T00:18:49.188Z" },
+    { url = "https://files.pythonhosted.org/packages/c5/5c/9d7f4c88bea6e0d5a4f1bc0506a53a00e9fcb198de372bfe4d3652cef482/scipy-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a604bae87c6195d8b1045eddece0514d041604b14f2727bbc2b3020172045eb", size = 35003557, upload-time = "2026-02-23T00:18:54.74Z" },
+    { url = "https://files.pythonhosted.org/packages/65/94/7698add8f276dbab7a9de9fb6b0e02fc13ee61d51c7c3f85ac28b65e1239/scipy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f590cd684941912d10becc07325a3eeb77886fe981415660d9265c4c418d0bea", size = 37625856, upload-time = "2026-02-23T00:19:00.307Z" },
+    { url = "https://files.pythonhosted.org/packages/a2/84/dc08d77fbf3d87d3ee27f6a0c6dcce1de5829a64f2eae85a0ecc1f0daa73/scipy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:41b71f4a3a4cab9d366cd9065b288efc4d4f3c0b37a91a8e0947fb5bd7f31d87", size = 36549682, upload-time = "2026-02-23T00:19:07.67Z" },
+    { url = "https://files.pythonhosted.org/packages/bc/98/fe9ae9ffb3b54b62559f52dedaebe204b408db8109a8c66fdd04869e6424/scipy-1.17.1-cp312-cp312-win_arm64.whl", hash = "sha256:f4115102802df98b2b0db3cce5cb9b92572633a1197c77b7553e5203f284a5b3", size = 24547340, upload-time = "2026-02-23T00:19:12.024Z" },
+    { url = "https://files.pythonhosted.org/packages/76/27/07ee1b57b65e92645f219b37148a7e7928b82e2b5dbeccecb4dff7c64f0b/scipy-1.17.1-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:5e3c5c011904115f88a39308379c17f91546f77c1667cea98739fe0fccea804c", size = 31590199, upload-time = "2026-02-23T00:19:17.192Z" },
+    { url = "https://files.pythonhosted.org/packages/ec/ae/db19f8ab842e9b724bf5dbb7db29302a91f1e55bc4d04b1025d6d605a2c5/scipy-1.17.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6fac755ca3d2c3edcb22f479fceaa241704111414831ddd3bc6056e18516892f", size = 28154001, upload-time = "2026-02-23T00:19:22.241Z" },
+    { url = "https://files.pythonhosted.org/packages/5b/58/3ce96251560107b381cbd6e8413c483bbb1228a6b919fa8652b0d4090e7f/scipy-1.17.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:7ff200bf9d24f2e4d5dc6ee8c3ac64d739d3a89e2326ba68aaf6c4a2b838fd7d", size = 20325719, upload-time = "2026-02-23T00:19:26.329Z" },
+    { url = "https://files.pythonhosted.org/packages/b2/83/15087d945e0e4d48ce2377498abf5ad171ae013232ae31d06f336e64c999/scipy-1.17.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4b400bdc6f79fa02a4d86640310dde87a21fba0c979efff5248908c6f15fad1b", size = 22683595, upload-time = "2026-02-23T00:19:30.304Z" },
+    { url = "https://files.pythonhosted.org/packages/b4/e0/e58fbde4a1a594c8be8114eb4aac1a55bcd6587047efc18a61eb1f5c0d30/scipy-1.17.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b64ca7d4aee0102a97f3ba22124052b4bd2152522355073580bf4845e2550b6", size = 32896429, upload-time = "2026-02-23T00:19:35.536Z" },
+    { url = "https://files.pythonhosted.org/packages/f5/5f/f17563f28ff03c7b6799c50d01d5d856a1d55f2676f537ca8d28c7f627cd/scipy-1.17.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:581b2264fc0aa555f3f435a5944da7504ea3a065d7029ad60e7c3d1ae09c5464", size = 35203952, upload-time = "2026-02-23T00:19:42.259Z" },
+    { url = "https://files.pythonhosted.org/packages/8d/a5/9afd17de24f657fdfe4df9a3f1ea049b39aef7c06000c13db1530d81ccca/scipy-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:beeda3d4ae615106d7094f7e7cef6218392e4465cc95d25f900bebabfded0950", size = 34979063, upload-time = "2026-02-23T00:19:47.547Z" },
+    { url = "https://files.pythonhosted.org/packages/8b/13/88b1d2384b424bf7c924f2038c1c409f8d88bb2a8d49d097861dd64a57b2/scipy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6609bc224e9568f65064cfa72edc0f24ee6655b47575954ec6339534b2798369", size = 37598449, upload-time = "2026-02-23T00:19:53.238Z" },
+    { url = "https://files.pythonhosted.org/packages/35/e5/d6d0e51fc888f692a35134336866341c08655d92614f492c6860dc45bb2c/scipy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:37425bc9175607b0268f493d79a292c39f9d001a357bebb6b88fdfaff13f6448", size = 36510943, upload-time = "2026-02-23T00:20:50.89Z" },
+    { url = "https://files.pythonhosted.org/packages/2a/fd/3be73c564e2a01e690e19cc618811540ba5354c67c8680dce3281123fb79/scipy-1.17.1-cp313-cp313-win_arm64.whl", hash = "sha256:5cf36e801231b6a2059bf354720274b7558746f3b1a4efb43fcf557ccd484a87", size = 24545621, upload-time = "2026-02-23T00:20:55.871Z" },
+    { url = "https://files.pythonhosted.org/packages/6f/6b/17787db8b8114933a66f9dcc479a8272e4b4da75fe03b0c282f7b0ade8cd/scipy-1.17.1-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:d59c30000a16d8edc7e64152e30220bfbd724c9bbb08368c054e24c651314f0a", size = 31936708, upload-time = "2026-02-23T00:19:58.694Z" },
+    { url = "https://files.pythonhosted.org/packages/38/2e/524405c2b6392765ab1e2b722a41d5da33dc5c7b7278184a8ad29b6cb206/scipy-1.17.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:010f4333c96c9bb1a4516269e33cb5917b08ef2166d5556ca2fd9f082a9e6ea0", size = 28570135, upload-time = "2026-02-23T00:20:03.934Z" },
+    { url = "https://files.pythonhosted.org/packages/fd/c3/5bd7199f4ea8556c0c8e39f04ccb014ac37d1468e6cfa6a95c6b3562b76e/scipy-1.17.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2ceb2d3e01c5f1d83c4189737a42d9cb2fc38a6eeed225e7515eef71ad301dce", size = 20741977, upload-time = "2026-02-23T00:20:07.935Z" },
+    { url = "https://files.pythonhosted.org/packages/d9/b8/8ccd9b766ad14c78386599708eb745f6b44f08400a5fd0ade7cf89b6fc93/scipy-1.17.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:844e165636711ef41f80b4103ed234181646b98a53c8f05da12ca5ca289134f6", size = 23029601, upload-time = "2026-02-23T00:20:12.161Z" },
+    { url = "https://files.pythonhosted.org/packages/6d/a0/3cb6f4d2fb3e17428ad2880333cac878909ad1a89f678527b5328b93c1d4/scipy-1.17.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:158dd96d2207e21c966063e1635b1063cd7787b627b6f07305315dd73d9c679e", size = 33019667, upload-time = "2026-02-23T00:20:17.208Z" },
+    { url = "https://files.pythonhosted.org/packages/f3/c3/2d834a5ac7bf3a0c806ad1508efc02dda3c8c61472a56132d7894c312dea/scipy-1.17.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74cbb80d93260fe2ffa334efa24cb8f2f0f622a9b9febf8b483c0b865bfb3475", size = 35264159, upload-time = "2026-02-23T00:20:23.087Z" },
+    { url = "https://files.pythonhosted.org/packages/4d/77/d3ed4becfdbd217c52062fafe35a72388d1bd82c2d0ba5ca19d6fcc93e11/scipy-1.17.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:dbc12c9f3d185f5c737d801da555fb74b3dcfa1a50b66a1a93e09190f41fab50", size = 35102771, upload-time = "2026-02-23T00:20:28.636Z" },
+    { url = "https://files.pythonhosted.org/packages/bd/12/d19da97efde68ca1ee5538bb261d5d2c062f0c055575128f11a2730e3ac1/scipy-1.17.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:94055a11dfebe37c656e70317e1996dc197e1a15bbcc351bcdd4610e128fe1ca", size = 37665910, upload-time = "2026-02-23T00:20:34.743Z" },
+    { url = "https://files.pythonhosted.org/packages/06/1c/1172a88d507a4baaf72c5a09bb6c018fe2ae0ab622e5830b703a46cc9e44/scipy-1.17.1-cp313-cp313t-win_amd64.whl", hash = "sha256:e30bdeaa5deed6bc27b4cc490823cd0347d7dae09119b8803ae576ea0ce52e4c", size = 36562980, upload-time = "2026-02-23T00:20:40.575Z" },
+    { url = "https://files.pythonhosted.org/packages/70/b0/eb757336e5a76dfa7911f63252e3b7d1de00935d7705cf772db5b45ec238/scipy-1.17.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a720477885a9d2411f94a93d16f9d89bad0f28ca23c3f8daa521e2dcc3f44d49", size = 24856543, upload-time = "2026-02-23T00:20:45.313Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/83/333afb452af6f0fd70414dc04f898647ee1423979ce02efa75c3b0f2c28e/scipy-1.17.1-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:a48a72c77a310327f6a3a920092fa2b8fd03d7deaa60f093038f22d98e096717", size = 31584510, upload-time = "2026-02-23T00:21:01.015Z" },
+    { url = "https://files.pythonhosted.org/packages/ed/a6/d05a85fd51daeb2e4ea71d102f15b34fedca8e931af02594193ae4fd25f7/scipy-1.17.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:45abad819184f07240d8a696117a7aacd39787af9e0b719d00285549ed19a1e9", size = 28170131, upload-time = "2026-02-23T00:21:05.888Z" },
+    { url = "https://files.pythonhosted.org/packages/db/7b/8624a203326675d7746a254083a187398090a179335b2e4a20e2ddc46e83/scipy-1.17.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3fd1fcdab3ea951b610dc4cef356d416d5802991e7e32b5254828d342f7b7e0b", size = 20342032, upload-time = "2026-02-23T00:21:09.904Z" },
+    { url = "https://files.pythonhosted.org/packages/c9/35/2c342897c00775d688d8ff3987aced3426858fd89d5a0e26e020b660b301/scipy-1.17.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7bdf2da170b67fdf10bca777614b1c7d96ae3ca5794fd9587dce41eb2966e866", size = 22678766, upload-time = "2026-02-23T00:21:14.313Z" },
+    { url = "https://files.pythonhosted.org/packages/ef/f2/7cdb8eb308a1a6ae1e19f945913c82c23c0c442a462a46480ce487fdc0ac/scipy-1.17.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:adb2642e060a6549c343603a3851ba76ef0b74cc8c079a9a58121c7ec9fe2350", size = 32957007, upload-time = "2026-02-23T00:21:19.663Z" },
+    { url = "https://files.pythonhosted.org/packages/0b/2e/7eea398450457ecb54e18e9d10110993fa65561c4f3add5e8eccd2b9cd41/scipy-1.17.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eee2cfda04c00a857206a4330f0c5e3e56535494e30ca445eb19ec624ae75118", size = 35221333, upload-time = "2026-02-23T00:21:25.278Z" },
+    { url = "https://files.pythonhosted.org/packages/d9/77/5b8509d03b77f093a0d52e606d3c4f79e8b06d1d38c441dacb1e26cacf46/scipy-1.17.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d2650c1fb97e184d12d8ba010493ee7b322864f7d3d00d3f9bb97d9c21de4068", size = 35042066, upload-time = "2026-02-23T00:21:31.358Z" },
+    { url = "https://files.pythonhosted.org/packages/f9/df/18f80fb99df40b4070328d5ae5c596f2f00fffb50167e31439e932f29e7d/scipy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:08b900519463543aa604a06bec02461558a6e1cef8fdbb8098f77a48a83c8118", size = 37612763, upload-time = "2026-02-23T00:21:37.247Z" },
+    { url = "https://files.pythonhosted.org/packages/4b/39/f0e8ea762a764a9dc52aa7dabcfad51a354819de1f0d4652b6a1122424d6/scipy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:3877ac408e14da24a6196de0ddcace62092bfc12a83823e92e49e40747e52c19", size = 37290984, upload-time = "2026-02-23T00:22:35.023Z" },
+    { url = "https://files.pythonhosted.org/packages/7c/56/fe201e3b0f93d1a8bcf75d3379affd228a63d7e2d80ab45467a74b494947/scipy-1.17.1-cp314-cp314-win_arm64.whl", hash = "sha256:f8885db0bc2bffa59d5c1b72fad7a6a92d3e80e7257f967dd81abb553a90d293", size = 25192877, upload-time = "2026-02-23T00:22:39.798Z" },
+    { url = "https://files.pythonhosted.org/packages/96/ad/f8c414e121f82e02d76f310f16db9899c4fcde36710329502a6b2a3c0392/scipy-1.17.1-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:1cc682cea2ae55524432f3cdff9e9a3be743d52a7443d0cba9017c23c87ae2f6", size = 31949750, upload-time = "2026-02-23T00:21:42.289Z" },
+    { url = "https://files.pythonhosted.org/packages/7c/b0/c741e8865d61b67c81e255f4f0a832846c064e426636cd7de84e74d209be/scipy-1.17.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:2040ad4d1795a0ae89bfc7e8429677f365d45aa9fd5e4587cf1ea737f927b4a1", size = 28585858, upload-time = "2026-02-23T00:21:47.706Z" },
+    { url = "https://files.pythonhosted.org/packages/ed/1b/3985219c6177866628fa7c2595bfd23f193ceebbe472c98a08824b9466ff/scipy-1.17.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:131f5aaea57602008f9822e2115029b55d4b5f7c070287699fe45c661d051e39", size = 20757723, upload-time = "2026-02-23T00:21:52.039Z" },
+    { url = "https://files.pythonhosted.org/packages/c0/19/2a04aa25050d656d6f7b9e7b685cc83d6957fb101665bfd9369ca6534563/scipy-1.17.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9cdc1a2fcfd5c52cfb3045feb399f7b3ce822abdde3a193a6b9a60b3cb5854ca", size = 23043098, upload-time = "2026-02-23T00:21:56.185Z" },
+    { url = "https://files.pythonhosted.org/packages/86/f1/3383beb9b5d0dbddd030335bf8a8b32d4317185efe495374f134d8be6cce/scipy-1.17.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e3dcd57ab780c741fde8dc68619de988b966db759a3c3152e8e9142c26295ad", size = 33030397, upload-time = "2026-02-23T00:22:01.404Z" },
+    { url = "https://files.pythonhosted.org/packages/41/68/8f21e8a65a5a03f25a79165ec9d2b28c00e66dc80546cf5eb803aeeff35b/scipy-1.17.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a9956e4d4f4a301ebf6cde39850333a6b6110799d470dbbb1e25326ac447f52a", size = 35281163, upload-time = "2026-02-23T00:22:07.024Z" },
+    { url = "https://files.pythonhosted.org/packages/84/8d/c8a5e19479554007a5632ed7529e665c315ae7492b4f946b0deb39870e39/scipy-1.17.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a4328d245944d09fd639771de275701ccadf5f781ba0ff092ad141e017eccda4", size = 35116291, upload-time = "2026-02-23T00:22:12.585Z" },
+    { url = "https://files.pythonhosted.org/packages/52/52/e57eceff0e342a1f50e274264ed47497b59e6a4e3118808ee58ddda7b74a/scipy-1.17.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a77cbd07b940d326d39a1d1b37817e2ee4d79cb30e7338f3d0cddffae70fcaa2", size = 37682317, upload-time = "2026-02-23T00:22:18.513Z" },
+    { url = "https://files.pythonhosted.org/packages/11/2f/b29eafe4a3fbc3d6de9662b36e028d5f039e72d345e05c250e121a230dd4/scipy-1.17.1-cp314-cp314t-win_amd64.whl", hash = "sha256:eb092099205ef62cd1782b006658db09e2fed75bffcae7cc0d44052d8aa0f484", size = 37345327, upload-time = "2026-02-23T00:22:24.442Z" },
+    { url = "https://files.pythonhosted.org/packages/07/39/338d9219c4e87f3e708f18857ecd24d22a0c3094752393319553096b98af/scipy-1.17.1-cp314-cp314t-win_arm64.whl", hash = "sha256:200e1050faffacc162be6a486a984a0497866ec54149a01270adc8a59b7c7d21", size = 25489165, upload-time = "2026-02-23T00:22:29.563Z" },
+]
+
+[[package]]
 name = "setuptools"
 version = "81.0.0"
 source = { registry = "https://pypi.org/simple" }
@@ -1337,6 +1482,15 @@ wheels = [
 ]
 
 [[package]]
+name = "threadpoolctl"
+version = "3.6.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b7/4d/08c89e34946fce2aec4fbb45c9016efd5f4d7f24af8e5d93296e935631d8/threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e", size = 21274, upload-time = "2025-03-13T13:49:23.031Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" },
+]
+
+[[package]]
 name = "tokenizers"
 version = "0.22.2"
 source = { registry = "https://pypi.org/simple" }