Evaluation

class motac.eval.BacktestResult(n_train, horizon, nll, rmse, mae, coverage, n_folds, fold_metrics, baseline_metrics)[source]

Bases: object

Backtest summary with fold metrics and baseline comparisons.

n_train: int
horizon: int
nll: float
rmse: float
mae: float
coverage: float
n_folds: int
fold_metrics: tuple[dict[str, float], ...]
baseline_metrics: dict[str, dict[str, float]]
__init__(n_train, horizon, nll, rmse, mae, coverage, n_folds, fold_metrics, baseline_metrics)
motac.eval.backtest_fit_forecast_nll(*, travel_time_s, kernel, y, n_train, horizon, family='poisson', dispersion=None, init_alpha=0.05, init_beta=0.05, maxiter=250, kernel_fn=None, validate_kernel=True, n_paths=200, q=(0.05, 0.5, 0.95), rolling_step=None, max_folds=None, max_travel_time_s=None, speed_gate_smoothness_s=300.0, mu_ridge=0.0, mu_laplacian=0.0, stability_mode='warn', stability_penalty=100.0, seasonal_period=7)[source]

Rolling-origin probabilistic backtest with baseline comparisons.

Return type:

BacktestResult

class motac.eval.BacktestConfig(counts_path, substrate_cache_dir, n_train, horizon, family='poisson', dispersion=None, kernel=None, n_lags=6, beta=1.0, init_alpha=0.05, init_beta=0.05, maxiter=250, out_dir='reports/backtest', n_paths=200, q=(0.05, 0.5, 0.95), rolling_step=None, max_folds=None, seasonal_period=7, max_travel_time_s=None, speed_gate_smoothness_s=300.0, mu_ridge=0.0, mu_laplacian=0.0, stability_mode='warn', stability_penalty=100.0)[source]

Bases: object

Configuration for rolling probabilistic backtesting report bundles.

counts_path: str
substrate_cache_dir: str
n_train: int
horizon: int
family: str
dispersion: float | None
kernel: list[float] | None
n_lags: int
beta: float
init_alpha: float
init_beta: float
maxiter: int
out_dir: str
n_paths: int
q: tuple[float, ...]
rolling_step: int | None
max_folds: int | None
seasonal_period: int
max_travel_time_s: float | None
speed_gate_smoothness_s: float
mu_ridge: float
mu_laplacian: float
stability_mode: str
stability_penalty: float
static from_json(path)[source]
Return type:

BacktestConfig

__init__(counts_path, substrate_cache_dir, n_train, horizon, family='poisson', dispersion=None, kernel=None, n_lags=6, beta=1.0, init_alpha=0.05, init_beta=0.05, maxiter=250, out_dir='reports/backtest', n_paths=200, q=(0.05, 0.5, 0.95), rolling_step=None, max_folds=None, seasonal_period=7, max_travel_time_s=None, speed_gate_smoothness_s=300.0, mu_ridge=0.0, mu_laplacian=0.0, stability_mode='warn', stability_penalty=100.0)
motac.eval.run_backtest_report(*, config)[source]

Run rolling probabilistic backtest and save report bundle (JSON + figures).

Return type:

dict[str, Any]

class motac.eval.BenchmarkSuiteConfig(out_dir='reports/benchmarks', local_cache_dir='docs/tutorials/_local_data', chicago_events_path=None, chicago_years=(2024, 2025), chicago_cell_size_m=1500.0, acled_events_path=None, acled_start='2024-01-01', acled_end='2025-12-31', acled_region='gaza', acled_mode='full', acled_cell_size_m=200.0, n_lags=3, kernel_beta=0.9, n_paths=200, maxiter=80, n_train=None, horizon=None, sim_seed=123, sim_cells=24, sim_steps=120, sim_mu=0.1, sim_alpha=0.45, sim_beta=0.001)[source]

Bases: object

Configuration for simulator + Chicago + ACLED benchmark suite.

out_dir: str
local_cache_dir: str
chicago_events_path: str | None
chicago_years: tuple[int, int]
chicago_cell_size_m: float
acled_events_path: str | None
acled_start: str
acled_end: str
acled_region: str
acled_mode: str
acled_cell_size_m: float
n_lags: int
kernel_beta: float
n_paths: int
maxiter: int
n_train: int | None
horizon: int | None
sim_seed: int
sim_cells: int
sim_steps: int
sim_mu: float
sim_alpha: float
sim_beta: float
static from_json(path)[source]
Return type:

BenchmarkSuiteConfig

__init__(out_dir='reports/benchmarks', local_cache_dir='docs/tutorials/_local_data', chicago_events_path=None, chicago_years=(2024, 2025), chicago_cell_size_m=1500.0, acled_events_path=None, acled_start='2024-01-01', acled_end='2025-12-31', acled_region='gaza', acled_mode='full', acled_cell_size_m=200.0, n_lags=3, kernel_beta=0.9, n_paths=200, maxiter=80, n_train=None, horizon=None, sim_seed=123, sim_cells=24, sim_steps=120, sim_mu=0.1, sim_alpha=0.45, sim_beta=0.001)
motac.eval.run_benchmark_suite(*, config)[source]

Run simulator + Chicago + ACLED benchmark suite and write reports.

Return type:

dict[str, Any]

class motac.eval.ProfileResult(fit_time_s, forecast_time_s, total_time_s)[source]

Bases: object

Timing summary (in seconds) for fit and forecast of a road Hawkes model.

fit_time_s: float
forecast_time_s: float
total_time_s: float
__init__(fit_time_s, forecast_time_s, total_time_s)
motac.eval.profile_fit_forecast(*, travel_time_s, kernel, y, horizon, family='poisson', init_alpha=0.05, init_beta=0.001, maxiter=250, kernel_fn=None, validate_kernel=True)[source]

Profile fit + forecast latency for a road Hawkes model.

Return type:

tuple[ProfileResult, dict[str, object], ndarray]

class motac.eval.EvalConfig(seed=0, n_locations=5, n_steps_train=60, horizon=7, mu=0.1, alpha=0.6, n_lags=6, beta=1.0, fit_maxiter=400, n_paths=200, q=(0.05, 0.5, 0.95))[source]

Bases: object

Configuration for a small synthetic evaluation run.

seed: int
n_locations: int
n_steps_train: int
horizon: int
mu: float
alpha: float
n_lags: int
beta: float
fit_maxiter: int
n_paths: int
q: tuple[float, ...]
to_json()[source]
Return type:

str

static from_json(text)[source]
Return type:

EvalConfig

__init__(seed=0, n_locations=5, n_steps_train=60, horizon=7, mu=0.1, alpha=0.6, n_lags=6, beta=1.0, fit_maxiter=400, n_paths=200, q=(0.05, 0.5, 0.95))
motac.eval.evaluate_synthetic(config)[source]

Run a small deterministic end-to-end synthetic benchmark.

Return type:

dict[str, object]

class motac.eval.backtest.BacktestResult(n_train, horizon, nll, rmse, mae, coverage, n_folds, fold_metrics, baseline_metrics)[source]

Bases: object

Backtest summary with fold metrics and baseline comparisons.

n_train: int
horizon: int
nll: float
rmse: float
mae: float
coverage: float
n_folds: int
fold_metrics: tuple[dict[str, float], ...]
baseline_metrics: dict[str, dict[str, float]]
__init__(n_train, horizon, nll, rmse, mae, coverage, n_folds, fold_metrics, baseline_metrics)
motac.eval.backtest.backtest_fit_forecast_nll(*, travel_time_s, kernel, y, n_train, horizon, family='poisson', dispersion=None, init_alpha=0.05, init_beta=0.05, maxiter=250, kernel_fn=None, validate_kernel=True, n_paths=200, q=(0.05, 0.5, 0.95), rolling_step=None, max_folds=None, max_travel_time_s=None, speed_gate_smoothness_s=300.0, mu_ridge=0.0, mu_laplacian=0.0, stability_mode='warn', stability_penalty=100.0, seasonal_period=7)[source]

Rolling-origin probabilistic backtest with baseline comparisons.

Return type:

BacktestResult

class motac.eval.profiling.ProfileResult(fit_time_s, forecast_time_s, total_time_s)[source]

Bases: object

Timing summary (in seconds) for fit and forecast of a road Hawkes model.

fit_time_s: float
forecast_time_s: float
total_time_s: float
__init__(fit_time_s, forecast_time_s, total_time_s)
motac.eval.profiling.profile_fit_forecast(*, travel_time_s, kernel, y, horizon, family='poisson', init_alpha=0.05, init_beta=0.001, maxiter=250, kernel_fn=None, validate_kernel=True)[source]

Profile fit + forecast latency for a road Hawkes model.

Return type:

tuple[ProfileResult, dict[str, object], ndarray]