#!/usr/bin/env python3
"""
engine_4d.py — 4D FDTD Wave Interference Engine for Time Ledger Theory

Implements EQUATIONS_SPEC_v2.txt exactly. Every equation traced to source.
No additions, no leanings, no nudges.

Test: TLT-4D-001 (pre-registered in test description document)
Audit: AUDIT_REPORT_v1.txt (20 findings), AUDIT_REPORT_v2.txt (all resolved)

Usage:
    python3 engine_4d.py                      # Single run at c=1.625
    python3 engine_4d.py --sweep              # Full 13-value framerate sweep
    python3 engine_4d.py --c 1.707            # Single run at specific c
    python3 engine_4d.py --phase 2 --sweep    # Phase 2 resolution (48^4)
"""

import argparse
import json
import logging
import math
import os
import sys
import time as time_mod
from pathlib import Path

import numpy as np

logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logger = logging.getLogger("4d_engine")

RESULTS_DIR = Path(__file__).parent / "results"
RESULTS_DIR.mkdir(exist_ok=True)


# ══════════════════════════════════════════════════════════════════════════════
# 24-CELL VERTICES (Schläfli 1852, established geometry)
# ══════════════════════════════════════════════════════════════════════════════

def get_form_a_vertices():
    """Form A: cell-center form, 24 vertices on unit sphere (radius 1.0).

    8 permutations of (±1,0,0,0) + 16 sign combinations of (½,½,½,½).

    Returns:
        (24, 4) float array of vertex coordinates, all with unit norm.
    """
    verts = []
    # 8 axis-aligned vertices
    for i in range(4):
        for s in [1.0, -1.0]:
            v = [0.0, 0.0, 0.0, 0.0]
            v[i] = s
            verts.append(v)
    # 16 half-integer vertices
    for s0 in [0.5, -0.5]:
        for s1 in [0.5, -0.5]:
            for s2 in [0.5, -0.5]:
                for s3 in [0.5, -0.5]:
                    verts.append([s0, s1, s2, s3])
    return np.array(verts)


def get_form_b_vertices_normalized():
    """Form B: edge-midpoint form, normalized to unit sphere.

    Permutations of (±1,±1,0,0) / √2 — 24 distinct vertices
    (6 position choices × 4 sign combinations; the set removes the
    duplicate permutations produced when the two nonzero entries are equal).

    Returns:
        (24, 4) float array of unit-norm vertex coordinates, sorted
        lexicographically for deterministic ordering.
    """
    from itertools import permutations
    verts = set()
    for s1 in [1.0, -1.0]:
        for s2 in [1.0, -1.0]:
            for p in permutations([s1, s2, 0.0, 0.0]):
                verts.add(p)
    verts = np.array(sorted(verts)) / math.sqrt(2)
    return verts


# ══════════════════════════════════════════════════════════════════════════════
# ENGINE CORE
# ══════════════════════════════════════════════════════════════════════════════

class Engine4D:
    """4D FDTD wave equation engine implementing EQUATIONS_SPEC_v2.

    Equation (1):   d²ψ/dt² = c²∇²ψ - ω₀²ψ(1+A/A_ref) + J(x,t)
    Source (3):     J(x,t) = A_src × sin(2πft) × W(t;r)
    C_potential (7): r(x) = r₀ + α × κ(x)
    Curvature (8):  κ(x) = -∇²⟨|ψ|²⟩ / max(|∇²⟨|ψ|²⟩|)
    """

    def __init__(self, c_4d=1.625, phase=1, r0=0.3, alpha_cpot=0.1,
                 omega0=2 * math.pi, A_base=1.0, A_ref=1.0, n_periods=100,
                 source_config="form_a"):
        """Initialize the engine.

        Args:
            c_4d: Propagation speed (in units of c_3D=1)
            phase: 1 (N=32), 2 (N=48), 3 (N=64)
            r0: Base decoherence ratio (0.3 from math_framework)
            alpha_cpot: C_potential coupling strength
            omega0: Base angular frequency (restoring force)
            A_base: Base source amplitude
            A_ref: Reference amplitude for potential coupling
            n_periods: Number of wave periods to simulate
            source_config: "form_a", "form_b", or "both"
        """
        # Grid parameters
        grid_sizes = {1: 32, 2: 48, 3: 64}
        self.N = grid_sizes.get(phase, 32)
        self.L = 3.0  # Domain: [-L, L]^4
        self.h = 2 * self.L / self.N
        self.c = c_4d

        # CFL condition: dt = 0.9 * h / (2*c_max)
        # Use c_max across all sweep values to keep dt consistent
        c_max = max(c_4d, 1.8)
        self.dt = 0.9 * self.h / (2.0 * c_max)

        # Physics parameters
        self.f0 = 1.0  # Base frequency (dimensionless)
        self.T = 1.0 / self.f0  # Period
        self.omega0 = omega0
        self.r0 = r0
        self.alpha_cpot = alpha_cpot
        self.A_base = A_base
        self.A_ref = A_ref

        # Simulation parameters
        self.n_periods = n_periods
        self.steps_per_period = int(self.T / self.dt)
        self.total_steps = self.n_periods * self.steps_per_period

        # Windowing ramp (smooth transitions, spec eq. 4)
        self.n_ramp = max(4, int(0.05 * self.steps_per_period))

        # PML parameters
        self.n_pml = 10
        self.sigma_max = 3.0 * self.c / (2.0 * self.h * self.n_pml)

        # Source configuration
        self.source_config = source_config

        # 5D overflow layer
        self.N5 = 8
        self.c5d = 2.625  # Fibonacci pair {8,13}, sum=21, 21/8=2.625

        # Allocate fields
        self._allocate()

        # Place sources
        self._setup_sources()

        logger.info(
            "Engine4D initialized: N=%d, c=%.3f, h=%.4f, dt=%.5f, "
            "steps=%d (%.0f periods), ramp=%d, PML=%d",
            self.N, self.c, self.h, self.dt,
            self.total_steps, self.n_periods, self.n_ramp, self.n_pml,
        )

    def _allocate(self):
        """Allocate field arrays."""
        N = self.N
        shape = (N, N, N, N)

        # Wave field: ψ at t, t-dt
        self.psi = np.zeros(shape, dtype=np.float64)
        self.psi_prev = np.zeros(shape, dtype=np.float64)

        # Source field (recomputed each step, not stored permanently)

        # PML damping field
        self.sigma = np.zeros(shape, dtype=np.float64)
        self._setup_pml()

        # Time-averaged intensity for C_potential
        self.intensity_avg = np.zeros(shape, dtype=np.float64)
        self.intensity_count = 0

        # 5D overflow layer: thin (N5 points) at each 4D position
        # For memory efficiency, only track total overflow energy
        self.overflow_energy = 0.0

        # Decoherence ratio field
        self.r_field = np.full(shape, self.r0, dtype=np.float64)

        # Energy tracking
        self.energy_history = []

        mem_mb = (self.psi.nbytes * 3 + self.sigma.nbytes +
                  self.intensity_avg.nbytes + self.r_field.nbytes) / 1e6
        logger.info("Memory allocated: %.1f MB", mem_mb)

    def _setup_pml(self):
        """Set up PML absorbing boundary layer.

        Builds a quadratic damping profile sigma(x) that rises from 0 at the
        inner PML edge to sigma_max at the domain boundary, applied along all
        four dimensions (max of the per-dimension profiles).
        """
        N = self.N
        for dim in range(4):
            for i in range(N):
                # Distance into PML from either boundary
                if i < self.n_pml:
                    d = (self.n_pml - i) / self.n_pml
                elif i >= N - self.n_pml:
                    d = (i - (N - self.n_pml - 1)) / self.n_pml
                else:
                    continue
                sigma_val = self.sigma_max * d * d  # Quadratic profile
                # Build slicing for this dimension
                slc = [slice(None)] * 4
                slc[dim] = i
                self.sigma[tuple(slc)] = np.maximum(
                    self.sigma[tuple(slc)], sigma_val
                )

    def _setup_sources(self):
        """Place sources at 24-cell vertices using 4D trilinear interpolation.

        Each vertex is spread over the 16 nearest grid nodes with multilinear
        weights; weights are renormalized so each vertex injects unit total
        amplitude regardless of where it falls relative to the grid.

        Raises:
            ValueError: if source_config is not one of the known forms.
        """
        if self.source_config == "form_a":
            verts = get_form_a_vertices()
        elif self.source_config == "form_b":
            verts = get_form_b_vertices_normalized()
        elif self.source_config == "both":
            verts = np.vstack([get_form_a_vertices(),
                               get_form_b_vertices_normalized()])
        else:
            raise ValueError(f"Unknown source config: {self.source_config}")

        # Map vertex coordinates to grid indices
        # Domain is [-L, L], grid indices [0, N-1]
        self.source_points = []
        for v in verts:
            # Grid position (floating point)
            gpos = [(v[d] + self.L) / self.h for d in range(4)]
            # 4D trilinear interpolation: 16 nearest grid nodes
            neighbors = []
            for i0 in [int(gpos[0]), min(int(gpos[0]) + 1, self.N - 1)]:
                for i1 in [int(gpos[1]), min(int(gpos[1]) + 1, self.N - 1)]:
                    for i2 in [int(gpos[2]), min(int(gpos[2]) + 1, self.N - 1)]:
                        for i3 in [int(gpos[3]), min(int(gpos[3]) + 1, self.N - 1)]:
                            # Weight = product of (1 - fractional distance) in each dim
                            w = 1.0
                            for d, idx in enumerate([i0, i1, i2, i3]):
                                w *= 1.0 - abs(gpos[d] - idx)
                            # Clamped boundary indices can yield w <= 0; drop them
                            if w > 0:
                                neighbors.append(((i0, i1, i2, i3), w))
            # Normalize weights
            total_w = sum(w for _, w in neighbors)
            if total_w > 0:
                neighbors = [(idx, w / total_w) for idx, w in neighbors]
            self.source_points.append(neighbors)

        logger.info("Sources placed: %d vertices, %d source config",
                    len(verts), len(self.source_points))

    def _window(self, t, r):
        """Windowing function W(t; r) with smooth ramp transitions.

        Spec equations (3) and (4). The source is ON for the first (1-r)
        fraction of each period with raised-cosine ramps of width
        n_ramp*dt at both edges, and OFF for the remaining r fraction.

        Args:
            t: Simulation time.
            r: Local decoherence ratio (OFF fraction of the period).

        Returns:
            Window value in [0, 1].
        """
        # Position within current period
        phase = (t % self.T) / self.T  # 0 to 1
        # ON phase: 0 to (1-r), OFF phase: (1-r) to 1
        on_end = 1.0 - r
        if phase < on_end:
            # In ON phase — check if in ramp-up region
            ramp_frac = self.n_ramp * self.dt / self.T
            if phase < ramp_frac:
                return 0.5 * (1.0 - math.cos(math.pi * phase / ramp_frac))
            elif phase > on_end - ramp_frac:
                return 0.5 * (1.0 + math.cos(
                    math.pi * (phase - on_end + ramp_frac) / ramp_frac))
            else:
                return 1.0
        else:
            return 0.0

    def _laplacian_4d(self, field):
        """Compute 4D Laplacian using central differences. Spec eq. (2).

        Uses np.roll, so the stencil wraps periodically at the boundaries;
        the PML damps the field near the edges, which suppresses the
        wrap-around contribution.
        """
        lap = np.zeros_like(field)
        h2 = self.h * self.h
        for dim in range(4):
            lap += (np.roll(field, 1, axis=dim) +
                    np.roll(field, -1, axis=dim) -
                    2.0 * field) / h2
        return lap

    def _inject_sources(self, t):
        """Compute source field J(x,t) at time t. Spec eq. (3)."""
        J = np.zeros_like(self.psi)
        sin_val = math.sin(2.0 * math.pi * self.f0 * t)
        for neighbors in self.source_points:
            for (i0, i1, i2, i3), weight in neighbors:
                # Get local decoherence ratio
                r_local = self.r_field[i0, i1, i2, i3]
                w_val = self._window(t, r_local)
                J[i0, i1, i2, i3] += self.A_base * sin_val * w_val * weight
        return J

    def _update_cpotential(self):
        """Update C_potential: r(x) = r₀ + α × κ(x). Spec eqs. (7-8)."""
        if self.intensity_count < self.steps_per_period:
            return  # Not enough data yet

        # Compute curvature: κ(x) = -∇²⟨|ψ|²⟩ / max(|∇²⟨|ψ|²⟩|)
        avg_intensity = self.intensity_avg / max(self.intensity_count, 1)
        lap_intensity = self._laplacian_4d(avg_intensity)
        max_lap = np.max(np.abs(lap_intensity))
        if max_lap > 1e-12:
            kappa = -lap_intensity / max_lap  # Negative sign: peaks get positive κ
        else:
            kappa = np.zeros_like(lap_intensity)

        # Update r field
        self.r_field = self.r0 + self.alpha_cpot * kappa

        # Hard clamp at 0.5 (ceiling) and 0.01 (floor)
        overflow_mask = self.r_field >= 0.5
        self.r_field = np.clip(self.r_field, 0.01, 0.5)

        # 5D overflow: energy at positions where r hit 0.5
        if np.any(overflow_mask):
            overflow_e = np.sum(self.psi[overflow_mask] ** 2) * self.h ** 4
            self.overflow_energy += overflow_e * 0.01  # Small fraction per step

    def step(self, t):
        """Advance one FDTD timestep. Spec eq. (1) discretized."""
        # Source injection
        J = self._inject_sources(t)

        # 4D Laplacian
        lap = self._laplacian_4d(self.psi)

        # Potential term: ω₀²ψ(1 + A/A_ref)
        # Phase 1: A = A_base uniform, so (1 + A_base/A_ref) is constant
        potential = self.omega0 ** 2 * self.psi * (1.0 + self.A_base / self.A_ref)

        # FDTD update: ψ(t+dt) = 2ψ(t) - ψ(t-dt) + dt²[c²∇²ψ - potential + J]
        dt2 = self.dt * self.dt
        psi_new = (2.0 * self.psi - self.psi_prev +
                   dt2 * (self.c ** 2 * lap - potential + J))

        # PML damping
        psi_new *= np.exp(-self.sigma * self.dt)

        # Update fields
        self.psi_prev = self.psi
        self.psi = psi_new

        # Accumulate intensity for C_potential
        self.intensity_avg += self.psi ** 2
        self.intensity_count += 1

    def run(self):
        """Run the full simulation.

        Returns:
            (result_dict, cross_section, final_intensity) from
            _collect_results().
        """
        logger.info("Running %d steps (%.0f periods)...",
                    self.total_steps, self.n_periods)
        t_start = time_mod.monotonic()

        # FIX: guard the progress-log interval — total_steps // 10 is 0 for
        # very short runs (total_steps < 10), which made `step_n % interval`
        # raise ZeroDivisionError.
        log_every = max(1, self.total_steps // 10)

        t = 0.0
        for step_n in range(self.total_steps):
            t = step_n * self.dt

            # FDTD step
            self.step(t)

            # Update C_potential every period
            if step_n > 0 and step_n % self.steps_per_period == 0:
                self._update_cpotential()
                # Reset intensity accumulator
                self.intensity_avg[:] = 0.0
                self.intensity_count = 0

            # Track energy every 10 steps
            if step_n % 10 == 0:
                field_energy = np.sum(self.psi ** 2) * self.h ** 4
                self.energy_history.append({
                    "step": step_n,
                    "t": round(t, 6),
                    "field_energy": float(field_energy),
                    "overflow_energy": float(self.overflow_energy),
                })

            # Progress log
            if step_n > 0 and step_n % log_every == 0:
                elapsed = time_mod.monotonic() - t_start
                pct = step_n / self.total_steps * 100
                field_e = np.sum(self.psi ** 2) * self.h ** 4
                logger.info("  %.0f%% (step %d, t=%.2f, E_field=%.4e, elapsed=%.1fs)",
                            pct, step_n, t, field_e, elapsed)

        elapsed = time_mod.monotonic() - t_start
        logger.info("Simulation complete: %.1f seconds", elapsed)
        return self._collect_results(elapsed)

    def _collect_results(self, elapsed):
        """Collect measurements for output.

        Args:
            elapsed: Wall-clock seconds the simulation took.

        Returns:
            Tuple of (result dict, 3D cross-section array at w=0,
            full 4D time-averaged intensity array).
        """
        # Final time-averaged intensity — accumulated since the last
        # C_potential reset in run(), i.e. at most one period of data.
        final_intensity = self.intensity_avg / max(self.intensity_count, 1)

        # 3D cross-section at w=0 (midplane of 4th dimension)
        mid = self.N // 2
        cross_section = final_intensity[:, :, :, mid]

        # Peak analysis
        from scipy.ndimage import maximum_filter
        local_max = maximum_filter(final_intensity, size=3) == final_intensity
        threshold = np.mean(final_intensity) + 2 * np.std(final_intensity)
        peaks = np.argwhere(local_max & (final_intensity > threshold))

        # Check intensity at dual vertex positions (Form B, not sourced)
        form_b = get_form_b_vertices_normalized()
        dual_intensities = []
        for v in form_b:
            gpos = tuple(int((v[d] + self.L) / self.h) for d in range(4))
            gpos = tuple(min(max(g, 0), self.N - 1) for g in gpos)
            dual_intensities.append(float(final_intensity[gpos]))
        avg_dual = np.mean(dual_intensities)

        # FIX: np.mean of an empty selection is NaN (with a RuntimeWarning)
        # when the field is identically zero, and NaN then leaks into the
        # JSON result (json.dump emits non-standard `NaN`). Guard it.
        positive_vals = final_intensity[final_intensity > 0]
        avg_field = float(np.mean(positive_vals)) if positive_vals.size > 0 else 0.0

        # Coherence: spatial autocorrelation
        flat = final_intensity.ravel()
        if np.std(flat) > 1e-12:
            autocorr = np.corrcoef(flat[:-1], flat[1:])[0, 1]
        else:
            autocorr = 0.0

        # Symmetry: peak count and distribution
        result = {
            "c_4d": self.c,
            "phase": {32: 1, 48: 2, 64: 3}.get(self.N, 0),
            "N": self.N,
            "total_steps": self.total_steps,
            "elapsed_seconds": round(elapsed, 2),
            "field_energy_final": float(np.sum(self.psi ** 2) * self.h ** 4),
            "overflow_energy": float(self.overflow_energy),
            "n_peaks": len(peaks),
            "avg_dual_intensity": float(avg_dual),
            "avg_field_intensity": float(avg_field),
            "dual_to_field_ratio": float(avg_dual / avg_field) if avg_field > 0 else 0,
            "autocorrelation": float(autocorr),
            "intensity_std": float(np.std(final_intensity)),
            "peak_to_noise": float(np.max(final_intensity) /
                                   (np.std(final_intensity) + 1e-12)),
            "energy_history_len": len(self.energy_history),
        }
        return result, cross_section, final_intensity


# ══════════════════════════════════════════════════════════════════════════════
# SWEEP
# ══════════════════════════════════════════════════════════════════════════════

def run_sweep(phase=1, n_periods=50):
    """Run the framerate sweep: 1.500 to 1.800 in 0.025 steps.

    Args:
        phase: Resolution phase (1/2/3 → N=32/48/64).
        n_periods: Periods to simulate per c value.

    Returns:
        List of result dicts, one per c value; also writes per-c
        cross-sections and a combined JSON into RESULTS_DIR.
    """
    c_values = [round(1.5 + i * 0.025, 3) for i in range(13)]

    logger.info("═" * 60)
    logger.info("FRAMERATE SWEEP: %d values from %.3f to %.3f",
                len(c_values), c_values[0], c_values[-1])
    logger.info("Phase %d (N=%d), %d periods each",
                phase, {1: 32, 2: 48, 3: 64}.get(phase, 32), n_periods)
    logger.info("═" * 60)

    all_results = []
    for c_val in c_values:
        logger.info("\n--- c_4D = %.3f ---", c_val)
        engine = Engine4D(c_4d=c_val, phase=phase, n_periods=n_periods)
        result, cross_section, _ = engine.run()
        all_results.append(result)
        # Save cross-section
        np.save(RESULTS_DIR / f"cross_section_c{c_val:.3f}.npy", cross_section)

    # Save all results
    with open(RESULTS_DIR / "sweep_results.json", "w") as f:
        json.dump(all_results, f, indent=2)

    # Print summary
    print("\n" + "=" * 70)
    print("SWEEP RESULTS")
    print("=" * 70)
    print(f"{'c_4D':>8s} {'Peaks':>8s} {'Dual/Field':>10s} {'AutoCorr':>10s} "
          f"{'PeakNoise':>10s} {'Energy':>12s}")
    print("-" * 70)

    best_coherence = None
    best_c = None
    for r in all_results:
        print(f"{r['c_4d']:8.3f} {r['n_peaks']:8d} {r['dual_to_field_ratio']:10.4f} "
              f"{r['autocorrelation']:10.4f} {r['peak_to_noise']:10.2f} "
              f"{r['field_energy_final']:12.4e}")
        if best_coherence is None or r['peak_to_noise'] > best_coherence:
            best_coherence = r['peak_to_noise']
            best_c = r['c_4d']

    print("-" * 70)
    print(f"GOLDILOCKS: c_4D = {best_c:.3f} (highest peak-to-noise ratio)")
    print(f"  Fibonacci prediction: 1.625")
    print(f"  24-cell geometry: 1.707 (heuristic)")
    print(f"  Steinberg measured: 1.700 ± 0.200")

    return all_results


# ══════════════════════════════════════════════════════════════════════════════
# MAIN
# ══════════════════════════════════════════════════════════════════════════════

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="4D FDTD Wave Engine for TLT")
    parser.add_argument("--sweep", action="store_true",
                        help="Run full framerate sweep")
    parser.add_argument("--c", type=float, default=1.625,
                        help="Propagation speed (default: 1.625)")
    parser.add_argument("--phase", type=int, default=1, choices=[1, 2, 3],
                        help="Resolution phase")
    parser.add_argument("--periods", type=int, default=50,
                        help="Number of periods to simulate")
    parser.add_argument("--source", default="form_a",
                        choices=["form_a", "form_b", "both"],
                        help="Source configuration")
    args = parser.parse_args()

    if args.sweep:
        results = run_sweep(phase=args.phase, n_periods=args.periods)
    else:
        logger.info("Single run: c_4D=%.3f, phase=%d, %d periods",
                    args.c, args.phase, args.periods)
        engine = Engine4D(c_4d=args.c, phase=args.phase,
                          n_periods=args.periods, source_config=args.source)
        result, cross_section, intensity = engine.run()
        # Save
        np.save(RESULTS_DIR / f"cross_section_c{args.c:.3f}.npy", cross_section)
        with open(RESULTS_DIR / f"result_c{args.c:.3f}.json", "w") as f:
            json.dump(result, f, indent=2)
        print(json.dumps(result, indent=2))