#!/usr/bin/env python3
"""
B.6.9 FIBONACCI CASCADE — THREE COMPETING MODELS
===================================================
Date: 2026-03-19

Purpose: Test three hypotheses for what happens to energy that overflows
the 2D curvature ceiling (r=0.5). All three share the same overflow
trigger but differ in how the excess is handled.

MODEL A — PARTIAL CAPTURE:
    A fraction of overflow stays in z-field, the rest redistributes back
    into the 2D system at reduced potential. The new dimension doesn't
    capture everything — some leaks back.
        z += capture_fraction × overflow
        I_2d += (1 - capture_fraction) × overflow (redistributed)

MODEL B — FULL EJECTION:
    ALL overflow energy leaves the 2D system and enters z exclusively.
    Nothing comes back. The 2D system is permanently depleted at overflow
    sites. This is the anti-particle model — complete separation.
        z += overflow (100%)
        I_2d unchanged (energy is gone)

MODEL C — RECURSIVE f|t (NEW DIMENSION):
    The overflow doesn't just accumulate — it starts its OWN f|t dynamics
    in the z-dimension. The z-field runs its own interference, coalescence,
    and potential deepening. The 3D dimension is a NEW INSTANCE of the
    same mechanism at a Fibonacci-scaled budget.
        z runs its own wave equation with its own r(z) feedback
    This is the most provocative: dimensions are recursive f|t

PREDICTIONS:
    Model A: z-field should be WEAK (partial capture) but STABLE
        (redistribution prevents blowup). 2D pattern partially preserved.
    Model B: z-field should be STRONG and STRUCTURED. 2D pattern depleted
        at overflow sites. Clean separation between dimensions.
    Model C: z-field should show its OWN INTERFERENCE PATTERN with its
        own hexagonal-like structure. The most complex outcome.

All three use Fibonacci budget ceiling: z_max = 8/5 × budget_scale

CRITICAL DESIGN RULE: OUTCOME-AGNOSTIC. Same overflow trigger, three
treatments. The data determines which model best describes reality.
"""

import numpy as np
from scipy import ndimage
from scipy.spatial import cKDTree
import json
import time
import os


def log(msg):
    """Print *msg* immediately (flushed so progress shows under buffering)."""
    print(msg, flush=True)


# ======================================================================
# SHARED: 2D FDTD ENGINE WITH OVERFLOW DETECTION
# ======================================================================

class CascadeSimulation:
    """Base FDTD engine with overflow detection. Models override handle_overflow()."""

    def __init__(self, grid_size=256, n_wavelengths=10, r_base=0.3,
                 feedback_alpha=0.10, k=2*np.pi, budget_scale=1.0):
        """Set up the 2D grid, time stepping, source geometry and fields.

        Parameters
        ----------
        grid_size : int
            Grid points per side of the square domain.
        n_wavelengths : int
            Physical side length of the domain in wavelengths.
        r_base : float
            Baseline decoherence (duty-cycle reduction of the sources).
        feedback_alpha : float
            Coupling from accumulated intensity into the potential.
        k : float
            Wavenumber of the three plane-wave sources.
        budget_scale : float
            Scales the Fibonacci ceilings for the z- and w-fields.
        """
        self.grid_size = grid_size
        self.n_wavelengths = n_wavelengths
        self.r_base = r_base
        self.feedback_alpha = feedback_alpha
        self.budget_scale = budget_scale

        L = n_wavelengths
        self.dx = L / grid_size
        self.c = 1.0
        # Courant factor 0.4 keeps the 2D FDTD update stable (< 1/sqrt(2)).
        self.dt = 0.4 * self.dx / self.c
        self.courant2 = (self.c * self.dt / self.dx) ** 2

        wavelength = 1.0
        f0 = self.c / wavelength
        self.f0 = f0
        self.T = 1.0 / f0
        self.steps_per_cycle = int(self.T / self.dt)

        x = np.linspace(-L/2, L/2, grid_size)
        y = np.linspace(-L/2, L/2, grid_size)
        self.X, self.Y = np.meshgrid(x, y)

        # Three plane waves at 120° — hexagonal interference geometry.
        angles = np.array([0, 2*np.pi/3, 4*np.pi/3])
        self.kx = k * np.cos(angles)
        self.ky = k * np.sin(angles)

        # Fibonacci ceilings
        self.r_ceiling = 0.5
        self.z_ceiling = budget_scale * (8.0 / 5.0)   # 1.6
        self.w_ceiling = budget_scale * (13.0 / 5.0)  # 2.6

        # Fields
        self.psi = np.zeros((grid_size, grid_size))
        self.psi_prev = np.zeros((grid_size, grid_size))
        self.I_accumulated = np.zeros((grid_size, grid_size))
        self.z_field = np.zeros((grid_size, grid_size))
        self.w_field = np.zeros((grid_size, grid_size))
        self.expelled = np.zeros((grid_size, grid_size))
        self.total_overflow_2d = np.zeros((grid_size, grid_size))

    def run(self, n_cycles=300, snapshot_interval=20):
        """Run the cascade for *n_cycles* source cycles.

        Each cycle: compute the potential-driven decoherence, detect
        overflow past r_ceiling, delegate to the model-specific
        handle_overflow(), enforce the z/w Fibonacci ceilings, then run one
        full FDTD source cycle and accumulate intensity.

        Returns a list of snapshot dicts (see take_snapshot()).
        """
        snapshots = []
        for cycle in range(n_cycles):
            # Compute potential and decoherence
            I_norm = self.I_accumulated / (cycle + 1) if cycle > 0 else np.zeros_like(self.I_accumulated)
            V_field = self.feedback_alpha * I_norm
            r_raw = self.r_base + V_field

            # 2D decoherence (capped)
            r_2d = np.clip(r_raw, 0.01, self.r_ceiling - 0.01)

            # Detect overflow
            overflow = np.maximum(r_raw - self.r_ceiling, 0.0)
            self.total_overflow_2d += overflow

            # MODEL-SPECIFIC: handle the overflow
            self.handle_overflow(overflow, cycle)

            # Apply z-ceiling (Fibonacci bound); z-excess cascades into w,
            # and w-excess is expelled entirely.
            z_over = np.maximum(self.z_field - self.z_ceiling, 0.0)
            self.z_field = np.minimum(self.z_field, self.z_ceiling)
            self.w_field += z_over * 0.1
            w_over = np.maximum(self.w_field - self.w_ceiling, 0.0)
            self.w_field = np.minimum(self.w_field, self.w_ceiling)
            self.expelled += w_over

            # z-coupling for Laplacian: steep z-gradients weaken 2D wave
            # propagation locally (central differences on interior points).
            dz_x = np.zeros_like(self.z_field)
            dz_y = np.zeros_like(self.z_field)
            dz_x[1:-1, :] = self.z_field[2:, :] - self.z_field[:-2, :]
            dz_y[:, 1:-1] = self.z_field[:, 2:] - self.z_field[:, :-2]
            z_coupling = 1.0 / (1.0 + (dz_x**2 + dz_y**2) / (self.dx**2))

            # FDTD cycle
            I_this = np.zeros((self.grid_size, self.grid_size))
            for step in range(self.steps_per_cycle):
                t_local = step * self.dt
                cycle_phase = step / self.steps_per_cycle
                # Sources are active for the first (1 - r_2d) fraction of
                # the cycle — decoherence shortens the duty cycle.
                source_mask = (cycle_phase < (1.0 - r_2d)).astype(float)
                J = np.zeros((self.grid_size, self.grid_size))
                for j in range(3):
                    J += source_mask * np.sin(
                        self.kx[j]*self.X + self.ky[j]*self.Y
                        - 2*np.pi*self.f0*(cycle*self.T + t_local)
                    )

                lap = np.zeros_like(self.psi)
                lap[1:-1, 1:-1] = z_coupling[1:-1, 1:-1] * (
                    self.psi[2:, 1:-1] + self.psi[:-2, 1:-1] +
                    self.psi[1:-1, 2:] + self.psi[1:-1, :-2] -
                    4 * self.psi[1:-1, 1:-1]
                )
                psi_next = 2*self.psi - self.psi_prev + self.courant2 * lap + self.dt**2 * J

                # First-order Mur absorbing boundaries on all four edges.
                mur = (self.c*self.dt - self.dx) / (self.c*self.dt + self.dx)
                psi_next[:, 0] = self.psi[:, 1] + mur * (psi_next[:, 1] - self.psi[:, 0])
                psi_next[:, -1] = self.psi[:, -2] + mur * (psi_next[:, -2] - self.psi[:, -1])
                psi_next[0, :] = self.psi[1, :] + mur * (psi_next[1, :] - self.psi[0, :])
                psi_next[-1, :] = self.psi[-2, :] + mur * (psi_next[-2, :] - self.psi[-1, :])

                self.psi_prev = self.psi.copy()
                self.psi = psi_next.copy()
                I_this += self.psi**2

            I_this /= self.steps_per_cycle
            self.I_accumulated += I_this

            # Snapshot
            if cycle % snapshot_interval == 0 or cycle == n_cycles - 1:
                snap = self.take_snapshot(I_norm, r_raw, cycle)
                snapshots.append(snap)

        return snapshots

    def handle_overflow(self, overflow, cycle):
        """Override in subclasses."""
        raise NotImplementedError

    def take_snapshot(self, I_norm, r_raw, cycle):
        """Measure z-field structure on the interior (15% edge trimmed).

        Returns a dict with z-field amplitude/saturation stats, peak count,
        5-fold vs 6-fold angular symmetry scores, mean nearest-neighbour
        peak angle, w-field max, expelled total, and 2D overflow fraction.
        """
        edge = int(self.grid_size * 0.15)
        z_int = self.z_field[edge:-edge, edge:-edge]
        z_max = float(np.max(z_int))
        z_mean = float(np.mean(z_int))
        z_cv = float(np.std(z_int) / z_mean) if z_mean > 1e-15 else 0.0

        # z-peaks and symmetry
        z_n_peaks = 0
        z_sym_5 = 0.0
        z_sym_6 = 0.0
        z_mean_angle = 0.0
        if z_max > 0.01:
            # Local maxima above 30% of the global max count as peaks.
            z_mf = ndimage.maximum_filter(z_int, size=7)
            z_is_max = (z_int == z_mf) & (z_int > 0.3 * z_max)
            z_peaks = np.argwhere(z_is_max)
            z_n_peaks = len(z_peaks)

            if z_n_peaks >= 5:
                # Score n-fold symmetry: fraction of peak-pair angular
                # differences near multiples of 2π/n, minus the chance
                # baseline expected from the tolerance window.
                center = np.array([z_int.shape[0]//2, z_int.shape[1]//2])
                rel = z_peaks - center
                angs = np.arctan2(rel[:, 0], rel[:, 1])
                adiffs = np.abs(np.subtract.outer(angs, angs))
                n_pairs = max(1, z_n_peaks * (z_n_peaks - 1))
                tol = 0.15
                baseline = tol / np.pi
                for nf, attr in [(5, 'z_sym_5'), (6, 'z_sym_6')]:
                    count = 0
                    for h in range(1, nf):
                        count += np.sum(np.abs(adiffs - h*2*np.pi/nf) < tol)
                    val = float(max(0, count / (n_pairs * (nf-1)) - baseline))
                    if nf == 5:
                        z_sym_5 = val
                    else:
                        z_sym_6 = val

            if z_n_peaks >= 3:
                # Mean angle between each peak's two nearest neighbours
                # (60° suggests hexagonal, 72°/108° pentagonal packing).
                try:
                    tree = cKDTree(z_peaks)
                    d, idx = tree.query(z_peaks, k=min(4, z_n_peaks))
                    angles_list = []
                    for i in range(z_n_peaks):
                        if d.shape[1] >= 3:
                            e1 = (z_peaks[idx[i, 1]] - z_peaks[i]).astype(float)
                            e2 = (z_peaks[idx[i, 2]] - z_peaks[i]).astype(float)
                            ca = np.dot(e1, e2)/(np.linalg.norm(e1)*np.linalg.norm(e2)+1e-15)
                            angles_list.append(float(np.degrees(np.arccos(np.clip(ca, -1, 1)))))
                    if angles_list:
                        z_mean_angle = float(np.mean(angles_list))
                except Exception:
                    # Best-effort metric: degenerate peak sets may break the
                    # KD-tree query; leave z_mean_angle at 0.
                    pass

        r_int = r_raw[edge:-edge, edge:-edge]
        overflow_frac = float(np.sum(r_int > 0.5) / r_int.size)

        return {
            "cycle": cycle,
            "z_max": z_max,
            "z_cv": z_cv,
            "z_saturation": float(z_max / self.z_ceiling) if self.z_ceiling > 0 else 0,
            "z_n_peaks": z_n_peaks,
            "z_sym_5": z_sym_5,
            "z_sym_6": z_sym_6,
            "z_mean_angle": z_mean_angle,
            "w_max": float(np.max(self.w_field)),
            "expelled_total": float(np.sum(self.expelled)),
            "overflow_2d_frac": overflow_frac,
        }


# ======================================================================
# MODEL A: PARTIAL CAPTURE
# ======================================================================

class ModelA_PartialCapture(CascadeSimulation):
    """Fraction captured in z, rest redistributed to 2D."""

    def __init__(self, capture_fraction=0.5, **kwargs):
        super().__init__(**kwargs)
        self.capture_fraction = capture_fraction
        self.model_name = f"A_partial_{capture_fraction}"

    def handle_overflow(self, overflow, cycle):
        # Captured portion goes to z
        self.z_field += self.capture_fraction * overflow * 0.1
        # Uncaptured portion redistributes to I_accumulated (reduces potential locally)
        # This acts as a pressure relief valve
        self.I_accumulated *= (1.0 - (1.0 - self.capture_fraction) * overflow * 0.01)


# ======================================================================
# MODEL B: FULL EJECTION
# ======================================================================

class ModelB_FullEjection(CascadeSimulation):
    """100% overflow goes to z. 2D system permanently depleted."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model_name = "B_full_ejection"

    def handle_overflow(self, overflow, cycle):
        # All overflow goes to z
        self.z_field += overflow * 0.1
        # 2D system is depleted: subtract overflow energy from accumulation
        self.I_accumulated = np.maximum(0, self.I_accumulated - overflow * 0.5)


# ======================================================================
# MODEL C: RECURSIVE f|t (NEW DIMENSION)
# ======================================================================

class ModelC_RecursiveFT(CascadeSimulation):
    """Overflow starts its own f|t dynamics in the z-dimension."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model_name = "C_recursive_ft"
        # z-field has its own "wave" dynamics
        self.z_velocity = np.zeros((self.grid_size, self.grid_size))
        self.z_potential = np.zeros((self.grid_size, self.grid_size))

    def handle_overflow(self, overflow, cycle):
        # Overflow becomes SOURCE for z-dynamics (like f|t pulse for z)
        z_source = overflow * 0.1

        # z-wave equation: simple diffusion + source
        #   d²z/dt² = c_z² ∇²z + source
        # Using explicit Euler for the z-dynamics (one step per cycle)
        c_z = 0.5  # z-dimension wave speed (slower than 2D)
        z_lap = np.zeros_like(self.z_field)
        z_lap[1:-1, 1:-1] = (
            self.z_field[2:, 1:-1] + self.z_field[:-2, 1:-1] +
            self.z_field[1:-1, 2:] + self.z_field[1:-1, :-2] -
            4 * self.z_field[1:-1, 1:-1]
        )

        # z-decoherence: the z-field develops its own potential
        self.z_potential += self.z_field * 0.01
        z_r = np.clip(0.3 + self.z_potential * 0.1, 0.01, 0.49)

        # z-wave update with decoherence
        z_decoherence = 1.0 - z_r  # persistence
        self.z_velocity += c_z**2 * z_lap * (self.dx**2) + z_source
        self.z_velocity *= z_decoherence  # damping from z-decoherence
        self.z_field += self.z_velocity


# ======================================================================
# MAIN
# ======================================================================

def main():
    """Run all five model configurations, compare, and save results."""
    log("=" * 70)
    log("B.6.9 FIBONACCI CASCADE — THREE COMPETING MODELS")
    log("Same overflow trigger, three treatments of excess energy")
    log("=" * 70)
    log("")

    grid_size = 256
    n_wavelengths = 10
    n_cycles = 300
    snapshot_interval = 20
    alpha = 0.10  # use the value where overflow is significant

    common_kwargs = dict(
        grid_size=grid_size,
        n_wavelengths=n_wavelengths,
        r_base=0.3,
        feedback_alpha=alpha,
        budget_scale=1.0,
    )
    models = [
        ("A_partial_30", ModelA_PartialCapture(capture_fraction=0.3, **common_kwargs)),
        ("A_partial_50", ModelA_PartialCapture(capture_fraction=0.5, **common_kwargs)),
        ("A_partial_80", ModelA_PartialCapture(capture_fraction=0.8, **common_kwargs)),
        ("B_full_eject", ModelB_FullEjection(**common_kwargs)),
        ("C_recursive", ModelC_RecursiveFT(**common_kwargs)),
    ]

    log(f"Grid: {grid_size}x{grid_size}, {n_wavelengths} wavelengths")
    log(f"Cycles: {n_cycles}, α={alpha}")
    log(f"Fibonacci ceilings: z={1.6:.1f}, w={2.6:.1f}")
    log(f"Models: {[name for name, _ in models]}")
    log("")

    all_results = {}
    t_start = time.time()

    for model_name, sim in models:
        log("=" * 70)
        log(f"MODEL: {model_name}")
        log("=" * 70)
        t0 = time.time()
        snapshots = sim.run(n_cycles=n_cycles, snapshot_interval=snapshot_interval)
        elapsed = time.time() - t0
        log(f" Elapsed: {elapsed:.0f}s")

        if snapshots:
            last = snapshots[-1]
            log(f" FINAL: z_sat={last['z_saturation']*100:.0f}% "
                f"z_pk={last['z_n_peaks']} "
                f"s5={last['z_sym_5']:.3f} s6={last['z_sym_6']:.3f} "
                f"w={last['w_max']:.4f} exp={last['expelled_total']:.4f} "
                f"angle={last['z_mean_angle']:.1f}°")
            if last['z_sym_5'] > last['z_sym_6'] and last['z_sym_5'] > 0.005:
                log(f" ** 5-FOLD DOMINATES **")
            if last['z_mean_angle'] > 0:
                d5 = min(abs(last['z_mean_angle']-72), abs(last['z_mean_angle']-108))
                d6 = abs(last['z_mean_angle']-60)
                if d5 < d6:
                    log(f" ** PHI ANGLE: {last['z_mean_angle']:.1f}° **")
            if last['expelled_total'] > 0.001:
                log(f" ** ANTI-PARTICLE OVERFLOW: {last['expelled_total']:.4f} **")
        log("")

        all_results[model_name] = {
            "model": model_name,
            "snapshots": snapshots,
            "elapsed": elapsed,
        }
        _save_intermediate(all_results)

    total_elapsed = time.time() - t_start

    # ---- CROSS-MODEL COMPARISON ----
    log("=" * 70)
    log("CROSS-MODEL COMPARISON")
    log("=" * 70)
    log("")
    log(f"{'Model':>15s} | {'z_sat%':>6s} {'z_pk':>5s} {'s5z':>5s} {'s6z':>5s} "
        f"| {'w_max':>8s} {'expel':>8s} | {'angle':>6s} {'zCV':>5s}")
    log("-" * 75)
    for model_name, _ in models:
        if model_name in all_results:
            snaps = all_results[model_name]["snapshots"]
            if snaps:
                last = snaps[-1]
                log(f"{model_name:>15s} | "
                    f"{last['z_saturation']*100:5.0f}% {last['z_n_peaks']:5d} "
                    f"{last['z_sym_5']:5.3f} {last['z_sym_6']:5.3f} | "
                    f"{last['w_max']:8.4f} {last['expelled_total']:8.4f} | "
                    f"{last['z_mean_angle']:6.1f} {last['z_cv']:5.3f}")
    log("")

    # Determine winner
    best_5fold = max(all_results.items(),
                     key=lambda x: x[1]["snapshots"][-1]["z_sym_5"] if x[1]["snapshots"] else 0)
    log(f"Highest 5-fold: {best_5fold[0]} "
        f"(sym_5={best_5fold[1]['snapshots'][-1]['z_sym_5']:.3f})")
    best_structure = max(all_results.items(),
                         key=lambda x: x[1]["snapshots"][-1]["z_n_peaks"] if x[1]["snapshots"] else 0)
    log(f"Most z-peaks: {best_structure[0]} "
        f"({best_structure[1]['snapshots'][-1]['z_n_peaks']} peaks)")
    log("")
    log(f"Total elapsed: {total_elapsed:.0f}s")

    _save_final(all_results, total_elapsed)


def _save_intermediate(results):
    """Best-effort checkpoint of partial results after each model run."""
    odir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "..", "..", "tlt notes", "theory", "mathematical_framework")
    try:
        with open(os.path.join(odir, "B6_fibonacci_cascade_partial.json"), 'w') as f:
            json.dump(results, f, indent=2, default=_jd)
    except Exception:
        # Checkpointing must never abort the run; the final save will retry.
        pass


def _save_final(results, elapsed):
    """Write the full JSON results plus a short text summary."""
    odir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "..", "..", "tlt notes", "theory", "mathematical_framework")
    jp = os.path.join(odir, "B6_fibonacci_cascade_results.json")
    tp = os.path.join(odir, "B6_fibonacci_cascade_results.txt")
    with open(jp, 'w') as f:
        json.dump(results, f, indent=2, default=_jd)
    with open(tp, 'w') as f:
        f.write("="*70+"\n")
        f.write("B.6.9 FIBONACCI CASCADE — THREE COMPETING MODELS — RESULTS\n")
        f.write("="*70+"\n")
        f.write(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"Elapsed: {elapsed:.0f}s\n\nSee JSON for full data.\n")
    log(f" Results: {tp}")
    log(f" JSON: {jp}")


def _jd(obj):
    """JSON serializer fallback: convert numpy scalars/arrays, else str()."""
    if isinstance(obj, (np.integer,)):
        return int(obj)
    if isinstance(obj, (np.floating,)):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    return str(obj)


if __name__ == "__main__":
    main()