#!/usr/bin/env python3
"""
CURVED POTENTIAL SITE DIFFERENTIATION TEST
============================================

Date: 2026-03-19
Purpose: Test whether a position-dependent Lagrangian potential (the
"curve of time") creates site differentiation in N=3 pulsed wave
interference.

HYPOTHESIS (Jonathan, 2026-03-19):
The Lagrangian curve is the missing symmetry-breaking mechanism.
- Flat bandwidth → uniform lattice → all peaks identical (CV=0, PROVEN)
- Curved bandwidth → position-dependent decoherence → site differentiation

THEORY CONNECTION:
t = C_potential (theory.txt line 155): decoherence varies with position
on the Lagrangian potential. The cipher's cone has:
    Height = Compton frequency (base wavelength)
    Curvature = Lagrangian potential (bends the wavelength field)
    Spiral = spin-orbit coupling (rotational structure)

If the potential is FLAT, all sites see the same effective decoherence
ratio r, producing identical peaks (proven analytically in
time_averaged_intensity.py). If the potential CURVES, different sites
see different effective r values, breaking the 3-fold symmetry.

TEST DESIGN:
1. Same N=3 plane wave setup as time_averaged_intensity.py
2. Add position-dependent potential V(x,y) — several profiles tested:
   a. Linear gradient (simplest asymmetry)
   b. Parabolic (radial curvature)
   c. Conical (matches cipher cone geometry)
   d. Gaussian well (localized potential)
3. The potential modifies the effective phase accumulation per cycle:
   delta_j(x) = delta_j_base + V(x,y) * coupling
   This makes the Fejer factors position-dependent.
4. Measure CV of lattice peak intensities
5. Compare to flat case (CV=0 baseline)

CRITICAL DESIGN RULE: OUTCOME-AGNOSTIC. No pass/fail. The data shows
what it shows.
"""
import numpy as np
from scipy import ndimage
import json
import os  # moved to top level: main() uses os.path; previously imported only under __main__
import time


# ======================================================================
# SECTION 1: POTENTIAL PROFILES
# ======================================================================

def potential_flat(X, Y):
    """Flat potential — control case (should reproduce CV=0)."""
    return np.zeros_like(X)


def potential_linear(X, Y, slope=0.1):
    """Linear gradient in x-direction — simplest asymmetry."""
    return slope * X


def potential_parabolic(X, Y, curvature=0.05):
    """Radial parabolic potential — symmetric curvature."""
    return curvature * (X**2 + Y**2)


def potential_conical(X, Y, slope=0.1):
    """Conical potential — matches cipher cone geometry."""
    return slope * np.sqrt(X**2 + Y**2)


def potential_gaussian_well(X, Y, depth=0.5, sigma=2.0):
    """Localized Gaussian well — concentrated curvature."""
    return -depth * np.exp(-(X**2 + Y**2) / (2 * sigma**2))


def potential_sinusoidal(X, Y, amplitude=0.3, period=3.0):
    """Sinusoidal modulation — periodic curvature."""
    return amplitude * np.sin(2 * np.pi * X / period)


# Registry of (profile function, kwargs) pairs swept by main().
POTENTIAL_PROFILES = {
    "flat": (potential_flat, {}),
    "linear_01": (potential_linear, {"slope": 0.1}),
    "linear_03": (potential_linear, {"slope": 0.3}),
    "linear_05": (potential_linear, {"slope": 0.5}),
    "parabolic_005": (potential_parabolic, {"curvature": 0.05}),
    "parabolic_02": (potential_parabolic, {"curvature": 0.2}),
    "conical_01": (potential_conical, {"slope": 0.1}),
    "conical_03": (potential_conical, {"slope": 0.3}),
    "gaussian_05": (potential_gaussian_well, {"depth": 0.5, "sigma": 2.0}),
    "gaussian_10": (potential_gaussian_well, {"depth": 1.0, "sigma": 1.5}),
    "sinusoidal": (potential_sinusoidal, {"amplitude": 0.3, "period": 3.0}),
}


# ======================================================================
# SECTION 2: CURVED-POTENTIAL INTERFERENCE
# ======================================================================

def compute_curved_interference(X, Y, M, base_drifts, V_field,
                                coupling=1.0, k=2*np.pi):
    """
    Compute time-averaged intensity with GEOMETRIC path-length coupling.

    The Lagrangian potential V(x,y) curves the bandwidth surface. A wave
    propagating through a curved potential travels a LONGER PATH than one
    on a flat surface — just as a line drawn on a hill covers more
    distance than the same horizontal span on flat ground.

    The arc length metric:
        ds = sqrt(1 + (dV/dx)^2) dx

    For a wave propagating in direction k_hat through potential V(x,y):
        ds_j = sqrt(1 + (k_hat_j · ∇V)^2) × dx

    Different wave directions see different effective path lengths
    because their k-vectors project differently onto the gradient ∇V:
    - Wave along the gradient: maximum path length increase
    - Wave perpendicular to gradient: no path length increase
    - Wave at intermediate angle: intermediate increase

    For N=3 at 120°, three k-vectors × one gradient direction = three
    DIFFERENT effective wavelengths. Symmetry broken by GEOMETRY, not by
    any ad hoc coupling factor.

    This is the same mechanism as GR: the metric determines distances,
    and different directions through curved spacetime see different
    distances. The Lagrangian curve IS the metric.

    Parameters
    ----------
    X, Y : 2D arrays (meshgrid) of spatial coordinates.
    M : int, number of cycles to accumulate.
    base_drifts : array of 3 per-wave phase drifts applied per cycle.
    V_field : 2D array, potential evaluated on the grid.
    coupling : float, scales the directional slope in the metric factor.
    k : float, base wavenumber.

    Returns
    -------
    2D array: intensity averaged over M cycles.
    """
    angles = np.array([0, 2*np.pi/3, 4*np.pi/3])
    kx = k * np.cos(angles)
    ky = k * np.sin(angles)

    # Compute potential gradient (the slope of the curve).
    # NOTE(review): np.gradient with no spacing argument differentiates in
    # INDEX units, not the physical grid spacing of X/Y — the coupling
    # sweep absorbs this overall scale, but confirm it is intended.
    dy, dx = np.gradient(V_field)  # numpy gradient returns (row, col)

    # For each wave direction, compute the arc-length metric factor
    #   ds_j / dx = sqrt(1 + (k_hat_j · ∇V)^2)
    # This determines the effective path length for wave j at each position.
    arc_length_factor = np.zeros((3,) + X.shape)
    for j in range(3):
        k_hat_x = np.cos(angles[j])
        k_hat_y = np.sin(angles[j])
        # Directional slope: how steep the potential is along this wave direction
        directional_slope = k_hat_x * dx + k_hat_y * dy
        # Arc length factor: longer path on steeper slopes
        arc_length_factor[j] = np.sqrt(1.0 + (coupling * directional_slope)**2)

    # Accumulate intensity over M cycles with geometry-dependent phase
    I_acc = np.zeros_like(X)
    for m in range(M):
        psi = np.zeros_like(X, dtype=complex)
        for j in range(3):
            # Effective wavenumber: k_eff = k * arc_length_factor
            # Wave on curved surface accumulates more phase per unit
            # horizontal distance than wave on flat surface
            k_eff_x = kx[j] * arc_length_factor[j]
            k_eff_y = ky[j] * arc_length_factor[j]
            # Phase: geometry-modified k + small base drift
            phase = k_eff_x*X + k_eff_y*Y + base_drifts[j] * m
            psi += np.exp(1j * phase)
        I_acc += np.abs(psi)**2

    return I_acc / M


def compute_curved_interference_v2(X, Y, M, r_base, V_field,
                                   coupling=1.0, k=2*np.pi):
    """
    Version 2: Position-dependent DECOHERENCE RATIO.

    Theory says t = C_potential: the decoherence time varies with
    position. This means r(x,y) = r_base + coupling * V(x,y).

    The decoherence ratio affects how much each cycle's interference
    pattern persists. Higher r → more rest → more decay → weaker
    contribution. Lower r → less rest → stronger contribution.

    This creates position-dependent AMPLITUDE of the accumulated pattern:
    sites in low-potential regions accumulate more strongly than sites in
    high-potential regions.

    Implementation: weight each cycle's contribution by a decay factor
    that depends on the local r value.

    Parameters
    ----------
    X, Y : 2D meshgrid coordinate arrays.
    M : int, number of cycles.
    r_base : float, baseline decoherence ratio before coupling to V.
    V_field : 2D array, potential on the grid.
    coupling : float, strength of the r(x,y) modulation.
    k : float, base wavenumber.

    Returns
    -------
    2D array: persistence-weighted intensity averaged over M cycles.
    """
    angles = np.array([0, 2*np.pi/3, 4*np.pi/3])
    kx = k * np.cos(angles)
    ky = k * np.sin(angles)

    # Position-dependent decoherence ratio, clipped to a physical range
    r_field = np.clip(r_base + coupling * V_field, 0.01, 0.49)

    # The persistence factor: how much of each cycle's pattern survives
    # to the next cycle. Higher r → more decay → lower persistence.
    # Model: persistence = 1 - r (fraction of energy that survives rest)
    persistence = 1.0 - r_field  # ranges from 0.51 to 0.99

    I_acc = np.zeros_like(X)
    for m in range(M):
        psi = np.zeros_like(X, dtype=complex)
        for j in range(3):
            # Standard phase for plane wave interference
            # Add small base drift to avoid perfect coherence
            phase = kx[j]*X + ky[j]*Y + 0.01 * j * m
            psi += np.exp(1j * phase)
        # Weight this cycle's intensity by local persistence^m
        # (cumulative decay over m cycles)
        weight = persistence ** m
        I_acc += weight * np.abs(psi)**2

    return I_acc / M


def compute_curved_interference_v3(X, Y, M, r_base, V_field,
                                   coupling=1.0, k=2*np.pi):
    """
    Version 3: COMBINED geometric path-length + decoherence ratio.

    The most complete model: the Lagrangian potential affects BOTH:
    1. The effective path length (arc-length metric) — geometric coupling
    2. The decoherence ratio (persistence) — via r(x) = r_base + αV(x)

    This combines V1's geometric arc-length mechanism with V2's
    position-dependent decoherence. The wave equation on a curved
    potential naturally does both:
    - Curvature modifies effective wavelength (path length)
    - Curvature modifies decoherence time (t = C_potential)

    NOTE: V_field is used DIRECTLY (not normalized by max), so different
    potential amplitudes produce different results as they should.

    Parameters and return value mirror compute_curved_interference_v2.
    """
    angles = np.array([0, 2*np.pi/3, 4*np.pi/3])
    kx_base = k * np.cos(angles)
    ky_base = k * np.sin(angles)

    # Compute potential gradient (slope of the curve); see the NOTE in
    # compute_curved_interference about index-unit spacing.
    dy_V, dx_V = np.gradient(V_field)

    # Arc-length metric factor per wave direction (same as V1)
    arc_length_factor = np.zeros((3,) + X.shape)
    for j in range(3):
        k_hat_x = np.cos(angles[j])
        k_hat_y = np.sin(angles[j])
        directional_slope = k_hat_x * dx_V + k_hat_y * dy_V
        arc_length_factor[j] = np.sqrt(1.0 + (coupling * directional_slope)**2)

    # Position-dependent decoherence ratio (same as V2, using raw V).
    # The extra 0.05 keeps r in a reasonable band at high couplings.
    r_field = np.clip(r_base + coupling * 0.05 * V_field, 0.01, 0.49)
    persistence = 1.0 - r_field

    I_acc = np.zeros_like(X)
    for m in range(M):
        psi = np.zeros_like(X, dtype=complex)
        for j in range(3):
            # Geometry-modified wavenumber (arc-length)
            kx_j = kx_base[j] * arc_length_factor[j]
            ky_j = ky_base[j] * arc_length_factor[j]
            # Phase: geometry-modified k + small base drift
            phase = kx_j*X + ky_j*Y + 0.01 * j * m
            psi += np.exp(1j * phase)
        # Position-dependent persistence weighting (decoherence)
        weight = persistence ** m
        I_acc += weight * np.abs(psi)**2

    return I_acc / M


# ======================================================================
# SECTION 3: PEAK ANALYSIS
# ======================================================================

def find_lattice_peaks(I_field, neighborhood=9, height_frac=0.7,
                       edge_margin=30):
    """
    Find local maxima of 2D intensity field, excluding edges.

    Parameters
    ----------
    I_field : 2D intensity array.
    neighborhood : int, side of the square maximum-filter window.
    height_frac : float, fraction of the intensity range a peak must
        exceed (above the field minimum) to count.
    edge_margin : int, number of border rows/cols to exclude.

    Returns
    -------
    (values, positions): peak intensities and their (row, col) indices.
    Returns (empty array, empty list) for a near-constant field.
    """
    max_filt = ndimage.maximum_filter(I_field, size=neighborhood)
    is_max = (I_field == max_filt)

    I_max = np.max(I_field)
    I_min = np.min(I_field)
    I_range = I_max - I_min
    if I_range < 1e-10:
        # Essentially constant field: no meaningful peaks
        return np.array([]), []

    threshold = I_min + height_frac * I_range
    is_peak = is_max & (I_field > threshold)

    # Exclude edges
    is_peak[:edge_margin, :] = False
    is_peak[-edge_margin:, :] = False
    is_peak[:, :edge_margin] = False
    is_peak[:, -edge_margin:] = False

    positions = np.argwhere(is_peak)
    values = I_field[is_peak]
    return values, positions


def analyze_peaks(peak_values, label=""):
    """Compute statistics on peak intensity distribution.

    Returns a dict with CV (coefficient of variation), range, and a
    rough count of distinct intensity classes found by gap analysis.
    Degenerate inputs (<2 peaks) get a zeroed-out record.
    """
    if len(peak_values) < 2:
        return {
            "label": label,
            "n_peaks": len(peak_values),
            "cv": 0.0,
            "mean": float(peak_values[0]) if len(peak_values) == 1 else 0.0,
            "std": 0.0,
            "min": 0.0,
            "max": 0.0,
            "range_pct": 0.0,
            "n_classes": 0,
        }

    mean_v = np.mean(peak_values)
    std_v = np.std(peak_values)
    cv = std_v / mean_v if mean_v > 0 else 0

    # Try to identify distinct intensity classes (site types)
    # using a simple binning approach
    sorted_vals = np.sort(peak_values)
    gaps = np.diff(sorted_vals)
    if len(gaps) > 0 and np.max(gaps) > 0:
        # Significant gaps (> 3x median gap) separate classes
        median_gap = np.median(gaps)
        big_gaps = gaps > 3 * median_gap if median_gap > 0 else np.zeros_like(gaps, dtype=bool)
        n_classes = int(np.sum(big_gaps)) + 1
    else:
        n_classes = 1

    return {
        "label": label,
        "n_peaks": len(peak_values),
        "cv": float(cv),
        "mean": float(mean_v),
        "std": float(std_v),
        "min": float(np.min(peak_values)),
        "max": float(np.max(peak_values)),
        "range_pct": float((np.max(peak_values) - np.min(peak_values)) / mean_v * 100) if mean_v > 0 else 0.0,
        "n_classes": n_classes,
    }


# ======================================================================
# SECTION 4: MAIN COMPUTATION
# ======================================================================

def main():
    """Sweep all potential profiles × coupling strengths through the
    three interference models, report CV statistics, and save results
    (text + JSON) next to the theory notes."""
    print("=" * 70)
    print("CURVED POTENTIAL SITE DIFFERENTIATION TEST")
    print("=" * 70)
    print()

    # Parameters
    wavelength = 1.0
    k = 2 * np.pi / wavelength
    grid_extent = 6.0  # larger grid for more interior peaks
    resolution = 600  # fine grid
    M = 30  # cycles
    r_base = 0.3  # base decoherence ratio

    # Coupling strengths to sweep
    coupling_values = [0.0, 0.05, 0.1, 0.2, 0.5, 1.0]

    x = np.linspace(-grid_extent, grid_extent, resolution)
    y = np.linspace(-grid_extent, grid_extent, resolution)
    X, Y = np.meshgrid(x, y)

    # Base drifts (small asymmetric drifts to break trivial degeneracy)
    base_drifts = np.array([0.01, 0.02, 0.03])

    all_results = {}
    t_start = time.time()

    print(f"Grid: {resolution}x{resolution}, extent=±{grid_extent}")
    print(f"Wavelength: {wavelength}, M={M} cycles, r_base={r_base}")
    print(f"Coupling values: {coupling_values}")
    print(f"Potential profiles: {list(POTENTIAL_PROFILES.keys())}")
    print()

    # ---- TEST 1: Phase-drift model (V1) ----
    print("-" * 70)
    print("MODEL V1: Position-dependent PHASE DRIFT")
    print(" V(x,y) modifies phase accumulation per cycle per wave direction")
    print("-" * 70)
    print()

    for profile_name, (pot_func, pot_kwargs) in POTENTIAL_PROFILES.items():
        V_field = pot_func(X, Y, **pot_kwargs)
        V_max = np.max(np.abs(V_field))
        results_profile = []
        for coupling in coupling_values:
            I_avg = compute_curved_interference(
                X, Y, M, base_drifts, V_field, coupling=coupling, k=k
            )
            peaks, positions = find_lattice_peaks(I_avg, edge_margin=40)
            stats = analyze_peaks(peaks, label=f"V1_{profile_name}_c{coupling}")
            stats["coupling"] = coupling
            stats["V_max"] = float(V_max)
            stats["profile"] = profile_name
            stats["model"] = "V1_phase_drift"
            results_profile.append(stats)
            flag = " ***" if stats["cv"] > 0.01 else ""
            print(f" {profile_name:16s} c={coupling:.2f}: "
                  f"CV={stats['cv']:.6f} n_peaks={stats['n_peaks']:3d} "
                  f"range={stats['range_pct']:.2f}% classes={stats['n_classes']}"
                  f"{flag}")
        all_results[f"V1_{profile_name}"] = results_profile
    print()

    # ---- TEST 2: Decoherence-ratio model (V2) ----
    print("-" * 70)
    print("MODEL V2: Position-dependent DECOHERENCE RATIO")
    print(" r(x,y) = r_base + coupling * V(x,y)")
    print(" Affects persistence/decay of pattern at each position")
    print("-" * 70)
    print()

    for profile_name, (pot_func, pot_kwargs) in POTENTIAL_PROFILES.items():
        V_field = pot_func(X, Y, **pot_kwargs)
        V_max = np.max(np.abs(V_field))
        results_profile = []
        for coupling in coupling_values:
            I_avg = compute_curved_interference_v2(
                X, Y, M, r_base, V_field, coupling=coupling, k=k
            )
            peaks, positions = find_lattice_peaks(I_avg, edge_margin=40)
            stats = analyze_peaks(peaks, label=f"V2_{profile_name}_c{coupling}")
            stats["coupling"] = coupling
            stats["V_max"] = float(V_max)
            stats["profile"] = profile_name
            stats["model"] = "V2_decoherence"
            results_profile.append(stats)
            flag = " ***" if stats["cv"] > 0.01 else ""
            print(f" {profile_name:16s} c={coupling:.2f}: "
                  f"CV={stats['cv']:.6f} n_peaks={stats['n_peaks']:3d} "
                  f"range={stats['range_pct']:.2f}% classes={stats['n_classes']}"
                  f"{flag}")
        all_results[f"V2_{profile_name}"] = results_profile
    print()

    # ---- TEST 3: Combined model (V3) ----
    print("-" * 70)
    print("MODEL V3: COMBINED phase + decoherence + k-modification")
    print(" Wave equation on curved potential: ∂²ψ/∂t² = c²∇²ψ - V(x)ψ")
    print(" Position-dependent: k_eff, r(x), and directional ∇V coupling")
    print("-" * 70)
    print()

    for profile_name, (pot_func, pot_kwargs) in POTENTIAL_PROFILES.items():
        V_field = pot_func(X, Y, **pot_kwargs)
        V_max = np.max(np.abs(V_field))
        results_profile = []
        for coupling in coupling_values:
            I_avg = compute_curved_interference_v3(
                X, Y, M, r_base, V_field, coupling=coupling, k=k
            )
            peaks, positions = find_lattice_peaks(I_avg, edge_margin=40)
            stats = analyze_peaks(peaks, label=f"V3_{profile_name}_c{coupling}")
            stats["coupling"] = coupling
            stats["V_max"] = float(V_max)
            stats["profile"] = profile_name
            stats["model"] = "V3_combined"
            results_profile.append(stats)
            flag = " ***" if stats["cv"] > 0.01 else ""
            print(f" {profile_name:16s} c={coupling:.2f}: "
                  f"CV={stats['cv']:.6f} n_peaks={stats['n_peaks']:3d} "
                  f"range={stats['range_pct']:.2f}% classes={stats['n_classes']}"
                  f"{flag}")
        all_results[f"V3_{profile_name}"] = results_profile
    print()

    elapsed = time.time() - t_start

    # ---- SUMMARY ----
    print("=" * 70)
    print("SUMMARY: SITE DIFFERENTIATION BY POTENTIAL PROFILE")
    print("=" * 70)
    print()
    print(f"{'Model':6s} {'Profile':16s} {'Coupling':8s} {'CV':10s} "
          f"{'Range%':8s} {'Classes':7s} {'nPeaks':6s}")
    print("-" * 65)

    significant = []
    for key, results in all_results.items():
        for r in results:
            if r["cv"] > 0.01:  # 1% threshold for "significant"
                significant.append(r)
                print(f"{r['model']:6s} {r['profile']:16s} "
                      f"{r['coupling']:8.2f} {r['cv']:10.6f} "
                      f"{r['range_pct']:8.2f} {r['n_classes']:7d} "
                      f"{r['n_peaks']:6d}")

    if not significant:
        print(" (No configurations showed CV > 1%)")

    print()
    print(f"Total elapsed: {elapsed:.1f}s")
    print()

    # ---- KEY QUESTION ----
    print("=" * 70)
    print("INTERPRETATION")
    print("=" * 70)
    print()

    # Find best CV for each model
    for model_prefix in ["V1", "V2", "V3"]:
        model_results = []
        for key, results in all_results.items():
            if key.startswith(model_prefix):
                for r in results:
                    model_results.append(r)
        if model_results:
            best = max(model_results, key=lambda x: x["cv"])
            print(f" {model_prefix} best: CV={best['cv']:.6f} "
                  f"({best['profile']}, coupling={best['coupling']:.2f})")

    print()
    print(" If CV > 0.01: curved potential DOES create site differentiation")
    print(" If CV ≈ 0.00: curved potential alone is insufficient")
    print(" Multiple classes: different archetype zones emerging")
    print()

    # Save results
    output_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "..", "..", "tlt notes", "theory",
        "curved_potential_results.txt"
    )
    with open(output_path, 'w') as f:
        f.write("=" * 70 + "\n")
        f.write("CURVED POTENTIAL SITE DIFFERENTIATION — RESULTS\n")
        f.write("=" * 70 + "\n")
        f.write(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"Grid: {resolution}x{resolution}, extent=±{grid_extent}\n")
        f.write(f"M={M} cycles, r_base={r_base}, k={k:.4f}\n")
        f.write(f"Elapsed: {elapsed:.1f}s\n\n")
        for key, results in sorted(all_results.items()):
            f.write(f"\n{key}:\n")
            for r in results:
                f.write(f" c={r['coupling']:.2f}: CV={r['cv']:.6f} "
                        f"n={r['n_peaks']} range={r['range_pct']:.2f}% "
                        f"classes={r['n_classes']}\n")
        f.write("\n\nSIGNIFICANT (CV > 1%):\n")
        if significant:
            for r in significant:
                f.write(f" {r['model']} {r['profile']} c={r['coupling']:.2f}: "
                        f"CV={r['cv']:.6f}\n")
        else:
            f.write(" None\n")

    print(f" Results saved to: {output_path}")

    # Also save JSON for further analysis
    json_path = output_path.replace('.txt', '.json')
    with open(json_path, 'w') as f:
        json.dump(all_results, f, indent=2, default=str)
    print(f" JSON saved to: {json_path}")


if __name__ == "__main__":
    main()