import numpy as np
def inner_product(matrix_a, matrix_b):
    """
    Inner product of two matrices: sum(A_ij * B_ij*).

    The second operand is conjugated, matching the standard complex
    (Frobenius) inner product convention used by the decomposition.
    """
    # np.vdot conjugates its FIRST argument and flattens both inputs,
    # so vdot(B, A) == sum(conj(B_ij) * A_ij) == sum(A_ij * conj(B_ij)).
    return np.vdot(matrix_b, matrix_a)
def format_in_pitchfork_basis(c_number):
    """
    Format a complex number in the "pitchfork" polar basis.

    Writes c = |c| * e^(i*theta) as |c| * pitchfork^k, where k indexes
    the nearest multiple of pi/8 (16 steps around the unit circle).
    """
    if not np.isclose(c_number, 0):
        # Polar form: magnitude plus the nearest sixteenth-of-a-turn power.
        mag = np.abs(c_number)
        theta = np.angle(c_number)
        power = int(np.round(theta / (np.pi / 8))) % 16
        # \u2694 is the unicode for ⚔; 8 decimal places, power left-aligned.
        return f"{mag:10.8f} * \u2694^{power:<2}"
    # Exact-zero entries get fixed-width padding so columns stay aligned.
    return "0.00000000 "
def analyze_and_decompose(target_matrix, permutation_matrix):
    """
    Decompose `target_matrix` in the eigenbasis of `permutation_matrix`.

    1. Finds the TRUE eigenvectors ("pure notes") of the permutation matrix.
    2. Centers the target matrix by subtracting its mean.
    3. Projects the centered target onto each eigenvector to obtain the
       coefficients, displaying them in the pitchfork polar system.
    4. Reconstructs the matrix from coefficients and eigenvectors.
    5. Verifies the reconstruction against the original target.

    Parameters
    ----------
    target_matrix : ndarray
        Real matrix to decompose. Its element count must equal the order
        of `permutation_matrix` (previously hard-coded to a 4x4 target;
        now any shape whose size matches works).
    permutation_matrix : ndarray
        Square permutation matrix supplying the eigenbasis.

    Returns
    -------
    bool
        True when the reconstruction matches `target_matrix` per
        np.allclose (previously the function returned None).
    """
    # Eigenvectors are reshaped to the target's own shape; this replaces
    # the hard-coded (4, 4) so non-4x4 targets are also supported.
    note_shape = target_matrix.shape

    print("--- 1. Calculating TRUE Eigenbasis from \u039E Matrix ---")
    eigenvalues, eigenvectors = np.linalg.eig(permutation_matrix)
    print(f" Successfully found {len(eigenvalues)} numerically exact eigenvectors (pure notes).")

    # --- 2. Center the Target Matrix ---
    print("\n--- 2. Decomposing Target Lo-Shu (Centered) ---")
    mean_value = np.mean(target_matrix)
    centered_target = target_matrix - mean_value
    print(np.round(centered_target, 8))  # Use higher precision here too

    # --- 3. Calculate Coefficients and Display Notes ---
    print("\n--- 3. Calculating TRUE Decomposition Coefficients and Notes ---")
    print(" (Coefficients c_k = )")
    print(" (Showing \u2694-based polar format and standard complex format)\n")
    coefficients = []
    # Sort eigenpairs by eigenvalue phase so the notes print in angular order.
    sort_indices = np.argsort(np.angle(eigenvalues))
    eigenvalues = eigenvalues[sort_indices]
    eigenvectors = eigenvectors[:, sort_indices]
    # Vectorized formatter: applies the scalar formatter elementwise.
    vectorized_format = np.vectorize(format_in_pitchfork_basis)
    for i in range(len(eigenvalues)):
        lambda_k = eigenvalues[i]
        eigenmatrix_i = eigenvectors[:, i].reshape(note_shape)
        # Projection of the centered target onto this eigen-"note".
        coefficient = inner_product(centered_target, eigenmatrix_i)
        coefficients.append(coefficient)
        # Format coefficient and eigenvalue in the pitchfork basis.
        pitchfork_format = format_in_pitchfork_basis(coefficient)
        pitchfork_lambda = format_in_pitchfork_basis(lambda_k)
        print(f"-----------------------------------------------------------------------------")
        print(f" Note {i+1:2d} (for \u03BB = {pitchfork_lambda} approx {lambda_k:.8f}):")
        print(f" Coefficient c_{i+1:2d}: {pitchfork_format} (approx {coefficient.real:10.8f} + {coefficient.imag:10.8f}j)")
        # Format and print the "pure note" matrix.
        formatted_note_matrix = vectorized_format(eigenmatrix_i)
        print(f" Shape of \u03D1_{i+1:2d} (Pure Note):")
        # Print the string matrix without quotes, wide enough to avoid wrapping.
        with np.printoptions(formatter={'str_kind': lambda x: x}, linewidth=200):
            print(formatted_note_matrix, "\n")

    # --- 4. Reconstruct the Matrix from its Notes ---
    print("-----------------------------------------------------------------------------")
    print("--- 4. Reconstructing Matrix from Coefficients and TRUE Notes ---")
    reconstructed_matrix = np.zeros(note_shape, dtype=complex)
    for coefficient, eigenvector in zip(coefficients, eigenvectors.T):
        reconstructed_matrix += coefficient * eigenvector.reshape(note_shape)
    # Imaginary parts cancel for a real target; add the subtracted mean back.
    reconstructed_original = reconstructed_matrix.real + mean_value
    print("\n Reconstructed Matrix (Real Part + Mean):")
    print(np.round(reconstructed_original, 8))  # Use higher precision here too

    # --- 5. Verification ---
    print("\n--- 5. Verification ---")
    is_reconstruction_correct = np.allclose(target_matrix, reconstructed_original)
    if is_reconstruction_correct:
        print(" SUCCESS! The reconstructed matrix perfectly matches the original target Lo-Shu.")
    else:
        print(" Failure. The reconstructed matrix does not match the original.")
    # Report the verification result so callers can check it programmatically.
    return is_reconstruction_correct
# --- Define the Matrices ---
# The \Xi permutation matrix, built as a one-hot encoding of the map
# i -> _XI_IMAGE[i] (row i has its single 1 in column _XI_IMAGE[i]).
_XI_IMAGE = [15, 2, 9, 4, 1, 12, 7, 10, 6, 11, 0, 13, 8, 5, 14, 3]
Xi = np.zeros((16, 16), dtype=int)
Xi[np.arange(16), _XI_IMAGE] = 1
# The classic Lo-Shu matrix to be decomposed
# (NOTE: 4x4 here, unlike the traditional 3x3 Lo-Shu square).
L_classic = np.array([
    [16, 3, 10, 5],
    [2, 13, 8, 11],
    [7, 12, 1, 14],
    [9, 6, 15, 4],
])
# --- Run the Analysis ---
# Print floats with 8 decimals and suppress scientific notation.
np.set_printoptions(precision=8, suppress=True)
analyze_and_decompose(L_classic, Xi)