#%%
import pandas as pd
import torch
import numpy as np

#%%
df = pd.read_csv('../temp/2body_data.csv').sort_values(by='c')
df.loc[df['re_E'] < 0, 'im_E'] = 0  # set im_E = 0 for bound states (to avoid square root issues)
df['E'] = df['re_E'] + 1j * df['im_E']
df['k'] = np.sqrt(df['E'])
c0 = df[df['E'] == 0]['c'].values[0]
df['c'] = df['c'] - c0  # shift c to set c=0 at the exceptional point

train_data = df[df['re_E'] < 0]
target_data = df[df['re_E'] > 0]
train_cs = train_data['c'].to_numpy()
train_ks = torch.tensor(train_data['k'].to_numpy(), dtype=torch.complex128)

#%%
# hyperparameters
N = 5

# initialize random Hamiltonians
H0 = torch.randn(N, N, dtype=torch.complex128)
H0 = (H0 + torch.transpose(H0, 0, 1)).requires_grad_()  # symmetric
H1 = torch.randn(N, N, dtype=torch.complex128)
H1 = (H1 + torch.transpose(H1, 0, 1)).requires_grad_()  # symmetric


def enforce_ep():
    # enforce exceptional point at c=0
    with torch.no_grad():
        H0[0:2, :] = 0
        H0[:, 0:2] = 0
        H0[0, 1] = 1


enforce_ep()

#%%
# training
# generate a set of c values to follow by subdividing the training cs
subdivisions = 2
c_steps = np.concatenate([np.linspace(start, stop, subdivisions, endpoint=False)
                          for (start, stop) in zip(train_cs, train_cs[1:])])
c_steps = np.append(c_steps, train_cs[-1])
c_steps = np.delete(c_steps, 0)  # remove the first point (c=0)

lr = 0.01
epochs = 200000
for epoch in range(epochs):
    ks = torch.empty(len(train_data), dtype=torch.complex128)
    kvs = torch.empty(len(train_data), dtype=torch.complex128)
    current_k = 0.0   # start at the threshold
    current_kv = 0.0  # start at the threshold
    for c in c_steps:
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        current_k = evals[torch.argmin(torch.abs(evals - current_k))]
        evals = evals[evals != current_k]  # remove selected k
        current_kv = evals[torch.argmin(torch.abs(evals - current_kv))]
        if np.any(c == train_cs):
            index = np.where(c == train_cs)[0][0]
            ks[index] = current_k
            kvs[index] = current_kv
    loss = ((ks - train_ks).abs() ** 2).sum()
    # push virtual states towards (-)bound and then to the imaginary axis
    if epoch / epochs < 0.25:
        loss += ((kvs + train_ks).abs() ** 2).sum()
    else:
        loss += (kvs.imag ** 2).sum()
    if epoch % 1000 == 0:
        print(f"Training {(epoch + 1) / epochs:.1%} \t Loss: {loss}")
    if H0.grad is not None:
        H0.grad.zero_()
    if H1.grad is not None:
        H1.grad.zero_()
    loss.backward()
    with torch.no_grad():
        H0 -= lr * H0.grad
        H1 -= lr * H1.grad
    enforce_ep()

# %%
# evaluate for all points
all_c = torch.tensor(df['c'].values, dtype=torch.float64)
exact_k = torch.tensor(df['k'].values, dtype=torch.complex128)
pred_ks = np.empty(len(df), dtype=np.complex128)
with torch.no_grad():
    for (index, (c, k)) in enumerate(zip(all_c, exact_k)):
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        i = torch.argmin(torch.abs(evals - k))  # TODO: more robust way to identify the eigenvector
        pred_ks[index] = evals[i]
pred_Es = pred_ks ** 2

# %%
# plot the results
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 1, figsize=(8, 12))  # Create a figure with two vertical panels

# First panel: k values
axs[0].scatter(np.real(train_data['k']), np.imag(train_data['k']), label='training')
axs[0].scatter(np.real(target_data['k']), np.imag(target_data['k']), label='target')
axs[0].scatter(np.real(pred_ks), np.imag(pred_ks), marker='x', label='predicted')
axs[0].set_xlabel('Re(k)')
axs[0].set_ylabel('Im(k)')
axs[0].legend()

# Second panel: E values
axs[1].scatter(np.real(train_data['E']), np.imag(train_data['E']), label='training')
axs[1].scatter(np.real(target_data['E']), np.imag(target_data['E']), label='target')
axs[1].scatter(np.real(pred_Es), np.imag(pred_Es), marker='x', label='predicted')
axs[1].set_xlabel('Re(E)')
axs[1].set_ylabel('Im(E)')
axs[1].legend()

plt.tight_layout()  # Adjust spacing between panels
plt.show()

# %%
# animation of eigenvalues
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

# Prepare figure
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(np.real(train_data['k']), np.imag(train_data['k']), label='training')
ax.scatter(np.real(target_data['k']), np.imag(target_data['k']), label='target')
sc = ax.scatter([], [], marker='x', label='predicted')  # Placeholder for predicted eigenvalues
ax.set_xlim(-1, 1)  # Adjust limits as needed
ax.set_ylim(-1, 1)  # Adjust limits as needed
ax.set_xlabel('Re(k)')
ax.set_ylabel('Im(k)')
ax.legend()
ax.set_title('c = ?')


# Animation function
def update(c):
    H = H0 + c * H1
    evals = torch.linalg.eigvals(H).detach().numpy()
    sc.set_offsets(np.c_[np.real(evals), np.imag(evals)])
    ax.set_title(f'c = {c:.2f}')
    return sc,


# Create animation
no_frames = 100
c_steps = np.linspace(max(df['c']), min(df['c']), no_frames)
ani = FuncAnimation(fig, update, frames=c_steps, interval=100, blit=True)

# Save or display the animation
ani.save('../temp/PMM.gif', writer='pillow')  # Save as a GIF file
plt.show()  # Display the animation

# %%