#%%
import pandas as pd
import torch
import numpy as np

#%%
# load the two-body spectrum and sort by the coupling c
df = pd.read_csv('../temp/2body_data.csv').sort_values(by='c')
df.loc[df['re_E'] < 0, 'im_E'] = 0  # force im_E = 0 for bound states (avoids branch-cut issues in the square root)
df['E'] = df['re_E'] + 1j * df['im_E']
df['k'] = np.sqrt(df['E'])  # complex momentum; bound states land on the positive imaginary axis

# train on the bound states (re_E < 0); the re_E > 0 states are the extrapolation target
train_data = df[df['re_E'] < 0]
target_data = df[df['re_E'] > 0]

train_cs = train_data['c'].to_numpy()
train_ks = torch.tensor(train_data['k'].to_numpy(), dtype=torch.complex128)

#%%
# hyperparameters
N = 9  # matrix dimension

# initialize random complex-symmetric Hamiltonians for the ansatz H(c) = H0 + c * H1
H0 = torch.randn(N, N, dtype=torch.complex128)
H0 = (H0 + torch.transpose(H0, 0, 1)).requires_grad_()  # symmetric
H1 = torch.randn(N, N, dtype=torch.complex128)
H1 = (H1 + torch.transpose(H1, 0, 1)).requires_grad_()  # symmetric

#%%
# training
# generate a set of c values to follow by subdividing the intervals between consecutive training cs
subdivisions = 3
c_steps = np.concatenate([np.linspace(start, stop, subdivisions, endpoint=False)
                          for (start, stop) in zip(train_cs, train_cs[1:])])
c_steps = np.append(c_steps, train_cs[-1])

lr = 0.05
epochs = 100000
for epoch in range(epochs):
    ks = torch.empty(len(train_data), dtype=torch.complex128)
    current_k = 0.0  # start at the threshold
    for c in c_steps:
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        # follow the eigenvalue closest to the one from the previous step
        current_k = evals[torch.argmin(torch.abs(evals - current_k))]
        # exact float comparison is safe here: c_steps contains the training cs verbatim
        if np.any(c == train_cs):
            index = np.where(c == train_cs)[0][0]
            ks[index] = current_k
    loss = ((ks - train_ks).abs() ** 2).sum()
    if epoch % 1000 == 0:
        print(f"Training {epoch / epochs:.1%} \t Loss: {loss.item():.3e}")

    # manual gradient descent; for complex parameters, .grad holds the conjugate
    # Wirtinger derivative, so p -= lr * p.grad descends the (real-valued) loss
    if H0.grad is not None:
        H0.grad.zero_()
    if H1.grad is not None:
        H1.grad.zero_()
    loss.backward()
    with torch.no_grad():
        H0 -= lr * H0.grad
        H1 -= lr * H1.grad

# %%
# evaluate for all points
all_c = torch.tensor(df['c'].values, dtype=torch.float64)
exact_k = torch.tensor(df['k'].values, dtype=torch.complex128)
pred_ks = np.empty(len(df), dtype=np.complex128)
with torch.no_grad():
    for (index, (c, k)) in enumerate(zip(all_c, exact_k)):
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        # pick the eigenvalue closest to the exact one
        # TODO: more robust way to identify the eigenstate -- see the overlap-based sketch in the final cell
        i = torch.argmin(torch.abs(evals - k))
        pred_ks[index] = evals[i].item()
pred_Es = pred_ks ** 2

# %%
# plot the results
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 1, figsize=(8, 12))  # two vertical panels

# first panel: k values (complex momentum plane)
axs[0].scatter(np.real(train_data['k']), np.imag(train_data['k']), label='training')
axs[0].scatter(np.real(target_data['k']), np.imag(target_data['k']), label='target')
axs[0].scatter(np.real(pred_ks), np.imag(pred_ks), marker='x', label='predicted')
axs[0].set_xlabel('Re(k)')
axs[0].set_ylabel('Im(k)')
axs[0].legend()

# second panel: E values (complex energy plane)
axs[1].scatter(np.real(train_data['E']), np.imag(train_data['E']), label='training')
axs[1].scatter(np.real(target_data['E']), np.imag(target_data['E']), label='target')
axs[1].scatter(np.real(pred_Es), np.imag(pred_Es), marker='x', label='predicted')
axs[1].set_xlabel('Re(E)')
axs[1].set_ylabel('Im(E)')
axs[1].legend()

plt.tight_layout()  # adjust spacing between panels
plt.show()
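
# %%
# Sketch of a more robust eigenstate identification (referenced by the TODO above).
# This cell is an illustrative addition, not part of the original pipeline: instead
# of matching on eigenvalue proximity, follow the eigenvector with the largest
# overlap against the previously tracked state, which stays stable when two
# eigenvalues approach each other. `track_state` is a hypothetical helper name;
# note that the eigenvectors of a complex-symmetric matrix are not orthonormal in
# the Hermitian inner product, so the overlap criterion is a heuristic here.
def track_state(H, prev_vec):
    """Return the (eigenvalue, eigenvector) pair of H whose eigenvector has the
    largest overlap |<v_i|prev_vec>| with the previously tracked state."""
    evals, evecs = torch.linalg.eig(H)               # eigenvectors are the columns of evecs
    overlaps = torch.abs(evecs.conj().T @ prev_vec)  # |<v_i|prev>| for every i
    i = torch.argmax(overlaps)
    return evals[i], evecs[:, i]

# usage sketch: seed prev_vec with the eigenvector of the tracked state at the
# first c, then update (current_k, prev_vec) together inside the continuation loop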