#%%
# Fit a parametric matrix model H(c) = H0 + c * H1 whose eigenvalues reproduce the
# real-energy training points, then use it to predict the complex-energy targets.
import pandas as pd
import torch

#%%
# Load the spectrum data and split it into real-energy points (training)
# and complex-energy points (extrapolation targets).
df = pd.read_csv('../temp/PMM.csv')
df['E'] = df['re_E'] + 1j * df['im_E']
train_data = df[abs(df['im_E']) < 1e-5]
target_data = df[abs(df['im_E']) > 1e-5]
train_cs = torch.tensor(train_data['c'].values, dtype=torch.float64)
train_Es = torch.tensor(train_data['E'].values, dtype=torch.complex128)

#%%
# hyperparameters
N = 9  # matrix dimension of the model H(c) = H0 + c * H1

# initialize random Hamiltonians
H0 = torch.randn(N, N, dtype=torch.complex128)
H0 = (H0 + torch.transpose(H0, 0, 1)).requires_grad_()  # complex symmetric (non-Hermitian)
H1 = torch.randn(N, N, dtype=torch.complex128)
H1 = (H1 + torch.transpose(H1, 0, 1)).requires_grad_()  # complex symmetric (non-Hermitian)

#%%
# training
lr = 0.05
epochs = 100000

for epoch in range(epochs):
    Es = torch.empty(len(train_data), dtype=torch.complex128)
    for index, (c, E) in enumerate(zip(train_cs, train_Es)):
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        # pick the eigenvalue closest to the reference energy
        i = torch.argmin(torch.abs(evals - E))  # TODO: more robust way to identify the matching eigenvalue
        Es[index] = evals[i]
    loss = ((Es - train_Es).abs() ** 2).sum()

    if epoch % 1000 == 0:
        print(f"Training {epoch / epochs:.1%} \t Loss: {loss.item():.6e}")

    # plain gradient descent on H0 and H1
    if H0.grad is not None:
        H0.grad.zero_()
    if H1.grad is not None:
        H1.grad.zero_()
    loss.backward()
    with torch.no_grad():
        H0 -= lr * H0.grad
        H1 -= lr * H1.grad

# %%
# evaluate for all points (training and target)
all_c = torch.tensor(df['c'].values, dtype=torch.float64)
exact_E = torch.tensor(df['E'].values, dtype=torch.complex128)
pred_Es = torch.empty(len(df), dtype=torch.complex128)
with torch.no_grad():
    for index, (c, E) in enumerate(zip(all_c, exact_E)):
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        i = torch.argmin(torch.abs(evals - E))  # TODO: more robust way to identify the matching eigenvalue
        pred_Es[index] = evals[i]

# %%
# plot the results in the complex energy plane
import matplotlib.pyplot as plt

plt.scatter(train_data['re_E'], train_data['im_E'], label='training')
plt.scatter(target_data['re_E'], target_data['im_E'], label='target')
plt.scatter(pred_Es.real, pred_Es.imag, marker='x', label='predicted')
plt.xlabel('Re E')
plt.ylabel('Im E')
plt.legend()
plt.show()
# %%
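
# %%
# A possible direction for the TODO above (not the original approach): instead of
# matching each predicted eigenvalue to its target by nearest distance, follow each
# eigenvalue branch of H(c) = H0 + c * H1 by continuity as c is swept. This is only
# a sketch; the helper name `track_eigenvalues` and the greedy pairing are assumptions.
def track_eigenvalues(H0, H1, cs):
    """Return a (len(cs), N) tensor whose rows are the spectra of H(c), with columns
    forming continuous eigenvalue branches (cs is assumed to be sorted)."""
    branches = []
    prev = None
    with torch.no_grad():
        for c in cs:
            evals = torch.linalg.eigvals(H0 + c * H1)
            if prev is None:
                # arbitrary but fixed starting order
                evals = evals[torch.argsort(evals.real)]
            else:
                # greedily pair each previous branch with the closest unused eigenvalue
                matched = torch.empty_like(evals)
                used = torch.zeros(len(evals), dtype=torch.bool)
                for k, e_prev in enumerate(prev):
                    dist = torch.abs(evals - e_prev)
                    dist[used] = float('inf')
                    j = torch.argmin(dist)
                    matched[k] = evals[j]
                    used[j] = True
                evals = matched
            branches.append(evals)
            prev = evals
    return torch.stack(branches)

# Hypothetical usage: branches = track_eigenvalues(H0, H1, torch.sort(all_c).values)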