Implemented PMM with PyTorch
This commit is contained in:
parent
b01b6e5d6c
commit
b68887e822
|
|
@ -1,5 +1,7 @@
|
||||||
[deps]
|
[deps]
|
||||||
Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97"
|
Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97"
|
||||||
|
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
|
||||||
|
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
|
||||||
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
|
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
|
||||||
FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838"
|
FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838"
|
||||||
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
|
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
|
||||||
|
|
|
||||||
|
|
@ -69,3 +69,9 @@ end
|
||||||
scatter(real.(data_E), imag.(data_E), label="training", title="PMM", xlabel="Re E", ylabel="Im E")
|
scatter(real.(data_E), imag.(data_E), label="training", title="PMM", xlabel="Re E", ylabel="Im E")
|
||||||
scatter!(real.(exact_E), imag.(exact_E), label="exact", m=:+)
|
scatter!(real.(exact_E), imag.(exact_E), label="exact", m=:+)
|
||||||
scatter!(real.(extrapolated_E), imag.(extrapolated_E), label="predicted", m=:x)
|
scatter!(real.(extrapolated_E), imag.(extrapolated_E), label="predicted", m=:x)
|
||||||
|
|
||||||
|
# export results
|
||||||
|
using DataFrames, CSV
|
||||||
|
df = DataFrame(c=all_c, re_E=real.(exact_E), im_E=imag.(exact_E))
|
||||||
|
#pushfirst!(df, (c0, 0, 0)) # add the threshold
|
||||||
|
CSV.write("temp/PMM.csv", df)
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,72 @@
|
||||||
|
#%%
import pandas as pd
import torch

#%%
# Load the PMM eigenvalue data exported by the Julia script (columns: c, re_E, im_E).
df = pd.read_csv('../temp/PMM.csv')
df['E'] = df['re_E'] + 1j * df['im_E']

# Rows with (numerically) real eigenvalues are the training set; the rest are
# the extrapolation targets. Splitting via a mask and its complement guarantees
# every row lands in exactly one set — the original `< 1e-5` / `> 1e-5` pair
# silently dropped any row with |Im E| exactly equal to the tolerance.
_real_mask = df['im_E'].abs() < 1e-5
train_data = df[_real_mask]
target_data = df[~_real_mask]

# `.to_numpy()` matches the `.values` usage later in the file and avoids
# handing a pandas Series straight to torch.tensor.
train_cs = torch.tensor(train_data['c'].to_numpy(), dtype=torch.float64)
train_Es = torch.tensor(train_data['E'].to_numpy(), dtype=torch.complex128)
|
||||||
|
|
||||||
|
#%%
# hyperparameters
N = 9  # dimension of the parametric matrix model


# initialize random Hamiltonians for H(c) = H0 + c * H1
# NOTE(review): A + A.T is complex *symmetric*, not Hermitian — presumably
# intentional so the model can produce complex eigenvalues; confirm.
def _random_symmetric(n):
    # Draws exactly one randn(n, n) per call so the RNG stream matches the
    # original two direct calls.
    A = torch.randn(n, n, dtype=torch.complex128)
    return (A + A.T).requires_grad_()


H0 = _random_symmetric(N)
H1 = _random_symmetric(N)
|
||||||
|
|
||||||
|
#%%
# training: plain gradient descent fitting H0, H1 so that the eigenvalue of
# H(c) = H0 + c * H1 nearest each training energy reproduces that energy.

lr = 0.05        # gradient-descent step size
epochs = 100000  # NOTE(review): 100k eigendecomposition sweeps — slow; confirm needed

for epoch in range(epochs):
    Es = torch.empty(len(train_data), dtype=torch.complex128)
    for (index, (c, E)) in enumerate(zip(train_cs, train_Es)):
        H = H0 + c * H1
        evals = torch.linalg.eigvals(H)
        # match by nearest eigenvalue in the complex plane
        i = torch.argmin(torch.abs(evals - E))  # TODO: more robust way to identify the eigenvector
        Es[index] = evals[i]

    # squared Euclidean distance in the complex plane, summed over samples
    loss = ((Es - train_Es).abs() ** 2).sum()

    if epoch % 1000 == 0:
        # .item() logs the scalar value; formatting the tensor directly would
        # print the full `tensor(..., grad_fn=...)` repr
        print(f"Training {(epoch+1)/epochs:.1%} \t Loss: {loss.item()}")

    # clear accumulated gradients before the fresh backward pass
    # (grads are None until the first backward, hence the guards)
    if H0.grad is not None:
        H0.grad.zero_()
    if H1.grad is not None:
        H1.grad.zero_()
    loss.backward()

    # manual SGD update; no_grad so the in-place step is not recorded
    with torch.no_grad():
        H0 -= lr * H0.grad
        H1 -= lr * H1.grad
|
||||||
|
|
||||||
|
# %%
# evaluate for all points
all_c = torch.tensor(df['c'].values, dtype=torch.float64)
exact_E = torch.tensor(df['E'].values, dtype=torch.complex128)
pred_Es = torch.empty(len(df), dtype=torch.complex128)

with torch.no_grad():
    for idx, (c_val, E_ref) in enumerate(zip(all_c, exact_E)):
        spectrum = torch.linalg.eigvals(H0 + c_val * H1)
        # pick the eigenvalue closest to the reference energy
        # TODO: more robust way to identify the eigenvector
        nearest = torch.argmin(torch.abs(spectrum - E_ref))
        pred_Es[idx] = spectrum[nearest]
|
||||||
|
|
||||||
|
# %%
# plot the results
import matplotlib.pyplot as plt

# same three scatter calls as before, in the same order (layering/colors match)
for xs, ys, opts in (
    (train_data['re_E'], train_data['im_E'], {'label': 'training'}),
    (target_data['re_E'], target_data['im_E'], {'label': 'target'}),
    (pred_Es.real, pred_Es.imag, {'marker': 'x', 'label': 'predicted'}),
):
    plt.scatter(xs, ys, **opts)
plt.legend()

# %%
|
||||||
Loading…
Reference in New Issue