Change M(c) to mimic ACCC
parent b30ee6e1fb
commit 31fa9542b2
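Reading of this change (not part of the commit itself): ACCC presumably stands for analytic continuation in the coupling constant, where a momentum eigenvalue emerges from threshold like k ∝ sqrt(c - c0). The new get_H builds M(c) = H0 + sqrt(c)·H1, and enforce_ep now only zeroes the first row and column of H0, so M(0) has an exact eigenvalue at k = 0; for small c that eigenvalue should open up like sqrt(c). A minimal NumPy sketch of that behaviour (seed and magnitudes are illustrative, not taken from the repo):

# Hypothetical check, not part of the commit: verify the sqrt(c) threshold behaviour
import numpy as np

rng = np.random.default_rng(0)
N = 5
H0 = rng.standard_normal((N, N))
H0 = H0 + H0.T        # symmetric, as in the training script
H0[0, :] = 0          # same constraint enforce_ep() now applies
H0[:, 0] = 0
H1 = rng.standard_normal((N, N))
H1 = H1 + H1.T

for c in [1e-2, 1e-4, 1e-6]:
    M = H0 + np.sqrt(complex(c)) * H1        # M(c) as built by get_H
    k = min(np.linalg.eigvals(M), key=abs)   # eigenvalue nearest the threshold
    print(c, k, k / np.sqrt(c))              # ratio tends to H1[0, 0] as c -> 0

To first order in sqrt(c), the eigenvalue attached to the zeroed row/column is sqrt(c)·H1[0, 0], so the printed ratio settles near H1[0, 0] as c shrinks, i.e. the square-root branch point the ACCC picture relies on.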
@@ -24,16 +24,19 @@ train_ks = torch.tensor(train_data['k'].to_numpy(), dtype=torch.complex128)
 N = 5
 
 # initialize random Hamiltonians
-H0 = torch.randn(N, N, dtype=torch.complex128)
-H0 = (H0 + torch.transpose(H0, 0, 1)).requires_grad_() # symmetric
-H1 = torch.randn(N, N, dtype=torch.complex128)
-H1 = (H1 + torch.transpose(H1, 0, 1)).requires_grad_() # symmetric
+H0 = 0.1 * torch.randn(N, N, dtype=torch.complex128)
+H0 = (H0 + H0.T).requires_grad_() # symmetric
+H1 = 0.1 * torch.randn(N, N, dtype=torch.complex128)
+H1 = (H1 + H1.T).requires_grad_() # symmetric
 
-def enforce_ep(): # enforce exceptional point at c=0
+def get_H(c):
+    H = H0 + np.sqrt(complex(c)) * H1
+    return H
+
+def enforce_ep(): # enforce threshold point at c=0
     with torch.no_grad():
-        H0[0:2, :] = 0
-        H0[:, 0:2] = 0
-        H0[0, 1] = 1
+        H0[0, :] = 0
+        H0[:, 0] = 0
 
 enforce_ep()
 
@@ -50,31 +53,20 @@ c_steps = np.delete(c_steps, 0) # remove the first point (c=0)
 optimizer = torch.optim.Adam([H0, H1])
 
 # Training loop
-epochs = 200000
+epochs = 20000
 for epoch in range(epochs):
     ks = torch.empty(len(train_data), dtype=torch.complex128)
-    kvs = torch.empty(len(train_data), dtype=torch.complex128)
     current_k = 0.0 # start at the threshold
-    current_kv = 0.0 # start at the threshold
     for c in c_steps:
-        H = H0 + c * H1
+        H = get_H(c)
         evals = torch.linalg.eigvals(H)
         current_k = evals[torch.argmin(torch.abs(evals - current_k))]
-        evals = evals[evals != current_k] # remove selected k
-        current_kv = evals[torch.argmin(torch.abs(evals - current_kv))]
         if np.any(c == train_cs):
             index = np.where(c == train_cs)[0][0]
             ks[index] = current_k
-            kvs[index] = current_kv
 
     loss = ((ks - train_ks).abs() ** 2).sum()
 
-    # push virtual states towards (-)bound and then to the imaginary axis
-    if epoch/epochs < 0.25:
-        loss += ((kvs + train_ks).abs() ** 2).sum()
-    else:
-        loss += (kvs.imag ** 2).sum()
-
     if epoch % 1000 == 0:
         print(f"Training {(epoch+1)/epochs:.1%} \t Loss: {loss}")
 
@@ -82,8 +74,6 @@ for epoch in range(epochs):
     optimizer.zero_grad()
    loss.backward()
    optimizer.step()
 
-    # Enforce exceptional point constraints
     enforce_ep()
-
 # %%
@@ -93,7 +83,7 @@ exact_k = torch.tensor(df['k'].values, dtype=torch.complex128)
 pred_ks = np.empty(len(df), dtype=np.complex128)
 with torch.no_grad():
     for (index, (c, k)) in enumerate(zip(all_c, exact_k)):
-        H = H0 + c * H1
+        H = get_H(c)
         evals = torch.linalg.eigvals(H)
         i = torch.argmin(torch.abs(evals - k)) # TODO: more robust way to identify the eigenvector
         pred_ks[index] = evals[i]
@@ -144,7 +134,7 @@ ax.set_title('c = ?')
 
 # Animation function
 def update(c):
-    H = H0 + c * H1
+    H = get_H(c)
     evals = torch.linalg.eigvals(H).detach().numpy()
     sc.set_offsets(np.c_[np.real(evals), np.imag(evals)])
     ax.set_title(f'c = {c:.2f}')