Adam optimizer
parent c1eb71c231
commit 56497d6fbf
@@ -45,7 +45,10 @@ c_steps = np.concatenate([np.linspace(start, stop, subdivisions, endpoint=False)
 c_steps = np.append(c_steps, train_cs[-1])
 c_steps = np.delete(c_steps, 0)  # remove the first point (c=0)
 
-lr = 0.01
+# Initialize the Adam optimizer
+optimizer = torch.optim.Adam([H0, H1])
+
+# Training loop
 epochs = 200000
 for epoch in range(epochs):
     ks = torch.empty(len(train_data), dtype=torch.complex128)
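Note on the new setup: no learning rate is passed to the constructor, so Adam falls back to its default lr = 1e-3 rather than the hand-tuned lr = 0.01 that this commit removes. Below is a minimal sketch of the initialization, with hypothetical stand-ins for H0 and H1 (the real matrices are defined earlier in the script); Adam only updates leaf tensors created with requires_grad=True:

import torch

# Hypothetical stand-ins for the script's parameters; the real H0 and H1
# are defined earlier with requires_grad=True so Adam can update them.
H0 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
H1 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)

# No lr argument: Adam uses its default lr=1e-3. To keep the old step
# size, pass it explicitly: torch.optim.Adam([H0, H1], lr=0.01)
optimizer = torch.optim.Adam([H0, H1])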
@@ -74,15 +77,12 @@ for epoch in range(epochs):
     if epoch % 1000 == 0:
         print(f"Training {(epoch+1)/epochs:.1%} \t Loss: {loss}")
 
-    if H0.grad is not None:
-        H0.grad.zero_()
-    if H1.grad is not None:
-        H1.grad.zero_()
+    # Zero gradients, backpropagate, and update parameters
+    optimizer.zero_grad()
     loss.backward()
+    optimizer.step()
 
-    with torch.no_grad():
-        H0 -= lr * H0.grad
-        H1 -= lr * H1.grad
+    # Enforce exceptional point constraints
     enforce_ep()
 
 # %%
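The rewritten loop follows the standard PyTorch pattern of zero_grad / backward / step, with the constraint projection applied after each update. A self-contained sketch of that structure, assuming a dummy loss and a hypothetical enforce_ep() (the real function is defined elsewhere in the script and presumably restores the exceptional-point structure of H0 and H1):

import torch

# Toy stand-ins: the real H0, H1, training data, and loss are defined
# elsewhere; this only mirrors the structure of the new loop.
H0 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
H1 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
optimizer = torch.optim.Adam([H0, H1])

def enforce_ep():
    # Hypothetical projection in place of the script's enforce_ep();
    # here it just symmetrizes H0 as an illustrative constraint.
    with torch.no_grad():
        H0.copy_((H0 + H0.T) / 2)

for epoch in range(100):
    # Dummy scalar loss standing in for the script's actual objective
    loss = (H0 @ H1).abs().sum()

    optimizer.zero_grad()   # clear stale gradients on H0 and H1
    loss.backward()         # accumulate fresh gradients
    optimizer.step()        # Adam update from its running moment estimates
    enforce_ep()            # project parameters back onto the constraint set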