Adam optimizer
parent c1eb71c231
commit 56497d6fbf
@@ -45,7 +45,10 @@ c_steps = np.concatenate([np.linspace(start, stop, subdivisions, endpoint=False)
c_steps = np.append(c_steps, train_cs[-1])
c_steps = np.delete(c_steps, 0) # remove the first point (c=0)

lr = 0.01
# Initialize the Adam optimizer
optimizer = torch.optim.Adam([H0, H1])

# Training loop
epochs = 200000
for epoch in range(epochs):
    ks = torch.empty(len(train_data), dtype=torch.complex128)
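Note: the hunk above constructs Adam without an explicit learning rate, so PyTorch's default of 1e-3 applies even though lr = 0.01 is still defined a few lines earlier. A minimal sketch of the setup, assuming hypothetical 2x2 complex Hamiltonian blocks and passing the learning rate explicitly (shapes and initialization are assumptions, not from the repo):

import torch

# Hypothetical trainable Hamiltonian blocks; only the complex dtype and
# requires_grad follow the diff above, the shapes and init are assumed.
H0 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
H1 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)

lr = 0.01
optimizer = torch.optim.Adam([H0, H1], lr=lr)  # omitting lr= falls back to the 1e-3 default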
@@ -74,15 +77,12 @@ for epoch in range(epochs):
    if epoch % 1000 == 0:
        print(f"Training {(epoch+1)/epochs:.1%} \t Loss: {loss}")

    if H0.grad is not None:
        H0.grad.zero_()
    if H1.grad is not None:
        H1.grad.zero_()
    # Zero gradients, backpropagate, and update parameters
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    with torch.no_grad():
        H0 -= lr * H0.grad
        H1 -= lr * H1.grad
        # Enforce exceptional point constraints
        enforce_ep()

# %%
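A minimal sketch of the per-epoch update that the second hunk converges on, with a toy loss and a placeholder enforce_ep standing in for the repo's own definitions (both are assumptions for illustration only):

import torch

H0 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
H1 = torch.randn(2, 2, dtype=torch.complex128, requires_grad=True)
optimizer = torch.optim.Adam([H0, H1])

def enforce_ep():
    # placeholder: the repo projects H0, H1 back onto the exceptional-point constraints here
    pass

for epoch in range(10):  # the diff runs epochs = 200000
    loss = (H0 + H1).abs().pow(2).sum()  # toy real-valued loss, not the repo's
    optimizer.zero_grad()  # replaces the manual H0.grad.zero_() / H1.grad.zero_() guards
    loss.backward()
    optimizer.step()       # replaces the manual H0 -= lr * H0.grad and H1 -= lr * H1.grad updates
    with torch.no_grad():
        enforce_ep()       # constraint projection stays outside autograd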