% --- Ground-truth data generation and one Adam training step -----------
% NOTE(review): this chunk references names defined outside the visible
% region (mu1Actual, mu2Actual, batchSize, net, lossFcn, avgG, avgSqG,
% i, lr) and appears to execute inside a training loop (it uses the
% iteration counter i) — verify against the full file.

% Analytic target signal built from the true coefficients. Assumes
% mu1Actual, mu2Actual < 0 so that sqrt(-mu) is real — TODO confirm.
x = @(t) cos(sqrt(-mu1Actual)*t) + sin(sqrt(-mu2Actual)*t);
% Time horizon: one period of the FASTER component (max of -mu is the
% larger frequency, hence the shorter period). NOTE(review): if the
% intent was to cover a full period of both components this would need
% min(...) instead — confirm.
maxT = 2*pi/sqrt(max(-mu1Actual, -mu2Actual));
% Collocation times and target values as "CB" (channel x batch)
% formatted dlarrays, the layout dlnetwork/forward expects.
t = dlarray(linspace(0, maxT, batchSize), "CB");
xactual = dlarray(x(t), "CB");
% Learnables: the network plus the two scalar ODE coefficients,
% initialized away from the true values so training has work to do.
% NOTE(review): if these three assignments sit inside the epoch loop
% they reset the learnables every iteration — confirm they run once,
% before the loop, in the full file.
params.net = dlnetwork(net);
params.mu1 = dlarray(-0.5);
params.mu2 = dlarray(0.5);
% Evaluate loss and gradients under AD tracing (dlfeval is required for
% dlgradient inside lossFcn), then take one Adam step; i is the
% iteration count adamupdate uses for moment bias correction.
[loss, grad] = dlfeval(lossFcn, t, xactual, params);
[params, avgG, avgSqG] = adamupdate(params, grad, avgG, avgSqG, i, lr);
% Progress report: estimated vs. true coefficients.
fprintf("Epoch: %d, Predicted mu1: %.3f, Actual mu1: %.3f, Predicted mu2: %.3f, Actual mu2: %.3f\n", ...
i, extractdata(params.mu1), mu1Actual, extractdata(params.mu2), mu2Actual);
function [loss, grad] = modelLoss(t, x, params)
% modelLoss  Physics-informed loss for the ODE x'' = mu1*x + mu2*x.^2
% and its gradients w.r.t. the learnables (network weights, mu1, mu2).
%
% Inputs:
%   t      - collocation times as a traced dlarray ("CB") so dlgradient
%            can differentiate with respect to it
%   x      - target signal values at t (dlarray)
%   params - struct with fields net (dlnetwork), mu1, mu2 (dlarray scalars)
%
% Outputs:
%   loss - scalar: mean-square ODE residual plus l2 data-fit term
%   grad - struct (net/mu1/mu2) matching params, for adamupdate
%
% Must be invoked through dlfeval so dlgradient has a trace to consume.

% Network prediction of x at the collocation times.
xpred = forward(params.net, t);
% First time-derivative of the REAL part of the prediction.
% EnableHigherDerivatives keeps the trace alive so the second
% dlgradient call below can differentiate dxdt again.
% NOTE(review): the derivative is taken of real(xpred) but the residual
% below uses xpred itself — inconsistent if xpred can be complex;
% confirm the network output is real-valued.
dxdt = dlgradient(sum(real(xpred)), t, 'EnableHigherDerivatives', true);
d2xdt2 = dlgradient(sum(dxdt), t);
% ODE residual: x'' - (mu1*x + mu2*x^2) vanishes at the true
% parameters; penalize its mean square over the batch.
% NOTE(review): .^2 (not abs(.).^2) — only a valid penalty if the
% residual is real; confirm.
odeResidual = d2xdt2 - (params.mu1 * xpred + params.mu2 * xpred.^2);
odeLoss = mean(odeResidual.^2);
% Data-fit term on the real parts, then combine the two objectives
% with equal weight.
dataLoss = l2loss(real(x), real(xpred));
loss = odeLoss + dataLoss;
% Gradients of the combined loss w.r.t. all learnables, packed into a
% struct whose field names mirror params so adamupdate can consume it.
[grad.net, grad.mu1, grad.mu2] = dlgradient(loss, params.net.Learnables, params.mu1, params.mu2);