@@ -608,6 +608,7 @@ function get_optim_functions(
     ng, nc, neq = length(mpc.con.i_g), mpc.con.nc, mpc.con.neq
     nZ̃, nU, nŶ, nX̂ = length(mpc.Z̃), Hp*nu, Hp*ny, Hp*nx̂
     nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny
+    strict = Val(true)
     myNaN = convert(JNT, NaN) # NaN to force update_simulations! at first call:
     Z̃::Vector{JNT} = fill(myNaN, nZ̃)
     ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ)
@@ -636,7 +637,7 @@ function get_optim_functions(
         Cache(Û0), Cache(X̂0),
         Cache(gc), Cache(g), Cache(geq),
     )
-    ∇J_prep = prepare_gradient(Jfunc!, grad_backend, Z̃_∇J, ∇J_context...)
+    ∇J_prep = prepare_gradient(Jfunc!, grad_backend, Z̃_∇J, ∇J_context...; strict)
     ∇J = Vector{JNT}(undef, nZ̃)
     ∇Jfunc! = if nZ̃ == 1
         function (Z̃arg)
@@ -675,7 +676,7 @@ function get_optim_functions(
     )
    # temporarily enable all the inequality constraints for sparsity detection:
    mpc.con.i_g[1:end-nc] .= true
-    ∇g_prep = prepare_jacobian(gfunc!, g, jac_backend, Z̃_∇g, ∇g_context...)
+    ∇g_prep = prepare_jacobian(gfunc!, g, jac_backend, Z̃_∇g, ∇g_context...; strict)
    mpc.con.i_g[1:end-nc] .= false
    ∇g = init_diffmat(JNT, jac_backend, ∇g_prep, nZ̃, ng)
    ∇gfuncs! = Vector{Function}(undef, ng)
@@ -721,7 +722,7 @@ function get_optim_functions(
         Cache(Û0), Cache(X̂0),
         Cache(gc), Cache(g)
    )
-    ∇geq_prep = prepare_jacobian(geqfunc!, geq, jac_backend, Z̃_∇geq, ∇geq_context...)
+    ∇geq_prep = prepare_jacobian(geqfunc!, geq, jac_backend, Z̃_∇geq, ∇geq_context...; strict)
    ∇geq = init_diffmat(JNT, jac_backend, ∇geq_prep, nZ̃, neq)
    ∇geqfuncs! = Vector{Function}(undef, neq)
    for i in eachindex(∇geqfuncs!)
0 commit comments