Reduce allocation for estimators based on augmented NonLinModel #44

Merged · 1 commit · Apr 2, 2024
39 changes: 16 additions & 23 deletions src/controller/execute.jl
@@ -103,24 +103,25 @@ julia> round.(getinfo(mpc)[:Ŷ], digits=3)
```
"""
function getinfo(mpc::PredictiveController{NT}) where NT<:Real
+ model = mpc.estim.model
info = Dict{Symbol, Union{JuMP._SolutionSummary, Vector{NT}, NT}}()
- Ŷ, u = similar(mpc.Ŷop), similar(mpc.estim.lastu0)
+ Ŷ, u, û = similar(mpc.Ŷop), similar(model.uop), similar(model.uop)
x̂, x̂next = similar(mpc.estim.x̂), similar(mpc.estim.x̂)
- Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, mpc, mpc.estim.model, mpc.ΔŨ)
+ Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, û, mpc, model, mpc.ΔŨ)
U = mpc.S̃*mpc.ΔŨ + mpc.T_lastu
Ȳ, Ū = similar(Ŷ), similar(U)
- J = obj_nonlinprog!(U, Ȳ, Ū, mpc, mpc.estim.model, Ŷ, mpc.ΔŨ)
- info[:ΔU] = mpc.ΔŨ[1:mpc.Hc*mpc.estim.model.nu]
+ J = obj_nonlinprog!(U, Ȳ, Ū, mpc, model, Ŷ, mpc.ΔŨ)
+ info[:ΔU] = mpc.ΔŨ[1:mpc.Hc*model.nu]
info[:ϵ] = isinf(mpc.C) ? NaN : mpc.ΔŨ[end]
info[:J] = J
info[:U] = U
- info[:u] = info[:U][1:mpc.estim.model.nu]
- info[:d] = mpc.d0 + mpc.estim.model.dop
+ info[:u] = info[:U][1:model.nu]
+ info[:d] = mpc.d0 + model.dop
info[:D̂] = mpc.D̂0 + mpc.Dop
info[:ŷ] = mpc.ŷ
info[:Ŷ] = Ŷ
info[:x̂end] = x̂end
- info[:Ŷs] = mpc.Ŷop - repeat(mpc.estim.model.yop, mpc.Hp) # Ŷop = Ŷs + Yop
+ info[:Ŷs] = mpc.Ŷop - repeat(model.yop, mpc.Hp) # Ŷop = Ŷs + Yop
info[:R̂y] = mpc.R̂y
info[:R̂u] = mpc.R̂u
info = addinfo!(info, mpc)
@@ -296,16 +297,14 @@ function linconstraint!(mpc::PredictiveController, ::SimModel)
end

@doc raw"""
- predict!(Ŷ, x̂, _ , _ , mpc::PredictiveController, model::LinModel, ΔŨ) -> Ŷ, x̂end
+ predict!(Ŷ, x̂, _ , _ , _ , mpc::PredictiveController, model::LinModel, ΔŨ) -> Ŷ, x̂end

Compute the predictions `Ŷ` and terminal states `x̂end` if model is a [`LinModel`](@ref).

The method mutates `Ŷ` and `x̂` vector arguments. The `x̂end` vector is used for
the terminal constraints applied on ``\mathbf{x̂}_{k-1}(k+H_p)``.
"""
- function predict!(
- Ŷ, x̂, _ , _ , mpc::PredictiveController, ::LinModel, ΔŨ::Vector{NT}
- ) where {NT<:Real}
+ function predict!(Ŷ, x̂, _ , _ , _ , mpc::PredictiveController, ::LinModel, ΔŨ)
# in-place operations to reduce allocations :
Ŷ .= mul!(Ŷ, mpc.Ẽ, ΔŨ) .+ mpc.F
x̂ .= mul!(x̂, mpc.con.ẽx̂, ΔŨ) .+ mpc.con.fx̂
@@ -314,15 +313,13 @@ function predict!(
end

@doc raw"""
- predict!(Ŷ, x̂, x̂next, u, mpc::PredictiveController, model::SimModel, ΔŨ) -> Ŷ, x̂end
+ predict!(Ŷ, x̂, x̂next, u, û, mpc::PredictiveController, model::SimModel, ΔŨ) -> Ŷ, x̂end

Compute both vectors if `model` is not a [`LinModel`](@ref).

- The method mutates `Ŷ`, `x̂`, `x̂next` and `u` arguments.
+ The method mutates `Ŷ`, `x̂`, `x̂next`, `u` and `û` arguments.
"""
- function predict!(
- Ŷ, x̂, x̂next, u, mpc::PredictiveController, model::SimModel, ΔŨ::Vector{NT}
- ) where {NT<:Real}
+ function predict!(Ŷ, x̂, x̂next, u, û, mpc::PredictiveController, model::SimModel, ΔŨ)
nu, ny, nd, Hp, Hc = model.nu, model.ny, model.nd, mpc.Hp, mpc.Hc
u0 = u
x̂ .= mpc.estim.x̂
@@ -332,7 +329,7 @@ function predict!(
if j ≤ Hc
u0 .+= @views ΔŨ[(1 + nu*(j-1)):(nu*j)]
end
- f̂!(x̂next, mpc.estim, model, x̂, u0, d0)
+ f̂!(x̂next, û, mpc.estim, model, x̂, u0, d0)
x̂ .= x̂next
d0 = @views mpc.D̂0[(1 + nd*(j-1)):(nd*j)]
ŷ = @views Ŷ[(1 + ny*(j-1)):(ny*j)]
@@ -352,9 +349,7 @@ The function is called by the nonlinear optimizer of [`NonLinMPC`](@ref) control
also be called on any [`PredictiveController`](@ref)s to evaluate the objective function `J`
at specific input increments `ΔŨ` and predictions `Ŷ` values. It mutates the `U` argument.
"""
- function obj_nonlinprog!(
- U , _ , _ , mpc::PredictiveController, model::LinModel, Ŷ, ΔŨ::Vector{NT}
- ) where {NT<:Real}
+ function obj_nonlinprog!(U, _ , _ , mpc::PredictiveController, model::LinModel, Ŷ, ΔŨ)
J = obj_quadprog(ΔŨ, mpc.H̃, mpc.q̃) + mpc.p[]
if !iszero(mpc.E)
U .= mul!(U, mpc.S̃, ΔŨ) .+ mpc.T_lastu
@@ -373,9 +368,7 @@ function `dot(x, A, x)` is a performant way of calculating `x'*A*x`. This method
`U`, `Ȳ` and `Ū` arguments (input over `Hp`, and output and input setpoint tracking error,
respectively).
"""
- function obj_nonlinprog!(
- U, Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷ, ΔŨ::Vector{NT}
- ) where {NT<:Real}
+ function obj_nonlinprog!(U, Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷ, ΔŨ)
# --- output setpoint tracking term ---
Ȳ .= mpc.R̂y .- Ŷ
JR̂y = dot(Ȳ, mpc.M_Hp, Ȳ)
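The controller-side changes above thread a preallocated `û` buffer through `predict!` (and reuse a local `model` alias in `getinfo`), so the nonlinear prediction loop no longer allocates a fresh input vector on every evaluation. A minimal, self-contained sketch of that buffer-reuse pattern; the toy model and names below are illustrative, not the package API:

```julia
# Hypothetical sketch of the allocation-free rollout used by predict!:
# every work vector is allocated once, then mutated in place inside the loop.
f!(xnext, x, u) = (xnext .= 0.9 .* x .+ 0.1 .* u)   # toy state update
h!(y, x)        = (y .= 2 .* x)                     # toy output map

function rollout!(Ŷ, x̂, x̂next, u, û, ΔU, Hp, nu, ny)
    for j = 1:Hp
        u .+= @views ΔU[(1 + nu*(j-1)):(nu*j)]      # accumulate input increments
        û .= u                                       # reuse the û buffer, no new vector
        f!(x̂next, x̂, û)
        x̂ .= x̂next
        @views h!(Ŷ[(1 + ny*(j-1)):(ny*j)], x̂)       # write predictions in place
    end
    return Ŷ, x̂
end

nu, ny, nx, Hp = 1, 1, 1, 5
Ŷ, x̂, x̂next = zeros(ny*Hp), zeros(nx), zeros(nx)
u, û, ΔU = zeros(nu), zeros(nu), fill(0.1, nu*Hp)
rollout!(Ŷ, x̂, x̂next, u, û, ΔU, Hp, nu, ny)          # all work happens in preallocated buffers
```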
4 changes: 1 addition & 3 deletions src/controller/explicitmpc.jl
@@ -197,9 +197,7 @@ The solution is ``\mathbf{ΔŨ = - H̃^{-1} q̃}``, see [`init_quadprog`](@ref)
optim_objective!(mpc::ExplicitMPC) = lmul!(-1, ldiv!(mpc.ΔŨ, mpc.H̃_chol, mpc.q̃))

"Compute the predictions but not the terminal states if `mpc` is an [`ExplicitMPC`](@ref)."
- function predict!(
- Ŷ, x̂, _ , _ , mpc::ExplicitMPC, ::LinModel, ΔŨ::Vector{NT}
- ) where {NT<:Real}
+ function predict!(Ŷ, x̂, _ , _ , _ , mpc::ExplicitMPC, ::LinModel, ΔŨ)
# in-place operations to reduce allocations :
Ŷ .= mul!(Ŷ, mpc.Ẽ, ΔŨ) .+ mpc.F
x̂ .= NaN
17 changes: 9 additions & 8 deletions src/controller/nonlinmpc.jl
@@ -310,6 +310,7 @@ function init_optimization!(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where
x̂_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc)
x̂next_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc)
u_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc)
+ û_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc)
Ȳ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŶ), Nc)
Ū_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nU), Nc)
function Jfunc(ΔŨtup::JNT...)
@@ -318,8 +319,8 @@ function init_optimization!(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where
ΔŨ = collect(ΔŨtup)
if ΔŨtup !== last_ΔŨtup_float
x̂, x̂next = get_tmp(x̂_cache, ΔŨ1), get_tmp(x̂next_cache, ΔŨ1)
- u = get_tmp(u_cache, ΔŨ1)
- Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, mpc, model, ΔŨ)
+ u, û = get_tmp(u_cache, ΔŨ1), get_tmp(û_cache, ΔŨ1)
+ Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, û, mpc, model, ΔŨ)
g = get_tmp(g_cache, ΔŨ1)
g = con_nonlinprog!(g, mpc, model, x̂end, Ŷ, ΔŨ)
last_ΔŨtup_float = ΔŨtup
@@ -333,8 +334,8 @@ function init_optimization!(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where
ΔŨ = collect(ΔŨtup)
if ΔŨtup !== last_ΔŨtup_dual
x̂, x̂next = get_tmp(x̂_cache, ΔŨ1), get_tmp(x̂next_cache, ΔŨ1)
- u = get_tmp(u_cache, ΔŨ1)
- Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, mpc, model, ΔŨ)
+ u, û = get_tmp(u_cache, ΔŨ1), get_tmp(û_cache, ΔŨ1)
+ Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, û, mpc, model, ΔŨ)
g = get_tmp(g_cache, ΔŨ1)
g = con_nonlinprog!(g, mpc, model, x̂end, Ŷ, ΔŨ)
last_ΔŨtup_dual = ΔŨtup
@@ -349,8 +350,8 @@ function init_optimization!(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where
Ŷ = get_tmp(Ŷ_cache, ΔŨ1)
ΔŨ = collect(ΔŨtup)
x̂, x̂next = get_tmp(x̂_cache, ΔŨ1), get_tmp(x̂next_cache, ΔŨ1)
- u = get_tmp(u_cache, ΔŨ1)
- Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, mpc, model, ΔŨ)
+ u, û = get_tmp(u_cache, ΔŨ1), get_tmp(û_cache, ΔŨ1)
+ Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, û, mpc, model, ΔŨ)
g = con_nonlinprog!(g, mpc, model, x̂end, Ŷ, ΔŨ)
last_ΔŨtup_float = ΔŨtup
end
@@ -363,8 +364,8 @@ function init_optimization!(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where
Ŷ = get_tmp(Ŷ_cache, ΔŨ1)
ΔŨ = collect(ΔŨtup)
x̂, x̂next = get_tmp(x̂_cache, ΔŨ1), get_tmp(x̂next_cache, ΔŨ1)
- u = get_tmp(u_cache, ΔŨ1)
- Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, mpc, model, ΔŨ)
+ u, û = get_tmp(u_cache, ΔŨ1), get_tmp(û_cache, ΔŨ1)
+ Ŷ, x̂end = predict!(Ŷ, x̂, x̂next, u, û, mpc, model, ΔŨ)
g = con_nonlinprog!(g, mpc, model, x̂end, Ŷ, ΔŨ)
last_ΔŨtup_dual = ΔŨtup
end
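The new `û_cache` follows the same `PreallocationTools.DiffCache` pattern as the other work buffers: a single cache serves both the plain `Float64` evaluations and the `ForwardDiff.Dual` evaluations made by the optimizer's automatic differentiation. A small sketch of the idea, assuming `PreallocationTools` and `ForwardDiff` are installed; the function below is illustrative only:

```julia
using PreallocationTools, ForwardDiff

n   = 3
buf = DiffCache(zeros(n))            # one cache, reusable with Float64 or Dual numbers

function sumsq(x)
    tmp = get_tmp(buf, x)            # buffer whose element type matches eltype(x)
    tmp .= x .^ 2                    # work happens in the preallocated storage
    return sum(tmp)
end

x = [1.0, 2.0, 3.0]
sumsq(x)                             # plain evaluation
ForwardDiff.gradient(sumsq, x)       # dual-number evaluation reuses the same cache
```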
26 changes: 17 additions & 9 deletions src/estimator/execute.jl
@@ -15,7 +15,7 @@ function remove_op!(estim::StateEstimator, u, ym, d)
end

@doc raw"""
- f̂!(x̂next, estim::StateEstimator, model::SimModel, x̂, u, d) -> nothing
+ f̂!(x̂next, û, estim::StateEstimator, model::SimModel, x̂, u, d) -> nothing

Mutating state function ``\mathbf{f̂}`` of the augmented model.

@@ -27,22 +27,26 @@ function returns the next state of the augmented model, defined as:
\mathbf{ŷ}(k) &= \mathbf{ĥ}\Big(\mathbf{x̂}(k), \mathbf{d}(k)\Big)
\end{aligned}
```
- where ``\mathbf{x̂}(k+1)`` is stored in `x̂next` argument.
+ where ``\mathbf{x̂}(k+1)`` is stored in `x̂next` argument. The method mutates `x̂next` and `û`
+ in place; the latter stores the input vector of the augmented model ``\mathbf{u + ŷ_{s_u}}``.
"""
- function f̂!(x̂next, estim::StateEstimator, model::SimModel, x̂, u, d)
+ function f̂!(x̂next, û, estim::StateEstimator, model::SimModel, x̂, u, d)
# `@views` macro avoid copies with matrix slice operator e.g. [a:b]
@views x̂d, x̂s = x̂[1:model.nx], x̂[model.nx+1:end]
@views x̂d_next, x̂s_next = x̂next[1:model.nx], x̂next[model.nx+1:end]
- T = promote_type(eltype(x̂), eltype(u))
- û = Vector{T}(undef, model.nu) # TODO: avoid this allocation if possible
- û .= u .+ mul!(û, estim.Cs_u, x̂s)
+ mul!(û, estim.Cs_u, x̂s)
+ û .+= u
f!(x̂d_next, model, x̂d, û, d)
mul!(x̂s_next, estim.As, x̂s)
return nothing
end

- "Use the augmented model matrices if `model` is a [`LinModel`](@ref)."
- function f̂!(x̂next, estim::StateEstimator, ::LinModel, x̂, u, d)
+ """
+ f̂!(x̂next, _ , estim::StateEstimator, model::LinModel, x̂, u, d) -> nothing
+
+ Use the augmented model matrices if `model` is a [`LinModel`](@ref).
+ """
+ function f̂!(x̂next, _ , estim::StateEstimator, ::LinModel, x̂, u, d)
mul!(x̂next, estim.Â, x̂)
mul!(x̂next, estim.B̂u, u, 1, 1)
mul!(x̂next, estim.B̂d, d, 1, 1)
@@ -61,7 +65,11 @@ function ĥ!(ŷ, estim::StateEstimator, model::SimModel, x̂, d)
mul!(ŷ, estim.Cs_y, x̂s, 1, 1)
return nothing
end
- "Use the augmented model matrices if `model` is a [`LinModel`](@ref)."
+ """
+ ĥ!(ŷ, estim::StateEstimator, model::LinModel, x̂, d) -> nothing
+
+ Use the augmented model matrices if `model` is a [`LinModel`](@ref).
+ """
function ĥ!(ŷ, estim::StateEstimator, ::LinModel, x̂, d)
mul!(ŷ, estim.Ĉ, x̂)
mul!(ŷ, estim.D̂d, d, 1, 1)
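The estimator-side `f̂!` now receives `û` as an argument and fills it in place instead of allocating a fresh `Vector{T}(undef, model.nu)` on every call; `û` holds the augmented-model input `u + Cs_u*x̂s`. A short sketch of the in-place computation, with made-up dimensions and data:

```julia
using LinearAlgebra

nu, nxs = 2, 3
Cs_u = randn(nu, nxs)                # illustrative sizes and values
x̂s, u = randn(nxs), randn(nu)

û = zeros(nu)                        # allocated once, e.g. at construction time
mul!(û, Cs_u, x̂s)                    # û = Cs_u*x̂s, written in place
û .+= u                              # û = u + Cs_u*x̂s, still no temporary vector
```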
6 changes: 3 additions & 3 deletions src/estimator/internal_model.jl
@@ -144,16 +144,16 @@ function matrices_internalmodel(model::SimModel{NT}) where NT<:Real
end

@doc raw"""
- f̂!(x̂next, ::InternalModel, model::NonLinModel, x̂, u, d)
+ f̂!(x̂next, _ , estim::InternalModel, model::NonLinModel, x̂, u, d)

State function ``\mathbf{f̂}`` of [`InternalModel`](@ref) for [`NonLinModel`](@ref).

It calls `model.f!(x̂next, x̂, u, d)` since this estimator does not augment the states.
"""
- f̂!(x̂next, ::InternalModel, model::NonLinModel, x̂, u, d) = model.f!(x̂next, x̂, u, d)
+ f̂!(x̂next, _ , ::InternalModel, model::NonLinModel, x̂, u, d) = model.f!(x̂next, x̂, u, d)

@doc raw"""
- ĥ!(ŷ, ::InternalModel, model::NonLinModel, x̂, d)
+ ĥ!(ŷ, estim::InternalModel, model::NonLinModel, x̂, d)

Output function ``\mathbf{ĥ}`` of [`InternalModel`](@ref), it calls `model.h!`.
"""
21 changes: 14 additions & 7 deletions src/estimator/kalman.jl
@@ -570,6 +570,7 @@ function update_estimate!(estim::UnscentedKalmanFilter{NT}, u, ym, d) where NT<:
γ, m̂, Ŝ = estim.γ, estim.m̂, estim.Ŝ
# --- initialize matrices ---
X̂, X̂_next = Matrix{NT}(undef, nx̂, nσ), Matrix{NT}(undef, nx̂, nσ)
+ û = Vector{NT}(undef, estim.model.nu)
ŷm = Vector{NT}(undef, nym)
ŷ = Vector{NT}(undef, estim.model.ny)
Ŷm = Matrix{NT}(undef, nym, nσ)
@@ -604,7 +605,7 @@ function update_estimate!(estim::UnscentedKalmanFilter{NT}, u, ym, d) where NT<:
X̂_cor[:, nx̂+2:end] .-= γ_sqrt_P̂_cor
X̂_next = similar(X̂_cor)
for j in axes(X̂_next, 2)
- @views f̂!(X̂_next[:, j], estim, estim.model, X̂_cor[:, j], u, d)
+ @views f̂!(X̂_next[:, j], û, estim, estim.model, X̂_cor[:, j], u, d)
end
x̂_next = mul!(x̂, X̂_next, m̂)
X̄_next = X̂_next
@@ -765,10 +766,15 @@ function update_estimate!(
estim::ExtendedKalmanFilter{NT}, u, ym, d=empty(estim.x̂)
) where NT<:Real
model = estim.model
- x̂next, ŷ = Vector{NT}(undef, estim.nx̂), Vector{NT}(undef, model.ny)
- F̂ = ForwardDiff.jacobian((x̂next, x̂) -> f̂!(x̂next, estim, model, x̂, u, d), x̂next, estim.x̂)
- Ĥ = ForwardDiff.jacobian((ŷ, x̂) -> ĥ!(ŷ, estim, model, x̂, d), ŷ, estim.x̂)
- return update_estimate_kf!(estim, u, ym, d, F̂, Ĥ[estim.i_ym, :], estim.P̂, estim.x̂)
+ nx̂, nu, ny = estim.nx̂, model.nu, model.ny
+ x̂, P̂ = estim.x̂, estim.P̂
+ # concatenate x̂next and û vectors to allow a û vector with dual numbers for auto diff:
+ x̂nextû, ŷ = Vector{NT}(undef, nx̂ + nu), Vector{NT}(undef, ny)
+ f̂AD! = (x̂nextû, x̂) -> @views f̂!(x̂nextû[1:nx̂], x̂nextû[nx̂+1:end], estim, model, x̂, u, d)
+ ĥAD! = (ŷ, x̂) -> ĥ!(ŷ, estim, model, x̂, d)
+ F̂ = ForwardDiff.jacobian(f̂AD!, x̂nextû, x̂)[1:nx̂, :]
+ Ĥm = ForwardDiff.jacobian(ĥAD!, ŷ, x̂)[estim.i_ym, :]
+ return update_estimate_kf!(estim, u, ym, d, F̂, Ĥm, P̂, x̂)
end

"Set `estim.P̂` to `estim.P̂0` for the time-varying Kalman Filters."
@@ -810,15 +816,16 @@ allocations. See e.g. [`KalmanFilter`](@ref) docstring for the equations.
"""
function update_estimate_kf!(estim::StateEstimator{NT}, u, ym, d, Â, Ĉm, P̂, x̂) where NT<:Real
Q̂, R̂, M̂, K̂ = estim.Q̂, estim.R̂, estim.M̂, estim.K̂
+ nx̂, nu, ny = estim.nx̂, estim.model.nu, estim.model.ny
+ x̂next, û, ŷ = Vector{NT}(undef, nx̂), Vector{NT}(undef, nu), Vector{NT}(undef, ny)
mul!(M̂, P̂, Ĉm')
rdiv!(M̂, cholesky!(Hermitian(Ĉm * P̂ * Ĉm' .+ R̂)))
mul!(K̂, Â, M̂)
- x̂next, ŷ = Vector{NT}(undef, estim.nx̂), Vector{NT}(undef, estim.model.ny)
ĥ!(ŷ, estim, estim.model, x̂, d)
ŷm = @views ŷ[estim.i_ym]
v̂ = ŷm
v̂ .= ym .- ŷm
- f̂!(x̂next, estim, estim.model, x̂, u, d)
+ f̂!(x̂next, û, estim, estim.model, x̂, u, d)
mul!(x̂next, K̂, v̂, 1, 1)
estim.x̂ .= x̂next
P̂.data .= Â * (P̂ .- M̂ * Ĉm * P̂) * Â' .+ Q̂ # .data is necessary for Hermitians
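Because `f̂!` now also writes into a `û` buffer, differentiating it with `ForwardDiff` requires that buffer to accept dual numbers too. The extended Kalman filter update handles this by stacking `x̂next` and `û` into one output vector, letting `ForwardDiff.jacobian` allocate a single dual-valued buffer for both, then discarding the Jacobian rows that belong to `û`. A toy illustration of the stacking trick (the model below is made up; only the pattern mirrors the package code):

```julia
using ForwardDiff

nx, nu = 2, 1
A, B = [0.9 0.1; 0.0 0.8], reshape([0.5, 1.0], nx, nu)

# Mutating model: writes the next state AND an intermediate input buffer û.
function f̂!(x̂next, û, x̂, u)
    û .= u                           # in the package, û = u + Cs_u*x̂s
    x̂next .= A*x̂ + B*û
    return nothing
end

x̂, u = [1.0, -1.0], [0.3]
out  = zeros(nx + nu)                # stacked output: [x̂next; û]
f̂AD! = (out, x̂) -> @views f̂!(out[1:nx], out[nx+1:end], x̂, u)
F̂    = ForwardDiff.jacobian(f̂AD!, out, x̂)[1:nx, :]   # keep only the x̂next rows
```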
21 changes: 11 additions & 10 deletions src/estimator/mhe/construct.jl
@@ -1003,15 +1003,16 @@ function init_optimization!(
end
He = estim.He
nV̂, nX̂, ng = He*estim.nym, He*estim.nx̂, length(con.i_g)
- nx̂, nŷ = estim.nx̂, model.ny
+ nx̂, nŷ, nu = estim.nx̂, model.ny, model.nu
# see init_optimization!(mpc::NonLinMPC, optim) for details on the inspiration
- Jfunc, gfunc = let estim=estim, model=model, nZ̃=nZ̃ , nV̂=nV̂, nX̂=nX̂, ng=ng, nx̂=nx̂, nŷ=nŷ
+ Jfunc, gfunc = let estim=estim, model=model, nZ̃=nZ̃, nV̂=nV̂, nX̂=nX̂, ng=ng, nx̂=nx̂, nu=nu, nŷ=nŷ
Nc = nZ̃ + 3
last_Z̃tup_float, last_Z̃tup_dual = nothing, nothing
V̂_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nV̂), Nc)
g_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, ng), Nc)
X̂_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nX̂), Nc)
x̄_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc)
+ û_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc)
ŷ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŷ), Nc)
function Jfunc(Z̃tup::JNT...)
Z̃1 = Z̃tup[begin]
@@ -1020,8 +1021,8 @@ function init_optimization!(
if Z̃tup !== last_Z̃tup_float
g = get_tmp(g_cache, Z̃1)
X̂ = get_tmp(X̂_cache, Z̃1)
- ŷ = get_tmp(ŷ_cache, Z̃1)
- V̂, X̂ = predict!(V̂, X̂, ŷ, estim, model, Z̃)
+ û, ŷ = get_tmp(û_cache, Z̃1), get_tmp(ŷ_cache, Z̃1)
+ V̂, X̂ = predict!(V̂, X̂, û, ŷ, estim, model, Z̃)
g = con_nonlinprog!(g, estim, model, X̂, V̂, Z̃)
last_Z̃tup_float = Z̃tup
end
@@ -1035,8 +1036,8 @@ function init_optimization!(
if Z̃tup !== last_Z̃tup_dual
g = get_tmp(g_cache, Z̃1)
X̂ = get_tmp(X̂_cache, Z̃1)
- ŷ = get_tmp(ŷ_cache, Z̃1)
- V̂, X̂ = predict!(V̂, X̂, ŷ, estim, model, Z̃)
+ û, ŷ = get_tmp(û_cache, Z̃1), get_tmp(ŷ_cache, Z̃1)
+ V̂, X̂ = predict!(V̂, X̂, û, ŷ, estim, model, Z̃)
g = con_nonlinprog!(g, estim, model, X̂, V̂, Z̃)
last_Z̃tup_dual = Z̃tup
end
@@ -1050,8 +1051,8 @@ function init_optimization!(
Z̃ = collect(Z̃tup)
V̂ = get_tmp(V̂_cache, Z̃1)
X̂ = get_tmp(X̂_cache, Z̃1)
- ŷ = get_tmp(ŷ_cache, Z̃1)
- V̂, X̂ = predict!(V̂, X̂, ŷ, estim, model, Z̃)
+ û, ŷ = get_tmp(û_cache, Z̃1), get_tmp(ŷ_cache, Z̃1)
+ V̂, X̂ = predict!(V̂, X̂, û, ŷ, estim, model, Z̃)
g = con_nonlinprog!(g, estim, model, X̂, V̂, Z̃)
last_Z̃tup_float = Z̃tup
end
@@ -1064,8 +1065,8 @@ function init_optimization!(
Z̃ = collect(Z̃tup)
V̂ = get_tmp(V̂_cache, Z̃1)
X̂ = get_tmp(X̂_cache, Z̃1)
- ŷ = get_tmp(ŷ_cache, Z̃1)
- V̂, X̂ = predict!(V̂, X̂, ŷ, estim, model, Z̃)
+ û, ŷ = get_tmp(û_cache, Z̃1), get_tmp(ŷ_cache, Z̃1)
+ V̂, X̂ = predict!(V̂, X̂, û, ŷ, estim, model, Z̃)
g = con_nonlinprog!(g, estim, model, X̂, V̂, Z̃)
last_Z̃tup_dual = Z̃tup
end
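As in the `NonLinMPC` closures, the MHE objective `Jfunc` and constraint functions `gfunc` share their work buffers and recompute the prediction only when the optimizer queries a new decision vector, via the `last_Z̃tup_float` and `last_Z̃tup_dual` sentinels. A simplified sketch of that memoization idea (the names and the toy prediction are illustrative):

```julia
# Recompute the expensive prediction only when the query point changes.
expensive_predict(z) = z .^ 2         # stand-in for predict! and the constraint values

last_z = nothing
cache  = Float64[]

function predict_memo!(z)
    global last_z, cache
    if z !== last_z                   # identity check, like `Z̃tup !== last_Z̃tup_float`
        cache  = expensive_predict(z) # heavy work done once per new query point
        last_z = z
    end
    return cache
end

J(z) = sum(predict_memo!(z))          # objective reuses the cached prediction
g(z) = predict_memo!(z)[1] - 1.0      # constraint reuses the same prediction

z = [1.0, 2.0]
J(z)                                  # computes and caches
g(z)                                  # hits the cache, no recomputation
```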