diff --git a/Project.toml b/Project.toml index 141b0d3b8..5f8ac20c0 100644 --- a/Project.toml +++ b/Project.toml @@ -1,11 +1,12 @@ name = "ModelPredictiveControl" uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c" -version = "1.16.3" +version = "1.17.0" authors = ["Francis Gagnon"] [deps] ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e" DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" +FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" @@ -33,6 +34,7 @@ ControlSystemsBase = "1.18.2" DAQP = "0.6, 0.7.1" DifferentiationInterface = "0.7.11" Documenter = "1" +FastGaussQuadrature = "1.1.0" FiniteDiff = "2" ForwardDiff = "0.10, 1" Ipopt = "1" diff --git a/docs/src/internals/predictive_control.md b/docs/src/internals/predictive_control.md index ef79ef649..7babd1305 100644 --- a/docs/src/internals/predictive_control.md +++ b/docs/src/internals/predictive_control.md @@ -25,6 +25,7 @@ ModelPredictiveControl.relaxterminal ModelPredictiveControl.augmentdefect ModelPredictiveControl.init_quadprog ModelPredictiveControl.init_stochpred +ModelPredictiveControl.init_orthocolloc ModelPredictiveControl.init_matconstraint_mpc ModelPredictiveControl.get_nonlinobj_op(::NonLinMPC, ::ModelPredictiveControl.GenericModel) ModelPredictiveControl.get_nonlincon_oracle(::NonLinMPC, ::ModelPredictiveControl.GenericModel) diff --git a/docs/src/internals/state_estim.md b/docs/src/internals/state_estim.md index 6b92cc2d7..2a8df67d0 100644 --- a/docs/src/internals/state_estim.md +++ b/docs/src/internals/state_estim.md @@ -27,6 +27,7 @@ ModelPredictiveControl.get_nonlincon_oracle(::MovingHorizonEstimator, ::ModelPre ```@docs ModelPredictiveControl.f̂! ModelPredictiveControl.ĥ! +ModelPredictiveControl.f̂_input! 
``` ## Update Quadratic Optimization diff --git a/docs/src/public/predictive_control.md b/docs/src/public/predictive_control.md index 4250a4b14..6cbcb8945 100644 --- a/docs/src/public/predictive_control.md +++ b/docs/src/public/predictive_control.md @@ -120,4 +120,10 @@ MultipleShooting ```@docs TrapezoidalCollocation -``` \ No newline at end of file +``` + +### OrthogonalCollocation + +```@docs +OrthogonalCollocation +``` diff --git a/src/ModelPredictiveControl.jl b/src/ModelPredictiveControl.jl index 61fdcf537..72aabe892 100644 --- a/src/ModelPredictiveControl.jl +++ b/src/ModelPredictiveControl.jl @@ -37,6 +37,8 @@ import JuMP: @variable, @operator, @constraint, @objective import OSQP, Ipopt +import FastGaussQuadrature + export SimModel, LinModel, NonLinModel export DiffSolver, RungeKutta, ForwardEuler export setop!, setname! @@ -48,7 +50,8 @@ export MovingHorizonEstimator export ManualEstimator export default_nint, initstate! export PredictiveController, ExplicitMPC, LinMPC, NonLinMPC, setconstraint!, moveinput! -export TranscriptionMethod, SingleShooting, MultipleShooting, TrapezoidalCollocation +export TranscriptionMethod, SingleShooting, MultipleShooting +export TrapezoidalCollocation, OrthogonalCollocation export SimResult, getinfo, sim! include("general.jl") diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 8b32bac3b..97520bb2c 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -692,6 +692,105 @@ function validate_args(mpc::PredictiveController, ry, d, lastu, D̂, R̂y, R̂u) size(R̂u) ≠ (nu*Hp,) && throw(DimensionMismatch("R̂u size $(size(R̂u)) ≠ manip. input size × Hp ($(nu*Hp),)")) end +@doc raw""" + init_ZtoΔU(estim::StateEstimator, transcription::TranscriptionMethod, Hp, Hc) -> PΔu + +Init decision variables to input increments over ``H_c`` conversion matrix `PΔu`. 
+ +The conversion from the decision variables ``\mathbf{Z}`` to ``\mathbf{ΔU}``, the input +increments over ``H_c``, is computed by: +```math +\mathbf{ΔU} = \mathbf{P_{Δu}} \mathbf{Z} +``` + +in which ``\mathbf{P_{Δu}}`` is defined in the Extended Help section. + +# Extended Help +!!! details "Extended Help" + Following the decision variable definition of the [`TranscriptionMethod`](@ref), the + conversion matrix ``\mathbf{P_{Δu}}``, we have: + - ``\mathbf{P_{Δu}} = \mathbf{I}`` if `transcription` is a [`SingleShooting`](@ref) + - ``\mathbf{P_{Δu}} = [\begin{smallmatrix}\mathbf{I} & \mathbf{0} \end{smallmatrix}]`` otherwise. + The matrix is stored as a `SparseMatrixCSC` to support both cases efficiently. +""" +function init_ZtoΔU( + estim::StateEstimator{NT}, transcription::TranscriptionMethod, Hp, Hc +) where {NT<:Real} + I_nu_Hc = sparse(Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc)) + nZ = get_nZ(estim, transcription, Hp, Hc) + nΔU = estim.model.nu*Hc + PΔu = [I_nu_Hc spzeros(NT, nΔU, nZ - nΔU)] + return PΔu +end + +@doc raw""" + init_ZtoU(estim, transcription, Hp, Hc, nb) -> Pu, Tu + +Init decision variables to inputs over ``H_p`` conversion matrices. + +The conversion from the decision variables ``\mathbf{Z}`` to ``\mathbf{U}``, the manipulated +inputs over ``H_p``, is computed by: +```math +\mathbf{U} = \mathbf{P_u} \mathbf{Z} + \mathbf{T_u} \mathbf{u}(k-1) +``` +The ``\mathbf{P_u}`` and ``\mathbf{T_u}`` matrices are defined in the Extended Help section. + +# Extended Help +!!! 
details "Extended Help" + With ``n_i``, the ``i``th element of the ``\mathbf{n_b}`` vector defined in [`move_blocking`](@ref) + documentation, we introduce the ``\mathbf{Q}(n_i)`` matrix of size `(nu*ni, nu)`: + ```math + \mathbf{Q}(n_i) = \begin{bmatrix} + \mathbf{I} \\ + \mathbf{I} \\ + \vdots \\ + \mathbf{I} \end{bmatrix} + ``` + The ``\mathbf{U}`` vector and the conversion matrices are defined as: + ```math + \mathbf{U} = \begin{bmatrix} + \mathbf{u}(k + 0) \\ + \mathbf{u}(k + 1) \\ + \vdots \\ + \mathbf{u}(k + H_p - 1) \end{bmatrix} , \quad + \mathbf{P_u^†} = \begin{bmatrix} + \mathbf{Q}(n_1) & \mathbf{0} & \cdots & \mathbf{0} \\ + \mathbf{Q}(n_2) & \mathbf{Q}(n_2) & \cdots & \mathbf{0} \\ + \vdots & \vdots & \ddots & \vdots \\ + \mathbf{Q}(n_{H_c}) & \mathbf{Q}(n_{H_c}) & \cdots & \mathbf{Q}(n_{H_c}) \end{bmatrix} , \quad + \mathbf{T_u} = \begin{bmatrix} + \mathbf{I} \\ + \mathbf{I} \\ + \vdots \\ + \mathbf{I} \end{bmatrix} + ``` + and, depending on the transcription method, we have: + - ``\mathbf{P_u} = \mathbf{P_u^†}`` if `transcription` is a [`SingleShooting`](@ref) + - ``\mathbf{P_u} = [\begin{smallmatrix}\mathbf{P_u^†} & \mathbf{0} \end{smallmatrix}]`` + if `transcription` is a [`MultipleShooting`](@ref) + The conversion matrices are stored as `SparseMatrixCSC` since it was benchmarked that it + is generally more performant than normal dense matrices, even for small `nu`, `Hp` and + `Hc` values. Using `Bool` element type and `BitMatrix` is also slower. 
+""" +function init_ZtoU( + estim::StateEstimator{NT}, transcription::TranscriptionMethod, Hp, Hc, nb +) where {NT<:Real} + nu = estim.model.nu + I_nu = sparse(Matrix{NT}(I, nu, nu)) + PuDagger = Matrix{NT}(undef, nu*Hp, nu*Hc) + for i=1:Hc + ni = nb[i] + Q_ni = repeat(I_nu, ni, 1) + iRows = (1:nu*ni) .+ @views nu*sum(nb[1:i-1]) + PuDagger[iRows, :] = [repeat(Q_ni, 1, i) spzeros(nu*ni, nu*(Hc-i))] + end + PuDagger = sparse(PuDagger) + nZ = get_nZ(estim, transcription, Hp, Hc) + Pu = [PuDagger spzeros(NT, nu*Hp, nZ - nu*Hc)] + Tu = repeat(I_nu, Hp) + return Pu, Tu +end + @doc raw""" init_quadprog( model::LinModel, transcriptions::TranscriptionMethod, weights::ControllerWeights, diff --git a/src/controller/execute.jl b/src/controller/execute.jl index bb059ca4e..80134fffa 100644 --- a/src/controller/execute.jl +++ b/src/controller/execute.jl @@ -142,7 +142,7 @@ function getinfo(mpc::PredictiveController{NT}) where NT<:Real info = Dict{Symbol, Any}() ΔŨ = Vector{NT}(undef, nΔŨ) x̂0end = similar(mpc.estim.x̂0) - K0 = Vector{NT}(undef, nK) + K = Vector{NT}(undef, nK) Ue, Ŷe = Vector{NT}(undef, nUe), Vector{NT}(undef, nŶe) U0, Ŷ0 = similar(mpc.Uop), similar(mpc.Yop) Û0, X̂0 = Vector{NT}(undef, nÛ0), Vector{NT}(undef, nX̂0) @@ -150,7 +150,7 @@ function getinfo(mpc::PredictiveController{NT}) where NT<:Real D̂ = buffer.D̂ U0 = getU0!(U0, mpc, Z̃) ΔŨ = getΔŨ!(ΔŨ, mpc, transcription, Z̃) - Ŷ0, x̂0end = predict!(Ŷ0, x̂0end, X̂0, Û0, K0, mpc, model, transcription, U0, Z̃) + Ŷ0, x̂0end = predict!(Ŷ0, x̂0end, X̂0, Û0, K, mpc, model, transcription, U0, Z̃) Ue, Ŷe = extended_vectors!(Ue, Ŷe, mpc, U0, Ŷ0) U .= U0 .+ mpc.Uop Ŷ .= Ŷ0 .+ mpc.Yop diff --git a/src/controller/legacy.jl b/src/controller/legacy.jl index 2892154a8..e093cbf33 100644 --- a/src/controller/legacy.jl +++ b/src/controller/legacy.jl @@ -23,21 +23,21 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT J::Vector{JNT} = zeros(JNT, 1) ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ) x̂0end::Vector{JNT} 
= zeros(JNT, nx̂) - K0::Vector{JNT} = zeros(JNT, nK) + K::Vector{JNT} = zeros(JNT, nK) Ue::Vector{JNT}, Ŷe::Vector{JNT} = zeros(JNT, nUe), zeros(JNT, nŶe) U0::Vector{JNT}, Ŷ0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nŶ) Û0::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nX̂) gc::Vector{JNT}, g::Vector{JNT} = zeros(JNT, nc), zeros(JNT, ng) geq::Vector{JNT} = zeros(JNT, neq) # ---------------------- objective function ------------------------------------------- - function Jfunc!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function Jfunc!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ) end Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call ∇J_context = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g), Cache(geq), ) ∇J_prep = prepare_gradient(Jfunc!, grad, Z̃_∇J, ∇J_context...; strict) @@ -64,14 +64,14 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT end end # --------------------- inequality constraint functions ------------------------------- - function gfunc!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function gfunc!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return g end Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! 
at first call ∇g_context = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), ) # temporarily enable all the inequality constraints for sparsity detection: @@ -109,14 +109,14 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT ∇g_funcs![i] = ∇gfuncs_i! end # --------------------- equality constraint functions --------------------------------- - function geqfunc!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function geqfunc!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return geq end Z̃_∇geq = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call ∇geq_context = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g) ) ∇geq_prep = prepare_jacobian(geqfunc!, geq, jac, Z̃_∇geq, ∇geq_context...; strict) diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index 35d39e69a..0c4e7ebf2 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -42,6 +42,9 @@ struct NonLinMPC{ weights::CW JE::JEfunc p::PT + Mo::SparseMatrixCSC{NT, Int} + Co::SparseMatrixCSC{NT, Int} + λo::NT R̂u::Vector{NT} R̂y::Vector{NT} lastu0::Vector{NT} @@ -120,6 +123,7 @@ struct NonLinMPC{ d0, D̂0, D̂e = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) Uop, Yop, Dop = repeat(model.uop, Hp), repeat(model.yop, Hp), repeat(model.dop, Hp) test_custom_functions(NT, model, JE, gc!, nc, Uop, Yop, Dop, p) + Mo, Co, λo = init_orthocolloc(model, transcription) nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ Z̃ = zeros(NT, nZ̃) buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ) @@ -130,6 +134,7 @@ 
struct NonLinMPC{ Hp, Hc, nϵ, nb, weights, JE, p, + Mo, Co, λo, R̂u, R̂y, lastu0, P̃Δu, P̃u, Tu, Tu_lastu0, @@ -572,7 +577,7 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny ΔŨ = zeros(NT, nΔŨ) x̂0end = zeros(NT, nx̂) - K0 = zeros(NT, nK) + K = zeros(NT, nK) Ue, Ŷe = zeros(NT, nUe), zeros(NT, nŶe) U0, Ŷ0 = zeros(NT, nU), zeros(NT, nŶ) Û0, X̂0 = zeros(NT, nU), zeros(NT, nX̂) @@ -580,11 +585,11 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real geq = zeros(NT, neq) J_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g), Cache(geq), ) - function J!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function J!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ) end if !isnothing(mpc.hessian) @@ -597,11 +602,11 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real con.i_g .= 1 # temporarily set all constraints as finite so g is entirely computed ∇g_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), ) - function g!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function g!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return nothing end g, ∇g = value_and_jacobian(g!, g, mpc.jacobian, mpc.Z̃, ∇g_cache...) 
@@ -624,11 +629,11 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real λ[old_i_g] = λi ∇²g_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), Cache(g) ) - function ℓ_g(Z̃, λ, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function ℓ_g(Z̃, λ, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return dot(λ, g) end ∇²ℓg = hessian(ℓ_g, mpc.hessian, mpc.Z̃, Constant(λ), ∇²g_cache...) @@ -639,11 +644,11 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real # --- equality constraint derivatives --- geq_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g) ) - function geq!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function geq!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return nothing end geq, ∇geq = value_and_jacobian(geq!, geq, mpc.jacobian, mpc.Z̃, geq_cache...) 
@@ -664,11 +669,11 @@ function addinfo!(info, mpc::NonLinMPC{NT}) where NT<:Real end ∇²geq_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), Cache(g) ) - function ℓ_geq(Z̃, λeq, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function ℓ_geq(Z̃, λeq, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return dot(λeq, geq) end ∇²ℓgeq = hessian(ℓ_geq, mpc.hessian, mpc.Z̃, Constant(λeq), ∇²geq_cache...) @@ -792,20 +797,20 @@ function get_nonlinobj_op(mpc::NonLinMPC, optim::JuMP.GenericModel{JNT}) where J J::Vector{JNT} = zeros(JNT, 1) ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ) x̂0end::Vector{JNT} = zeros(JNT, nx̂) - K0::Vector{JNT} = zeros(JNT, nK) + K::Vector{JNT} = zeros(JNT, nK) Ue::Vector{JNT}, Ŷe::Vector{JNT} = zeros(JNT, nUe), zeros(JNT, nŶe) U0::Vector{JNT}, Ŷ0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nŶ) Û0::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nX̂) gc::Vector{JNT}, g::Vector{JNT} = zeros(JNT, nc), zeros(JNT, ng) geq::Vector{JNT} = zeros(JNT, neq) - function J!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function J!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ) end Z̃_J = fill(myNaN, nZ̃) # NaN to force update at first call J_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g), Cache(geq), ) ∇J_prep = prepare_gradient(J!, grad, Z̃_J, J_cache...; strict) @@ -918,7 +923,7 @@ function 
get_nonlincon_oracle(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JN myNaN, myInf = convert(JNT, NaN), convert(JNT, Inf) ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ) x̂0end::Vector{JNT} = zeros(JNT, nx̂) - K0::Vector{JNT} = zeros(JNT, nK) + K::Vector{JNT} = zeros(JNT, nK) Ue::Vector{JNT}, Ŷe::Vector{JNT} = zeros(JNT, nUe), zeros(JNT, nŶe) U0::Vector{JNT}, Ŷ0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nŶ) Û0::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nU), zeros(JNT, nX̂) @@ -926,20 +931,20 @@ function get_nonlincon_oracle(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JN gi::Vector{JNT}, geq::Vector{JNT} = zeros(JNT, ngi), zeros(JNT, neq) λi::Vector{JNT}, λeq::Vector{JNT} = rand(JNT, ngi), rand(JNT, neq) # -------------- inequality constraint: nonlinear oracle ----------------------------- - function gi!(gi, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function gi!(gi, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) gi .= @views g[i_g] return nothing end - function ℓ_gi(Z̃, λi, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq, g, gi) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function ℓ_gi(Z̃, λi, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq, g, gi) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) gi .= @views g[i_g] return dot(λi, gi) end Z̃_∇gi = fill(myNaN, nZ̃) # NaN to force update at first call ∇gi_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), Cache(g) ) ∇gi_prep = prepare_jacobian(gi!, gi, jac, Z̃_∇gi, ∇gi_cache...; strict) @@ -948,7 +953,7 @@ function get_nonlincon_oracle(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JN if !isnothing(hess) ∇²gi_cache = ( Cache(ΔŨ), 
Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), Cache(g), Cache(gi) ) ∇²gi_prep = prepare_hessian( @@ -991,18 +996,18 @@ function get_nonlincon_oracle(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JN eval_hessian_lagrangian = isnothing(hess) ? nothing : ∇²gi_func! ) # ------------- equality constraints : nonlinear oracle ------------------------------ - function geq!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function geq!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return nothing end - function ℓ_geq(Z̃, λeq, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq, g) - update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃) + function ℓ_geq(Z̃, λeq, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, geq, g) + update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc, Z̃) return dot(λeq, geq) end Z̃_∇geq = fill(myNaN, nZ̃) # NaN to force update at first call ∇geq_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(g) ) ∇geq_prep = prepare_jacobian(geq!, geq, jac, Z̃_∇geq, ∇geq_cache...; strict) @@ -1011,7 +1016,7 @@ function get_nonlincon_oracle(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JN if !isnothing(hess) ∇²geq_cache = ( Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0), - Cache(Û0), Cache(K0), Cache(X̂0), + Cache(Û0), Cache(K), Cache(X̂0), Cache(gc), Cache(geq), Cache(g) ) ∇²geq_prep = prepare_hessian( @@ -1057,7 +1062,7 @@ end """ update_predictions!( - ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, + ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc::PredictiveController, Z̃ ) -> 
nothing @@ -1066,17 +1071,17 @@ Update in-place all vectors for the predictions of `mpc` controller at decision The method mutates all the arguments before the `mpc` argument. """ function update_predictions!( - ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc::PredictiveController, Z̃ + ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K, X̂0, gc, g, geq, mpc::PredictiveController, Z̃ ) model, transcription = mpc.estim.model, mpc.transcription U0 = getU0!(U0, mpc, Z̃) ΔŨ = getΔŨ!(ΔŨ, mpc, transcription, Z̃) - Ŷ0, x̂0end = predict!(Ŷ0, x̂0end, X̂0, Û0, K0, mpc, model, transcription, U0, Z̃) + Ŷ0, x̂0end = predict!(Ŷ0, x̂0end, X̂0, Û0, K, mpc, model, transcription, U0, Z̃) Ue, Ŷe = extended_vectors!(Ue, Ŷe, mpc, U0, Ŷ0) ϵ = getϵ(mpc, Z̃) gc = con_custom!(gc, mpc, Ue, Ŷe, ϵ) g = con_nonlinprog!(g, mpc, model, transcription, x̂0end, Ŷ0, gc, ϵ) - geq = con_nonlinprogeq!(geq, X̂0, Û0, K0, mpc, model, transcription, U0, Z̃) + geq = con_nonlinprogeq!(geq, X̂0, Û0, K, mpc, model, transcription, U0, Z̃) return nothing end diff --git a/src/controller/transcription.jl b/src/controller/transcription.jl index be0148ccd..8590083bd 100644 --- a/src/controller/transcription.jl +++ b/src/controller/transcription.jl @@ -1,8 +1,10 @@ +const COLLOCATION_NODE_TYPE = Float64 + """ Abstract supertype of all transcription methods of [`PredictiveController`](@ref). -The module currently supports [`SingleShooting`](@ref), [`MultipleShooting`](@ref) and -[`TrapezoidalCollocation`](@ref) transcription methods. +The module currently supports [`SingleShooting`](@ref), [`MultipleShooting`](@ref), +[`TrapezoidalCollocation`](@ref) and [`OrthogonalCollocation`](@ref) transcription methods. """ abstract type TranscriptionMethod end abstract type ShootingMethod <: TranscriptionMethod end @@ -81,15 +83,17 @@ only. The decision variables are the same as for [`MultipleShooting`](@ref), hen computational costs. See the same docstring for descriptions of `f_threads` and `h_threads` keywords. 
The `h` argument is `0` or `1`, for piecewise constant or linear manipulated inputs ``\mathbf{u}`` (`h=1` is slightly less expensive). Note that the various [`DiffSolver`](@ref) -here assume zero-order hold, so `h=1` will induce a plant-model mismatch if the plant is -simulated with these solvers. +here assume zero-order hold, so `h=1` will induce a plant-model mismatch if the plant is +simulated with these solvers. Measured disturbances ``\mathbf{d}`` are piecewise linear. This transcription computes the predictions by calling the continuous-time model in the equality constraint function and by using the implicit trapezoidal rule. It can handle -moderately stiff systems and is A-stable. Note that the built-in [`StateEstimator`](@ref) -will still use the `solver` provided at the construction of the [`NonLinModel`](@ref) to -estimate the plant states, not the trapezoidal rule (see `supersample` option of -[`RungeKutta`](@ref) for stiff systems). See Extended Help for more details. +moderately stiff systems and is A-stable. See Extended Help for more details. + +!!! warning + The built-in [`StateEstimator`](@ref) will still use the `solver` provided at the + construction of the [`NonLinModel`](@ref) to estimate the plant states, not the + trapezoidal rule (see `supersample` option of [`RungeKutta`](@ref) for stiff systems). Sparse optimizers like `Ipopt` and sparse Jacobian computations are recommended for this transcription method. @@ -104,153 +108,205 @@ transcription method. 
""" struct TrapezoidalCollocation <: CollocationMethod h::Int - nc::Int + no::Int f_threads::Bool h_threads::Bool function TrapezoidalCollocation(h::Int=0; f_threads=false, h_threads=false) if !(h == 0 || h == 1) throw(ArgumentError("h argument must be 0 or 1 for TrapezoidalCollocation.")) end - nc = 2 # 2 collocation points per interval for trapezoidal rule - return new(h, nc, f_threads, h_threads) + no = 2 # 2 collocation points per interval for trapezoidal rule + return new(h, no, f_threads, h_threads) end end -function validate_transcription(::LinModel, ::CollocationMethod) - throw(ArgumentError("Collocation methods are not supported for LinModel.")) - return nothing -end -function validate_transcription(::NonLinModel{<:Real, <:EmptySolver}, ::CollocationMethod) - throw(ArgumentError("Collocation methods require continuous-time NonLinModel.")) - return nothing -end -validate_transcription(::SimModel, ::TranscriptionMethod) = nothing - -"Get the number of elements in the optimization decision vector `Z`." -function get_nZ(estim::StateEstimator, ::SingleShooting, Hp, Hc) - return estim.model.nu*Hc -end -function get_nZ(estim::StateEstimator, ::TranscriptionMethod, Hp, Hc) - return estim.model.nu*Hc + estim.nx̂*Hp -end - -"Get length of the `k` vector with all the solver intermediate steps or all the collocation pts." -get_nk(model::SimModel, ::ShootingMethod) = model.nk -get_nk(model::SimModel, transcription::CollocationMethod) = model.nx*transcription.nc @doc raw""" - init_ZtoΔU(estim::StateEstimator, transcription::TranscriptionMethod, Hp, Hc) -> PΔu + OrthogonalCollocation( + h::Int=0, no=3; f_threads=false, h_threads=false, roots=:gaussradau + ) -Init decision variables to input increments over ``H_c`` conversion matrix `PΔu`. +Construct an orthogonal collocation on finite elements [`TranscriptionMethod`](@ref). 
-The conversion from the decision variables ``\mathbf{Z}`` to ``\mathbf{ΔU}``, the input -increments over ``H_c``, is computed by: +Also known as pseudo-spectral method. It supports continuous-time [`NonLinModel`](@ref)s +only. The `h` argument is the hold order for ``\mathbf{u}``, and `no` argument, the number +of collocation points ``n_o``. Only zero-order hold is currently implemented, so `h` must +be `0`. The decision variable is similar to [`MultipleShooting`](@ref), but it also includes +the collocation points: ```math -\mathbf{ΔU} = \mathbf{P_{Δu}} \mathbf{Z} +\mathbf{Z} = \begin{bmatrix} \mathbf{ΔU} \\ \mathbf{X̂_0} \\ \mathbf{K} \end{bmatrix} ``` +where ``\mathbf{K}`` encompasses all the intermediate stages of the deterministic states +(the first `nx` elements of ``\mathbf{x̂}``): +```math +\mathbf{K} = \begin{bmatrix} + \mathbf{k}_{1}(k+0) \\ + \mathbf{k}_{2}(k+0) \\ + \vdots \\ + \mathbf{k}_{n_o}(k+0) \\ + \mathbf{k}_{1}(k+1) \\ + \mathbf{k}_{2}(k+1) \\ + \vdots \\ + \mathbf{k}_{n_o}(k+H_p-1) \end{bmatrix} +``` +and ``\mathbf{k}_i(k+j)`` is the deterministic state prediction for the ``i``th collocation +point at the ``j``th stage/interval/finite element (details in Extended Help). The `roots` +keyword argument is either `:gaussradau` or `:gausslegendre`, for Gauss-Radau or +Gauss-Legendre quadrature, respectively. See [`MultipleShooting`](@ref) docstring for +descriptions of `f_threads` and `h_threads` keywords. + +This transcription computes the predictions by enforcing the collocation and continuity +constraints at the collocation points. It is efficient for highly stiff systems, but +generally more expensive than the other methods for non-stiff systems. See Extended Help for +more details. + +!!! 
warning + The built-in [`StateEstimator`](@ref) will still use the `solver` provided at the + construction of the [`NonLinModel`](@ref) to estimate the plant states, not orthogonal + collocation (see `supersample` option of [`RungeKutta`](@ref) for stiff systems). -in which ``\mathbf{P_{Δu}}`` is defined in the Extended Help section. +Sparse optimizers like `Ipopt` and sparse Jacobian computations are highly recommended for +this transcription method (sparser formulation than [`MultipleShooting`](@ref)). # Extended Help !!! details "Extended Help" - Following the decision variable definition of the [`TranscriptionMethod`](@ref), the - conversion matrix ``\mathbf{P_{Δu}}``, we have: - - ``\mathbf{P_{Δu}} = \mathbf{I}`` if `transcription` is a [`SingleShooting`](@ref) - - ``\mathbf{P_{Δu}} = [\begin{smallmatrix}\mathbf{I} & \mathbf{0} \end{smallmatrix}]`` otherwise. - The matrix is store as as `SparseMatrixCSC` to support both cases efficiently. + As explained in the Extended Help of [`TrapezoidalCollocation`](@ref), the stochastic + states are left out of the ``\mathbf{K}`` vector since collocation methods require + continuous-time dynamics and the stochastic model is discrete. + + The collocation points are located at the roots of orthogonal polynomials, which is + "optimal" for approximating the state trajectories with polynomials of degree ``n_o``. + The method then enforces the system dynamics at these points. The Gauss-Legendre scheme + is more accurate than Gauss-Radau but only A-stable, while the latter is L-stable. + See [`con_nonlinprogeq!`](@ref) for details on the implementation. 
""" -function init_ZtoΔU end - -function init_ZtoΔU( - estim::StateEstimator{NT}, ::SingleShooting, _ , Hc -) where {NT<:Real} - PΔu = sparse(Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc)) - return PΔu -end - -function init_ZtoΔU( - estim::StateEstimator{NT}, ::TranscriptionMethod, Hp, Hc -) where {NT<:Real} - I_nu_Hc = sparse(Matrix{NT}(I, estim.model.nu*Hc, estim.model.nu*Hc)) - PΔu = [I_nu_Hc spzeros(NT, estim.model.nu*Hc, estim.nx̂*Hp)] - return PΔu +struct OrthogonalCollocation <: CollocationMethod + h::Int + no::Int + f_threads::Bool + h_threads::Bool + τ::Vector{COLLOCATION_NODE_TYPE} + function OrthogonalCollocation( + h::Int=0, no::Int=3; f_threads=false, h_threads=false, roots=:gaussradau + ) + if !(h == 0) + throw(ArgumentError("Only the zero-order hold (h=0) is currently implemented.")) + end + if roots==:gaussradau + x, _ = FastGaussQuadrature.gaussradau(no, COLLOCATION_NODE_TYPE) + # we reverse the nodes to include the τ=1.0 node: + τ = (reverse(-x) .+ 1) ./ 2 + elseif roots==:gausslegendre + x, _ = FastGaussQuadrature.gausslegendre(no) + # converting [-1, 1] to [0, 1] (see + # https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval): + τ = (x .+ 1) ./ 2 + else + throw(ArgumentError("roots argument must be :gaussradau or :gausslegendre.")) + end + return new(h, no, f_threads, h_threads, τ) + end end @doc raw""" - init_ZtoU(estim, transcription, Hp, Hc, nb) -> Pu, Tu + init_orthocolloc(model::SimModel, transcription::OrthogonalCollocation) -> Mo, Co, λo -Init decision variables to inputs over ``H_p`` conversion matrices. +Init the differentiation and continuity matrices for [`OrthogonalCollocation`](@ref). 
-The conversion from the decision variables ``\mathbf{Z}`` to ``\mathbf{U}``, the manipulated
-inputs over ``H_p``, is computed by:
+Introducing ``τ_i``, the ``i``th root of the orthogonal polynomial normalized to the
+interval ``[0, 1]``, and ``τ_0=0``, each state trajectory is approximated by a distinct
+polynomial of degree ``n_o``. The differentiation matrix ``\mathbf{M_o}``, continuity
+matrix ``\mathbf{C_o}`` and continuity coefficient ``λ_o`` are pre-computed with:
 ```math
-\mathbf{U} = \mathbf{P_u} \mathbf{Z} + \mathbf{T_u} \mathbf{u}(k-1)
+\begin{aligned}
+    \mathbf{P_o} &= \begin{bmatrix}
+        τ_1^1 \mathbf{I} & τ_1^2 \mathbf{I} & \cdots & τ_1^{n_o} \mathbf{I} \\
+        τ_2^1 \mathbf{I} & τ_2^2 \mathbf{I} & \cdots & τ_2^{n_o} \mathbf{I} \\
+        \vdots & \vdots & \ddots & \vdots \\
+        τ_{n_o}^1 \mathbf{I} & τ_{n_o}^2 \mathbf{I} & \cdots & τ_{n_o}^{n_o} \mathbf{I} \end{bmatrix} \\
+    \mathbf{Ṗ_o} &= \begin{bmatrix}
+        τ_1^0 \mathbf{I} & 2τ_1^1 \mathbf{I} & \cdots & n_o τ_1^{n_o-1} \mathbf{I} \\
+        τ_2^0 \mathbf{I} & 2τ_2^1 \mathbf{I} & \cdots & n_o τ_2^{n_o-1} \mathbf{I} \\
+        \vdots & \vdots & \ddots & \vdots \\
+        τ_{n_o}^0 \mathbf{I} & 2τ_{n_o}^1 \mathbf{I} & \cdots & n_o τ_{n_o}^{n_o-1} \mathbf{I} \end{bmatrix} \\
+    \mathbf{M_o} &= \frac{1}{T_s} \mathbf{Ṗ_o} \mathbf{P_o}^{-1} \\
+    \mathbf{C_o} &= \begin{bmatrix}
+        L_1(1) \mathbf{I} & L_2(1) \mathbf{I} & \cdots & L_{n_o}(1) \mathbf{I} \end{bmatrix} \\
+    λ_o &= L_0(1)
+\end{aligned}
+```
+where ``\mathbf{P_o}`` is a matrix to evaluate the polynomial values w/o the Y-intercept,
+and ``\mathbf{Ṗ_o}``, to evaluate its derivatives. The Lagrange polynomial ``L_j(τ)`` bases
+are defined as:
+```math
+L_j(τ) = \prod_{i=0, i≠j}^{n_o} \frac{τ - τ_i}{τ_j - τ_i}
 ```
-The ``\mathbf{P_u}`` and ``\mathbf{T_u}`` matrices are defined in the Extended Help section.
-
-# Extended Help
-!!!
details "Extended Help" - With ``n_i``, the ``i``th element of the ``\mathbf{n_b}`` vector defined in [`move_blocking`](@ref) - documentation, we introduce the ``\mathbf{Q}(n_i)`` matrix of size `(nu*ni, nu)`: - ```math - \mathbf{Q}(n_i) = \begin{bmatrix} - \mathbf{I} \\ - \mathbf{I} \\ - \vdots \\ - \mathbf{I} \end{bmatrix} - ``` - The ``\mathbf{U}`` vector and the conversion matrices are defined as: - ```math - \mathbf{U} = \begin{bmatrix} - \mathbf{u}(k + 0) \\ - \mathbf{u}(k + 1) \\ - \vdots \\ - \mathbf{u}(k + H_p - 1) \end{bmatrix} , \quad - \mathbf{P_u^†} = \begin{bmatrix} - \mathbf{Q}(n_1) & \mathbf{0} & \cdots & \mathbf{0} \\ - \mathbf{Q}(n_2) & \mathbf{Q}(n_2) & \cdots & \mathbf{0} \\ - \vdots & \vdots & \ddots & \vdots \\ - \mathbf{Q}(n_{H_c}) & \mathbf{Q}(n_{H_c}) & \cdots & \mathbf{Q}(n_{H_c}) \end{bmatrix} , \quad - \mathbf{T_u} = \begin{bmatrix} - \mathbf{I} \\ - \mathbf{I} \\ - \vdots \\ - \mathbf{I} \end{bmatrix} - ``` - and, depending on the transcription method, we have: - - ``\mathbf{P_u} = \mathbf{P_u^†}`` if `transcription` is a [`SingleShooting`](@ref) - - ``\mathbf{P_u} = [\begin{smallmatrix}\mathbf{P_u^†} & \mathbf{0} \end{smallmatrix}]`` - if `transcription` is a [`MultipleShooting`](@ref) - The conversion matrices are stored as `SparseMatrixCSC` since it was benchmarked that it - is generally more performant than normal dense matrices, even for small `nu`, `Hp` and - `Hc` values. Using `Bool` element type and `BitMatrix` is also slower. 
""" -function init_ZtoU( - estim::StateEstimator{NT}, transcription::TranscriptionMethod, Hp, Hc, nb +function init_orthocolloc( + model::SimModel{NT}, transcription::OrthogonalCollocation ) where {NT<:Real} - nu = estim.model.nu - I_nu = sparse(Matrix{NT}(I, nu, nu)) - PuDagger = Matrix{NT}(undef, nu*Hp, nu*Hc) - for i=1:Hc - ni = nb[i] - Q_ni = repeat(I_nu, ni, 1) - iRows = (1:nu*ni) .+ @views nu*sum(nb[1:i-1]) - PuDagger[iRows, :] = [repeat(Q_ni, 1, i) spzeros(nu*ni, nu*(Hc-i))] + nx, no = model.nx, transcription.no + τ = transcription.τ + Po = Matrix{NT}(undef, nx*no, nx*no) # polynomial matrix (w/o the Y-intercept term) + Ṗo = Matrix{NT}(undef, nx*no, nx*no) # polynomial derivative matrix + for j=1:no, i=1:no + iRows = (1:nx) .+ nx*(i-1) + iCols = (1:nx) .+ nx*(j-1) + Po[iRows, iCols] = (τ[i]^j)*I(nx) + Ṗo[iRows, iCols] = (j*τ[i]^(j-1))*I(nx) + end + Mo = sparse((Ṗo/Po)/model.Ts) + Co = Matrix{NT}(undef, nx, nx*no) + for j=1:no + iCols = (1:nx) .+ nx*(j-1) + Co[:, iCols] = lagrange_end(j, transcription)*I(nx) end - PuDagger = sparse(PuDagger) - Pu = init_PUmat(estim, transcription, Hp, Hc, PuDagger) - Tu = repeat(I_nu, Hp) - return Pu, Tu + Co = sparse(Co) + λo = lagrange_end(0, transcription) + return Mo, Co, λo +end +"Return empty sparse matrices and `NaN` for other [`TranscriptionMethod`](@ref)" +init_orthocolloc(::SimModel, ::TranscriptionMethod) = spzeros(0,0), spzeros(0,0), NaN + +"Evaluate the Lagrange basis polynomial ``L_j`` at `τ=1`." 
+function lagrange_end(j, transcription::OrthogonalCollocation) + τ_val = 1 + τ_values = [0; transcription.τ] # including the τ=0 node for the Lagrange polynomials + j_index = j + 1 # because of the τ=0 node + τj = τ_values[j_index] + Lj = 1 + for i in eachindex(τ_values) + i == j_index && continue + τi = τ_values[i] + Lj *= (τ_val - τi)/(τj - τi) + end + return Lj end -function init_PUmat(_,::SingleShooting,_,_,PuDagger::AbstractMatrix{NT}) where NT<:Real - return PuDagger +function validate_transcription(::LinModel, ::CollocationMethod) + throw(ArgumentError("Collocation methods are not supported for LinModel.")) + return nothing end -function init_PUmat( - estim, ::TranscriptionMethod, Hp, _ , PuDagger::AbstractMatrix{NT} -) where NT<:Real - return [PuDagger spzeros(NT, estim.model.nu*Hp, estim.nx̂*Hp)] +function validate_transcription(::NonLinModel{<:Real, <:EmptySolver}, ::CollocationMethod) + throw(ArgumentError("Collocation methods require continuous-time NonLinModel.")) + return nothing end +validate_transcription(::SimModel, ::TranscriptionMethod) = nothing + +"Get the number of elements in the optimization decision vector `Z`." +function get_nZ(estim::StateEstimator, ::SingleShooting, Hp, Hc) + return estim.model.nu*Hc +end +function get_nZ(estim::StateEstimator, ::TranscriptionMethod, Hp, Hc) + return estim.model.nu*Hc + estim.nx̂*Hp +end +function get_nZ(estim::StateEstimator, transcription::OrthogonalCollocation, Hp, Hc) + return estim.model.nu*Hc + estim.nx̂*Hp + estim.model.nx*transcription.no*Hp +end + +"Get length of the `k` vector with all the solver intermediate steps or all the collocation pts." +get_nk(model::SimModel, ::ShootingMethod) = model.nk +get_nk(model::SimModel, transcription::CollocationMethod) = model.nx*transcription.no @doc raw""" init_predmat( @@ -518,11 +574,15 @@ given in the Extended Help section. !!! 
details "Extended Help" The terminal state matrices all appropriately sized zero matrices ``\mathbf{0}``, except for ``\mathbf{e_x̂} = [\begin{smallmatrix}\mathbf{0} & \mathbf{I}\end{smallmatrix}]`` + if `transcription` is a [`MultipleShooting`](@ref), and ``\mathbf{e_x̂} = + [\begin{smallmatrix}\mathbf{0} & \mathbf{I} & \mathbf{0}\end{smallmatrix}]`` otherwise. """ function init_predmat( model::NonLinModel, estim::StateEstimator{NT}, transcription::TranscriptionMethod, Hp, Hc, _ ) where {NT<:Real} nu, nx̂, nd = model.nu, estim.nx̂, model.nd + nΔU = nu*Hc + nX̂0 = nx̂*Hp nZ = get_nZ(estim, transcription, Hp, Hc) E = zeros(NT, 0, nZ) G = zeros(NT, 0, nd) @@ -530,7 +590,8 @@ function init_predmat( K = zeros(NT, 0, nx̂) V = zeros(NT, 0, nu) B = zeros(NT, 0) - ex̂ = [zeros(NT, nx̂, Hc*nu + (Hp-1)*nx̂) I] + myzeros = zeros(nx̂, nZ - nΔU - nX̂0) + ex̂ = [zeros(NT, nx̂, nΔU + nX̂0 - nx̂) I myzeros] gx̂ = zeros(NT, nx̂, nd) jx̂ = zeros(NT, nx̂, nd*Hp) kx̂ = zeros(NT, nx̂, nx̂) @@ -901,8 +962,7 @@ Set and return the warm-start value of `Z̃var` for [`SingleShooting`](@ref) tra If supported by `mpc.optim`, it warm-starts the solver at: ```math -\mathbf{Z̃_s} = -\begin{bmatrix} +\mathbf{Z̃_s} = \begin{bmatrix} \mathbf{Δu}(k+0|k-1) \\ \mathbf{Δu}(k+1|k-1) \\ \vdots \\ @@ -916,9 +976,63 @@ last control period ``k-1``, and ``ϵ(k-1)``, the slack variable of the last con """ function set_warmstart!(mpc::PredictiveController, ::SingleShooting, Z̃var) nu, Hc, Z̃s = mpc.estim.model.nu, mpc.Hc, mpc.buffer.Z̃ + nΔU = nu*Hc + # --- input increments ΔU --- + Z̃s[1:(nΔU-nu)] .= @views mpc.Z̃[nu+1:nΔU] + Z̃s[(nΔU-nu+1):(nΔU)] .= 0 + # --- slack variable ϵ --- + mpc.nϵ == 1 && (Z̃s[end] = mpc.Z̃[end]) + JuMP.set_start_value.(Z̃var, Z̃s) + return Z̃s +end + +@doc raw""" + set_warmstart!(mpc::PredictiveController, ::OrthogonalCollocation, Z̃var) -> Z̃s + +Set and return the warm-start value of `Z̃var` for other [`OrthogonalCollocation`](@ref). 
+ +It warm-starts the solver at: +```math +\mathbf{Z̃_s} = \begin{bmatrix} + \mathbf{Δu}(k+0|k-1) \\ + \mathbf{Δu}(k+1|k-1) \\ + \vdots \\ + \mathbf{Δu}(k+H_c-2|k-1) \\ + \mathbf{0} \\ + \mathbf{x̂_0}(k+1|k-1) \\ + \mathbf{x̂_0}(k+2|k-1) \\ + \vdots \\ + \mathbf{x̂_0}(k+H_p-1|k-1) \\ + \mathbf{x̂_0}(k+H_p-1|k-1) \\ + \mathbf{k}(k+0|k-1) \\ + \mathbf{k}(k+1|k-1) \\ + \vdots \\ + \mathbf{k}(k+H_p-2|k-1) \\ + \mathbf{k}(k+H_p-2|k-1) \\ + ϵ(k-1) +\end{bmatrix} +``` +where ``\mathbf{x̂_0}(k+j|k-1)`` is the predicted state for time ``k+j`` computed at the +last control period ``k-1``, expressed as a deviation from the operating point +``\mathbf{x̂_{op}}``. The vector ``\mathbf{k}(k+j|k-1)`` include the ``n_o`` intermediate +stage predictions for the interval ``k+j``, and is also computed at the last control period. +""" +function set_warmstart!( + mpc::PredictiveController, transcription::OrthogonalCollocation, Z̃var +) + nu, nx̂ = mpc.estim.model.nu, mpc.estim.nx̂ + Hp, Hc, Z̃s = mpc.Hp, mpc.Hc, mpc.buffer.Z̃ + nk = get_nk(mpc.estim.model, transcription) + nΔU, nX̂, nK = nu*Hc, nx̂*Hp, nk*Hp # --- input increments ΔU --- - Z̃s[1:(Hc*nu-nu)] .= @views mpc.Z̃[nu+1:Hc*nu] - Z̃s[(Hc*nu-nu+1):(Hc*nu)] .= 0 + Z̃s[1:(nΔU-nu)] .= @views mpc.Z̃[(nu+1):(nΔU)] + Z̃s[(nΔU-nu+1):(nΔU)] .= 0 + # --- predicted states X̂0 --- + Z̃s[(nΔU+1):(nΔU+nX̂-nx̂)] .= @views mpc.Z̃[(nΔU+nx̂+1):(nΔU+nX̂)] + Z̃s[(nΔU+nX̂-nx̂+1):(nΔU+nX̂)] .= @views mpc.Z̃[(nΔU+nX̂-nx̂+1):(nΔU+nX̂)] + # --- collocation points K --- + Z̃s[(nΔU+nX̂+1):(nΔU+nX̂+nK-nk)] .= @views mpc.Z̃[(nΔU+nX̂+nk+1):(nΔU+nX̂+nK)] + Z̃s[(nΔU+nX̂+nK-nk+1):(nΔU+nX̂+nK)] .= @views mpc.Z̃[(nΔU+nX̂+nK-nk+1):(nΔU+nX̂+nK)] # --- slack variable ϵ --- mpc.nϵ == 1 && (Z̃s[end] = mpc.Z̃[end]) JuMP.set_start_value.(Z̃var, Z̃s) @@ -932,8 +1046,7 @@ Set and return the warm-start value of `Z̃var` for other [`TranscriptionMethod` It warm-starts the solver at: ```math -\mathbf{Z̃_s} = -\begin{bmatrix} +\mathbf{Z̃_s} = \begin{bmatrix} 
\mathbf{Δu}(k+0|k-1) \\ \mathbf{Δu}(k+1|k-1) \\ \vdots \\ @@ -953,12 +1066,13 @@ last control period ``k-1``, expressed as a deviation from the operating point """ function set_warmstart!(mpc::PredictiveController, ::TranscriptionMethod, Z̃var) nu, nx̂, Hp, Hc, Z̃s = mpc.estim.model.nu, mpc.estim.nx̂, mpc.Hp, mpc.Hc, mpc.buffer.Z̃ + nΔU, nX̂ = nu*Hc, nx̂*Hp # --- input increments ΔU --- - Z̃s[1:(Hc*nu-nu)] .= @views mpc.Z̃[nu+1:Hc*nu] - Z̃s[(Hc*nu-nu+1):(Hc*nu)] .= 0 + Z̃s[1:(nΔU-nu)] .= @views mpc.Z̃[nu+1:nΔU] + Z̃s[(nΔU-nu+1):(nΔU)] .= 0 # --- predicted states X̂0 --- - Z̃s[(Hc*nu+1):(Hc*nu+Hp*nx̂-nx̂)] .= @views mpc.Z̃[(Hc*nu+nx̂+1):(Hc*nu+Hp*nx̂)] - Z̃s[(Hc*nu+Hp*nx̂-nx̂+1):(Hc*nu+Hp*nx̂)] .= @views mpc.Z̃[(Hc*nu+Hp*nx̂-nx̂+1):(Hc*nu+Hp*nx̂)] + Z̃s[(nΔU+1):(nΔU+nX̂-nx̂)] .= @views mpc.Z̃[(nΔU+nx̂+1):(nΔU+nX̂)] + Z̃s[(nΔU+nX̂-nx̂+1):(nΔU+nX̂)] .= @views mpc.Z̃[(nΔU+nX̂-nx̂+1):(nΔU+nX̂)] # --- slack variable ϵ --- mpc.nϵ == 1 && (Z̃s[end] = mpc.Z̃[end]) JuMP.set_start_value.(Z̃var, Z̃s) @@ -1007,24 +1121,25 @@ end @doc raw""" predict!( - Ŷ0, x̂0end, X̂0, Û0, K0, + Ŷ0, x̂0end, X̂0, Û0, K, mpc::PredictiveController, model::NonLinModel, transcription::SingleShooting, U0, _ ) -> Ŷ0, x̂0end Compute vectors if `model` is a [`NonLinModel`](@ref) and for [`SingleShooting`](@ref). -The method mutates `Ŷ0`, `x̂0end`, `X̂0`, `Û0` and `K0` arguments. The augmented model of +The method mutates `Ŷ0`, `x̂0end`, `X̂0`, `Û0` and `K` arguments. The augmented model of [`f̂!`](@ref) and [`ĥ!`](@ref) functions is called recursively in a `for` loop: ```math \begin{aligned} -\mathbf{x̂_0}(k+1) &= \mathbf{f̂}\Big(\mathbf{x̂_0}(k), \mathbf{u_0}(k), \mathbf{d̂_0}(k) \Big) \\ -\mathbf{ŷ_0}(k) &= \mathbf{ĥ}\Big(\mathbf{x̂_0}(k), \mathbf{d̂_0}(k) \Big) +\mathbf{x̂_0}(k+j+1) &= \mathbf{f̂}\Big(\mathbf{x̂_0}(k+j), \mathbf{u_0}(k+j), \mathbf{d̂_0}(k+j) \Big) \\ +\mathbf{ŷ_0}(k+j) &= \mathbf{ĥ}\Big(\mathbf{x̂_0}(k+j), \mathbf{d̂_0}(k+j) \Big) \end{aligned} ``` +for ``j = 0, 1, ... , H_p``. 
""" function predict!( - Ŷ0, x̂0end, X̂0, Û0, K0, + Ŷ0, x̂0end, X̂0, Û0, K, mpc::PredictiveController, model::NonLinModel, ::SingleShooting, U0, _ ) @@ -1035,9 +1150,9 @@ function predict!( for j=1:Hp u0 = @views U0[(1 + nu*(j-1)):(nu*j)] û0 = @views Û0[(1 + nu*(j-1)):(nu*j)] - k0 = @views K0[(1 + nk*(j-1)):(nk*j)] + k = @views K[(1 + nk*(j-1)):(nk*j)] x̂0next = @views X̂0[(1 + nx̂*(j-1)):(nx̂*j)] - f̂!(x̂0next, û0, k0, mpc.estim, model, x̂0, u0, d̂0) + f̂!(x̂0next, û0, k, mpc.estim, model, x̂0, u0, d̂0) x̂0 = @views X̂0[(1 + nx̂*(j-1)):(nx̂*j)] d̂0 = @views D̂0[(1 + nd*(j-1)):(nd*j)] ŷ0 = @views Ŷ0[(1 + ny*(j-1)):(ny*j)] @@ -1060,9 +1175,10 @@ Compute vectors if `model` is a [`NonLinModel`](@ref) and other [`TranscriptionM The method mutates `Ŷ0` and `x̂0end` arguments. The augmented output function [`ĥ!`](@ref) is called multiple times in a `for` loop: ```math -\mathbf{ŷ_0}(k) = \mathbf{ĥ}\Big(\mathbf{x̂_0}(k), \mathbf{d̂_0}(k) \Big) +\mathbf{ŷ_0}(k+j) = \mathbf{ĥ}\Big(\mathbf{x̂_0}(k+j), \mathbf{d̂_0}(k+j) \Big) ``` -in which ``\mathbf{x̂_0}`` is the augmented state extracted from the decision variable `Z̃`. +for ``j = 1, 2, ... , H_p``, and in which ``\mathbf{x̂_0}`` is the augmented state extracted +from the decision variable `Z̃`. """ function predict!( Ŷ0, x̂0end, _, _, _, @@ -1171,24 +1287,25 @@ end @doc raw""" con_nonlinprogeq!( - geq, X̂0, Û0, K0 + geq, X̂0, Û0, K mpc::PredictiveController, model::NonLinModel, transcription::MultipleShooting, U0, Z̃ - ) + ) -> geq Nonlinear equality constrains for [`NonLinModel`](@ref) and [`MultipleShooting`](@ref). -The method mutates the `geq`, `X̂0`, `Û0` and `K0` vectors in argument. The nonlinear +The method mutates the `geq`, `X̂0`, `Û0` and `K` vectors in argument. 
The nonlinear equality constraints `geq` only includes the augmented state defects, computed with: ```math -\mathbf{ŝ}(k+1) = \mathbf{f̂}\Big(\mathbf{x̂_0}(k), \mathbf{u_0}(k), \mathbf{d̂_0}(k)\Big) - - \mathbf{x̂_0}(k+1) +\mathbf{ŝ}(k+j+1) = \mathbf{f̂}\Big(\mathbf{x̂_0}(k+j), \mathbf{u_0}(k+j), \mathbf{d̂_0}(k+j)\Big) + - \mathbf{x̂_0}(k+j+1) ``` -in which the augmented state ``\mathbf{x̂_0}`` are extracted from the decision variables -`Z̃`, and ``\mathbf{f̂}`` is the augmented state function defined in [`f̂!`](@ref). +for ``j = 0, 1, ... , H_p-1``, and in which the augmented state ``\mathbf{x̂_0}`` are +extracted from the decision variables `Z̃`, and ``\mathbf{f̂}`` is the augmented state +function defined in [`f̂!`](@ref). """ function con_nonlinprogeq!( - geq, X̂0, Û0, K0, + geq, X̂0, Û0, K, mpc::PredictiveController, model::NonLinModel, transcription::MultipleShooting, U0, Z̃ ) nu, nx̂, nd, nk = model.nu, mpc.estim.nx̂, model.nd, model.nk @@ -1199,63 +1316,59 @@ function con_nonlinprogeq!( X̂0_Z̃ = @views Z̃[(nΔU+1):(nΔU+nX̂)] @threadsif f_threads for j=1:Hp if j < 2 - x̂0 = @views mpc.estim.x̂0[1:nx̂] - d̂0 = @views mpc.d0[1:nd] + x̂0_Z̃ = @views mpc.estim.x̂0[1:nx̂] + d̂0 = @views mpc.d0[1:nd] else - x̂0 = @views X̂0_Z̃[(1 + nx̂*(j-2)):(nx̂*(j-1))] - d̂0 = @views D̂0[(1 + nd*(j-2)):(nd*(j-1))] + x̂0_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-2)):(nx̂*(j-1))] + d̂0 = @views D̂0[(1 + nd*(j-2)):(nd*(j-1))] end u0 = @views U0[(1 + nu*(j-1)):(nu*j)] û0 = @views Û0[(1 + nu*(j-1)):(nu*j)] - k0 = @views K0[(1 + nk*(j-1)):(nk*j)] + k = @views K[(1 + nk*(j-1)):(nk*j)] x̂0next = @views X̂0[(1 + nx̂*(j-1)):(nx̂*j)] x̂0next_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-1)):(nx̂*j)] ŝnext = @views geq[(1 + nx̂*(j-1)):(nx̂*j)] - f̂!(x̂0next, û0, k0, mpc.estim, model, x̂0, u0, d̂0) - ŝnext .= x̂0next .- x̂0next_Z̃ + f̂!(x̂0next, û0, k, mpc.estim, model, x̂0_Z̃, u0, d̂0) + ŝnext .= @. 
x̂0next - x̂0next_Z̃ end return geq end @doc raw""" con_nonlinprogeq!( - geq, X̂0, Û0, K0 + geq, X̂0, Û0, K̇ mpc::PredictiveController, model::NonLinModel, transcription::TrapezoidalCollocation, U0, Z̃ - ) + ) -> geq Nonlinear equality constrains for [`NonLinModel`](@ref) and [`TrapezoidalCollocation`](@ref). -The method mutates the `geq`, `X̂0`, `Û0` and `K0` vectors in argument. - -The nonlinear equality constraints `geq` only includes the state defects. The deterministic -and stochastic states are handled separately since collocation methods require continuous- -time state-space models, and the stochastic model of the unmeasured disturbances -is discrete-time. The deterministic and stochastic defects are respectively computed with: +The method mutates the `geq`, `X̂0`, `Û0` and `K̇` vectors in argument. The nonlinear equality +constraints `geq` only includes the state defects. The deterministic and stochastic states +are handled separately since collocation methods require continuous-time state-space models, +and the stochastic model of the unmeasured disturbances is discrete-time. The deterministic +and stochastic defects are respectively computed with: ```math \begin{aligned} -\mathbf{s_d}(k+1) &= \mathbf{x_0}(k) - \mathbf{x_0}(k+1) - + 0.5 T_s (\mathbf{k}_1 + \mathbf{k}_2) \\ -\mathbf{s_s}(k+1) &= \mathbf{A_s x_s}(k) - \mathbf{x_s}(k+1) +\mathbf{s_d}(k+j+1) &= \mathbf{x_0}(k+j) + 0.5 T_s [\mathbf{k̇}_1(k+j) + \mathbf{k̇}_2(k+j)] + - \mathbf{x_0}(k+j+1) \\ +\mathbf{s_s}(k+j+1) &= \mathbf{A_s x_s}(k+j) - \mathbf{x_s}(k+j+1) \end{aligned} ``` -in which ``\mathbf{x_0}`` and ``\mathbf{x_s}`` are the deterministic and stochastic states -extracted from the decision variables `Z̃`. The ``\mathbf{k}`` coefficients are -evaluated from the continuous-time function `model.f!` and: +for ``j = 0, 1, ... , H_p-1``, and in which ``\mathbf{x_0}`` and ``\mathbf{x_s}`` are the +deterministic and stochastic states extracted from the decision variables `Z̃`. 
The +``\mathbf{k̇}`` coefficients are evaluated from the continuous-time function `model.f!` and: ```math \begin{aligned} -\mathbf{k}_1 &= \mathbf{f}\Big(\mathbf{x_0}(k), \mathbf{û_0}(k), \mathbf{d̂_0}(k) \Big) \\ -\mathbf{k}_2 &= \mathbf{f}\Big(\mathbf{x_0}(k+1), \mathbf{û_0}(k+h), \mathbf{d̂_0}(k+1)\Big) +\mathbf{k̇}_1(k+j) &= \mathbf{f}\Big(\mathbf{x_0}(k+j), \mathbf{û_0}(k+j), \mathbf{d̂_0}(k+j), \mathbf{p}\Big) \\ +\mathbf{k̇}_2(k+j) &= \mathbf{f}\Big(\mathbf{x_0}(k+j+1), \mathbf{û_0}(k+j+h), \mathbf{d̂_0}(k+j+1), \mathbf{p}\Big) \end{aligned} ``` -in which ``h`` is the hold order `transcription.h` and the disturbed input is: -```math -\mathbf{û_0}(k) = \mathbf{u_0}(k) + \mathbf{C_{s_u} x_s}(k) -``` -the ``\mathbf{A_s, C_{s_u}}`` matrices are defined in [`init_estimstoch`](@ref) doc. +in which ``h`` is the hold order `transcription.h` and the disturbed input ``\mathbf{û_0}`` +is defined in [`f̂_input!`](@ref). """ function con_nonlinprogeq!( - geq, X̂0, Û0, K0, + geq, X̂0, Û0, K̇, mpc::PredictiveController, model::NonLinModel, transcription::TrapezoidalCollocation, U0, Z̃ ) @@ -1269,35 +1382,36 @@ function con_nonlinprogeq!( X̂0_Z̃ = @views Z̃[(nΔU+1):(nΔU+nX̂)] @threadsif f_threads for j=1:Hp if j < 2 - x̂0 = @views mpc.estim.x̂0[1:nx̂] - d̂0 = @views mpc.d0[1:nd] + x̂0_Z̃ = @views mpc.estim.x̂0[1:nx̂] + d̂0 = @views mpc.d0[1:nd] else - x̂0 = @views X̂0_Z̃[(1 + nx̂*(j-2)):(nx̂*(j-1))] - d̂0 = @views D̂0[(1 + nd*(j-2)):(nd*(j-1))] + x̂0_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-2)):(nx̂*(j-1))] + d̂0 = @views D̂0[(1 + nd*(j-2)):(nd*(j-1))] end - k0 = @views K0[(1 + nk*(j-1)):(nk*j)] + k̇ = @views K̇[(1 + nk*(j-1)):(nk*j)] d̂0next = @views D̂0[(1 + nd*(j-1)):(nd*j)] x̂0next = @views X̂0[(1 + nx̂*(j-1)):(nx̂*j)] x̂0next_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-1)):(nx̂*j)] - ŝnext = @views geq[(1 + nx̂*(j-1)):(nx̂*j)] - x0 = @views x̂0[1:nx] - xsnext = @views x̂0next[nx+1:end] - x0next_Z̃, xsnext_Z̃ = @views x̂0next_Z̃[1:nx], x̂0next_Z̃[nx+1:end] - sdnext, ssnext = @views 
ŝnext[1:nx], ŝnext[nx+1:end] - k1, k2 = @views k0[1:nx], k0[nx+1:2*nx] + sdnext = @views geq[(1 + nx̂*(j-1) ):(nx̂*(j-1) + nx)] + ssnext = @views geq[(1 + nx̂*(j-1) + nx):(nx̂*j )] + x0_Z̃ = @views x̂0_Z̃[1:nx] + x0next_Z̃ = @views x̂0next_Z̃[1:nx] + k̇1, k̇2 = @views k̇[1:nx], k̇[nx+1:2*nx] # ----------------- stochastic defects ----------------------------------------- - fs!(x̂0next, mpc.estim, model, x̂0) + xsnext = @views x̂0next[nx+1:end] + xsnext_Z̃ = @views x̂0next_Z̃[nx+1:end] + fs!(x̂0next, mpc.estim, model, x̂0_Z̃) ssnext .= @. xsnext - xsnext_Z̃ - # ----------------- deterministic defects -------------------------------------- + # ----------------- deterministic defects: trapezoidal collocation ------------- u0 = @views U0[(1 + nu*(j-1)):(nu*j)] û0 = @views Û0[(1 + nu*(j-1)):(nu*j)] - f̂_input!(û0, mpc.estim, model, x̂0, u0) + f̂_input!(û0, mpc.estim, model, x̂0_Z̃, u0) if f_threads || h < 1 || j < 2 # we need to recompute k1 with multi-threading, even with h==1, since the # last iteration (j-1) may not be executed (iterations are re-orderable) - model.f!(k1, x0, û0, d̂0, p) + model.f!(k̇1, x0_Z̃, û0, d̂0, p) else - k1 .= @views K0[(1 + nk*(j-1)-nx):(nk*(j-1))] # k2 of of the last iter. j-1 + k̇1 .= @views K̇[(1 + nk*(j-1)-nx):(nk*(j-1))] # k2 of of the last iter. j-1 end if h < 1 || j ≥ Hp # j = Hp special case: u(k+Hp-1) = u(k+Hp) since Hc ≤ Hp implies Δu(k+Hp) = 0 @@ -1307,8 +1421,122 @@ function con_nonlinprogeq!( û0next = @views Û0[(1 + nu*j):(nu*(j+1))] f̂_input!(û0next, mpc.estim, model, x̂0next_Z̃, u0next) end - model.f!(k2, x0next_Z̃, û0next, d̂0next, p) - sdnext .= @. x0 - x0next_Z̃ + 0.5*Ts*(k1 + k2) + model.f!(k̇2, x0next_Z̃, û0next, d̂0next, p) + sdnext .= @. 
x0_Z̃ - x0next_Z̃ + 0.5*Ts*(k̇1 + k̇2) + end + return geq +end + + +@doc raw""" + con_nonlinprogeq!( + geq, X̂0, Û0, K̇, + mpc::PredictiveController, model::NonLinModel, transcription::OrthogonalCollocation, + U0, Z̃ + ) -> geq + +Nonlinear equality constrains for [`NonLinModel`](@ref) and [`OrthogonalCollocation`](@ref). + +The method mutates the `geq`, `X̂0`, `Û0` and `K̇` vectors in argument. The defects between +the deterministic state derivative at the ``n_o`` collocation points and the model dynamics +are computed by: +```math +\mathbf{s_k}(k+j) + = \mathbf{M_o} \begin{bmatrix} + \mathbf{k}_1(k+j) - \mathbf{x_0}(k+j) \\ + \mathbf{k}_2(k+j) - \mathbf{x_0}(k+j) \\ + \vdots \\ + \mathbf{k}_{n_o}(k+j) - \mathbf{x_0}(k+j) \\ \end{bmatrix} + - \begin{bmatrix} + \mathbf{k̇}_1(k+j) \\ + \mathbf{k̇}_2(k+j) \\ + \vdots \\ + \mathbf{k̇}_{n_o}(k+j) \end{bmatrix} +``` +for ``j = 0, 1, ... , H_p-1``, and knowing that the ``\mathbf{k}_i(k+j)`` vectors are +extracted from the decision variable `Z̃`. The ``\mathbf{x_0}`` vectors are the +deterministic state extracted from `Z̃`. The ``\mathbf{k̇}_i`` derivative for the ``i``th +collocation point is computed from the continuous-time function `model.f!` and: +```math +\mathbf{k̇}_i(k+j) = \mathbf{f}\Big(\mathbf{k}_i(k+j), \mathbf{û_0}(k+j), \mathbf{d̂}_i(k+j), \mathbf{p}\Big) +``` +The disturbed input ``\mathbf{û_0}(k+j)`` is defined in [`f̂_input!`](@ref). 
Based on the +normalized time ``τ_i ∈ [0, 1]``, measured disturbances are linearly interpolated: +```math +\mathbf{d̂}_i(k+j) = (1-τ_i)\mathbf{d̂_0}(k+j) + τ_i\mathbf{d̂_0}(k+j+1) +``` +The defects for the stochastic states ``\mathbf{s_s}`` are computed as in the +[`TrapezoidalCollocation`](@ref) method, and the ones for the continuity constraint of the +deterministic state trajectories are given by: +```math +\mathbf{s_c}(k+j+1) + = \mathbf{C_o} \begin{bmatrix} + \mathbf{k}_1(k+j) \\ + \mathbf{k}_2(k+j) \\ + \vdots \\ + \mathbf{k}_{n_o}(k+j) \end{bmatrix} + + λ_o \mathbf{x_0}(k+j) - \mathbf{x_0}(k+j+1) +``` +for ``j = 0, 1, ... , H_p-1``. The differentiation matrix ``\mathbf{M_o}``, the continuity +matrix ``\mathbf{C_o}`` and the coefficient ``λ_o`` are introduced in [`init_orthocolloc`](@ref). +""" +function con_nonlinprogeq!( + geq, X̂0, Û0, K̇, + mpc::PredictiveController, model::NonLinModel, transcription::OrthogonalCollocation, + U0, Z̃ +) + nu, nx̂, nd, nx = model.nu, mpc.estim.nx̂, model.nd, model.nx + Hp, Hc = mpc.Hp, mpc.Hc + nΔU, nX̂ = nu*Hc, nx̂*Hp + f_threads = transcription.f_threads + p = model.p + no, τ = transcription.no, transcription.τ + Mo, Co, λo = mpc.Mo, mpc.Co, mpc.λo + nk = get_nk(model, transcription) + nx̂_nk = nx̂ + nk + D̂0 = mpc.D̂0 + X̂0_Z̃, K_Z̃ = @views Z̃[(nΔU+1):(nΔU+nX̂)], Z̃[(nΔU+nX̂+1):(nΔU+nX̂+nk*Hp)] + di = mpc.estim.buffer.d + ΔK = similar(K̇) # TODO: remove this allocation + @threadsif f_threads for j=1:Hp + if j < 2 + x̂0_Z̃ = @views mpc.estim.x̂0[1:nx̂] + d̂0 = @views mpc.d0[1:nd] + else + x̂0_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-2)):(nx̂*(j-1))] + d̂0 = @views D̂0[(1 + nd*(j-2)):(nd*(j-1))] + end + k̇ = @views K̇[(1 + nk*(j-1)):(nk*j)] + Δk = @views ΔK[(1 + nk*(j-1)):(nk*j)] + k_Z̃ = @views K_Z̃[(1 + nk*(j-1)):(nk*j)] + d̂0next = @views D̂0[(1 + nd*(j-1)):(nd*j)] + x̂0next = @views X̂0[(1 + nx̂*(j-1)):(nx̂*j)] + x̂0next_Z̃ = @views X̂0_Z̃[(1 + nx̂*(j-1)):(nx̂*j)] + scnext = @views geq[(1 + nx̂_nk*(j-1) ):(nx̂_nk*(j-1) + nx)] + 
ssnext    = @views geq[(1 + nx̂_nk*(j-1) + nx):(nx̂_nk*(j-1) + nx̂)]
+        sk        = @views geq[(1 + nx̂_nk*(j-1) + nx̂):(nx̂_nk*j            )]
+        x0_Z̃      = @views x̂0_Z̃[1:nx]
+        x0next_Z̃  = @views x̂0next_Z̃[1:nx]
+        # ----------------- stochastic defects -----------------------------------------
+        xsnext    = @views x̂0next[nx+1:end]
+        xsnext_Z̃  = @views x̂0next_Z̃[nx+1:end]
+        fs!(x̂0next, mpc.estim, model, x̂0_Z̃)
+        ssnext .= @. xsnext - xsnext_Z̃
+        # ----------------- collocation constraint defects -----------------------------
+        u0  = @views U0[(1 + nu*(j-1)):(nu*j)]
+        û0  = @views Û0[(1 + nu*(j-1)):(nu*j)]
+        f̂_input!(û0, mpc.estim, model, x̂0_Z̃, u0)
+        for i=1:no
+            k̇i   = @views k̇[(1 + (i-1)*nx):(i*nx)]
+            Δki  = @views Δk[(1 + (i-1)*nx):(i*nx)]
+            ki_Z̃ = @views k_Z̃[(1 + (i-1)*nx):(i*nx)]
+            Δki .= @. ki_Z̃ - x0_Z̃
+            di .= (1-τ[i]).*d̂0 .+ τ[i].*d̂0next
+            model.f!(k̇i, ki_Z̃, û0, di, p)
+        end
+        sk .= mul!(sk, Mo, Δk) .- k̇
+        # ----------------- continuity constraint defects ------------------------------
+        scnext .= mul!(scnext, Co, k_Z̃) .+ (λo.*x0_Z̃) .- x0next_Z̃
     end
     return geq
 end
diff --git a/src/estimator/execute.jl b/src/estimator/execute.jl
index e1fa8b076..717b66b15 100644
--- a/src/estimator/execute.jl
+++ b/src/estimator/execute.jl
@@ -14,7 +14,7 @@ function remove_op!(estim::StateEstimator, ym, d, u=nothing)
 end
 
 @doc raw"""
-    f̂!(x̂0next, û0, k0, estim::StateEstimator, model::SimModel, x̂0, u0, d0) -> nothing
+    f̂!(x̂0next, û0, k, estim::StateEstimator, model::SimModel, x̂0, u0, d0) -> nothing
 
 Mutating state function ``\mathbf{f̂}`` of the augmented model.
 
@@ -27,8 +27,8 @@ the function returns the next state of the augmented model, as deviation vectors
 \end{aligned}
 ```
 where ``\mathbf{x̂_0}(k+1)`` is stored in `x̂0next` argument. The method mutates `x̂0next`,
-`û0` and `k0` in place. The argument `û0` stores the disturbed input of the augmented model
-``\mathbf{û_0}``, and `k0`, the intermediate stage values of `model.solver`, when applicable.
+`û0` and `k` in place.
The argument `û0` stores the disturbed input of the augmented model +``\mathbf{û_0}``, and `k`, the intermediate stage values of `model.solver`, when applicable. The model parameter `model.p` is not included in the function signature for conciseness. The operating points are handled inside ``\mathbf{f̂}``. See Extended Help for details on ``\mathbf{û_0, f̂}`` and ``\mathbf{ĥ}`` implementations. @@ -61,8 +61,8 @@ The operating points are handled inside ``\mathbf{f̂}``. See Extended Help for are computed by [`augment_model`](@ref) (almost always zeros in practice for [`NonLinModel`](@ref)). """ -function f̂!(x̂0next, û0, k0, estim::StateEstimator, model::SimModel, x̂0, u0, d0) - return f̂!(x̂0next, û0, k0, model, estim.As, estim.Cs_u, estim.f̂op, estim.x̂op, x̂0, u0, d0) +function f̂!(x̂0next, û0, k, estim::StateEstimator, model::SimModel, x̂0, u0, d0) + return f̂!(x̂0next, û0, k, model, estim.As, estim.Cs_u, estim.f̂op, estim.x̂op, x̂0, u0, d0) end @doc raw""" @@ -92,23 +92,23 @@ function f̂!(x̂0next, _ , _ , estim::StateEstimator, ::LinModel, x̂0, u0, d0) end """ - f̂!(x̂0next, û0, k0, model::SimModel, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) + f̂!(x̂0next, û0, k, model::SimModel, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) Same than [`f̂!`](@ref) for [`SimModel`](@ref) but without the `estim` argument. """ -function f̂!(x̂0next, û0, k0, model::SimModel, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) +function f̂!(x̂0next, û0, k, model::SimModel, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) # `@views` macro avoid copies with matrix slice operator e.g. [a:b] @views xd, xs = x̂0[1:model.nx], x̂0[model.nx+1:end] @views xdnext, xsnext = x̂0next[1:model.nx], x̂0next[model.nx+1:end] mul!(û0, Cs_u, xs) # ys_u = Cs_u*xs û0 .+= u0 # û0 = u0 + ys_u - f!(xdnext, k0, model, xd, û0, d0, model.p) + f!(xdnext, k, model, xd, û0, d0, model.p) mul!(xsnext, As, xs) x̂0next .+= f̂op .- x̂op return nothing end -#TODO: delete the following two generic functions and replace with linear eq. 
constraints +#TODO: delete the following generic functions and replace with linear eq. constraints """ fs!(x̂0next, estim::StateEstimator, model::SimModel, x̂0) -> nothing @@ -124,7 +124,14 @@ end @doc raw""" f̂_input!(û0, estim::StateEstimator, model::SimModel, x̂0, u0) -> nothing -Compute the disturbed input of the augmented model ``\mathbf{û_0}`` from `x̂0` and `u0`. +Compute the disturbed input ``\mathbf{û_0}`` of the augmented model from `x̂0` and `u0`. + +It mutates `û0` in place with the following equation: +```math +\mathbf{û_0}(k) = \mathbf{u_0}(k) + \mathbf{C_{s_u} x_s}(k) +``` +where ``\mathbf{C_{s_u}}`` is defined in [`init_estimstoch`](@ref), and ``\mathbf{x_s}`` is +extracted from `x̂0` as the last `estim.nxs` elements. """ function f̂_input!(û0, estim::StateEstimator, model::SimModel, x̂0, u0) xs = @views x̂0[model.nx+1:end] diff --git a/src/estimator/internal_model.jl b/src/estimator/internal_model.jl index 0a5e04524..24418af80 100644 --- a/src/estimator/internal_model.jl +++ b/src/estimator/internal_model.jl @@ -168,14 +168,14 @@ function matrices_internalmodel(model::SimModel{NT}) where NT<:Real end @doc raw""" - f̂!(x̂0next, _ , k0, estim::InternalModel, model::NonLinModel, x̂0, u0, d0) + f̂!(x̂0next, _ , k, estim::InternalModel, model::NonLinModel, x̂0, u0, d0) State function ``\mathbf{f̂}`` of [`InternalModel`](@ref) for [`NonLinModel`](@ref). It calls [`f!`](@ref) directly since this estimator does not augment the states. 
""" -function f̂!(x̂0next, _ , k0, estim::InternalModel, model::NonLinModel, x̂0, u0, d0) - f!(x̂0next, k0, model, x̂0, u0, d0, model.p) +function f̂!(x̂0next, _ , k, estim::InternalModel, model::NonLinModel, x̂0, u0, d0) + f!(x̂0next, k, model, x̂0, u0, d0, model.p) x̂0next .+= estim.f̂op .- estim.x̂op return nothing end @@ -275,7 +275,7 @@ Compute the current stochastic output estimation `ŷs` for [`InternalModel`](@r """ function correct_estimate!(estim::InternalModel, y0m, d0) ŷ0d = estim.buffer.ŷ - h!(ŷ0d, estim.model, estim.x̂d, d0, estim.model.p) + ĥ!(ŷ0d, estim, estim.model, estim.x̂d, d0) ŷs = estim.ŷs for j in eachindex(ŷs) # broadcasting was allocating unexpectedly, so we use a loop if j in estim.i_ym @@ -307,8 +307,8 @@ function update_estimate!(estim::InternalModel, _ , d0, u0) model = estim.model x̂d, x̂s, ŷs = estim.x̂d, estim.x̂s, estim.ŷs # -------------- deterministic model --------------------- - x̂dnext, k0 = estim.buffer.x̂, estim.buffer.k - f!(x̂dnext, k0, model, x̂d, u0, d0, model.p) + x̂dnext, û0, k = estim.buffer.x̂, estim.buffer.û, estim.buffer.k + f̂!(x̂dnext, û0, k, estim, estim.model, x̂d, u0, d0) x̂d .= x̂dnext # this also updates estim.x̂0 (they are the same object) # --------------- stochastic model ----------------------- x̂snext = estim.x̂snext diff --git a/src/estimator/kalman.jl b/src/estimator/kalman.jl index 26eae242b..d08e11b4c 100644 --- a/src/estimator/kalman.jl +++ b/src/estimator/kalman.jl @@ -862,7 +862,7 @@ function update_estimate!(estim::UnscentedKalmanFilter, y0m, d0, u0) x̂0corr, X̂0corr, P̂corr = estim.x̂0, estim.X̂0, estim.cov.P̂ Q̂, nx̂ = estim.cov.Q̂, estim.nx̂ γ, m̂, Ŝ = estim.γ, estim.m̂, estim.Ŝ - x̂0next, û0, k0 = estim.buffer.x̂, estim.buffer.û, estim.buffer.k + x̂0next, û0, k = estim.buffer.x̂, estim.buffer.û, estim.buffer.k # in-place operations to reduce allocations: P̂corr_temp = Hermitian(estim.buffer.P̂, :L) P̂corr_temp .= P̂corr @@ -875,7 +875,7 @@ function 
update_estimate!(estim::UnscentedKalmanFilter, y0m, d0, u0) X̂0next = X̂0corr for j in axes(X̂0next, 2) @views x̂0corr .= X̂0corr[:, j] - @views f̂!(X̂0next[:, j], û0, k0, estim, estim.model, x̂0corr, u0, d0) + @views f̂!(X̂0next[:, j], û0, k, estim, estim.model, x̂0corr, u0, d0) end x̂0next .= mul!(x̂0corr, X̂0next, m̂) X̄0next = estim.X̄0 @@ -1101,8 +1101,8 @@ function get_ekf_linfuncs(NT, model, i_ym, nint_u, nint_ym, jacobian) nu, ny, nd, nk = model.nu, model.ny, model.nd, model.nk nx̂ = model.nx + size(As, 1) x̂op = f̂op = zeros(nx̂) # not important for Jacobian computations - function f̂_ekf!(x̂0next, x̂0, û0, k0, u0, d0) - return f̂!(x̂0next, û0, k0, model, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) + function f̂_ekf!(x̂0next, x̂0, û0, k, u0, d0) + return f̂!(x̂0next, û0, k, model, As, Cs_u, f̂op, x̂op, x̂0, u0, d0) end ĥ_ekf!(ŷ0, x̂0, d0) = ĥ!(ŷ0, model, Cs_y, x̂0, d0) strict = Val(true) @@ -1110,15 +1110,15 @@ function get_ekf_linfuncs(NT, model, i_ym, nint_u, nint_ym, jacobian) ŷ0 = zeros(NT, ny) x̂0 = zeros(NT, nx̂) û0 = Cache(zeros(NT, nu)) - k0 = Cache(zeros(NT, nk)) + k = Cache(zeros(NT, nk)) cst_u0 = Constant(rand(NT, nu)) cst_d0 = Constant(rand(NT, nd)) F̂prep = prepare_jacobian( - f̂_ekf!, x̂0next, jacobian, x̂0, û0, k0, cst_u0, cst_d0; strict + f̂_ekf!, x̂0next, jacobian, x̂0, û0, k, cst_u0, cst_d0; strict ) Ĥprep = prepare_jacobian(ĥ_ekf!, ŷ0, jacobian, x̂0, cst_d0; strict) function linfuncF̂!(F̂, x̂0next, backend, x̂0, cst_u0, cst_d0) - return jacobian!(f̂_ekf!, x̂0next, F̂, F̂prep, backend, x̂0, û0, k0, cst_u0, cst_d0) + return jacobian!(f̂_ekf!, x̂0next, F̂, F̂prep, backend, x̂0, û0, k, cst_u0, cst_d0) end function linfuncĤ!(Ĥ, ŷ0, backend, x̂0, cst_d0) return jacobian!(ĥ_ekf!, ŷ0, Ĥ, Ĥprep, backend, x̂0, cst_d0) @@ -1253,9 +1253,9 @@ They predict the state `x̂` and covariance `P̂` with the same equations. 
See function predict_estimate_kf!(estim::Union{KalmanFilter, ExtendedKalmanFilter}, u0, d0, Â) x̂0corr, P̂corr = estim.x̂0, estim.cov.P̂ Q̂ = estim.cov.Q̂ - x̂0next, û0, k0 = estim.buffer.x̂, estim.buffer.û, estim.buffer.k + x̂0next, û0, k = estim.buffer.x̂, estim.buffer.û, estim.buffer.k # in-place operations to reduce allocations: - f̂!(x̂0next, û0, k0, estim, estim.model, x̂0corr, u0, d0) + f̂!(x̂0next, û0, k, estim, estim.model, x̂0corr, u0, d0) P̂corr_Âᵀ = estim.buffer.P̂ mul!(P̂corr_Âᵀ, P̂corr, Â') Â_P̂corr_Âᵀ = estim.buffer.Q̂ diff --git a/src/estimator/mhe/construct.jl b/src/estimator/mhe/construct.jl index a29a08d85..a0abc1d0e 100644 --- a/src/estimator/mhe/construct.jl +++ b/src/estimator/mhe/construct.jl @@ -1385,17 +1385,17 @@ function get_nonlinobj_op( myNaN = convert(JNT, NaN) J::Vector{JNT} = zeros(JNT, 1) V̂::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nV̂), zeros(JNT, nX̂) - k0::Vector{JNT} = zeros(JNT, nk) + k::Vector{JNT} = zeros(JNT, nk) û0::Vector{JNT}, ŷ0::Vector{JNT} = zeros(JNT, nu), zeros(JNT, nŷ) g::Vector{JNT} = zeros(JNT, ng) x̄::Vector{JNT} = zeros(JNT, nx̂) - function J!(Z̃, V̂, X̂0, û0, k0, ŷ0, g, x̄) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function J!(Z̃, V̂, X̂0, û0, k, ŷ0, g, x̄) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) return obj_nonlinprog!(x̄, estim, model, V̂, Z̃) end Z̃_J = fill(myNaN, nZ̃) # NaN to force update_predictions! 
at first call J_cache = ( - Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), + Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g), Cache(x̄), ) @@ -1490,23 +1490,23 @@ function get_nonlincon_oracle( strict = Val(true) myNaN, myInf = convert(JNT, NaN), convert(JNT, Inf) V̂::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nV̂), zeros(JNT, nX̂) - k0::Vector{JNT} = zeros(JNT, nk) + k::Vector{JNT} = zeros(JNT, nk) û0::Vector{JNT}, ŷ0::Vector{JNT} = zeros(JNT, nu), zeros(JNT, nŷ) g::Vector{JNT}, gi::Vector{JNT} = zeros(JNT, ng), zeros(JNT, ngi) λi::Vector{JNT} = rand(JNT, ngi) # -------------- inequality constraint: nonlinear oracle ------------------------- - function gi!(gi, Z̃, V̂, X̂0, û0, k0, ŷ0, g) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function gi!(gi, Z̃, V̂, X̂0, û0, k, ŷ0, g) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) gi .= @views g[i_g] return nothing end - function ℓ_gi(Z̃, λi, V̂, X̂0, û0, k0, ŷ0, g, gi) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function ℓ_gi(Z̃, λi, V̂, X̂0, û0, k, ŷ0, g, gi) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) gi .= @views g[i_g] return dot(λi, gi) end Z̃_∇gi = fill(myNaN, nZ̃) # NaN to force update_predictions! 
at first call - ∇gi_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), Cache(g)) + ∇gi_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g)) # temporarily "fill" the estimation window for the preparation of the gradient: estim.Nk[] = He ∇gi_prep = prepare_jacobian(gi!, gi, jac, Z̃_∇gi, ∇gi_cache...; strict) @@ -1515,7 +1515,7 @@ function get_nonlincon_oracle( ∇gi_structure = init_diffstructure(∇gi) if !isnothing(hess) ∇²gi_cache = ( - Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), Cache(g), Cache(gi) + Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g), Cache(gi) ) estim.Nk[] = He # see comment above ∇²gi_prep = prepare_hessian( diff --git a/src/estimator/mhe/execute.jl b/src/estimator/mhe/execute.jl index b558f3df0..58e1ca468 100644 --- a/src/estimator/mhe/execute.jl +++ b/src/estimator/mhe/execute.jl @@ -122,8 +122,8 @@ function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real nx̃ = nε + nx̂ info = Dict{Symbol, Any}() V̂, X̂0 = similar(estim.Y0m[1:nym*Nk]), similar(estim.X̂0[1:nx̂*Nk]) - û0, k0, ŷ0 = buffer.û, buffer.k, buffer.ŷ - V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim, model, estim.Z̃) + û0, k, ŷ0 = buffer.û, buffer.k, buffer.ŷ + V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim, model, estim.Z̃) x̂0arr = @views estim.Z̃[nx̃-nx̂+1:nx̃] x̄ = estim.x̂0arr_old - x̂0arr X̂0 = [x̂0arr; X̂0] @@ -190,17 +190,17 @@ function addinfo!( He = estim.He nV̂, nX̂, ng = He*nym, He*nx̂, length(con.i_g) V̂, X̂0 = zeros(NT, nV̂), zeros(NT, nX̂) - k0 = zeros(NT, nk) + k = zeros(NT, nk) û0, ŷ0 = zeros(NT, nu), zeros(NT, nŷ) g = zeros(NT, ng) x̄ = zeros(NT, nx̂) J_cache = ( - Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), + Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g), Cache(x̄), ) - function J!(Z̃, V̂, X̂0, û0, k0, ŷ0, g, x̄) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function J!(Z̃, V̂, X̂0, û0, k, ŷ0, g, x̄) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) 
return obj_nonlinprog!(x̄, estim, model, V̂, Z̃) end if !isnothing(estim.hessian) @@ -211,9 +211,9 @@ function addinfo!( # --- inequality constraint derivatives --- old_i_g = copy(estim.con.i_g) estim.con.i_g .= 1 # temporarily set all constraints as finite so g is entirely computed - ∇g_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0)) - function g!(g, Z̃, V̂, X̂0, û0, k0, ŷ0) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + ∇g_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0)) + function g!(g, Z̃, V̂, X̂0, û0, k, ŷ0) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) return nothing end g, ∇g = value_and_jacobian(g!, g, estim.jacobian, estim.Z̃, ∇g_cache...) @@ -234,9 +234,9 @@ function addinfo!( end λ = zeros(NT, ng) λ[old_i_g] .= λi - ∇²g_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), Cache(g)) - function ℓ_g(Z̃, λ, V̂, X̂0, û0, k0, ŷ0, g) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + ∇²g_cache = (Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g)) + function ℓ_g(Z̃, λ, V̂, X̂0, û0, k, ŷ0, g) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) return dot(λ, g) end ∇²ℓg = hessian(ℓ_g, estim.hessian, estim.Z̃, Constant(λ), ∇²g_cache...) 
@@ -513,9 +513,9 @@ function optim_objective!(estim::MovingHorizonEstimator{NT}) where NT<:Real estim.Z̃ .= JuMP.value.(Z̃var) end # --------- update estimate ----------------------- - û0, ŷ0, k0 = buffer.û, buffer.ŷ, buffer.k + û0, ŷ0, k = buffer.û, buffer.ŷ, buffer.k estim.Ŵ[1:nŵ*Nk] .= @views estim.Z̃[nx̃+1:nx̃+nŵ*Nk] # update Ŵ with optimum for warm-start - V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim, model, estim.Z̃) + V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim, model, estim.Z̃) x̂0next = @views X̂0[end-nx̂+1:end] estim.x̂0 .= x̂0next return estim.Z̃ @@ -550,7 +550,7 @@ function set_warmstart_mhe!(V̂, X̂0, estim::MovingHorizonEstimator{NT}, Z̃var nε, nx̂, nŵ, nZ̃, Nk = estim.nε, estim.nx̂, estim.nx̂, length(estim.Z̃), estim.Nk[] nx̃ = nε + nx̂ Z̃s = Vector{NT}(undef, nZ̃) # TODO: remove this allocation - û0, ŷ0, x̄, k0 = buffer.û, buffer.ŷ, buffer.x̂, buffer.k + û0, ŷ0, x̄, k = buffer.û, buffer.ŷ, buffer.x̂, buffer.k # --- slack variable ε --- estim.nε == 1 && (Z̃s[begin] = estim.Z̃[begin]) # --- arrival state estimate x̂0arr --- @@ -558,7 +558,7 @@ function set_warmstart_mhe!(V̂, X̂0, estim::MovingHorizonEstimator{NT}, Z̃var # --- process noise estimates Ŵ --- Z̃s[nx̃+1:end] = estim.Ŵ # verify definiteness of objective function: - V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim, model, Z̃s) + V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim, model, Z̃s) Js = obj_nonlinprog!(x̄, estim, model, V̂, Z̃s) if !isfinite(Js) Z̃s[nx̃+1:end] = 0 @@ -725,7 +725,7 @@ function predict_mhe!(V̂, X̂0, _ , _ , _ , estim::MovingHorizonEstimator, ::Li end @doc raw""" - predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim::MovingHorizonEstimator, model::SimModel, Z̃) -> V̂, X̂0 + predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim::MovingHorizonEstimator, model::SimModel, Z̃) -> V̂, X̂0 Compute the vectors when `model` is *not* a [`LinModel`](@ref). @@ -733,7 +733,7 @@ The function mutates `V̂`, `X̂0`, `û0` and `ŷ0` vector arguments. 
The augm [`f̂!`](@ref) and [`ĥ!`](@ref) is called recursively in a `for` loop from ``j=1`` to ``N_k``, and by adding the estimated process noise ``\mathbf{ŵ}``. """ -function predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim::MovingHorizonEstimator, model::SimModel, Z̃) +function predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim::MovingHorizonEstimator, model::SimModel, Z̃) nε, Nk = estim.nε, estim.Nk[] nu, nd, nx̂, nŵ, nym = model.nu, model.nd, estim.nx̂, estim.nx̂, estim.nym nx̃ = nε + nx̂ @@ -745,7 +745,7 @@ function predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim::MovingHorizonEstimator, u0 = @views estim.U0[ (1 + nu * (j-1)):(nu*j)] ŵ = @views Z̃[(1 + nx̃ + nŵ*(j-1)):(nx̃ + nŵ*j)] x̂0next = @views X̂0[(1 + nx̂ *(j-1)):(nx̂ *j)] - f̂!(x̂0next, û0, k0, estim, model, x̂0, u0, d0) + f̂!(x̂0next, û0, k, estim, model, x̂0, u0, d0) x̂0next .+= ŵ y0nextm = @views estim.Y0m[(1 + nym * (j-1)):(nym*j)] d0next = @views estim.D0[(1 + nd*j):(nd*(j+1))] @@ -764,7 +764,7 @@ function predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim::MovingHorizonEstimator, ŷ0m = @views ŷ0[estim.i_ym] V̂[(1 + nym*(j-1)):(nym*j)] .= y0m .- ŷ0m x̂0next = @views X̂0[(1 + nx̂ *(j-1)):(nx̂ *j)] - f̂!(x̂0next, û0, k0, estim, model, x̂0, u0, d0) + f̂!(x̂0next, û0, k, estim, model, x̂0, u0, d0) x̂0next .+= ŵ x̂0 = x̂0next end @@ -774,15 +774,15 @@ end """ - update_predictions!(V̂, X̂0, û0, k0, ŷ0, g, estim::MovingHorizonEstimator, Z̃) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim::MovingHorizonEstimator, Z̃) Update in-place the vectors for the predictions of `estim` estimator at decision vector `Z̃`. The method mutates all the arguments before `estim` argument.
""" -function update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim::MovingHorizonEstimator, Z̃) +function update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim::MovingHorizonEstimator, Z̃) model = estim.model - V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim, model, Z̃) + V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k, ŷ0, estim, model, Z̃) ε = getε(estim, Z̃) g = con_nonlinprog_mhe!(g, estim, model, X̂0, V̂, ε) return nothing diff --git a/src/estimator/mhe/legacy.jl b/src/estimator/mhe/legacy.jl index 31041867c..5cc1d2f0d 100644 --- a/src/estimator/mhe/legacy.jl +++ b/src/estimator/mhe/legacy.jl @@ -20,18 +20,18 @@ function get_optim_functions( myNaN = convert(JNT, NaN) J::Vector{JNT} = zeros(JNT, 1) V̂::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nV̂), zeros(JNT, nX̂) - k0::Vector{JNT} = zeros(JNT, nk) + k::Vector{JNT} = zeros(JNT, nk) û0::Vector{JNT}, ŷ0::Vector{JNT} = zeros(JNT, nu), zeros(JNT, nŷ) g::Vector{JNT} = zeros(JNT, ng) x̄::Vector{JNT} = zeros(JNT, nx̂) # --------------------- objective functions ------------------------------------------- - function Jfunc!(Z̃, V̂, X̂0, û0, k0, ŷ0, g, x̄) - update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function Jfunc!(Z̃, V̂, X̂0, û0, k, ŷ0, g, x̄) + update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) return obj_nonlinprog!(x̄, estim, model, V̂, Z̃) end Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call ∇J_context = ( - Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), + Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), Cache(g), Cache(x̄), ) @@ -57,12 +57,12 @@ function get_optim_functions( return ∇Jarg .= ∇J end # --------------------- inequality constraint functions ------------------------------- - function gfunc!(g, Z̃, V̂, X̂0, û0, k0, ŷ0) - return update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃) + function gfunc!(g, Z̃, V̂, X̂0, û0, k, ŷ0) + return update_prediction!(V̂, X̂0, û0, k, ŷ0, g, estim, Z̃) end Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! 
at first call ∇g_context = ( - Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0), + Cache(V̂), Cache(X̂0), Cache(û0), Cache(k), Cache(ŷ0), ) # temporarily enable all the inequality constraints for sparsity detection: estim.con.i_g .= true diff --git a/src/model/linearization.jl b/src/model/linearization.jl index 5b05d6267..7d0aad91b 100644 --- a/src/model/linearization.jl +++ b/src/model/linearization.jl @@ -163,7 +163,7 @@ function linearize!( nonlinmodel = model buffer = nonlinmodel.buffer # --- remove the operating points of the nonlinear model (typically zeros) --- - x0, u0, d0, k0 = buffer.x, buffer.u, buffer.d, buffer.k + x0, u0, d0, k = buffer.x, buffer.u, buffer.d, buffer.k x0 .= x .- nonlinmodel.xop u0 .= u .- nonlinmodel.uop d0 .= d .- nonlinmodel.dop @@ -175,7 +175,7 @@ function linearize!( y0 .+= nonlinmodel.yop y = y0 # --- compute the nonlinear model next state at operating points --- - f!(x0next, k0, nonlinmodel, x0, u0, d0, model.p) + f!(x0next, k, nonlinmodel, x0, u0, d0, model.p) x0next .+= nonlinmodel.fop xnext = x0next # xnext = f(x0,u0,d0) + fop - xop + xop = f(x0,u0,d0) + fop # --- recompute x since it was modified in buffer.x --- diff --git a/src/model/nonlinmodel.jl b/src/model/nonlinmodel.jl index 37f0b2e0c..4877a0f6a 100644 --- a/src/model/nonlinmodel.jl +++ b/src/model/nonlinmodel.jl @@ -298,15 +298,15 @@ LinModel(model::NonLinModel; kwargs...) = linearize(model; kwargs...) """ - f!(x0next, k0, model::NonLinModel, x0, u0, d0, p) + f!(x0next, k, model::NonLinModel, x0, u0, d0, p) Compute `x0next` using the [`DiffSolver`](@ref) in `model.solver` and `model.f!`. -The method mutates `x0next` and `k0` arguments in-place. The latter is used to store the +The method mutates `x0next` and `k` arguments in-place. The latter is used to store the intermediate stage values of the solver. 
""" -function f!(x0next, k0, model::NonLinModel, x0, u0, d0, p) - return solver_f!(x0next, k0, model.f!, model.Ts, model.solver, x0, u0, d0, p) +function f!(x0next, k, model::NonLinModel, x0, u0, d0, p) + return solver_f!(x0next, k, model.f!, model.Ts, model.solver, x0, u0, d0, p) end """ diff --git a/src/sim_model.jl b/src/sim_model.jl index 26b638b69..c02d87826 100644 --- a/src/sim_model.jl +++ b/src/sim_model.jl @@ -247,10 +247,10 @@ julia> x = updatestate!(model, [1]) """ function updatestate!(model::SimModel{NT}, u, d=model.buffer.empty) where NT <: Real validate_args(model::SimModel, d, u) - u0, d0, x0next, k0 = model.buffer.u, model.buffer.d, model.buffer.x, model.buffer.k + u0, d0, x0next, k = model.buffer.u, model.buffer.d, model.buffer.x, model.buffer.k u0 .= u .- model.uop d0 .= d .- model.dop - f!(x0next, k0, model, model.x0, u0, d0, model.p) + f!(x0next, k, model, model.x0, u0, d0, model.p) x0next .+= model.fop .- model.xop model.x0 .= x0next xnext = x0next diff --git a/test/1_test_sim_model.jl b/test/1_test_sim_model.jl index 1639460f1..e4c4fe8d5 100644 --- a/test/1_test_sim_model.jl +++ b/test/1_test_sim_model.jl @@ -327,16 +327,16 @@ end nonlinmodel3 = NonLinModel(f1!,h1!,Ts,1,1,1,1,solver=RungeKutta()) linmodel3 = linearize(nonlinmodel3; x, u, d) x0, u0, d0 = x - nonlinmodel3.xop, u - nonlinmodel3.uop, d - nonlinmodel3.dop - x0next, k0, y0 = nonlinmodel3.buffer.x, nonlinmodel3.buffer.k, nonlinmodel3.buffer.y + x0next, k, y0 = nonlinmodel3.buffer.x, nonlinmodel3.buffer.k, nonlinmodel3.buffer.y backend = AutoForwardDiff() - f_A(x0next, x0, k0) = ModelPredictiveControl.f!(x0next, k0, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) - f_Bu(x0next, u0, k0) = ModelPredictiveControl.f!(x0next, k0, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) - f_Bd(x0next, d0, k0) = ModelPredictiveControl.f!(x0next, k0, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) + f_A(x0next, x0, k) = ModelPredictiveControl.f!(x0next, k, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) + f_Bu(x0next, u0, 
k) = ModelPredictiveControl.f!(x0next, k, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) + f_Bd(x0next, d0, k) = ModelPredictiveControl.f!(x0next, k, nonlinmodel3, x0, u0, d0, nonlinmodel3.p) h_C(y0, x0) = ModelPredictiveControl.h!(y0, nonlinmodel3, x0, d0, nonlinmodel3.p) h_Dd(y0, d0) = ModelPredictiveControl.h!(y0, nonlinmodel3, x0, d0, nonlinmodel3.p) - A = jacobian(f_A, x0next, backend, x0, Cache(k0)) - Bu = jacobian(f_Bu, x0next, backend, u0, Cache(k0)) - Bd = jacobian(f_Bd, x0next, backend, d0, Cache(k0)) + A = jacobian(f_A, x0next, backend, x0, Cache(k)) + Bu = jacobian(f_Bu, x0next, backend, u0, Cache(k)) + Bd = jacobian(f_Bd, x0next, backend, d0, Cache(k)) C = jacobian(h_C, y0, backend, x0) Dd = jacobian(h_Dd, y0, backend, d0) @test linmodel3.A ≈ A diff --git a/test/3_test_predictive_control.jl b/test/3_test_predictive_control.jl index c5f09dbce..59d074260 100644 --- a/test/3_test_predictive_control.jl +++ b/test/3_test_predictive_control.jl @@ -810,6 +810,7 @@ end @test_throws ErrorException NonLinMPC(linmodel1, Hp=15, JE = (_,_,_)->0.0) @test_throws ErrorException NonLinMPC(linmodel1, Hp=15, gc = (_,_,_,_)->[0.0], nc=1) @test_throws ErrorException NonLinMPC(linmodel1, Hp=15, gc! 
= (_,_,_,_)->[0.0], nc=1) + @test_throws ArgumentError NonLinMPC(linmodel1, transcription=TrapezoidalCollocation()) @test_logs (:warn, Regex(".*")) NonLinMPC(linmodel1, Hp=15, JE=(Ue,_,_,_)->Ue) @test_logs (:warn, Regex(".*")) NonLinMPC(linmodel1, Hp=15, gc=(Ue,_,_,_,_)->Ue, nc=0) @@ -875,10 +876,11 @@ end @test isa(nmpc15.optim, JuMP.GenericModel{Float64}) # Ipopt does not support Float32 @test_throws ArgumentError NonLinMPC(nonlinmodel) - @test_throws ArgumentError NonLinMPC(nonlinmodel, transcription=TrapezoidalCollocation()) - @test_throws ArgumentError NonLinMPC(nonlinmodel, transcription=TrapezoidalCollocation(2)) - @test_throws ErrorException NonLinMPC(linmodel1, oracle=false, hessian=AutoFiniteDiff()) - @test_throws ArgumentError NonLinMPC(nonlinmodel, Wy=[1 0;0 1]) + @test_throws ArgumentError NonLinMPC(nonlinmodel, Hp=2, transcription=TrapezoidalCollocation()) + @test_throws ArgumentError NonLinMPC(nonlinmodel, Hp=2, transcription=TrapezoidalCollocation(2)) + @test_throws ErrorException NonLinMPC(linmodel1, Hp=2, oracle=false, hessian=AutoFiniteDiff()) + @test_throws ArgumentError NonLinMPC(nonlinmodel, Hp=2, Wy=[1 0;0 1]) + @test_throws ArgumentError OrthogonalCollocation(roots=:gausslobatto) end @testitem "NonLinMPC moves and getinfo (LinModel)" setup=[SetupMPCtests] begin @@ -993,8 +995,20 @@ end preparestate!(nmpc5_1, [0.0]) u = moveinput!(nmpc5_1, [1/0.001]) @test u ≈ [1.0] atol=5e-2 - nonlinmodel2 = NonLinModel{Float32}(f, h, 3000.0, 1, 2, 1, 1, solver=nothing, p=linmodel2) + + transcription = OrthogonalCollocation(0, 4) + nmpc6 = NonLinMPC(InternalModel(nonlinmodel_c); Nwt=[0], Hp=100, Hc=1, transcription) + preparestate!(nmpc6, [0.0]) + u = moveinput!(nmpc6, [1/0.001]) + @test u ≈ [1.0] atol=5e-2 + + transcription = OrthogonalCollocation(roots=:gausslegendre) + nmpc6_1 = NonLinMPC(InternalModel(nonlinmodel_c); Nwt=[0], Hp=100, Hc=1, transcription) + preparestate!(nmpc6_1, [0.0]) + u = moveinput!(nmpc6_1, [1/0.001]) + @test u ≈ [1.0] atol=5e-2 + 
nonlinmodel2 = NonLinModel{Float32}(f, h, 3000.0, 1, 2, 1, 1, solver=nothing, p=linmodel2) nmpc7 = NonLinMPC(nonlinmodel2, Hp=10) y = similar(nonlinmodel2.yop) ModelPredictiveControl.h!(y, nonlinmodel2, Float32[0,0], Float32[0], nonlinmodel2.p) @@ -1024,8 +1038,8 @@ end nonlinmodel, Nwt=[0], Hp=100, Hc=1, gradient=AutoFiniteDiff(), jacobian=AutoFiniteDiff(), - hessian=true), - ymax=[100], ymin=[-100] + hessian=AutoFiniteDiff() + ), ymax=[100], ymin=[-100] ) preparestate!(nmpc10, [0], [0]) u = moveinput!(nmpc10, [10], [0])