From d3c68c20c12f25adb09a743532a152f327b3c215 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 10 Mar 2025 10:22:45 +0100 Subject: [PATCH 01/28] Unrelated cleanups --- core/src/callback.jl | 1 - core/src/util.jl | 17 +---------------- core/src/validation.jl | 8 -------- 3 files changed, 1 insertion(+), 25 deletions(-) diff --git a/core/src/callback.jl b/core/src/callback.jl index 75cd5e87c..d5cfd568f 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -7,7 +7,6 @@ Returns the CallbackSet and the SavedValues for flow. function create_callbacks( parameters::Parameters, config::Config, - u0::Vector, saveat, )::Tuple{CallbackSet, SavedResults} (; starttime, basin, flow_boundary, level_boundary, user_demand) = parameters diff --git a/core/src/util.jl b/core/src/util.jl index 7ab5fd345..c5083ec23 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -457,17 +457,6 @@ function has_external_demand( return false, nothing end -function Base.get( - constraints::JuMP.Containers.DenseAxisArray, - node_id::NodeID, -)::Union{JuMP.ConstraintRef, Nothing} - if node_id in only(constraints.axes) - constraints[node_id] - else - nothing - end -end - """ Get the time interval between (flow) saves """ @@ -564,9 +553,6 @@ function get_variable_ref( (; basin) = p errors = false - # Only built here because it is needed to obtain indices - u = build_state_vector(p) - ref = if node_id.type == NodeType.Basin && variable == "level" PreallocationRef(basin.current_properties.current_level, node_id.idx) elseif variable == "flow_rate" && node_id.type != NodeType.FlowBoundary @@ -791,11 +777,10 @@ function get_jac_prototype(du0, u0, p, t0) end # Custom overloads -reduction_factor(x::GradientTracer, threshold::Real) = x +reduction_factor(x::GradientTracer, ::Real) = x low_storage_factor_resistance_node(storage, q::GradientTracer, inflow_id, outflow_id) = q relaxed_root(x::GradientTracer, threshold::Real) = x get_level_from_storage(basin::Basin, state_idx::Int, 
storage::GradientTracer) = storage -stop_declining_negative_storage!(du, u::Vector{<:GradientTracer}) = nothing @kwdef struct MonitoredBackTracking{B, V} linesearch::B = BackTracking() diff --git a/core/src/validation.jl b/core/src/validation.jl index 413aece6e..73a0b47bb 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -92,14 +92,6 @@ controllablefields(::Val{:PidControl}) = Set((:active, :target, :proportional, :integral, :derivative)) controllablefields(nodetype) = Set{Symbol}() -function variable_names(s::Any) - filter(x -> !(x in (:node_id, :control_state)), fieldnames(s)) -end -function variable_nt(s::Any) - names = variable_names(typeof(s)) - NamedTuple{names}((getfield(s, x) for x in names)) -end - "Get the right sort by function (by in `sort(x; by)`) given the Schema" function sort_by end # Not using any fallbacks to avoid forgetting to add the correct sorting. From 64091bc6de4d149d3467cf0aefcc867499a34cbb Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 10 Mar 2025 11:52:07 +0100 Subject: [PATCH 02/28] Introduce DifferentiationInterface --- Manifest.toml | 4 ++-- Project.toml | 2 ++ core/Project.toml | 4 ++++ core/src/Ribasim.jl | 6 +++++- core/src/config.jl | 5 ----- core/src/model.jl | 37 ++++++++++++++++++++++++++++++------- core/src/util.jl | 12 ------------ 7 files changed, 43 insertions(+), 27 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 50b99d19a..05648e29e 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.11.3" manifest_format = "2.0" -project_hash = "7591f8aefbf83678f96ece6aa98d0232f3c083ff" +project_hash = "f1f6d1cb011ce63d93f72a0bbd984e2839b32e42" [[deps.ADTypes]] git-tree-sha1 = "fb97701c117c8162e84dfcf80215caa904aef44f" @@ -2083,7 +2083,7 @@ weakdeps = ["Distributed"] DistributedExt = "Distributed" [[deps.Ribasim]] -deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", "CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", 
"Dates", "DiffEqBase", "DiffEqCallbacks", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "PreallocationTools", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"] +deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", "CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", "Dates", "DiffEqBase", "DiffEqCallbacks", "DifferentiationInterface", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "PreallocationTools", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"] path = "core" uuid = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" version = "2025.1.0" diff --git a/Project.toml b/Project.toml index 7dc910f42..264c52b1b 100644 --- a/Project.toml +++ b/Project.toml @@ -18,6 +18,7 @@ DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" FindFirstFunctions = "64ca27bc-2ba2-4a57-88aa-44e436879224" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" @@ -57,6 +58,7 @@ SQLite = 
"0aa819cd-b072-5ff4-a722-6bc24af294d9" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" diff --git a/core/Project.toml b/core/Project.toml index 2ed0ddaeb..5cdde1ede 100644 --- a/core/Project.toml +++ b/core/Project.toml @@ -17,6 +17,7 @@ DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" @@ -42,6 +43,7 @@ SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed" @@ -63,6 +65,7 @@ DataStructures = "0.18" Dates = "1" DiffEqBase = "6.155" DiffEqCallbacks = "3.6, 4" +DifferentiationInterface = "0.6.42" EnumX = "1.0" FiniteDiff = "2.21" Graphs = "1.9" @@ -89,6 +92,7 @@ SQLite = "1.5.1" SciMLBase = "2.36" SparseArrays = "1" SparseConnectivityTracer = "0.6.8" +SparseMatrixColorings = "0.4.13" Statistics = "1" StructArrays = "0.6.13, 0.7" TOML = "1" diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index bdade9934..d658c1ed4 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -14,6 
+14,10 @@ For more granular access, see: """ module Ribasim +using ADTypes: AutoForwardDiff, AutoFiniteDiff +using DifferentiationInterface: AutoSparse, prepare_jacobian, jacobian! +using SparseMatrixColorings: sparsity_pattern + # Algorithms for solving ODEs. using OrdinaryDiffEqCore: OrdinaryDiffEqCore, get_du, AbstractNLSolver using DiffEqBase: DiffEqBase, calculate_residuals! @@ -36,7 +40,7 @@ using SciMLBase: # Automatically detecting the sparsity pattern of the Jacobian of water_balance! # through operator overloading -using SparseConnectivityTracer: TracerSparsityDetector, jacobian_sparsity, GradientTracer +using SparseConnectivityTracer: GradientTracer # For efficient sparse computations using SparseArrays: SparseMatrixCSC, spzeros diff --git a/core/src/config.jl b/core/src/config.jl index 9c1415d49..ff5c7c68b 100644 --- a/core/src/config.jl +++ b/core/src/config.jl @@ -13,7 +13,6 @@ using DataStructures: DefaultDict using Dates: DateTime using Logging: LogLevel, Debug, Info, Warn, Error using ..Ribasim: Ribasim, isnode, nodetype -using ADTypes: AutoForwardDiff, AutoFiniteDiff using OrdinaryDiffEqCore: OrdinaryDiffEqAlgorithm, OrdinaryDiffEqNewtonAdaptiveAlgorithm using OrdinaryDiffEqNonlinearSolve: NLNewton using OrdinaryDiffEqLowOrderRK: Euler, RK4 @@ -310,10 +309,6 @@ function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm kwargs[:step_limiter!] = Ribasim.limit_flow! end - if function_accepts_kwarg(algotype, :autodiff) - kwargs[:autodiff] = solver.autodiff ? AutoForwardDiff() : AutoFiniteDiff() - end - algotype(; kwargs...) 
end diff --git a/core/src/model.jl b/core/src/model.jl index 5c9cf54ba..56d51fc94 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -28,6 +28,33 @@ struct Model{T} end end +function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) + backend = if solver.autodiff + AutoForwardDiff() + else + AutoFiniteDiff() + end + + if solver.sparse + backend = AutoSparse(backend) + end + + t = 0.0 + + # Activate all nodes to catch all possible state dependencies + p.all_nodes_active = true + prep = prepare_jacobian((du, u) -> water_balance(du, u, p, t), du, backend, u) + p.all_nodes_active = false + + jac_prototype = sparsity_pattern(prep) + + jac = + (J, u, p, t) -> + jacobian!((du, u) -> water_balance!(du, u, p, t), du, J, prep, backend, u) + + return jac_prototype, jac +end + function Model(config_path::AbstractString)::Model config = Config(config_path) if !valid_config(config) @@ -159,17 +186,13 @@ function Model(config::Config)::Model tstops = sort(unique(vcat(tstops...))) adaptive, dt = convert_dt(config.solver.dt) - jac_prototype = if config.solver.sparse - get_jac_prototype(du0, u0, parameters, t0) - else - nothing - end - RHS = ODEFunction(water_balance!; jac_prototype) + jac_prototype, jac = get_jac_eval(du0, u0, parameters, config.solver) + RHS = ODEFunction(water_balance!; jac_prototype, jac) prob = ODEProblem(RHS, u0, timespan, parameters) @debug "Setup ODEProblem." - callback, saved = create_callbacks(parameters, config, u0, saveat) + callback, saved = create_callbacks(parameters, config, saveat) @debug "Created callbacks." # Run water_balance! before initializing the integrator. 
This is because diff --git a/core/src/util.jl b/core/src/util.jl index c5083ec23..467ce8bbd 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -764,18 +764,6 @@ function relaxed_root(x, threshold) end end -function get_jac_prototype(du0, u0, p, t0) - p.all_nodes_active = true - jac_prototype = jacobian_sparsity( - (du, u) -> water_balance!(du, u, p, t0), - du0, - u0, - TracerSparsityDetector(), - ) - p.all_nodes_active = false - jac_prototype -end - # Custom overloads reduction_factor(x::GradientTracer, ::Real) = x low_storage_factor_resistance_node(storage, q::GradientTracer, inflow_id, outflow_id) = q From 732f0749f51da0a36464cc08b4abeb93dd2f67fe Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 10 Mar 2025 14:33:15 +0100 Subject: [PATCH 03/28] Fix most tests --- core/src/config.jl | 5 +++++ core/src/model.jl | 6 +++--- core/test/utils_test.jl | 16 ++++++++-------- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/core/src/config.jl b/core/src/config.jl index ff5c7c68b..77cbedba6 100644 --- a/core/src/config.jl +++ b/core/src/config.jl @@ -8,6 +8,7 @@ Ribasim.config is a submodule mainly to avoid name clashes between the configura """ module config +using ADTypes: AutoForwardDiff, AutoFiniteDiff using Configurations: Configurations, @option, from_toml, @type_alias using DataStructures: DefaultDict using Dates: DateTime @@ -309,6 +310,10 @@ function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm kwargs[:step_limiter!] = Ribasim.limit_flow! end + if function_accepts_kwarg(algotype, :autodiff) + kwargs[:autodiff] = solver.autodiff ? AutoForwardDiff() : AutoFiniteDiff() + end + algotype(; kwargs...) 
end diff --git a/core/src/model.jl b/core/src/model.jl index 56d51fc94..e8ebf2973 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -36,17 +36,17 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) end if solver.sparse - backend = AutoSparse(backend) + backend = AutoSparse(backend; sparsity_detector = TracerSparsityDetector()) end t = 0.0 # Activate all nodes to catch all possible state dependencies p.all_nodes_active = true - prep = prepare_jacobian((du, u) -> water_balance(du, u, p, t), du, backend, u) + prep = prepare_jacobian((du, u) -> water_balance!(du, u, p, t), du, backend, u) p.all_nodes_active = false - jac_prototype = sparsity_pattern(prep) + jac_prototype = solver.sparse ? sparsity_pattern(prep) : nothing jac = (J, u, p, t) -> diff --git a/core/test/utils_test.jl b/core/test/utils_test.jl index c2230cad3..44feb0e10 100644 --- a/core/test/utils_test.jl +++ b/core/test/utils_test.jl @@ -257,16 +257,16 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml") - cfg = Ribasim.Config(toml_path) - db_path = Ribasim.database_path(cfg) + config = Ribasim.Config(toml_path) + db_path = Ribasim.database_path(config) db = SQLite.DB(db_path) - p = Ribasim.Parameters(db, cfg) + p = Ribasim.Parameters(db, config) close(db) t0 = 0.0 u0 = Ribasim.build_state_vector(p) du0 = copy(u0) - jac_prototype = Ribasim.get_jac_prototype(du0, u0, p, t0) + jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) # rows, cols, _ = findnz(jac_prototype) #! 
format: off @@ -279,15 +279,15 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/pid_control/ribasim.toml") - cfg = Ribasim.Config(toml_path) - db_path = Ribasim.database_path(cfg) + config = Ribasim.Config(toml_path) + db_path = Ribasim.database_path(config) db = SQLite.DB(db_path) - p = Ribasim.Parameters(db, cfg) + p = Ribasim.Parameters(db, config) close(db) u0 = Ribasim.build_state_vector(p) du0 = copy(u0) - jac_prototype = Ribasim.get_jac_prototype(du0, u0, p, t0) + jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) #! format: off rows_expected = [1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2] From b5a569383425383855267271c11a5e0b5167613d Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 10 Mar 2025 14:41:20 +0100 Subject: [PATCH 04/28] bring back TracerSparsityDetector --- core/src/Ribasim.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index d658c1ed4..2c2080e78 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -40,7 +40,7 @@ using SciMLBase: # Automatically detecting the sparsity pattern of the Jacobian of water_balance! 
# through operator overloading -using SparseConnectivityTracer: GradientTracer +using SparseConnectivityTracer: GradientTracer, TracerSparsityDetector # For efficient sparse computations using SparseArrays: SparseMatrixCSC, spzeros From 387d3d4eb9557d3562e715009d087d1f1c864bc6 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 10 Mar 2025 15:53:03 +0100 Subject: [PATCH 05/28] Most comments adressed --- core/src/Ribasim.jl | 7 ++++--- core/src/config.jl | 6 +++++- core/src/model.jl | 17 ++++++++--------- core/src/read.jl | 16 ++++++++-------- core/src/util.jl | 6 ++++-- core/test/config_test.jl | 2 +- 6 files changed, 30 insertions(+), 24 deletions(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index 2c2080e78..a3e5424ad 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -15,8 +15,8 @@ For more granular access, see: module Ribasim using ADTypes: AutoForwardDiff, AutoFiniteDiff -using DifferentiationInterface: AutoSparse, prepare_jacobian, jacobian! -using SparseMatrixColorings: sparsity_pattern +using DifferentiationInterface: AutoSparse, Constant, prepare_jacobian, jacobian! +using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # Algorithms for solving ODEs. using OrdinaryDiffEqCore: OrdinaryDiffEqCore, get_du, AbstractNLSolver @@ -69,7 +69,8 @@ using DataInterpolations: integral, AbstractInterpolation, ExtrapolationType -using DataInterpolations.ExtrapolationType: Constant, Periodic, Extension, Linear +using DataInterpolations.ExtrapolationType: + Constant as ConstantExtrapolation, Periodic, Extension, Linear # Modeling language for Mathematical Optimization. 
# Used for allocation, see the docs: https://ribasim.org/dev/allocation.html diff --git a/core/src/config.jl b/core/src/config.jl index 77cbedba6..38a9aaa6a 100644 --- a/core/src/config.jl +++ b/core/src/config.jl @@ -25,6 +25,7 @@ using OrdinaryDiffEqRosenbrock: Rosenbrock23, Rodas4P, Rodas5P export Config, Solver, Results, Logging, Toml export algorithm, camel_case, + get_ad_type, snake_case, input_path, database_path, @@ -290,6 +291,9 @@ function function_accepts_kwarg(f, kwarg)::Bool return false end +get_ad_type(solver::Solver) = + solver.autodiff ? AutoForwardDiff(; tag = :Ribasim) : AutoFiniteDiff() + "Create an OrdinaryDiffEqAlgorithm from solver config" function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm algotype = get(algorithms, solver.algorithm, nothing) @@ -311,7 +315,7 @@ function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm end if function_accepts_kwarg(algotype, :autodiff) - kwargs[:autodiff] = solver.autodiff ? AutoForwardDiff() : AutoFiniteDiff() + kwargs[:autodiff] = get_ad_type(solver) end algotype(; kwargs...) diff --git a/core/src/model.jl b/core/src/model.jl index e8ebf2973..e55ee330d 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -29,14 +29,14 @@ struct Model{T} end function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) - backend = if solver.autodiff - AutoForwardDiff() - else - AutoFiniteDiff() - end + backend = get_ad_type(solver) if solver.sparse - backend = AutoSparse(backend; sparsity_detector = TracerSparsityDetector()) + backend = AutoSparse( + backend; + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm(), + ) end t = 0.0 @@ -48,9 +48,8 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) jac_prototype = solver.sparse ? 
sparsity_pattern(prep) : nothing - jac = - (J, u, p, t) -> - jacobian!((du, u) -> water_balance!(du, u, p, t), du, J, prep, backend, u) + jac(J, u, p, t) = + jacobian!((du, u) -> water_balance!(du, u, p, t), du, J, prep, backend, u) return jac_prototype, jac end diff --git a/core/src/read.jl b/core/src/read.jl index f5ccaa575..9dbeb06f8 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -195,7 +195,7 @@ function parse_static_and_time( [val, val], trivial_timespan; cache_parameters = true, - extrapolation = Constant, + extrapolation = ConstantExtrapolation, ) end getfield(out, parameter_name)[node_id.idx] = val @@ -955,7 +955,7 @@ function Basin(db::DB, config::Config, graph::MetaGraph)::Basin LinearInterpolation.( area, level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Extension, cache_parameters = true, ) @@ -1324,7 +1324,7 @@ function user_demand_static!( return_factor[user_demand_idx] = LinearInterpolation( fill(first_row.return_factor, 2), return_factor_old.t; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) min_level[user_demand_idx] = first_row.min_level @@ -1337,7 +1337,7 @@ function user_demand_static!( demand_itp[user_demand_idx][demand_priority_idx] = LinearInterpolation( fill(demand_row, 2), demand_itp_old.t; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) demand[user_demand_idx, demand_priority_idx] = demand_row @@ -1557,7 +1557,7 @@ function push_constant_interpolation!( itp = ConstantInterpolation( output, input; - extrapolation = cyclic_time ? Periodic : Constant, + extrapolation = cyclic_time ? 
Periodic : ConstantExtrapolation, cache_parameters = true, ) push!(constant_interpolations, itp) @@ -1568,7 +1568,7 @@ function static_lookup(lookup_index::Int)::IndexLookup return ConstantInterpolation( [lookup_index], [0.0]; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) end @@ -1607,7 +1607,7 @@ function Subgrid(db::DB, config::Config, basin::Basin)::Subgrid hh_itp = LinearInterpolation( subgrid_level, basin_level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) @@ -1656,7 +1656,7 @@ function Subgrid(db::DB, config::Config, basin::Basin)::Subgrid hh_itp = LinearInterpolation( subgrid_level, basin_level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) diff --git a/core/src/util.jl b/core/src/util.jl index 467ce8bbd..9d354dd87 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -84,7 +84,7 @@ function get_scalar_interpolation( return interpolation_type( parameter, times; - extrapolation = cyclic_time ? Periodic : Constant, + extrapolation = cyclic_time ? 
Periodic : ConstantExtrapolation, cache_parameters = true, ) end @@ -125,7 +125,7 @@ function qh_interpolation( return LinearInterpolation( flow_rate, level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) @@ -295,6 +295,8 @@ function reduction_factor(x::T, threshold::Real)::T where {T <: Real} end end +# SparseConnectivityTracer overloads + function get_low_storage_factor( current_low_storage_factor::Vector{T}, id::NodeID, diff --git a/core/test/config_test.jl b/core/test/config_test.jl index 3eccac95a..864b3a8a6 100644 --- a/core/test/config_test.jl +++ b/core/test/config_test.jl @@ -57,7 +57,7 @@ end Solver(; algorithm = "DoesntExist"), ) @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF", autodiff = true))) == - AutoForwardDiff() + AutoForwardDiff(; tag = :Ribasim) @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF", autodiff = false))) == AutoFiniteDiff() @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF"))) == AutoFiniteDiff() From 46af5f43e4fb17cf70c04f3ec1520225a88d3410 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Tue, 11 Mar 2025 14:49:17 +0100 Subject: [PATCH 06/28] Subdividing parameters first part --- core/src/Ribasim.jl | 12 +- core/src/allocation_init.jl | 38 +++--- core/src/callback.jl | 8 +- core/src/model.jl | 45 +++++-- core/src/parameter.jl | 239 +++++++++++++----------------------- core/src/read.jl | 124 +++++++------------ core/src/solve.jl | 65 +++++----- core/src/util.jl | 148 ++++++++++------------ core/src/validation.jl | 2 +- 9 files changed, 291 insertions(+), 390 deletions(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index a3e5424ad..a0f534dc9 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -14,8 +14,9 @@ For more granular access, see: """ module Ribasim +# Requirements for automatic differentiation using ADTypes: AutoForwardDiff, AutoFiniteDiff -using DifferentiationInterface: AutoSparse, Constant, 
prepare_jacobian, jacobian! +using DifferentiationInterface: AutoSparse, Constant, Cache, prepare_jacobian, jacobian! using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # Algorithms for solving ODEs. @@ -48,15 +49,6 @@ using SparseArrays: SparseMatrixCSC, spzeros # Linear algebra using LinearAlgebra: mul! -# PreallocationTools is used because the RHS function (water_balance!) gets called with different input types -# for u, du: -# - Float64 for normal calls -# - Dual numbers for automatic differentiation with ForwardDiff -# - GradientTracer for automatic Jacobian sparsity detection with SparseConnectivityTracer -# The computations inside the rhs go trough preallocated arrays of the required type which are created by LazyBufferCache. -# Retrieving a cache from a LazyBufferCache looks like indexing: https://docs.sciml.ai/PreallocationTools/stable/#LazyBufferCache -using PreallocationTools: LazyBufferCache - # Interpolation functionality, used for e.g. # basin profiles and TabulatedRatingCurve. See also the node # references in the docs. diff --git a/core/src/allocation_init.jl b/core/src/allocation_init.jl index e7c3ca6a2..0fab2a483 100644 --- a/core/src/allocation_init.jl +++ b/core/src/allocation_init.jl @@ -4,10 +4,10 @@ which is a type of sparse arrays that in this case takes NodeID in stead of Int E.g. capacity[(node_a, node_b)] gives the capacity of link (node_a, node_b). 
""" function get_subnetwork_capacity( - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} - (; graph) = p + (; graph) = p_non_diff node_ids_subnetwork = graph[].node_ids[subnetwork_id] dict = Dict{Tuple{NodeID, NodeID}, Float64}() @@ -28,7 +28,7 @@ function get_subnetwork_capacity( # Find flow constraints for this link if is_flow_constraining(id_src.type) - node_src = getfield(p, graph[id_src].type) + node_src = getfield(p_non_diff, graph[id_src].type) max_flow_rate = node_src.max_flow_rate[id_src.idx] capacity_node_src = @@ -37,7 +37,7 @@ function get_subnetwork_capacity( capacity_link = min(capacity_link, capacity_node_src) end if is_flow_constraining(id_dst.type) - node_dst = getfield(p, graph[id_dst].type) + node_dst = getfield(p_non_diff, graph[id_dst].type) max_flow_rate = node_dst.max_flow_rate[id_dst.idx] capacity_node_dst = @@ -72,10 +72,10 @@ and subnetwork allocation network (defined by their capacity objects). """ function add_subnetwork_connections!( capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; allocation) = p + (; allocation) = p_non_diff (; main_network_connections) = allocation # Add the connections to the main network @@ -100,11 +100,11 @@ dictionary wrapper. The keys of this dictionary define the which links are used in the allocation optimization problem. 
""" function get_capacity( - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} - capacity = get_subnetwork_capacity(p, subnetwork_id) - add_subnetwork_connections!(capacity, p, subnetwork_id) + capacity = get_subnetwork_capacity(p_non_diff, subnetwork_id) + add_subnetwork_connections!(capacity, p_non_diff, subnetwork_id) return capacity end @@ -176,10 +176,10 @@ flow over link <= link capacity function add_constraints_capacity!( problem::JuMP.Model, capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; main_network_connections) = p.allocation + (; main_network_connections) = p_non_diff.allocation main_network_source_links = main_network_connections[subnetwork_id] F = problem[:F] @@ -291,10 +291,10 @@ sum(flows out of basin) == sum(flows into basin) + flow from storage and vertica """ function add_constraints_conservation_node!( problem::JuMP.Model, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; graph) = p + (; graph) = p_non_diff F = problem[:F] F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] @@ -405,7 +405,7 @@ end Construct the allocation problem for the current subnetwork as a JuMP model. 
""" function allocation_problem( - p::Parameters, + p_non_diff::ParametersNonDiff, sources::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource}, capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, subnetwork_id::Int32, @@ -427,8 +427,8 @@ function allocation_problem( add_variables_flow_buffer!(problem, sources) # Add constraints to problem - add_constraints_conservation_node!(problem, p, subnetwork_id) - add_constraints_capacity!(problem, capacity, p, subnetwork_id) + add_constraints_conservation_node!(problem, p_non_diff, subnetwork_id) + add_constraints_capacity!(problem, capacity, p_non_diff, subnetwork_id) add_constraints_boundary_source!(problem, sources) add_constraints_main_network_source!(problem, sources) add_constraints_user_source!(problem, sources) @@ -443,17 +443,17 @@ Construct the JuMP.jl problem for allocation. """ function AllocationModel( subnetwork_id::Int32, - p::Parameters, + p_non_diff::ParametersNonDiff, sources::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource}, Δt_allocation::Float64, )::AllocationModel - capacity = get_capacity(p, subnetwork_id) + capacity = get_capacity(p_non_diff, subnetwork_id) source_priorities = unique(source.source_priority for source in values(sources)) sources = OrderedDict( link => source for (link, source) in sources if source.subnetwork_id == subnetwork_id ) - problem = allocation_problem(p, sources, capacity, subnetwork_id) + problem = allocation_problem(p_non_diff, sources, capacity, subnetwork_id) flow = JuMP.Containers.SparseAxisArray(Dict(only(problem[:F].axes) .=> 0.0)) return AllocationModel(; diff --git a/core/src/callback.jl b/core/src/callback.jl index d5cfd568f..76cf13c1f 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -456,11 +456,11 @@ function save_solver_stats(u, t, integrator) end function check_negative_storage(u, t, integrator)::Nothing - (; basin) = integrator.p - (; node_id, current_properties) = basin - (; current_storage) = current_properties + 
(; p_non_diff, p_diff, p_mutable) = p + node_id = p_non_diff.basin + (; current_storage) = p_diff.basin_cache du = get_du(integrator) - set_current_basin_properties!(du, u, integrator.p, t) + set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) current_storage = current_storage[du] errors = false diff --git a/core/src/model.jl b/core/src/model.jl index e55ee330d..55fedac17 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -29,6 +29,7 @@ struct Model{T} end function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) + (; p_non_diff, p_diff, p_mutable) = p backend = get_ad_type(solver) if solver.sparse @@ -42,14 +43,33 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) t = 0.0 # Activate all nodes to catch all possible state dependencies - p.all_nodes_active = true - prep = prepare_jacobian((du, u) -> water_balance!(du, u, p, t), du, backend, u) - p.all_nodes_active = false + p_mutable.all_nodes_active = true + prep = prepare_jacobian( + water_balance!, + du, + backend, + u, + Constant(p_non_diff), + Cache(p_diff), + Constant(p_mutable), + Constant(t), + ) + p_mutable.all_nodes_active = false jac_prototype = solver.sparse ? 
sparsity_pattern(prep) : nothing - jac(J, u, p, t) = - jacobian!((du, u) -> water_balance!(du, u, p, t), du, J, prep, backend, u) + jac(J, u, p, t) = jacobian!( + water_balance!, + du, + J, + prep, + backend, + u, + Constant(p.p_non_diff), + Cache(p.p_diff), + Constant(p.p_mutable), + Constant(t), + ) return jac_prototype, jac end @@ -87,11 +107,12 @@ function Model(config::Config)::Model t0 = zero(t_end) timespan = (t0, t_end) - local parameters, tstops + local parameters, p_non_diff, p_diff, p_mutable, tstops try parameters = Parameters(db, config) + (; p_non_diff, p_diff, p_mutable) = parameters - if !valid_discrete_control(parameters, config) + if !valid_discrete_control(parameters.p_non_diff, config) error("Invalid discrete control state definition(s).") end @@ -108,7 +129,7 @@ function Model(config::Config)::Model pump, tabulated_rating_curve, user_demand, - ) = parameters + ) = p_non_diff if !valid_pid_connectivity(pid_control.node_id, pid_control.listen_node_id, graph) error("Invalid PidControl connectivity.") end @@ -165,20 +186,20 @@ function Model(config::Config)::Model end @debug "Read database into memory." - u0 = build_state_vector(parameters) + u0 = build_state_vector(parameters.p_non_diff) du0 = zero(u0) - @reset parameters.u_prev_saveat = zero(u0) + @reset p_non_diff.u_prev_saveat = zero(u0) # The Solver algorithm alg = algorithm(config.solver; u0) # Synchronize level with storage - set_current_basin_properties!(du0, u0, parameters, t0) + set_current_basin_properties!(du0, u0, p_non_diff, p_diff, p_mutable, t0) # Previous level is used to estimate the minimum level that was attained during a time step # in limit_flow! 
- parameters.basin.level_prev .= parameters.basin.current_properties.current_level[u0] + p_non_diff.basin.level_prev .= p_diff.cache_basin.current_level saveat = convert_saveat(config.solver.saveat, t_end) saveat isa Float64 && push!(tstops, range(0, t_end; step = saveat)) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 16e042670..852d85d7e 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -126,20 +126,6 @@ const ScalarInterpolation = LinearInterpolation{ const IndexLookup = ConstantInterpolation{Vector{Int64}, Vector{Float64}, Vector{Float64}, Int64, (1,)} -set_zero!(v) = v .= zero(eltype(v)) -const Cache = LazyBufferCache{Returns{Int}, typeof(set_zero!)} - -""" -Cache for in place computations within water_balance!, with different eltypes -for different situations: -- Symbolics.Num for Jacobian sparsity detection -- ForwardDiff.Dual for automatic differentiation -- Float64 for normal calls - -The caches are always initialized with zeros -""" -cache(len::Int)::Cache = LazyBufferCache(Returns(len); initializer! = set_zero!) - @eval @enumx AllocationSourceType $(fieldnames(Ribasim.config.SourcePriority)...) # Support creating a AllocationSourceTuple enum instance from a symbol @@ -362,20 +348,19 @@ abstract type AbstractDemandNode <: AbstractParameterNode end """ Caches of current basin properties """ -struct CurrentBasinProperties - current_storage::Cache +struct BasinCache{T} + current_storage::Vector{T} # Low storage factor for reducing flows out of drying basins # given the current storages - current_low_storage_factor::Cache - current_level::Cache - current_area::Cache - current_cumulative_precipitation::Cache - current_cumulative_drainage::Cache - function CurrentBasinProperties(n) - new((cache(n) for _ in 1:6)...) 
- end + current_low_storage_factor::Vector{T} + current_level::Vector{T} + current_area::Vector{T} + current_cumulative_precipitation::Vector{T} + current_cumulative_drainage::Vector{T} end +BasinCache(n) = BasinCache((zeros(n) for _ in 1:6)...) + @kwdef struct ConcentrationData # Config setting to enable/disable evaporation of mass evaporate_mass::Bool = true @@ -447,8 +432,6 @@ of vectors or Arrow Tables, and is added to avoid type instabilities. cumulative_drainage::Vector{Float64} = zeros(length(node_id)) cumulative_precipitation_saveat::Vector{Float64} = zeros(length(node_id)) cumulative_drainage_saveat::Vector{Float64} = zeros(length(node_id)) - # Cache this to avoid recomputation - current_properties::CurrentBasinProperties = CurrentBasinProperties(length(node_id)) # Discrete values for interpolation storage_to_level::Vector{ LinearInterpolationIntInv{ @@ -618,7 +601,6 @@ inflow_link: incoming flow link metadata outflow_link: outgoing flow link metadata The ID of the source node is always the ID of the Pump node active: whether this node is active and thus contributes flow -flow_rate_cache: target flow rate flow_rate: timeseries for transient flow data if available min_flow_rate: The minimal flow rate of the pump max_flow_rate: The maximum flow rate of the pump @@ -632,7 +614,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl inflow_link::Vector{LinkMetadata} = [] outflow_link::Vector{LinkMetadata} = [] active::Vector{Bool} = fill(true, length(node_id)) - flow_rate_cache::Cache = cache(length(node_id)) flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] min_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] max_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] @@ -641,40 +622,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} continuous_control_type::Vector{ContinuousControlType.T} = fill(ContinuousControlType.None, 
length(node_id)) - - function Pump( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - if valid_flow_rates(node_id, flow_rate[Float64[]], control_mapping) - return new( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - else - error("Invalid Pump flow rate(s).") - end - end end """ @@ -684,7 +631,6 @@ inflow_link: incoming flow link metadata. outflow_link: outgoing flow link metadata. The ID of the source node is always the ID of the Outlet node active: whether this node is active and thus contributes flow -flow_rate_cache: target flow rate flow_rate: timeseries for transient flow data if available min_flow_rate: The minimal flow rate of the outlet max_flow_rate: The maximum flow rate of the outlet @@ -698,7 +644,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl inflow_link::Vector{LinkMetadata} = [] outflow_link::Vector{LinkMetadata} = [] active::Vector{Bool} = fill(true, length(node_id)) - flow_rate_cache::Cache = cache(length(node_id)) flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] min_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] max_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] @@ -707,40 +652,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} = Dict() continuous_control_type::Vector{ContinuousControlType.T} = fill(ContinuousControlType.None, length(node_id)) - - function Outlet( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - 
continuous_control_type, - ) - if valid_flow_rates(node_id, flow_rate[Float64[]], control_mapping) - return new( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - else - error("Invalid Outlet flow rate(s).") - end - end end """ @@ -750,30 +661,47 @@ node_id: node ID of the Terminal node node_id::Vector{NodeID} end -""" -A variant on `Base.Ref` where the source array is a vector that is possibly wrapped in a ForwardDiff.LazyBufferCache, -or a reference to the state derivative vector du. -Retrieve value with get_value(ref::PreallocationRef, val) where `val` determines the return type. -""" -struct PreallocationRef - vector::Cache - idx::Int - from_du::Bool - function PreallocationRef(vector::Cache, idx::Int; from_du = false) - new(vector, idx, from_du) +@kwdef struct ParametersDiff{T} <: AbstractVector{T} + cache_basin::BasinCache{T} = BasinCache(0) + cache_flow_rate_pump::Vector{T} = Float64[] + cache_flow_rate_outlet::Vector{T} = Float64[] + cache_error_pid_control::Vector{T} = Float64[] +end + +@enumx ParameterDiffType flow_rate_pump flow_rate_outlet basin_level + +@kwdef struct ParametersDiffRef + type::ParameterDiffType.T = ParameterDiffType.pump_flow_rate + idx::Int = 0 + from_du::Bool = false +end + +function get_vector(type::ParameterDiffType.T, p_diff::ParametersDiff) + if type == ParameterDiffType.flow_rate_pump + p_diff.cache_flow_rate_pump + elseif type == ParameterDiffType.flow_rate_outlet + p_diff.cache_flow_rate_outlet + else # type == ParameterDiffType.basin_level + p_diff.cache_basin.current_level end end -get_value(ref::PreallocationRef, du) = ref.from_du ? 
du[ref.idx] : ref.vector[du][ref.idx] +function get_value(ref::ParametersDiffRef, p_diff::ParametersDiff, du::Vector) + if ref.from_du + du[ref.idx] + else + get_vector(ref.type, p_diff)[ref.idx] + end +end -function set_value!(ref::PreallocationRef, value, du)::Nothing - ref.vector[du][ref.idx] = value - return nothing +function set_value!(ref::ParametersDiffRef, p_diff::ParametersDiff, value) + @assert !ref.from_du + get_vector(ref.type, p_diff)[ref.idx] = value end @kwdef struct SubVariable listen_node_id::NodeID - variable_ref::PreallocationRef + variable_ref::ParametersDiffRef variable::String weight::Float64 look_ahead::Float64 @@ -830,7 +758,7 @@ end node_id::Vector{NodeID} compound_variable::Vector{CompoundVariable} controlled_variable::Vector{String} - target_ref::Vector{PreallocationRef} + target_ref::Vector{ParametersDiffRef} func::Vector{ScalarInterpolation} end @@ -846,20 +774,18 @@ target_ref: reference to the controlled flow_rate value proportional: proportionality coefficient error integral: proportionality coefficient error integral derivative: proportionality coefficient error derivative -error: the current error; basin_target - current_level -dictionary from (node_id, control_state) to target flow rate +control_mapping: dictionary from (node_id, control_state) to target flow rate """ @kwdef struct PidControl <: AbstractParameterNode node_id::Vector{NodeID} active::Vector{Bool} listen_node_id::Vector{NodeID} target::Vector{ScalarInterpolation} - target_ref::Vector{PreallocationRef} + target_ref::Vector{ParametersDiffRef} = + Vector{ParametersDiffRef}(undef, length(node_id)) proportional::Vector{ScalarInterpolation} integral::Vector{ScalarInterpolation} derivative::Vector{ScalarInterpolation} - error::Cache = cache(length(node_id)) - controlled_basins::Vector{NodeID} control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} end @@ -1011,45 +937,54 @@ function StateRanges(u_ids::NamedTuple)::StateRanges return StateRanges(ranges...) 
end -@kwdef mutable struct Parameters{C3, C4, C6, C7, C8} - const starttime::DateTime - const graph::ModelGraph - const allocation::Allocation - const basin::Basin{C3, C4} - const linear_resistance::LinearResistance - const manning_resistance::ManningResistance - const tabulated_rating_curve::TabulatedRatingCurve - const level_boundary::LevelBoundary{C6} - const flow_boundary::FlowBoundary{C7} - const pump::Pump - const outlet::Outlet - const terminal::Terminal - const discrete_control::DiscreteControl - const continuous_control::ContinuousControl - const pid_control::PidControl - const user_demand::UserDemand{C8} - const level_demand::LevelDemand - const flow_demand::FlowDemand - const subgrid::Subgrid - # Per state the in- and outflow links associated with that state (if they exist) - const state_inflow_link::Vector{LinkMetadata} = LinkMetadata[] - const state_outflow_link::Vector{LinkMetadata} = LinkMetadata[] +@kwdef mutable struct ParametersMutable all_nodes_active::Bool = false tprev::Float64 = 0.0 +end + +@kwdef struct ParametersNonDiff{C1, C2, C3, C4, C5} + starttime::DateTime + graph::ModelGraph + allocation::Allocation + basin::Basin{C1, C2} + linear_resistance::LinearResistance + manning_resistance::ManningResistance + tabulated_rating_curve::TabulatedRatingCurve + level_boundary::LevelBoundary{C3} + flow_boundary::FlowBoundary{C4} + pump::Pump + outlet::Outlet + terminal::Terminal + discrete_control::DiscreteControl + continuous_control::ContinuousControl + pid_control::PidControl + user_demand::UserDemand{C5} + level_demand::LevelDemand + flow_demand::FlowDemand + subgrid::Subgrid + # Per state the in- and outflow links associated with that state (if they exist) + state_inflow_link::Vector{LinkMetadata} = LinkMetadata[] + state_outflow_link::Vector{LinkMetadata} = LinkMetadata[] # Sparse matrix for combining flows into storages - const flow_to_storage::SparseMatrixCSC{Float64, Int64} = spzeros(1, 1) + flow_to_storage::SparseMatrixCSC{Float64, Int64} = 
spzeros(1, 1) # Water balance tolerances - const water_balance_abstol::Float64 - const water_balance_reltol::Float64 + water_balance_abstol::Float64 + water_balance_reltol::Float64 # State at previous saveat - const u_prev_saveat::Vector{Float64} = Float64[] + u_prev_saveat::Vector{Float64} = Float64[] # Node ID associated with each state - const node_id::Vector{NodeID} = NodeID[] + node_id::Vector{NodeID} = NodeID[] # Range per states component - const state_ranges::StateRanges = StateRanges() + state_ranges::StateRanges = StateRanges() end -# To opt-out of type checking for ForwardDiff -function DiffEqBase.anyeltypedual(::Parameters, ::Type{Val{counter}}) where {counter} - Any +@kwdef struct Parameters{C1, C2, C3, C4, C5} + p_non_diff::ParametersNonDiff{C1, C2, C3, C4, C5} + p_diff::ParametersDiff{Float64} = ParametersDiff(; + cache_basin = BasinCache(length(p_non_diff.basin.node_id)), + cache_flow_rate_pump = zeros(length(p_non_diff.pump.node_id)), + cache_flow_rate_outlet = zeros(length(p_non_diff.pump.node_id)), + cache_error_pid_control = zeros(length(p_non_diff.pid_control.node_id)), + ) + p_mutable::ParametersMutable = ParametersMutable() end diff --git a/core/src/read.jl b/core/src/read.jl index 9dbeb06f8..f77e546d3 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -253,11 +253,11 @@ const conservative_nodetypes = Set{NodeType.T}([ ]) function get_allocation_sources_in_order!( - p::Parameters, + p_non_diff::ParametersNonDiff, db::DB, config::Config, )::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource} - (; graph, user_demand, allocation) = p + (; graph, user_demand, allocation) = p_non_diff (; subnetwork_demands, subnetwork_allocateds, @@ -296,7 +296,7 @@ function get_allocation_sources_in_order!( # node which connects to multiple basins # - One source node can imply multiple sources in the case of a LevelBoundary with multiple # neighbors - node_id = NodeID(Symbol(row.node_type), row.node_id, p) + node_id = NodeID(Symbol(row.node_type), 
row.node_id, p_non_diff) links = Tuple{NodeID, NodeID}[] if !ismissing(row.subnetwork_id) # E.g. :manning_resistance @@ -337,7 +337,7 @@ function get_allocation_sources_in_order!( elseif row.subnetwork_id != 1 # Not in the main network for main_network_id in filter!( id -> graph[id].subnetwork_id == 1, # Connects to the main network - collect(inflow_ids(p.graph, node_id)), + collect(inflow_ids(graph, node_id)), ) is_source = true source_priority = default_source_priority_dict[:subnetwork_inlet] @@ -381,8 +381,12 @@ function get_allocation_sources_in_order!( ) end -function initialize_allocation!(p::Parameters, db::DB, config::Config)::Nothing - (; graph, allocation) = p +function initialize_allocation!( + p_non_diff::ParametersNonDiff, + db::DB, + config::Config, +)::Nothing + (; graph, allocation) = p_non_diff (; subnetwork_ids, allocation_models, main_network_connections) = allocation subnetwork_ids_ = sort(collect(keys(graph[].node_ids))) @@ -400,12 +404,12 @@ function initialize_allocation!(p::Parameters, db::DB, config::Config)::Nothing main_network_connections[subnetwork_id] = Tuple{NodeID, NodeID}[] end - sources = get_allocation_sources_in_order!(p, db, config) + sources = get_allocation_sources_in_order!(p_non_diff, db, config) for subnetwork_id in subnetwork_ids_ push!( allocation_models, - AllocationModel(subnetwork_id, p, sources, config.allocation.timestep), + AllocationModel(subnetwork_id, p_non_diff, sources, config.allocation.timestep), ) end return nothing @@ -737,23 +741,20 @@ function Pump(db::DB, config::Config, graph::MetaGraph)::Pump end (; node_id) = parsed_parameters - - # If flow rate is set by PidControl or ContinuousControl, it is part of the AD Jacobian computations - flow_rate_cache = cache(length(node_id)) - flow_rate_cache[Float64[]] .= [itp(0) for itp in parsed_parameters.flow_rate] + continuous_control_type = get_continuous_control_type(graph, node_id) return Pump(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), 
outflow_link = outflow_link.(Ref(graph), node_id), parsed_parameters.active, - flow_rate_cache, parsed_parameters.flow_rate, parsed_parameters.min_flow_rate, parsed_parameters.max_flow_rate, parsed_parameters.min_upstream_level, parsed_parameters.max_downstream_level, parsed_parameters.control_mapping, + continuous_control_type, ) end @@ -789,30 +790,21 @@ function Outlet(db::DB, config::Config, graph::MetaGraph)::Outlet error("Errors occurred when parsing Outlet data.") end - node_id = - NodeID.( - NodeType.Outlet, - parsed_parameters.node_id, - eachindex(parsed_parameters.node_id), - ) - - # If flow rate is set by PidControl or ContinuousControl, it is part of the AD Jacobian computations - flow_rate_cache = cache(length(node_id)) - flow_rate_cache[Float64[], length(node_id)] .= - [itp(0) for itp in parsed_parameters.flow_rate] + (; node_id) = parsed_parameters + continuous_control_type = get_continuous_control_type(graph, node_id) return Outlet(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), outflow_link = outflow_link.(Ref(graph), node_id), parsed_parameters.active, - flow_rate_cache, parsed_parameters.flow_rate, parsed_parameters.min_flow_rate, parsed_parameters.max_flow_rate, parsed_parameters.control_mapping, parsed_parameters.min_upstream_level, parsed_parameters.max_downstream_level, + continuous_control_type, ) end @@ -1030,8 +1022,7 @@ References to listened parameters are added later. 
function CompoundVariable( variables_compound_variable, node_type::NodeType.T, - node_ids_all::Vector{NodeID}, - placeholder_vector; + node_ids_all::Vector{NodeID}; conditions_compound_variable = nothing, starttime = nothing, cyclic_time = false, @@ -1047,7 +1038,7 @@ function CompoundVariable( for row in variables_compound_variable listen_node_id = NodeID(row.listen_node_id, node_ids_all) # Placeholder until actual ref is known - variable_ref = PreallocationRef(placeholder_vector, 0) + variable_ref = ParametersDiffRef() variable = row.variable # Default to weight = 1.0 if not specified weight = coalesce(row.weight, 1.0) @@ -1071,8 +1062,6 @@ end function parse_variables_and_conditions(ids::Vector{Int32}, db::DB, config::Config) condition = load_structvector(db, config, DiscreteControlConditionV1) compound_variable = load_structvector(db, config, DiscreteControlVariableV1) - - placeholder_vector = cache(1) compound_variables = Vector{CompoundVariable}[] cyclic_times = get_cyclic_time(db, "DiscreteControl") errors = false @@ -1114,8 +1103,7 @@ function parse_variables_and_conditions(ids::Vector{Int32}, db::DB, config::Conf CompoundVariable( variables_compound_variable, NodeType.DiscreteControl, - node_ids_all, - placeholder_vector; + node_ids_all; conditions_compound_variable, config.starttime, cyclic_time, @@ -1206,7 +1194,6 @@ function continuous_control_functions(db, config, ids) end function continuous_control_compound_variables(db::DB, config::Config, ids) - placeholder_vector = cache(1) node_ids_all = get_node_ids(db) data = load_structvector(db, config, ContinuousControlVariableV1) @@ -1228,7 +1215,7 @@ function continuous_control_compound_variables(db::DB, config::Config, ids) compound_variables end -function ContinuousControl(db::DB, config::Config, graph::MetaGraph)::ContinuousControl +function ContinuousControl(db::DB, config::Config)::ContinuousControl compound_variable = load_structvector(db, config, ContinuousControlVariableV1) node_id = 
get_node_ids(db, NodeType.ContinuousControl) @@ -1239,7 +1226,7 @@ function ContinuousControl(db::DB, config::Config, graph::MetaGraph)::Continuous compound_variable = continuous_control_compound_variables(db, config, ids) # References to the controlled parameters, filled in later when they are known - target_refs = PreallocationRef[] + target_refs = ParametersDiffRef[] if errors error("Errors encountered when parsing ContinuousControl data.") @@ -1254,11 +1241,11 @@ function ContinuousControl(db::DB, config::Config, graph::MetaGraph)::Continuous ) end -function PidControl(db::DB, config::Config, graph::MetaGraph)::PidControl +function PidControl(db::DB, config::Config)::PidControl static = load_structvector(db, config, PidControlStaticV1) time = load_structvector(db, config, PidControlTimeV1) - _, _, node_ids, valid = static_and_time_node_ids(db, static, time, NodeType.PidControl) + _, _, node_id, valid = static_and_time_node_ids(db, static, time, NodeType.PidControl) if !valid error("Problems encountered when parsing PidControl static and time node IDs.") @@ -1272,34 +1259,17 @@ function PidControl(db::DB, config::Config, graph::MetaGraph)::PidControl error("Errors occurred when parsing PidControl data.") end - pid_error = cache(length(node_ids)) - target_ref = PreallocationRef[] - - controlled_basins = Set{NodeID}() - for id in node_ids - controlled_node = only(outneighbor_labels_type(graph, id, LinkType.control)) - for id_inout in inoutflow_ids(graph, controlled_node) - if id_inout.type == NodeType.Basin - push!(controlled_basins, id_inout) - end - end - end - controlled_basins = collect(controlled_basins) - all_node_ids = get_node_ids(db) listen_node_id = NodeID.(parsed_parameters.listen_node_id, Ref(all_node_ids)) return PidControl(; - node_id = node_ids, + node_id, parsed_parameters.active, listen_node_id, parsed_parameters.target, - target_ref, parsed_parameters.proportional, parsed_parameters.integral, parsed_parameters.derivative, - error = pid_error, - 
controlled_basins, parsed_parameters.control_mapping, ) end @@ -1815,8 +1785,8 @@ function Parameters(db::DB, config::Config)::Parameters outlet = Outlet(db, config, graph), terminal = Terminal(db, config), discrete_control = DiscreteControl(db, config, graph), - continuous_control = ContinuousControl(db, config, graph), - pid_control = PidControl(db, config, graph), + continuous_control = ContinuousControl(db, config), + pid_control = PidControl(db, config), user_demand = UserDemand(db, config, graph), level_demand = LevelDemand(db, config), flow_demand = FlowDemand(db, config), @@ -1848,25 +1818,18 @@ function Parameters(db::DB, config::Config)::Parameters flow_to_storage = build_flow_to_storage(state_ranges, n_states, basin, connector_nodes) state_inflow_link, state_outflow_link = get_state_flow_links(graph, nodes) - p = Parameters(; + set_target_ref!( + nodes.pid_control.target_ref, + nodes.pid_control.node_id, + fill("flow_rate", length(node_id)), + state_ranges, + ) + + p_non_diff = ParametersNonDiff(; config.starttime, graph, allocation, - basin, - nodes.linear_resistance, - nodes.manning_resistance, - nodes.tabulated_rating_curve, - nodes.level_boundary, - nodes.flow_boundary, - nodes.pump, - nodes.outlet, - nodes.terminal, - nodes.discrete_control, - nodes.continuous_control, - nodes.pid_control, - nodes.user_demand, - nodes.level_demand, - nodes.flow_demand, + nodes..., subgrid, state_inflow_link, state_outflow_link, @@ -1877,17 +1840,18 @@ function Parameters(db::DB, config::Config)::Parameters state_ranges, ) - collect_control_mappings!(p) - set_continuous_control_type!(p) - set_listen_variable_refs!(p) - set_discrete_controlled_variable_refs!(p) - set_continuously_controlled_variable_refs!(p) + collect_control_mappings!(p_non_diff) + set_listen_variable_refs!(p_non_diff) + set_discrete_controlled_variable_refs!(p_non_diff) # Allocation data structures if config.allocation.use_allocation - initialize_allocation!(p, db, config) + 
initialize_allocation!(p_non_diff, db, config) end - return p + + # TODO: Set and check pump and outlet flow rates + + return Parameters(; p_non_diff) end function get_node_ids_int32(db::DB, node_type)::Vector{Int32} diff --git a/core/src/solve.jl b/core/src/solve.jl index 68a8b6fcf..daea8315f 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -1,14 +1,25 @@ +# Wrapper method used by OrdinaryDiffEq; the other method is used +# for Jacobian evaluation with DifferentiationInterface.jl +water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing = + water_balance(du, u, p.p_non_diff, p.p_diff, p.p_mutable, t) + """ The right hand side function of the system of ODEs set up by Ribasim. """ -function water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing - (; current_storage, current_low_storage_factor, current_level) = - p.basin.current_properties +function water_balance!( + du::Vector, + u::Vector, + p_non_diff::ParametersNonDiff, + p_diff::ParametersDiff, + p_mutable::ParametersMutable, + t::Number, +)::Nothing + (; current_storage, current_low_storage_factor, current_level) = p_diff.cache_basin du .= 0.0 # Ensures current_* vectors are current - set_current_basin_properties!(du, u, p, t) + set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) current_storage = current_storage[du] current_low_storage_factor = current_low_storage_factor[du] @@ -72,12 +83,13 @@ state u and the time t. 
function set_current_basin_properties!( du::Vector, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, + p_diff::ParametersDiff, + p_mutable::ParametersMutable, t::Number, )::Nothing - (; basin) = p - (; current_properties, cumulative_precipitation, cumulative_drainage, vertical_flux) = - basin + (; basin) = p_non_diff + (; cumulative_precipitation, cumulative_drainage, vertical_flux) = p_non_diff.basin (; current_storage, current_low_storage_factor, @@ -85,17 +97,10 @@ function set_current_basin_properties!( current_area, current_cumulative_precipitation, current_cumulative_drainage, - ) = current_properties - - current_storage = current_storage[du] - current_low_storage_factor = current_low_storage_factor[du] - current_level = current_level[du] - current_area = current_area[du] - current_cumulative_precipitation = current_cumulative_precipitation[du] - current_cumulative_drainage = current_cumulative_drainage[du] + ) = p_diff.cache_basin # The exact cumulative precipitation and drainage up to the t of this water_balance call - dt = t - p.tprev + dt = t - p_mutable.tprev for node_id in basin.node_id fixed_area = basin_areas(basin, node_id.idx)[end] current_cumulative_precipitation[node_id.idx] = @@ -104,7 +109,7 @@ function set_current_basin_properties!( end @. 
current_cumulative_drainage = cumulative_drainage + dt * vertical_flux.drainage - formulate_storages!(current_storage, du, u, p, t) + formulate_storages!(current_storage, u, p_non_diff, p_diff, p_mutable, t) for (id, s) in zip(basin.node_id, current_storage) i = id.idx @@ -116,13 +121,16 @@ end function formulate_storages!( current_storage::AbstractVector, - du::Vector, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, + p_diff::ParametersDiff, + p_mutable::ParametersMutable, t::Number; add_initial_storage::Bool = true, )::Nothing - (; basin, flow_boundary, tprev, flow_to_storage) = p + (; basin, flow_boundary, flow_to_storage) = p_non_diff + (; cache_basin) = p_diff + (; tprev) = p_mutable # Current storage: initial condition + # total inflows and outflows since the start # of the simulation @@ -132,7 +140,7 @@ function formulate_storages!( current_storage .= 0.0 end mul!(current_storage, flow_to_storage, u, 1, 1) - formulate_storage!(current_storage, basin, du) + formulate_storage!(cache_basin) formulate_storage!(current_storage, tprev, t, flow_boundary) return nothing end @@ -140,12 +148,10 @@ end """ The storage contributions of the forcings that are not part of the state. """ -function formulate_storage!(current_storage::AbstractVector, basin::Basin, du::Vector) - (; current_cumulative_precipitation, current_cumulative_drainage) = - basin.current_properties +function formulate_storage!(cache_basin::BasinCache) + (; current_storage, current_cumulative_precipitation, current_cumulative_drainage) = + cache_basin - current_cumulative_precipitation = current_cumulative_precipitation[du] - current_cumulative_drainage = current_cumulative_drainage[du] current_storage .+= current_cumulative_precipitation current_storage .+= current_cumulative_drainage end @@ -757,6 +763,7 @@ flow rates for the last time step if these flow rate bounds are known. 
""" function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Number)::Nothing (; uprev, dt) = integrator + (; p_non_diff, p_diff, p_mutable) = p (; pump, outlet, @@ -766,7 +773,7 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb basin, allocation, state_ranges, - ) = p + ) = p_non_diff u_tabulated_rating_curve = view(u, state_ranges.tabulated_rating_curve) u_pump = view(u, state_ranges.pump) @@ -788,7 +795,7 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb # storage and level attained in the last time step to estimate whether there was an effect # of reduction factors du = get_du(integrator) - set_current_basin_properties!(du, u, p, t) + set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) current_storage = basin.current_properties.current_storage[u] current_level = basin.current_properties.current_level[u] diff --git a/core/src/util.jl b/core/src/util.jl index 9d354dd87..7862701b2 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -401,48 +401,24 @@ function get_external_demand_priority_idx(p::Parameters, node_id::NodeID)::Int return findsorted(allocation.demand_priorities_all, demand_priority) end -""" -Set continuous_control_type for those pumps and outlets that are controlled by either -PidControl or ContinuousControl -""" -function set_continuous_control_type!(p::Parameters)::Nothing - (; continuous_control, pid_control) = p - errors = false - - errors = set_continuous_control_type!( - p, - continuous_control.node_id, - ContinuousControlType.Continuous, - ) - errors |= - set_continuous_control_type!(p, pid_control.node_id, ContinuousControlType.PID) - - if errors - error("Errors occurred when parsing ContinuousControl and PidControl connectivity") - end - return nothing -end - -function set_continuous_control_type!( - p::Parameters, - node_id::Vector{NodeID}, - continuous_control_type::ContinuousControlType.T, -)::Bool - (; graph, pump, outlet) = p - 
errors = false +const control_type_mapping = Dict{NodeType.T, ContinuousControlType.T}( + NodeType.PidControl => ContinuousControlType.PID, + NodeType.ContinuousControl => ContinuousControlType.Continuous, +) +function get_continuous_control_type(graph::MetaGraph, node_id::Vector{NodeID}) + continuous_control_type = fill(ContinuousControlType.None, length(node_id)) for id in node_id - id_controlled = only(outneighbor_labels_type(graph, id, LinkType.control)) - if id_controlled.type == NodeType.Pump - pump.continuous_control_type[id_controlled.idx] = continuous_control_type - elseif id_controlled.type == NodeType.Outlet - outlet.continuous_control_type[id_controlled.idx] = continuous_control_type - else - errors = true - @error "Only Pump and Outlet can be controlled by PidController, got $id_controlled" + control_inneighbors = collect(inneighbor_labels_type(graph, id, LinkType.control)) + if length(control_inneighbors) == 1 + control_inneighbor = only(control_inneighbors) + continuous_control_type[id.idx] = + get(control_type_mapping, control_inneighbor.type, ContinuousControl.None) + elseif length(control_inneighbors) > 1 + error("$id has more than 1 control inneighbors.") end end - return errors + return continuous_control_type end function has_external_demand( @@ -536,9 +512,9 @@ function convert_truth_state(boolean_vector)::String String(UInt8.(ifelse.(boolean_vector, 'T', 'F'))) end -function NodeID(type::Symbol, value::Integer, p::Parameters)::NodeID +function NodeID(type::Symbol, value::Integer, p_non_diff::ParametersNonDiff)::NodeID node_type = NodeType.T(type) - node = getfield(p, snake_case(type)) + node = getfield(p_non_diff, snake_case(type)) idx = searchsortedfirst(node.node_id, NodeID(node_type, value, 0)) return NodeID(node_type, value, idx) end @@ -547,34 +523,41 @@ end Get the reference to a parameter """ function get_variable_ref( - p::Parameters, node_id::NodeID, - variable::String; + variable::String, + state_ranges::StateRanges; listen::Bool = 
true, -)::Tuple{PreallocationRef, Bool} - (; basin) = p +)::Tuple{ParametersDiffRef, Bool} errors = false ref = if node_id.type == NodeType.Basin && variable == "level" - PreallocationRef(basin.current_properties.current_level, node_id.idx) + ParametersDiffRef(; type = ParameterDiffType.basin_level, node_id.idx) elseif variable == "flow_rate" && node_id.type != NodeType.FlowBoundary if listen if node_id.type ∉ conservative_nodetypes errors = true @error "Cannot listen to flow_rate of $node_id, the node type must be one of $conservative_node_types." - Ref(Float64[], 0) + ParametersDiffRef() else # Index in the state vector (inflow) - flow_idx = get_state_index(p.state_ranges, node_id) - PreallocationRef(cache(1), flow_idx; from_du = true) + idx = get_state_index(state_ranges, node_id) + ParametersDiffRef(; idx, from_du = true) end else - node = getfield(p, snake_case(node_id)) - PreallocationRef(node.flow_rate_cache, node_id.idx) + type = if node_id.type == NodeType.Pump + ParameterDiffType.flow_rate_pump + elseif node_id.type == NodeType.Outlet + ParameterDiffType.flow_rate_outlet + else + errors = true + @error "Cannot set the flow rate of $node_id." 
+ ParameterDiffType.flow_rate_pump + end + ParametersDiffRef(; type, node_id.idx) end else # Placeholder to obtain correct type - PreallocationRef(cache(1), 0) + ParametersDiffRef() end return ref, errors end @@ -582,8 +565,8 @@ end """ Set references to all variables that are listened to by discrete/continuous control """ -function set_listen_variable_refs!(p::Parameters)::Nothing - (; discrete_control, continuous_control) = p +function set_listen_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing + (; discrete_control, continuous_control, state_ranges) = p_non_diff compound_variable_sets = [discrete_control.compound_variables..., continuous_control.compound_variable] errors = false @@ -592,8 +575,11 @@ function set_listen_variable_refs!(p::Parameters)::Nothing for compound_variable in compound_variables (; subvariables) = compound_variable for (j, subvariable) in enumerate(subvariables) - ref, error = - get_variable_ref(p, subvariable.listen_node_id, subvariable.variable) + ref, error = get_variable_ref( + subvariable.listen_node_id, + subvariable.variable, + state_ranges, + ) if !error subvariables[j] = @set subvariable.variable_ref = ref end @@ -611,9 +597,9 @@ end """ Set references to all variables that are controlled by discrete control """ -function set_discrete_controlled_variable_refs!(p::Parameters)::Nothing - for nodetype in propertynames(p) - node = getfield(p, nodetype) +function set_discrete_controlled_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing + for nodetype in propertynames(p_non_diff) + node = getfield(p_non_diff, nodetype) if node isa AbstractParameterNode && hasfield(typeof(node), :control_mapping) control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} = node.control_mapping @@ -647,26 +633,22 @@ function set_discrete_controlled_variable_refs!(p::Parameters)::Nothing return nothing end -function set_continuously_controlled_variable_refs!(p::Parameters)::Nothing - (; continuous_control, pid_control, graph) = p +function 
set_target_ref!( + target_ref::Vector{ParametersDiffRef}, + node_id::Vector{NodeID}, + controlled_variable::Vector{String}, + state_ranges::StateRanges, +) errors = false - for (node, controlled_variable) in ( - (continuous_control, continuous_control.controlled_variable), - (pid_control, fill("flow_rate", length(pid_control.node_id))), - ) - for (id, controlled_variable) in zip(node.node_id, controlled_variable) - controlled_node_id = only(outneighbor_labels_type(graph, id, LinkType.control)) - ref, error = - get_variable_ref(p, controlled_node_id, controlled_variable; listen = false) - push!(node.target_ref, ref) - errors |= error - end + for (i, (id, variable)) in enumerate(zip(node_id, controlled_variable)) + ref, error = get_variable_ref(id, variable, state_ranges; listen = false) + target_ref[i] = ref + errors |= error end if errors error("Errors encountered when setting continuously controlled variable refs.") end - return nothing end """ @@ -732,12 +714,12 @@ end Collect the control mappings of all controllable nodes in the DiscreteControl object for easy access """ -function collect_control_mappings!(p)::Nothing - (; control_mappings) = p.discrete_control +function collect_control_mappings!(p_non_diff::ParametersNonDiff)::Nothing + (; control_mappings) = p_non_diff.discrete_control for node_type in instances(NodeType.T) node_type == NodeType.Terminal && continue - node = getfield(p, snake_case(node_type)) + node = getfield(p_non_diff, snake_case(node_type)) if hasfield(typeof(node), :control_mapping) control_mappings[node_type] = node.control_mapping end @@ -838,7 +820,7 @@ function OrdinaryDiffEqNonlinearSolve.relax!( end "Create a NamedTuple of the node IDs per state component in the state order" -function state_node_ids(p::Union{Parameters, NamedTuple})::NamedTuple +function state_node_ids(p::Union{ParametersNonDiff, NamedTuple})::NamedTuple (; tabulated_rating_curve = p.tabulated_rating_curve.node_id, pump = p.pump.node_id, @@ -853,15 +835,15 @@ 
function state_node_ids(p::Union{Parameters, NamedTuple})::NamedTuple ) end -function build_state_vector(p::Parameters) +function build_state_vector(p_non_diff::ParametersNonDiff) # It is assumed that the horizontal flow states come first in - # p.state_inflow_link and p.state_outflow_link - u_ids = state_node_ids(p) - state_ranges = p.state_ranges - u = zeros(length(p.node_id)) - # Ensure p.node_id, p.state_ranges and u have the same length and order + # p_non_diff.state_inflow_link and p_non_diff.state_outflow_link + u_ids = state_node_ids(p_non_diff) + state_ranges = p_non_diff.state_ranges + u = zeros(length(p_non_diff.node_id)) + # Ensure p_non_diff.node_id, p_non_diff.state_ranges and u have the same length and order ranges = (getproperty(state_ranges, x) for x in propertynames(state_ranges)) - @assert length(u) == length(p.node_id) == mapreduce(length, +, ranges) + @assert length(u) == length(p_non_diff.node_id) == mapreduce(length, +, ranges) @assert keys(u_ids) == fieldnames(StateRanges) return u end diff --git a/core/src/validation.jl b/core/src/validation.jl index 73a0b47bb..773d48d97 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -531,7 +531,7 @@ Check: - Whether the supplied truth states have the proper length; - Whether look_ahead is only supplied for condition variables given by a time-series. 
""" -function valid_discrete_control(p::Parameters, config::Config)::Bool +function valid_discrete_control(p::ParametersNonDiff, config::Config)::Bool (; discrete_control, graph) = p (; node_id, logic_mapping) = discrete_control From 79162e27bd5f585753b18462f40bff812e8f5972 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Wed, 12 Mar 2025 15:10:58 +0100 Subject: [PATCH 07/28] Introduce vector cache --- core/src/allocation_optim.jl | 172 ++++++++++++--------- core/src/callback.jl | 105 +++++++------ core/src/concentration.jl | 7 +- core/src/graph.jl | 6 +- core/src/model.jl | 37 +++-- core/src/parameter.jl | 136 ++++++++-------- core/src/read.jl | 7 +- core/src/solve.jl | 290 ++++++++++++++--------------------- core/src/util.jl | 82 ++++++---- core/src/write.jl | 34 ++-- core/test/allocation_test.jl | 3 +- core/test/run_models_test.jl | 4 +- 12 files changed, 440 insertions(+), 443 deletions(-) diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index a6ec614c9..be4a0623d 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -30,12 +30,12 @@ Set the objective for the given demand priority. 
function set_objective_demand_priority!( allocation_model::AllocationModel, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, demand_priority_idx::Int, )::Nothing (; problem, subnetwork_id, capacity) = allocation_model - (; graph, user_demand, flow_demand, allocation, basin) = p + (; graph, user_demand, flow_demand, allocation, basin) = p_non_diff (; node_id, demand_reduced) = user_demand (; main_network_connections, subnetwork_demands) = allocation F = problem[:F] @@ -74,7 +74,8 @@ function set_objective_demand_priority!( has_external_demand(graph, to_node_id, :flow_demand) # FlowDemand if has_demand - flow_demand_priority_idx = get_external_demand_priority_idx(p, to_node_id) + flow_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, to_node_id) d = demand_priority_idx == flow_demand_priority_idx ? flow_demand.demand[demand_node_id.idx] : 0.0 @@ -88,7 +89,7 @@ function set_objective_demand_priority!( # Terms for LevelDemand nodes F_basin_in = problem[:F_basin_in] for node_id in only(F_basin_in.axes) - basin_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + basin_demand_priority_idx = get_external_demand_priority_idx(p_non_diff, node_id) d = basin_demand_priority_idx == demand_priority_idx ? 
get_basin_demand(allocation_model, u, p, t, node_id) : 0.0 @@ -108,12 +109,12 @@ Assign the allocations to the UserDemand or subnetwork as determined by the solu """ function assign_allocations!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, demand_priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing (; subnetwork_id, capacity, flow) = allocation_model - (; graph, user_demand, allocation) = p + (; graph, user_demand, allocation) = p_non_diff (; subnetwork_demands, subnetwork_allocateds, main_network_connections) = allocation main_network_source_links = main_network_connections[subnetwork_id] for link in keys(capacity.data) @@ -163,11 +164,11 @@ allocate: the total flow allocated to this inlet from the main network """ function set_initial_capacities_inlet!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, optimization_type::OptimizationType.T, )::Nothing (; sources) = allocation_model - (; allocation) = p + (; allocation) = p_non_diff (; subnetwork_id) = allocation_model (; subnetwork_allocateds, main_network_connections) = allocation @@ -197,12 +198,12 @@ as the average flow over the last Δt_allocation of the source in the physical l """ function set_initial_capacities_source!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, )::Nothing (; sources) = allocation_model (; subnetwork_id) = allocation_model - mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p, subnetwork_id) + mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p_non_diff, subnetwork_id) for link in keys(mean_input_flows_subnetwork_) source = sources[link] @@ -242,11 +243,11 @@ user return flow and flow demand buffers. 
""" function increase_source_capacities!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::AbstractFloat, )::Nothing (; problem, sources) = allocation_model - (; user_demand) = p + (; user_demand) = p_non_diff for source in values(sources) (; link) = source @@ -274,9 +275,9 @@ the smallest max_flow_rate of a node on this link """ function set_initial_capacities_link!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, )::Nothing - (; main_network_connections) = p.allocation + (; main_network_connections) = p_non_diff.allocation (; problem, capacity, subnetwork_id) = allocation_model constraints_capacity = problem[:capacity] main_network_source_links = main_network_connections[subnetwork_id] @@ -411,7 +412,7 @@ vertical fluxes + the disk of storage above the maximum level / Δt_allocation function set_initial_capacities_basin!( allocation_model::AllocationModel, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing (; problem, sources) = allocation_model @@ -420,7 +421,7 @@ function set_initial_capacities_basin!( for node_id in only(constraints_outflow.axes) source = sources[(node_id, node_id)] @assert source.type == AllocationSourceType.level_demand - source.capacity = get_basin_capacity(allocation_model, u, p, t, node_id) + source.capacity = get_basin_capacity(allocation_model, u, p_non_diff, t, node_id) end return nothing end @@ -431,11 +432,11 @@ by either a coupled model or a timeseries """ function set_initial_demands_user!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing (; subnetwork_id) = allocation_model - (; graph, user_demand, allocation) = p + (; graph, user_demand, allocation) = p_non_diff (; node_id, demand_from_timeseries, demand_itp, demand, demand_reduced) = user_demand # Read the demand from the interpolated timeseries @@ -459,18 +460,18 @@ Set the initial demand of each basin in the 
subnetwork as function set_initial_demands_level!( allocation_model::AllocationModel, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing (; subnetwork_id, problem) = allocation_model - (; graph, basin) = p + (; graph, basin) = p_non_diff (; demand) = basin node_ids_level_demand = only(problem[:basin_outflow].axes) for id in node_ids_level_demand if graph[id].subnetwork_id == subnetwork_id - demand[id.idx] = get_basin_demand(allocation_model, u, p, t, id) + demand[id.idx] = get_basin_demand(allocation_model, u, p_non_diff, t, id) end end @@ -482,10 +483,10 @@ Set the initial capacities of the UserDemand return flow sources to 0. """ function set_initial_capacities_returnflow!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, )::Nothing (; problem, sources) = allocation_model - (; user_demand) = p + (; user_demand) = p_non_diff constraints_outflow = problem[:source_user] for node_id in only(constraints_outflow.axes) @@ -502,12 +503,12 @@ from the total flow demand (which will be used at the demand priority of the flo """ function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, demand_priority_idx::Int, user_demand::UserDemand, )::Nothing (; problem, subnetwork_id) = allocation_model - (; graph) = p + (; graph) = p_non_diff (; node_id, demand_reduced) = user_demand F = problem[:F] @@ -532,11 +533,11 @@ to obtain the reduced demand used for goal programming function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, ::Int, ::LevelDemand, )::Nothing - (; graph, basin) = p + (; graph, basin) = p_non_diff (; demand) = basin (; subnetwork_id, problem) = allocation_model F_basin_in = problem[:F_basin_in] @@ -557,10 +558,10 @@ interpolated value from the given timeseries. 
""" function set_initial_demands_flow!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing - (; flow_demand, graph) = p + (; flow_demand, graph) = p_non_diff (; subnetwork_id) = allocation_model for node_id in flow_demand.node_id @@ -578,11 +579,11 @@ Flow from any demand priority counts. """ function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, ::Int, flow_demand::FlowDemand, )::Nothing - (; graph) = p + (; graph) = p_non_diff (; problem, subnetwork_id) = allocation_model F = problem[:F] @@ -625,12 +626,12 @@ Save the demands and allocated flows for UserDemand and Basin. Note: Basin supply (negative demand) is only saved for the first demand priority. """ function save_demands_and_allocations!( - p::Parameters, + p_non_diff::ParametersNonDiff, allocation_model::AllocationModel, t::Float64, demand_priority_idx::Int, )::Nothing - (; graph, allocation, user_demand, flow_demand, basin) = p + (; graph, allocation, user_demand, flow_demand, basin) = p_non_diff (; record_demand, demand_priorities_all, mean_realized_flows) = allocation (; subnetwork_id, sources, flow) = allocation_model node_ids = graph[].node_ids[subnetwork_id] @@ -702,14 +703,14 @@ end Save the allocation flows per basin and physical link. 
""" function save_allocation_flows!( - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, allocation_model::AllocationModel, demand_priority::Int32, optimization_type::OptimizationType.T, )::Nothing (; flow, subnetwork_id, sources) = allocation_model - (; allocation, graph) = p + (; allocation, graph) = p_non_diff (; record_flow) = allocation links_allocation = keys(flow.data) @@ -784,11 +785,11 @@ end function allocate_to_users_from_connected_basin!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, demand_priority_idx::Int, )::Nothing (; flow, problem, sources) = allocation_model - (; graph, user_demand) = p + (; graph, user_demand) = p_non_diff # Get all UserDemand nodes from this subnetwork node_ids_user_demand = only(problem[:source_user].axes) @@ -877,12 +878,12 @@ function optimize_per_source!( allocation_model::AllocationModel, demand_priority_idx::Integer, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::AbstractFloat, optimization_type::OptimizationType.T, )::Nothing (; problem, sources, subnetwork_id, flow) = allocation_model - (; demand_priorities_all) = p.allocation + (; demand_priorities_all) = p_non_diff.allocation F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] @@ -905,7 +906,13 @@ function optimize_per_source!( # of an existing objective function because this is not supported for # quadratic terms: # https://jump.dev/JuMP.jl/v1.16/manual/objective/#Modify-an-objective-coefficient - set_objective_demand_priority!(allocation_model, u, p, t, demand_priority_idx) + set_objective_demand_priority!( + allocation_model, + u, + p_non_diff, + t, + demand_priority_idx, + ) # Set only the capacity of the current source to nonzero set_source_capacity!(allocation_model, source, optimization_type) @@ -925,15 +932,20 @@ function optimize_per_source!( end # Adjust capacities for the optimization for the next source - increase_source_capacities!(allocation_model, p, t) + 
increase_source_capacities!(allocation_model, p_non_diff, t) reduce_source_capacity!(problem, source) reduce_link_capacities!(allocation_model) # Adjust demands for next optimization (in case of internal_sources -> collect_demands) - for parameter in propertynames(p) - demand_node = getfield(p, parameter) + for parameter in propertynames(p_non_diff) + demand_node = getfield(p_non_diff, parameter) if demand_node isa AbstractDemandNode - reduce_demands!(allocation_model, p, demand_priority_idx, demand_node) + reduce_demands!( + allocation_model, + p_non_diff, + demand_priority_idx, + demand_node, + ) end end @@ -947,7 +959,7 @@ function optimize_per_source!( end # Adjust allocated flow to basins - increase_allocateds!(p.basin, problem) + increase_allocateds!(p_non_diff.basin, problem) end return nothing end @@ -971,13 +983,13 @@ end function optimize_demand_priority!( allocation_model::AllocationModel, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, demand_priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing (; flow) = allocation_model - (; basin, allocation) = p + (; basin, allocation) = p_non_diff (; demand_priorities_all) = allocation # Start the values of the flows at this demand priority at 0.0 @@ -990,20 +1002,36 @@ function optimize_demand_priority!( # Allocate to UserDemand nodes from the directly connected basin # This happens outside the JuMP optimization - allocate_to_users_from_connected_basin!(allocation_model, p, demand_priority_idx) + allocate_to_users_from_connected_basin!( + allocation_model, + p_non_diff, + demand_priority_idx, + ) # Solve the allocation problem for this demand priority per source - optimize_per_source!(allocation_model, demand_priority_idx, u, p, t, optimization_type) + optimize_per_source!( + allocation_model, + demand_priority_idx, + u, + p_non_diff, + t, + optimization_type, + ) # Assign the allocations to the UserDemand or subnetwork for this demand priority - 
assign_allocations!(allocation_model, p, demand_priority_idx, optimization_type) + assign_allocations!( + allocation_model, + p_non_diff, + demand_priority_idx, + optimization_type, + ) # Save the demands and allocated flows for all nodes that have these - save_demands_and_allocations!(p, allocation_model, t, demand_priority_idx) + save_demands_and_allocations!(p_non_diff, allocation_model, t, demand_priority_idx) # Save the flows over all links in the subnetwork save_allocation_flows!( - p, + p_non_diff, t, allocation_model, demand_priorities_all[demand_priority_idx], @@ -1018,22 +1046,22 @@ Set the initial capacities and demands which are reduced by usage. function set_initial_values!( allocation_model::AllocationModel, u::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing - set_initial_capacities_source!(allocation_model, p) - set_initial_capacities_link!(allocation_model, p) - set_initial_capacities_basin!(allocation_model, u, p, t) + set_initial_capacities_source!(allocation_model, p_non_diff) + set_initial_capacities_link!(allocation_model, p_non_diff) + set_initial_capacities_basin!(allocation_model, u, p_non_diff, t) set_initial_capacities_buffer!(allocation_model) - set_initial_capacities_returnflow!(allocation_model, p) + set_initial_capacities_returnflow!(allocation_model, p_non_diff) for source in values(allocation_model.sources) source.capacity_reduced = source.capacity end - set_initial_demands_user!(allocation_model, p, t) - set_initial_demands_level!(allocation_model, u, p, t) - set_initial_demands_flow!(allocation_model, p, t) + set_initial_demands_user!(allocation_model, p_non_diff, t) + set_initial_demands_level!(allocation_model, u, p_non_diff, t) + set_initial_demands_flow!(allocation_model, p_non_diff, t) return nothing end @@ -1063,26 +1091,26 @@ Update the allocation optimization problem for the given subnetwork with the pro and flows, solve the allocation problem and assign the results to the UserDemand. 
""" function collect_demands!( - p::Parameters, + p_non_diff::ParametersNonDiff, allocation_model::AllocationModel, t::Float64, u::Vector, )::Nothing - (; allocation) = p + (; allocation) = p_non_diff (; subnetwork_id) = allocation_model (; demand_priorities_all, subnetwork_demands, main_network_connections) = allocation ## Find internal sources optimization_type = OptimizationType.internal_sources - set_initial_capacities_inlet!(allocation_model, p, optimization_type) - set_initial_values!(allocation_model, u, p, t) + set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type) + set_initial_values!(allocation_model, u, p_non_diff, t) # Loop over demand priorities for demand_priority_idx in eachindex(demand_priorities_all) optimize_demand_priority!( allocation_model, u, - p, + p_non_diff, t, demand_priority_idx, optimization_type, @@ -1101,7 +1129,7 @@ function collect_demands!( end end - set_initial_capacities_inlet!(allocation_model, p, optimization_type) + set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type) # When collecting demands, only flow should be available # from the main to subnetwork connections @@ -1112,7 +1140,7 @@ function collect_demands!( optimize_demand_priority!( allocation_model, u, - p, + p_non_diff, t, demand_priority_idx, optimization_type, @@ -1121,24 +1149,24 @@ function collect_demands!( end function allocate_demands!( - p::Parameters, + p_non_diff::ParametersNonDiff, allocation_model::AllocationModel, t::Float64, u::Vector, )::Nothing optimization_type = OptimizationType.allocate - (; demand_priorities_all) = p.allocation + (; demand_priorities_all) = p_non_diff.allocation - set_initial_capacities_inlet!(allocation_model, p, optimization_type) + set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type) - set_initial_values!(allocation_model, u, p, t) + set_initial_values!(allocation_model, u, p_non_diff, t) # Loop over the demand priorities for demand_priority_idx in 
eachindex(demand_priorities_all) optimize_demand_priority!( allocation_model, u, - p, + p_non_diff, t, demand_priority_idx, optimization_type, diff --git a/core/src/callback.jl b/core/src/callback.jl index 76cf13c1f..9c4de57ce 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -5,11 +5,11 @@ are combined to a CallbackSet that goes to the integrator. Returns the CallbackSet and the SavedValues for flow. """ function create_callbacks( - parameters::Parameters, + p_non_diff::ParametersNonDiff, config::Config, saveat, )::Tuple{CallbackSet, SavedResults} - (; starttime, basin, flow_boundary, level_boundary, user_demand) = parameters + (; starttime, basin, flow_boundary, level_boundary, user_demand) = p_non_diff callbacks = SciMLBase.DECallback[] # Check for negative storage @@ -111,12 +111,13 @@ Specifically, we first use all the inflows to update the mass of the Basins, rec the Basin concentration(s) and then remove the mass that is being lost to the outflows. """ function update_cumulative_flows!(u, t, integrator)::Nothing + (; p_non_diff, p_mutable) = integrator.p (; p, tprev, dt) = integrator - (; basin, flow_boundary, allocation) = p + (; basin, flow_boundary, allocation) = p_non_diff (; vertical_flux) = basin # Update tprev - p.tprev = t + p_mutable.tprev = t # Update cumulative forcings which are integrated exactly @. 
basin.cumulative_drainage += vertical_flux.drainage * dt @@ -148,7 +149,8 @@ function update_cumulative_flows!(u, t, integrator)::Nothing # Update realized flows for allocation input for subnetwork_id in allocation.subnetwork_ids - mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p, subnetwork_id) + mean_input_flows_subnetwork_ = + mean_input_flows_subnetwork(p_non_diff, subnetwork_id) for link in keys(mean_input_flows_subnetwork_) mean_input_flows_subnetwork_[link] += flow_update_on_link(integrator, link) end @@ -175,7 +177,8 @@ end function update_concentrations!(u, t, integrator)::Nothing (; uprev, p, tprev, dt) = integrator - (; basin, flow_boundary, state_ranges) = p + (; p_non_diff, diff_cache) = p + (; basin, flow_boundary, state_ranges, cache_ranges) = p_non_diff (; vertical_flux, concentration_data) = basin (; evaporate_mass, cumulative_in, concentration_state, concentration, mass) = concentration_data @@ -250,9 +253,12 @@ function update_concentrations!(u, t, integrator)::Nothing end # Update the Basin concentrations again based on the removed mass - concentration_state .= mass ./ basin.current_properties.current_storage[u] - basin.storage_prev .= basin.current_properties.current_storage[u] - basin.level_prev .= basin.current_properties.current_level[u] + current_storage = view(diff_cache, cache_ranges.current_storage) + current_level = view(diff_cache, cache_ranges.current_level) + + concentration_state .= mass ./ current_storage + basin.storage_prev .= current_storage + basin.level_prev .= current_level return nothing end @@ -266,7 +272,7 @@ function flow_update_on_link( link_src::Tuple{NodeID, NodeID}, )::Float64 (; u, uprev, p, t, tprev, dt) = integrator - (; basin, flow_boundary, state_ranges) = p + (; basin, flow_boundary, state_ranges) = p.p_non_diff (; vertical_flux) = basin u_evaporation = view(u, state_ranges.evaporation) @@ -289,7 +295,7 @@ function flow_update_on_link( 0.0 end else - flow_idx = get_state_index(p.state_ranges, 
link_src) + flow_idx = get_state_index(state_ranges, link_src) u[flow_idx] - uprev[flow_idx] end end @@ -299,10 +305,11 @@ Save the storages and levels at the latest t. """ function save_basin_state(u, t, integrator) (; p) = integrator - (; basin) = p + (; p_non_diff, diff_cache) = p + (; cache_ranges) = p_non_diff + current_storage = view(diff_cache, cache_ranges.current_storage) + current_level = view(diff_cache, cache_ranges.current_level) du = get_du(integrator) - current_storage = basin.current_properties.current_storage[du] - current_level = basin.current_properties.current_level[du] water_balance!(du, u, p, t) SavedBasinState(; storage = copy(current_storage), level = copy(current_level), t) end @@ -314,7 +321,8 @@ inflow and outflow per Basin. """ function save_flow(u, t, integrator) (; p) = integrator - (; basin, state_inflow_link, state_outflow_link, flow_boundary, u_prev_saveat) = p + (; basin, state_inflow_link, state_outflow_link, flow_boundary, u_prev_saveat) = + p.p_non_diff Δt = get_Δt(integrator) flow_mean = (u - u_prev_saveat) / Δt @@ -386,13 +394,21 @@ function check_water_balance_error!( Δt::Float64, )::Nothing (; u, p, t) = integrator - (; basin, water_balance_abstol, water_balance_reltol, state_ranges) = p + (; p_non_diff, diff_cache) = p + (; + basin, + water_balance_abstol, + water_balance_reltol, + state_ranges, + cache_ranges, + starttime, + ) = p_non_diff errors = false - current_storage = basin.current_properties.current_storage[u] + current_storage = view(diff_cache, cache_ranges.current_storage) # The initial storage is irrelevant for the storage rate and can only cause # floating point truncation errors - formulate_storages!(current_storage, u, u, p, t; add_initial_storage = false) + formulate_storages!(u, p, t; add_initial_storage = false) evaporation = view(saved_flow.flow, state_ranges.evaporation) infiltration = view(saved_flow.flow, state_ranges.infiltration) @@ -436,7 +452,7 @@ function check_water_balance_error!( 
saved_flow.relative_error[id.idx] = relative_error end if errors - t = datetime_since(t, p.starttime) + t = datetime_since(t, starttime) error("Too large water balance error(s) detected at t = $t") end @@ -456,15 +472,14 @@ function save_solver_stats(u, t, integrator) end function check_negative_storage(u, t, integrator)::Nothing - (; p_non_diff, p_diff, p_mutable) = p - node_id = p_non_diff.basin - (; current_storage) = p_diff.basin_cache - du = get_du(integrator) - set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) - current_storage = current_storage[du] + (; p) = integrator + (; p_non_diff, diff_cache) = p + (; basin, cache_ranges) = p_non_diff + current_storage = view(diff_cache, cache_ranges.current_storage) + set_current_basin_properties!(u, p, t) errors = false - for id in node_id + for id in basin.node_id if current_storage[id.idx] < 0 @error "Negative storage detected in $id" errors = true @@ -492,7 +507,7 @@ Apply the discrete control logic. There's somewhat of a complex structure: """ function apply_discrete_control!(u, t, integrator)::Nothing (; p) = integrator - (; discrete_control) = p + (; discrete_control) = p.p_non_diff (; node_id, truth_state, compound_variables) = discrete_control du = get_du(integrator) water_balance!(du, u, p, t) @@ -661,9 +676,9 @@ end function update_subgrid_level!(integrator)::Nothing (; p, t) = integrator - du = get_du(integrator) - basin_level = p.basin.current_properties.current_level[du] - subgrid = integrator.p.subgrid + (; p_non_diff, diff_cache) = p + basin_level = view(diff_cache, p_non_diff.cache_ranges.current_level) + subgrid = p_non_diff.subgrid # First update the all the subgrids with static h(h) relations for (level_index, basin_index, hh_itp) in zip( @@ -688,7 +703,7 @@ end "Interpolate the levels and save them to SavedValues" function save_subgrid_level(u, t, integrator) update_subgrid_level!(integrator) - return copy(integrator.p.subgrid.level) + return 
copy(integrator.p.p_non_diff.subgrid.level) end "Update one current vertical flux from an interpolation at time t." @@ -714,7 +729,7 @@ in the ConstantInterpolations we use, failing the vertical_flux_means test. """ function update_basin!(integrator)::Nothing (; p, t) = integrator - (; basin) = p + (; basin) = p.p_non_diff update_basin!(basin, t) return nothing @@ -735,11 +750,11 @@ end "Load updates from 'Basin / concentration' into the parameters" function update_basin_conc!(integrator)::Nothing - (; p) = integrator - (; basin) = p + (; p_non_diff) = integrator.p + (; basin, starttime) = p_non_diff (; node_id, concentration_data, concentration_time) = basin (; concentration, substances) = concentration_data - t = datetime_since(integrator.t, integrator.p.starttime) + t = datetime_since(integrator.t, starttime) rows = searchsorted(concentration_time.time, t) timeblock = view(concentration_time, rows) @@ -755,12 +770,12 @@ end "Load updates from 'concentration' tables into the parameters" function update_conc!(integrator, parameter, nodetype)::Nothing - (; p) = integrator - node = getproperty(p, parameter) - (; basin) = p + (; p_non_diff) = integrator.p + (; basin, starttime) = p_non_diff + node = getproperty(p_non_diff, parameter) (; node_id, concentration, concentration_time) = node (; substances) = basin.concentration_data - t = datetime_since(integrator.t, integrator.p.starttime) + t = datetime_since(integrator.t, starttime) rows = searchsorted(concentration_time.time, t) timeblock = view(concentration_time, rows) @@ -782,14 +797,12 @@ update_userd_conc!(integrator)::Nothing = "Solve the allocation problem for all demands and assign allocated abstractions." 
function update_allocation!(integrator)::Nothing (; p, t, u) = integrator - (; allocation, basin) = p - (; current_storage) = basin.current_properties + (; p_non_diff) = p + (; allocation) = p_non_diff (; allocation_models, mean_input_flows, mean_realized_flows) = allocation # Make sure current storages are up to date - du = get_du(integrator) - current_storage = current_storage[du] - formulate_storages!(current_storage, du, u, p, t) + formulate_storages!(u, p, t) # Don't run the allocation algorithm if allocation is not active # (Specifically for running Ribasim via the BMI) @@ -813,7 +826,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present, collect demands of subnetworks if has_main_network(allocation) for allocation_model in Iterators.drop(allocation_models, 1) - collect_demands!(p, allocation_model, t, u) + collect_demands!(p_non_diff, allocation_model, t, u) end end @@ -821,7 +834,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present this is solved first, # which provides allocation to the subnetworks for allocation_model in allocation_models - allocate_demands!(p, allocation_model, t, u) + allocate_demands!(p_non_diff, allocation_model, t, u) end # Reset the mean flows diff --git a/core/src/concentration.jl b/core/src/concentration.jl index 4cea74033..a6d470283 100644 --- a/core/src/concentration.jl +++ b/core/src/concentration.jl @@ -3,7 +3,7 @@ Process mass updates for UserDemand separately as the inflow and outflow are decoupled in the states """ function mass_updates_user_demand!(integrator::DEIntegrator)::Nothing - (; basin, user_demand) = integrator.p + (; basin, user_demand) = integrator.p.p_non_diff (; concentration_state, mass) = basin.concentration_data @views for (inflow_link, outflow_link) in @@ -35,7 +35,8 @@ end Process all mass inflows to basins """ function mass_inflows_basin!(integrator::DEIntegrator)::Nothing - (; basin, state_inflow_link, state_outflow_link, level_boundary) = 
integrator.p + (; basin, state_inflow_link, state_outflow_link, level_boundary) = + integrator.p.p_non_diff (; cumulative_in, concentration_state, mass) = basin.concentration_data for (inflow_link, outflow_link) in zip(state_inflow_link, state_outflow_link) @@ -92,7 +93,7 @@ end Process all mass outflows from Basins """ function mass_outflows_basin!(integrator::DEIntegrator)::Nothing - (; state_inflow_link, state_outflow_link, basin) = integrator.p + (; state_inflow_link, state_outflow_link, basin) = integrator.p.p_non_diff (; mass, concentration_state) = basin.concentration_data @views for (inflow_link, outflow_link) in zip(state_inflow_link, state_outflow_link) diff --git a/core/src/graph.jl b/core/src/graph.jl index dc394969c..c461fa844 100644 --- a/core/src/graph.jl +++ b/core/src/graph.jl @@ -237,7 +237,7 @@ function get_flow( link::Tuple{NodeID, NodeID}; boundary_flow = nothing, ) - (; flow_boundary) = p + (; flow_boundary) = p.p_non_diff from_id = link[1] if from_id.type == NodeType.FlowBoundary if boundary_flow === nothing @@ -247,13 +247,13 @@ function get_flow( boundary_flow[from_id.idx] end else - flow[get_state_index(p.state_ranges, link)] + flow[get_state_index(p.p_non_diff.state_ranges, link)] end end function get_influx(du::Vector, id::NodeID, p::Parameters) @assert id.type == NodeType.Basin - (; basin, state_ranges) = p + (; basin, state_ranges) = p.non_diff (; vertical_flux) = basin du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) diff --git a/core/src/model.jl b/core/src/model.jl index 55fedac17..449b21bfa 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -29,16 +29,16 @@ struct Model{T} end function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) - (; p_non_diff, p_diff, p_mutable) = p + (; p_non_diff, diff_cache, p_mutable) = p backend = get_ad_type(solver) - if solver.sparse - backend = AutoSparse( - backend; - sparsity_detector = TracerSparsityDetector(), - 
coloring_algorithm = GreedyColoringAlgorithm(), - ) - end + # if solver.sparse + # backend = AutoSparse( + # backend; + # sparsity_detector = TracerSparsityDetector(), + # coloring_algorithm = GreedyColoringAlgorithm(), + # ) + # end t = 0.0 @@ -50,13 +50,14 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) backend, u, Constant(p_non_diff), - Cache(p_diff), + Cache(diff_cache), Constant(p_mutable), Constant(t), ) p_mutable.all_nodes_active = false - jac_prototype = solver.sparse ? sparsity_pattern(prep) : nothing + #jac_prototype = solver.sparse ? sparsity_pattern(prep) : nothing + jac_prototype = nothing jac(J, u, p, t) = jacobian!( water_balance!, @@ -66,7 +67,7 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) backend, u, Constant(p.p_non_diff), - Cache(p.p_diff), + Cache(diff_cache), Constant(p.p_mutable), Constant(t), ) @@ -107,10 +108,10 @@ function Model(config::Config)::Model t0 = zero(t_end) timespan = (t0, t_end) - local parameters, p_non_diff, p_diff, p_mutable, tstops + local parameters, p_non_diff, diff_cache, p_mutable, tstops try parameters = Parameters(db, config) - (; p_non_diff, p_diff, p_mutable) = parameters + (; p_non_diff, diff_cache, p_mutable) = parameters if !valid_discrete_control(parameters.p_non_diff, config) error("Invalid discrete control state definition(s).") @@ -189,17 +190,15 @@ function Model(config::Config)::Model u0 = build_state_vector(parameters.p_non_diff) du0 = zero(u0) - @reset p_non_diff.u_prev_saveat = zero(u0) - # The Solver algorithm alg = algorithm(config.solver; u0) # Synchronize level with storage - set_current_basin_properties!(du0, u0, p_non_diff, p_diff, p_mutable, t0) + set_current_basin_properties!(u0, parameters, t0) # Previous level is used to estimate the minimum level that was attained during a time step # in limit_flow! 
- p_non_diff.basin.level_prev .= p_diff.cache_basin.current_level + p_non_diff.basin.level_prev .= view(diff_cache, p_non_diff.cache_ranges.current_level) saveat = convert_saveat(config.solver.saveat, t_end) saveat isa Float64 && push!(tstops, range(0, t_end; step = saveat)) @@ -212,7 +211,7 @@ function Model(config::Config)::Model prob = ODEProblem(RHS, u0, timespan, parameters) @debug "Setup ODEProblem." - callback, saved = create_callbacks(parameters, config, saveat) + callback, saved = create_callbacks(p_non_diff, config, saveat) @debug "Created callbacks." # Run water_balance! before initializing the integrator. This is because @@ -245,7 +244,7 @@ function Model(config::Config)::Model ) @debug "Setup integrator." - if config.allocation.use_allocation && is_active(parameters.allocation) + if config.allocation.use_allocation && is_active(p_non_diff.allocation) set_initial_allocation_mean_flows!(integrator) end diff --git a/core/src/parameter.jl b/core/src/parameter.jl index f125ab8d2..2a8f3abec 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -370,22 +370,6 @@ abstract type AbstractParameterNode end abstract type AbstractDemandNode <: AbstractParameterNode end -""" -Caches of current basin properties -""" -struct BasinCache{T} - current_storage::Vector{T} - # Low storage factor for reducing flows out of drying basins - # given the current storages - current_low_storage_factor::Vector{T} - current_level::Vector{T} - current_area::Vector{T} - current_cumulative_precipitation::Vector{T} - current_cumulative_drainage::Vector{T} -end - -BasinCache(n) = BasinCache((zeros(n) for _ in 1:6)...) 
- @kwdef struct ConcentrationData # Config setting to enable/disable evaporation of mass evaporate_mass::Bool = true @@ -686,47 +670,61 @@ node_id: node ID of the Terminal node node_id::Vector{NodeID} end -@kwdef struct ParametersDiff{T} <: AbstractVector{T} - cache_basin::BasinCache{T} = BasinCache(0) - cache_flow_rate_pump::Vector{T} = Float64[] - cache_flow_rate_outlet::Vector{T} = Float64[] - cache_error_pid_control::Vector{T} = Float64[] +""" +Collection of ranges that cover all the components of the cache vector `diff_cache`." + +It is used to create views of `diff_cache`, which allows `diff_cache` to be a vector +in stead of a custom struct which is compatible with more automatic differentiation backends. +""" +@kwdef struct CacheRanges + current_storage::UnitRange{Int64} = 1:0 + # Low storage factor for reducing flows out of drying basins + # given the current storages + current_low_storage_factor::UnitRange{Int64} = 1:0 + current_level::UnitRange{Int64} = 1:0 + current_area::UnitRange{Int64} = 1:0 + current_cumulative_precipitation::UnitRange{Int64} = 1:0 + current_cumulative_drainage::UnitRange{Int64} = 1:0 + flow_rate_pump::UnitRange{Int64} = 1:0 + flow_rate_outlet::UnitRange{Int64} = 1:0 + error_pid_control::UnitRange{Int64} = 1:0 +end + +function CacheRanges(nodes::NamedTuple) + n_basin = length(nodes.basin.node_id) + n_pump = length(nodes.pump.node_id) + n_outlet = length(nodes.outlet.node_id) + n_pid_control = length(nodes.pid_control.node_id) + lengths = [fill(n_basin, 6)..., n_pump, n_outlet, n_pid_control] + CacheRanges(ranges(lengths)...) 
end -@enumx ParameterDiffType flow_rate_pump flow_rate_outlet basin_level +@enumx DiffCacheType flow_rate_pump flow_rate_outlet basin_level -@kwdef struct ParametersDiffRef - type::ParameterDiffType.T = ParameterDiffType.pump_flow_rate +@kwdef struct DiffCacheRef + type::DiffCacheType.T = DiffCacheType.pump_flow_rate idx::Int = 0 from_du::Bool = false end -function get_vector(type::ParameterDiffType.T, p_diff::ParametersDiff) - if type == ParameterDiffType.flow_rate_pump - p_diff.cache_flow_rate_pump - elseif type == ParameterDiffType.flow_rate_outlet - p_diff.cache_flow_rate_outlet - else # type == ParameterDiffType.basin_level - p_diff.cache_basin.current_level +function get_cache_view( + diff_cache::Vector, + type::DiffCacheType.T, + cache_ranges::CacheRanges, +) + cache_range = if type == DiffCacheType.flow_rate_pump + cache_ranges.flow_rate_pump + elseif type == DiffCacheType.flow_rate_outlet + cache_ranges.flow_rate_outlet + else # type == DiffCacheType.basin_level + cache_ranges.current_level end -end - -function get_value(ref::ParametersDiffRef, p_diff::ParametersDiff, du::Vector) - if ref.from_du - du[ref.idx] - else - get_vector(ref.type, p_diff)[ref.idx] - end -end - -function set_value!(ref::ParametersDiffRef, p_diff::ParametersDiff, value) - @assert !ref.from_du - get_vector(ref.type, p_diff)[ref.idx] = value + view(diff_cache, cache_range) end @kwdef struct SubVariable listen_node_id::NodeID - variable_ref::ParametersDiffRef + variable_ref::DiffCacheRef variable::String weight::Float64 look_ahead::Float64 @@ -783,7 +781,7 @@ end node_id::Vector{NodeID} compound_variable::Vector{CompoundVariable} controlled_variable::Vector{String} - target_ref::Vector{ParametersDiffRef} + target_ref::Vector{DiffCacheRef} func::Vector{ScalarInterpolation} end @@ -806,8 +804,7 @@ control_mapping: dictionary from (node_id, control_state) to target flow rate active::Vector{Bool} listen_node_id::Vector{NodeID} target::Vector{ScalarInterpolation} - 
target_ref::Vector{ParametersDiffRef} = - Vector{ParametersDiffRef}(undef, length(node_id)) + target_ref::Vector{DiffCacheRef} = Vector{DiffCacheRef}(undef, length(node_id)) proportional::Vector{ScalarInterpolation} integral::Vector{ScalarInterpolation} derivative::Vector{ScalarInterpolation} @@ -950,17 +947,7 @@ It is used to create views of `u`, and an low-latency alternative to making `u` integral::UnitRange{Int64} = 1:0 end -function StateRanges(u_ids::NamedTuple)::StateRanges - lengths = map(length, u_ids) - # from the lengths of the components - # construct [1:n_pump, (n_pump+1):(n_pump+n_outlet)] - # which are used to create views into the data array - bounds = pushfirst!(cumsum(lengths), 0) - ranges = [range(p[1] + 1, p[2]) for p in IterTools.partition(bounds, 2, 1)] - # standardize empty ranges to 1:0 for easier testing - replace!(x -> isempty(x) ? (1:0) : x, ranges) - return StateRanges(ranges...) -end +StateRanges(u_ids::NamedTuple) = StateRanges(ranges(map(length, collect(u_ids)))...) 
@kwdef mutable struct ParametersMutable all_nodes_active::Bool = false @@ -999,17 +986,32 @@ end u_prev_saveat::Vector{Float64} = Float64[] # Node ID associated with each state node_id::Vector{NodeID} = NodeID[] - # Range per states component + # Range per states or cache component state_ranges::StateRanges = StateRanges() + cache_ranges::CacheRanges = CacheRanges() end -@kwdef struct Parameters{C1, C2, C3, C4, C5} +length_cache(p_non_diff::ParametersNonDiff) = + 6 * length(p_non_diff.basin.node_id) + + length(p_non_diff.pump.node_id) + + length(p_non_diff.outlet.node_id) + + length(p_non_diff.pid_control.node_id) + +@kwdef struct Parameters{C1, C2, C3, C4, C5, T} p_non_diff::ParametersNonDiff{C1, C2, C3, C4, C5} - p_diff::ParametersDiff{Float64} = ParametersDiff(; - cache_basin = BasinCache(length(p_non_diff.basin.node_id)), - cache_flow_rate_pump = zeros(length(p_non_diff.pump.node_id)), - cache_flow_rate_outlet = zeros(length(p_non_diff.pump.node_id)), - cache_error_pid_control = zeros(length(p_non_diff.pid_control.node_id)), - ) + diff_cache::Vector{T} = zeros(length_cache(p_non_diff)) p_mutable::ParametersMutable = ParametersMutable() end + +function get_value(ref::DiffCacheRef, p::Parameters, du::Vector) + if ref.from_du + du[ref.idx] + else + get_cache_view(p.diff_cache, ref.type, p.non_diff.cache_ranges)[ref.idx] + end +end + +function set_value!(ref::DiffCacheRef, p::Parameters, value) + @assert !ref.from_du + get_cache_view(p.diff_cache, ref.type, p.non_diff.cache_ranges)[ref.idx] = value +end diff --git a/core/src/read.jl b/core/src/read.jl index f77e546d3..4829805dc 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -1038,7 +1038,7 @@ function CompoundVariable( for row in variables_compound_variable listen_node_id = NodeID(row.listen_node_id, node_ids_all) # Placeholder until actual ref is known - variable_ref = ParametersDiffRef() + variable_ref = DiffCacheRef() variable = row.variable # Default to weight = 1.0 if not specified weight = 
coalesce(row.weight, 1.0) @@ -1226,7 +1226,7 @@ function ContinuousControl(db::DB, config::Config)::ContinuousControl compound_variable = continuous_control_compound_variables(db, config, ids) # References to the controlled parameters, filled in later when they are known - target_refs = ParametersDiffRef[] + target_refs = DiffCacheRef[] if errors error("Errors encountered when parsing ContinuousControl data.") @@ -1815,6 +1815,7 @@ function Parameters(db::DB, config::Config)::Parameters node_id = reduce(vcat, u_ids) n_states = length(node_id) state_ranges = StateRanges(u_ids) + cache_ranges = CacheRanges(nodes) flow_to_storage = build_flow_to_storage(state_ranges, n_states, basin, connector_nodes) state_inflow_link, state_outflow_link = get_state_flow_links(graph, nodes) @@ -1836,8 +1837,10 @@ function Parameters(db::DB, config::Config)::Parameters flow_to_storage, config.solver.water_balance_abstol, config.solver.water_balance_reltol, + u_prev_saveat = zeros(n_states), node_id, state_ranges, + cache_ranges, ) collect_control_mappings!(p_non_diff) diff --git a/core/src/solve.jl b/core/src/solve.jl index 82ab28b11..5be325ca5 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -1,7 +1,7 @@ # Wrapper method used by OrdinaryDiffEq; the other method is used # for Jacobian evaluation with DifferentiationInterface.jl water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing = - water_balance(du, u, p.p_non_diff, p.p_diff, p.p_mutable, t) + water_balance!(du, u, p.p_non_diff, p.diff_cache, p.p_mutable, t) """ The right hand side function of the system of ODEs set up by Ribasim. 
@@ -10,20 +10,16 @@ function water_balance!( du::Vector, u::Vector, p_non_diff::ParametersNonDiff, - p_diff::ParametersDiff, + diff_cache::Vector, p_mutable::ParametersMutable, t::Number, )::Nothing - (; current_storage, current_low_storage_factor, current_level) = p_diff.cache_basin + p = Parameters(p_non_diff, diff_cache, p_mutable) du .= 0.0 # Ensures current_* vectors are current - set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) - - current_storage = current_storage[du] - current_low_storage_factor = current_low_storage_factor[du] - current_level = current_level[du] + set_current_basin_properties!(u, p, t) # Notes on the ordering of these formulations: # - Continuous control can depend on flows (which are not continuously controlled themselves), @@ -35,39 +31,25 @@ function water_balance!( update_vertical_flux!(du, p) # Formulate intermediate flows (non continuously controlled) - formulate_flows!(du, p, t, current_low_storage_factor, current_level) + formulate_flows!(du, p, t) # Compute continuous control formulate_continuous_control!(du, p, t) # Formulate intermediate flows (controlled by ContinuousControl) - formulate_flows!( - du, - p, - t, - current_low_storage_factor, - current_level; - continuous_control_type = ContinuousControlType.Continuous, - ) + formulate_flows!(du, p, t; continuous_control_type = ContinuousControlType.Continuous) # Compute PID control formulate_pid_control!(du, u, p, t) # Formulate intermediate flow (controlled by PID control) - formulate_flows!( - du, - p, - t, - current_low_storage_factor, - current_level; - continuous_control_type = ContinuousControlType.PID, - ) + formulate_flows!(du, p, t; continuous_control_type = ContinuousControlType.PID) return nothing end function formulate_continuous_control!(du, p, t)::Nothing - (; compound_variable, target_ref, func) = p.continuous_control + (; compound_variable, target_ref, func) = p.p_non_diff.continuous_control for (cvar, ref, func_) in zip(compound_variable, 
target_ref, func) value = compound_variable_value(cvar, p, du, t) @@ -80,28 +62,21 @@ end Compute the storages, levels and areas of all Basins given the state u and the time t. """ -function set_current_basin_properties!( - du::Vector, - u::Vector, - p_non_diff::ParametersNonDiff, - p_diff::ParametersDiff, - p_mutable::ParametersMutable, - t::Number, -)::Nothing - (; basin) = p_non_diff - (; cumulative_precipitation, cumulative_drainage, vertical_flux) = p_non_diff.basin - (; - current_storage, - current_low_storage_factor, - current_level, - current_area, - current_cumulative_precipitation, - current_cumulative_drainage, - ) = p_diff.cache_basin +function set_current_basin_properties!(u::Vector, p::Parameters, t::Number)::Nothing + (; p_non_diff, diff_cache, p_mutable) = p + (; basin, cache_ranges) = p_non_diff + (; node_id, cumulative_precipitation, cumulative_drainage, vertical_flux) = basin + current_storage = view(diff_cache, cache_ranges.current_storage) + current_low_storage_factor = view(diff_cache, cache_ranges.current_low_storage_factor) + current_level = view(diff_cache, cache_ranges.current_level) + current_area = view(diff_cache, cache_ranges.current_area) + current_cumulative_precipitation = + view(diff_cache, cache_ranges.current_cumulative_precipitation) + current_cumulative_drainage = view(diff_cache, cache_ranges.current_cumulative_drainage) # The exact cumulative precipitation and drainage up to the t of this water_balance call dt = t - p_mutable.tprev - for node_id in basin.node_id + for node_id in node_id fixed_area = basin_areas(basin, node_id.idx)[end] current_cumulative_precipitation[node_id.idx] = cumulative_precipitation[node_id.idx] + @@ -109,7 +84,7 @@ function set_current_basin_properties!( end @. 
current_cumulative_drainage = cumulative_drainage + dt * vertical_flux.drainage - formulate_storages!(current_storage, u, p_non_diff, p_diff, p_mutable, t) + formulate_storages!(u, p, t) for (id, s) in zip(basin.node_id, current_storage) i = id.idx @@ -120,17 +95,15 @@ function set_current_basin_properties!( end function formulate_storages!( - current_storage::AbstractVector, u::Vector, - p_non_diff::ParametersNonDiff, - p_diff::ParametersDiff, - p_mutable::ParametersMutable, + p::Parameters, t::Number; add_initial_storage::Bool = true, )::Nothing - (; basin, flow_boundary, flow_to_storage) = p_non_diff - (; cache_basin) = p_diff + (; p_non_diff, diff_cache, p_mutable) = p + (; basin, flow_boundary, flow_to_storage, cache_ranges) = p_non_diff (; tprev) = p_mutable + current_storage = view(diff_cache, cache_ranges.current_level) # Current storage: initial condition + # total inflows and outflows since the start # of the simulation @@ -140,7 +113,7 @@ function formulate_storages!( current_storage .= 0.0 end mul!(current_storage, flow_to_storage, u, 1, 1) - formulate_storage!(cache_basin) + formulate_storage!(diff_cache, cache_ranges) formulate_storage!(current_storage, tprev, t, flow_boundary) return nothing end @@ -148,9 +121,11 @@ end """ The storage contributions of the forcings that are not part of the state. 
""" -function formulate_storage!(cache_basin::BasinCache) - (; current_storage, current_cumulative_precipitation, current_cumulative_drainage) = - cache_basin +function formulate_storage!(diff_cache::Vector, cache_ranges::CacheRanges) + current_storage = view(diff_cache, cache_ranges.current_storage) + current_cumulative_precipitation = + view(diff_cache, cache_ranges.current_cumulative_precipitation) + current_cumulative_drainage = view(diff_cache, cache_ranges.current_cumulative_drainage) current_storage .+= current_cumulative_precipitation current_storage .+= current_cumulative_drainage @@ -189,11 +164,11 @@ Smoothly let the evaporation flux go to 0 when at small water depths Currently at less than 0.1 m. """ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing - (; basin, state_ranges) = p - (; vertical_flux, current_properties) = basin - (; current_level, current_area) = current_properties - current_level = current_level[du] - current_area = current_area[du] + (; p_non_diff, diff_cache) = p + (; basin, state_ranges, cache_ranges) = p_non_diff + (; vertical_flux) = basin + current_level = view(diff_cache, cache_ranges.current_level) + current_area = view(diff_cache, cache_ranges.current_area) du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) @@ -215,11 +190,12 @@ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing return nothing end -function set_error!(pid_control::PidControl, p::Parameters, du::Vector, t::Number) - (; basin) = p - (; listen_node_id, target, error) = pid_control - error = error[du] - current_level = basin.current_properties.current_level[du] +function set_error!(pid_control::PidControl, p::Parameters, t::Number) + (; p_non_diff, diff_cache) = p + (; cache_ranges) = p_non_diff + (; listen_node_id, target) = pid_control + current_level = view(diff_cache, cache_ranges.current_level) + error = view(diff_cache, cache_ranges.error_pid_control) for i in 
eachindex(listen_node_id) listened_node_id = listen_node_id[i] @@ -229,17 +205,18 @@ function set_error!(pid_control::PidControl, p::Parameters, du::Vector, t::Numbe end function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing - (; basin, state_ranges, pid_control) = p - (; node_id, active, target, listen_node_id, error) = pid_control - (; current_area) = basin.current_properties + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges, cache_ranges, pid_control) = p_non_diff + (; node_id, active, target, listen_node_id) = pid_control + du_integral = view(du, state_ranges.integral) u_integral = view(u, state_ranges.integral) + current_area = view(diff_cache, cache_ranges.current_area) + error = view(diff_cache, cache_ranges.error_pid_control) - current_area = current_area[du] - error = error[du] - all_nodes_active = p.all_nodes_active[] + all_nodes_active = p_mutable.all_nodes_active[] - set_error!(pid_control, p, du, t) + set_error!(pid_control, p, t) for (i, id) in enumerate(node_id) if !(active[i] || all_nodes_active) @@ -321,11 +298,10 @@ function formulate_flow!( user_demand::UserDemand, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - (; allocation, state_ranges) = p - all_nodes_active = p.all_nodes_active[] + (; p_non_diff) = p + (; allocation, state_ranges) = p_non_diff + all_nodes_active = p.p_mutable.all_nodes_active[] du_user_demand_inflow = view(du, state_ranges.user_demand_inflow) du_user_demand_outflow = view(du, state_ranges.user_demand_outflow) @@ -358,12 +334,12 @@ function formulate_flow!( # Smoothly let abstraction go to 0 as the source basin dries out inflow_id = inflow_link.link[1] - factor_basin = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor_basin = get_low_storage_factor(p, inflow_id) q *= factor_basin # Smoothly let abstraction go to 0 as the source basin # level reaches its minimum level - source_level = get_level(p, 
inflow_id, t, current_level) + source_level = get_level(p, inflow_id, t) Δsource_level = source_level - min_level factor_level = reduction_factor(Δsource_level, USER_DEMAND_MIN_LEVEL_THRESHOLD) q *= factor_level @@ -378,12 +354,14 @@ function formulate_flow!( linear_resistance::LinearResistance, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, p_mutable) = p + (; state_ranges) = p_non_diff + all_nodes_active = p_mutable.all_nodes_active[] (; node_id, active, resistance, max_flow_rate) = linear_resistance - du_linear_resistance = view(du, p.state_ranges.linear_resistance) + + du_linear_resistance = view(du, state_ranges.linear_resistance) + for id in node_id inflow_link = linear_resistance.inflow_link[id.idx] outflow_link = linear_resistance.outflow_link[id.idx] @@ -392,16 +370,11 @@ function formulate_flow!( outflow_id = outflow_link.link[2] if (active[id.idx] || all_nodes_active) - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) q_unlimited = (h_a - h_b) / resistance[id.idx] q = clamp(q_unlimited, -max_flow_rate[id.idx], max_flow_rate[id.idx]) - q *= low_storage_factor_resistance_node( - current_low_storage_factor, - q, - inflow_id, - outflow_id, - ) + q *= low_storage_factor_resistance_node(p, q, inflow_id, outflow_id) du_linear_resistance[id.idx] = q end end @@ -413,14 +386,14 @@ function formulate_flow!( tabulated_rating_curve::TabulatedRatingCurve, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, p_mutable) = p + (; state_ranges) = p_non_diff + all_nodes_active = p_mutable.all_nodes_active[] (; node_id, active, interpolations, current_interpolation_index) = tabulated_rating_curve - du_tabulated_rating_curve = view(du, 
p.state_ranges.tabulated_rating_curve) + du_tabulated_rating_curve = view(du, state_ranges.tabulated_rating_curve) for id in node_id inflow_link = tabulated_rating_curve.inflow_link[id.idx] @@ -429,12 +402,12 @@ function formulate_flow!( outflow_id = outflow_link.link[2] max_downstream_level = tabulated_rating_curve.max_downstream_level[id.idx] - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) Δh = h_a - h_b if active[id.idx] || all_nodes_active - factor = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor = get_low_storage_factor(p, inflow_id) interpolation_index = current_interpolation_index[id.idx](t) qh = interpolations[interpolation_index] q = factor * qh(h_a) @@ -493,9 +466,9 @@ function formulate_flow!( manning_resistance::ManningResistance, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing + (; p_non_diff, p_mutable) = p + (; state_ranges) = p_non_diff (; node_id, active, @@ -507,9 +480,9 @@ function formulate_flow!( downstream_bottom, ) = manning_resistance - du_manning_resistance = view(du, p.state_ranges.manning_resistance) + du_manning_resistance = view(du, state_ranges.manning_resistance) - all_nodes_active = p.all_nodes_active[] + all_nodes_active = p_mutable.all_nodes_active[] for id in node_id inflow_link = manning_resistance.inflow_link[id.idx] outflow_link = manning_resistance.outflow_link[id.idx] @@ -521,8 +494,8 @@ function formulate_flow!( continue end - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) bottom_a = upstream_bottom[id.idx] bottom_b = downstream_bottom[id.idx] @@ -550,12 +523,7 @@ function formulate_flow!( Δh = h_a - h_b q = A / n * ∛(R_h^2) * relaxed_root(Δh / L, 1e-5) - q *= low_storage_factor_resistance_node( - 
current_low_storage_factor, - q, - inflow_id, - outflow_id, - ) + q *= low_storage_factor_resistance_node(p, q, inflow_id, outflow_id) du_manning_resistance[id.idx] = q end return nothing @@ -566,12 +534,15 @@ function formulate_flow!( pump::Pump, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, continuous_control_type_::ContinuousControlType.T, )::Nothing - du_pump = view(du, p.state_ranges.pump) - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges, cache_ranges) = p_non_diff + + du_pump = view(du, state_ranges.pump) + cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) + + all_nodes_active = p_mutable.all_nodes_active[] for ( id, inflow_link, @@ -589,7 +560,7 @@ function formulate_flow!( pump.inflow_link, pump.outflow_link, pump.active, - pump.flow_rate_cache[du], + cache_flow_rate, pump.flow_rate, pump.min_flow_rate, pump.max_flow_rate, @@ -610,10 +581,10 @@ function formulate_flow!( inflow_id = inflow_link.link[1] outflow_id = outflow_link.link[2] - src_level = get_level(p, inflow_id, t, current_level) - dst_level = get_level(p, outflow_id, t, current_level) + src_level = get_level(p, inflow_id, t) + dst_level = get_level(p, outflow_id, t) - factor = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor = get_low_storage_factor(p, inflow_id) q = flow_rate * factor q *= reduction_factor(src_level - min_upstream_level(t), 0.02) @@ -630,12 +601,15 @@ function formulate_flow!( outlet::Outlet, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, continuous_control_type_::ContinuousControlType.T, )::Nothing - du_outlet = view(du, p.state_ranges.outlet) - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges, cache_ranges) = p_non_diff + + du_outlet = view(du, state_ranges.outlet) + cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) + + all_nodes_active = 
p_mutable.all_nodes_active[] for ( id, inflow_link, @@ -653,7 +627,7 @@ function formulate_flow!( outlet.inflow_link, outlet.outflow_link, outlet.active, - outlet.flow_rate_cache[du], + cache_flow_rate, outlet.flow_rate, outlet.min_flow_rate, outlet.max_flow_rate, @@ -674,11 +648,11 @@ function formulate_flow!( inflow_id = inflow_link.link[1] outflow_id = outflow_link.link[2] - src_level = get_level(p, inflow_id, t, current_level) - dst_level = get_level(p, outflow_id, t, current_level) + src_level = get_level(p, inflow_id, t) + dst_level = get_level(p, outflow_id, t) q = flow_rate - q *= get_low_storage_factor(current_low_storage_factor, inflow_id) + q *= get_low_storage_factor(p, inflow_id) # No flow of outlet if source level is lower than target level Δlevel = src_level - dst_level @@ -695,9 +669,7 @@ end function formulate_flows!( du::AbstractVector, p::Parameters, - t::Number, - current_low_storage_factor::Vector, - current_level::Vector; + t::Number; continuous_control_type::ContinuousControlType.T = ContinuousControlType.None, )::Nothing (; @@ -707,53 +679,16 @@ function formulate_flows!( pump, outlet, user_demand, - ) = p + ) = p.p_non_diff - formulate_flow!( - du, - pump, - p, - t, - current_low_storage_factor, - current_level, - continuous_control_type, - ) - formulate_flow!( - du, - outlet, - p, - t, - current_low_storage_factor, - current_level, - continuous_control_type, - ) + formulate_flow!(du, pump, p, t, continuous_control_type) + formulate_flow!(du, outlet, p, t, continuous_control_type) if continuous_control_type == ContinuousControlType.None - formulate_flow!( - du, - linear_resistance, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!( - du, - manning_resistance, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!( - du, - tabulated_rating_curve, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!(du, user_demand, p, t, current_low_storage_factor, current_level) 
+ formulate_flow!(du, linear_resistance, p, t) + formulate_flow!(du, manning_resistance, p, t) + formulate_flow!(du, tabulated_rating_curve, p, t) + formulate_flow!(du, user_demand, p, t) end end @@ -763,7 +698,7 @@ flow rates for the last time step if these flow rate bounds are known. """ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Number)::Nothing (; uprev, dt) = integrator - (; p_non_diff, p_diff, p_mutable) = p + (; p_non_diff, diff_cache) = p (; pump, outlet, @@ -773,6 +708,7 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb basin, allocation, state_ranges, + cache_ranges, ) = p_non_diff u_tabulated_rating_curve = view(u, state_ranges.tabulated_rating_curve) @@ -795,9 +731,9 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb # storage and level attained in the last time step to estimate whether there was an effect # of reduction factors du = get_du(integrator) - set_current_basin_properties!(du, u, p_non_diff, p_diff, p_mutable, t) - current_storage = basin.current_properties.current_storage[u] - current_level = basin.current_properties.current_level[u] + set_current_basin_properties!(du, p, t) + current_storage = view(diff_cache, cache_ranges.current_storage) + current_level = view(diff_cache, cache_ranges.current_level) # TabulatedRatingCurve flow is in [0, ∞) and can be inactive for (id, active) in zip(tabulated_rating_curve.node_id, tabulated_rating_curve.active) diff --git a/core/src/util.jl b/core/src/util.jl index 7862701b2..a220a8f0e 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -158,11 +158,14 @@ Get the current water level of a node ID. The ID can belong to either a Basin or a LevelBoundary. 
du: tells ForwardDiff whether this call is for differentiation or not """ -function get_level(p::Parameters, node_id::NodeID, t::Number, current_level::Vector)::Number +function get_level(p::Parameters, node_id::NodeID, t::Number)::Number + (; p_non_diff, diff_cache) = p + (; cache_ranges) = p_non_diff if node_id.type == NodeType.Basin + current_level = view(diff_cache, cache_ranges.current_level) current_level[node_id.idx] elseif node_id.type == NodeType.LevelBoundary - p.level_boundary.level[node_id.idx](t) + p_non_diff.level_boundary.level[node_id.idx](t) elseif node_id.type == NodeType.Terminal # Terminal is like a bottomless pit. # A level at -Inf ensures we don't hit `max_downstream_level` reduction factors. @@ -297,11 +300,15 @@ end # SparseConnectivityTracer overloads -function get_low_storage_factor( - current_low_storage_factor::Vector{T}, - id::NodeID, -)::T where {T} - return id.type == NodeType.Basin ? current_low_storage_factor[id.idx] : one(T) +function get_low_storage_factor(p::Parameters, id::NodeID) + (; p_non_diff, diff_cache) = p + (; cache_ranges) = p_non_diff + current_low_storage_factor = view(diff_cache, cache_ranges.current_low_storage_factor) + if id.type == NodeType.Basin + current_low_storage_factor[id.idx] + else + one(eltype(current_low_storage_factor)) + end end """ @@ -309,15 +316,15 @@ For resistance nodes, give a reduction factor based on the upstream node as defined by the flow direction. 
""" function low_storage_factor_resistance_node( - current_low_storage_factor, - q, - inflow_id, - outflow_id, + p::Parameters, + q::Number, + inflow_id::NodeID, + outflow_id::NodeID, ) if q > 0 - get_low_storage_factor(current_low_storage_factor, inflow_id) + get_low_storage_factor(p, inflow_id) else - get_low_storage_factor(current_low_storage_factor, outflow_id) + get_low_storage_factor(p, outflow_id) end end @@ -440,7 +447,7 @@ Get the time interval between (flow) saves """ function get_Δt(integrator)::Float64 (; p, t, dt) = integrator - (; saveat) = p.graph[] + (; saveat) = p.p_non_diff.graph[] if iszero(saveat) dt elseif isinf(saveat) @@ -468,7 +475,7 @@ as input. Therefore we set the instantaneous flows as the mean flows as allocati """ function set_initial_allocation_mean_flows!(integrator)::Nothing (; u, p, t) = integrator - (; allocation, graph) = p + (; allocation) = p.p_non_diff (; mean_input_flows, mean_realized_flows, allocation_models) = allocation (; Δt_allocation) = allocation_models[1] @@ -527,37 +534,37 @@ function get_variable_ref( variable::String, state_ranges::StateRanges; listen::Bool = true, -)::Tuple{ParametersDiffRef, Bool} +)::Tuple{DiffCacheRef, Bool} errors = false ref = if node_id.type == NodeType.Basin && variable == "level" - ParametersDiffRef(; type = ParameterDiffType.basin_level, node_id.idx) + DiffCacheRef(; type = DiffCacheType.basin_level, node_id.idx) elseif variable == "flow_rate" && node_id.type != NodeType.FlowBoundary if listen if node_id.type ∉ conservative_nodetypes errors = true @error "Cannot listen to flow_rate of $node_id, the node type must be one of $conservative_node_types." 
- ParametersDiffRef() + DiffCacheRef() else # Index in the state vector (inflow) idx = get_state_index(state_ranges, node_id) - ParametersDiffRef(; idx, from_du = true) + DiffCacheRef(; idx, from_du = true) end else type = if node_id.type == NodeType.Pump - ParameterDiffType.flow_rate_pump + DiffCacheType.flow_rate_pump elseif node_id.type == NodeType.Outlet - ParameterDiffType.flow_rate_outlet + DiffCacheType.flow_rate_outlet else errors = true @error "Cannot set the flow rate of $node_id." - ParameterDiffType.flow_rate_pump + DiffCacheType.flow_rate_pump end - ParametersDiffRef(; type, node_id.idx) + DiffCacheRef(; type, node_id.idx) end else # Placeholder to obtain correct type - ParametersDiffRef() + DiffCacheRef() end return ref, errors end @@ -634,7 +641,7 @@ function set_discrete_controlled_variable_refs!(p_non_diff::ParametersNonDiff):: end function set_target_ref!( - target_ref::Vector{ParametersDiffRef}, + target_ref::Vector{DiffCacheRef}, node_id::Vector{NodeID}, controlled_variable::Vector{String}, state_ranges::StateRanges, @@ -984,9 +991,9 @@ end Check whether any storages are negative given the state u. """ function isoutofdomain(u, p, t) - (; current_storage) = p.basin.current_properties - current_storage = current_storage[u] - formulate_storages!(current_storage, u, u, p, t) + (; p_non_diff, diff_cache) = p + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + formulate_storages!(u, p, t) any(<(0), current_storage) end @@ -1005,7 +1012,7 @@ estimating the lowest storage achieved over the last time step. To make sure it is an underestimate of the minimum, 2LOW_STORAGE_THRESHOLD is subtracted from this lowest storage. 
This is done to not be too strict in clamping the flow in the limiter """ -function min_low_storage_factor(storage_now::Vector{T}, storage_prev, id) where {T} +function min_low_storage_factor(storage_now::AbstractVector{T}, storage_prev, id) where {T} if id.type == NodeType.Basin reduction_factor( min(storage_now[id.idx], storage_prev[id.idx]) - 2LOW_STORAGE_THRESHOLD, @@ -1023,7 +1030,7 @@ it is an underestimate of the minimum, 2USER_DEMAND_MIN_LEVEL_THRESHOLD is subtr This is done to not be too strict in clamping the flow in the limiter """ function min_low_user_demand_level_factor( - level_now::Vector{T}, + level_now::AbstractVector{T}, level_prev, min_level, id_user_demand, @@ -1040,8 +1047,8 @@ function min_low_user_demand_level_factor( end end -function mean_input_flows_subnetwork(p::Parameters, subnetwork_id::Int32) - (; mean_input_flows, subnetwork_ids) = p.allocation +function mean_input_flows_subnetwork(p_non_diff::ParametersNonDiff, subnetwork_id::Int32) + (; mean_input_flows, subnetwork_ids) = p_non_diff.allocation subnetwork_idx = searchsortedfirst(subnetwork_ids, subnetwork_id) return mean_input_flows[subnetwork_idx] end @@ -1103,3 +1110,14 @@ function get_timeseries_tstops( return tstops end + +function ranges(lengths::Vector{<:Integer}) + # from the lengths of the components + # construct [1:n_pump, (n_pump+1):(n_pump+n_outlet)] + # which are used to create views into the data array + bounds = pushfirst!(cumsum(lengths), 0) + ranges = [range(p[1] + 1, p[2]) for p in IterTools.partition(bounds, 2, 1)] + # standardize empty ranges to 1:0 for easier testing + replace!(x -> isempty(x) ? 
(1:0) : x, ranges) + return ranges +end diff --git a/core/src/write.jl b/core/src/write.jl index 8b85abfa4..acaf423ea 100644 --- a/core/src/write.jl +++ b/core/src/write.jl @@ -82,9 +82,9 @@ function get_storages_and_levels( level::Matrix{Float64}, } (; config, integrator, saved) = model - (; p) = integrator + (; p_non_diff) = integrator.p - node_id = p.basin.node_id::Vector{NodeID} + node_id = p_non_diff.basin.node_id::Vector{NodeID} tsteps = datetime_since.(tsaves(model), config.starttime) storage = zeros(length(node_id), length(tsteps)) @@ -101,16 +101,14 @@ end function basin_state_table( model::Model, )::@NamedTuple{node_id::Vector{Int32}, level::Vector{Float64}} - du = get_du(model.integrator) (; u, p, t) = model.integrator + (; p_non_diff, diff_cache) = p + current_level = view(diff_cache, p_non_diff.cache_ranges.current_level) # ensure the levels are up-to-date - set_current_basin_properties!(du, u, p, t) + set_current_basin_properties!(u, p, t) - return (; - node_id = Int32.(p.basin.node_id), - level = p.basin.current_properties.current_level[Float64[]], - ) + return (; node_id = Int32.(p.p_non_diff.basin.node_id), level = current_level) end "Create the basin result table from the saved data" @@ -132,7 +130,7 @@ function basin_table( relative_error::Vector{Float64}, } (; saved) = model - (; state_ranges) = model.integrator.p + (; state_ranges) = model.integrator.p.p_non_diff # The last timestep is not included; there is no period over which to compute flows. 
data = get_storages_and_levels(model) @@ -197,7 +195,7 @@ function solver_stats_table( (; time = datetime_since.( solver_stats.time[1:(end - 1)], - model.integrator.p.starttime, + model.integrator.p.p_non_diff.starttime, ), rhs_calls = diff(solver_stats.rhs_calls), linear_solves = diff(solver_stats.linear_solves), @@ -218,8 +216,8 @@ function flow_table( } (; config, saved, integrator) = model (; t, saveval) = saved.flow - (; p) = integrator - (; graph) = p + (; p_non_diff) = integrator.p + (; graph) = p_non_diff (; flow_links) = graph[] from_node_id = Int32[] @@ -270,8 +268,8 @@ function concentration_table( concentration::Vector{Float64}, } (; saved, integrator) = model - (; p) = integrator - (; basin) = p + (; p_non_diff) = integrator.p + (; basin) = p_non_diff # The last timestep is not included; there is no period over which to compute flows. data = get_storages_and_levels(model) @@ -300,7 +298,7 @@ function discrete_control_table( control_state::Vector{String}, } (; config) = model - (; record) = model.integrator.p.discrete_control + (; record) = model.integrator.p.p_non_diff.discrete_control time = datetime_since.(record.time, config.starttime) return (; time, record.control_node_id, record.truth_state, record.control_state) @@ -320,7 +318,7 @@ function allocation_table( realized::Vector{Float64}, } (; config) = model - (; record_demand) = model.integrator.p.allocation + (; record_demand) = model.integrator.p.p_non_diff.allocation time = datetime_since.(record_demand.time, config.starttime) return (; @@ -350,7 +348,7 @@ function allocation_flow_table( optimization_type::Vector{String}, } (; config) = model - (; record_flow) = model.integrator.p.allocation + (; record_flow) = model.integrator.p.p_non_diff.allocation time = datetime_since.(record_flow.time, config.starttime) @@ -377,7 +375,7 @@ function subgrid_level_table( } (; config, saved, integrator) = model (; t, saveval) = saved.subgrid_level - subgrid = integrator.p.subgrid + subgrid = 
integrator.p.p_non_diff.subgrid nelem = length(subgrid.level) ntsteps = length(t) diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 000214a85..94ddccde1 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -248,9 +248,8 @@ end model = Ribasim.run(toml_path) (; u, p, t) = model.integrator (; current_storage) = p.basin.current_properties - current_storage = current_storage[Float64[]] du = get_du(model.integrator) - Ribasim.formulate_storages!(current_storage, du, u, p, t) + Ribasim.formulate_storages!(current_storage, u, p, t) @test current_storage ≈ Float32[ 1.0346908f6, diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 3a87f12b5..1069b414c 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -388,11 +388,11 @@ end @test only(current_storage) ≈ 1000.0 # constant UserDemand withdraws to 0.9m or 900m3 due to min level = 0.9 BMI.update_until(model, 150day) - formulate_storages!(current_storage, u, u, p, t) + formulate_storages!(current_storage, u, p, t) @test only(current_storage) ≈ 900 atol = 5 # dynamic UserDemand withdraws to 0.5m or 500m3 due to min level = 0.5 BMI.update_until(model, 220day) - formulate_storages!(current_storage, u, u, p, t) + formulate_storages!(current_storage, u, p, t) @test only(current_storage) ≈ 500 atol = 1 # Trasient return factor From 448ca2856f3ef407faf6185f6e30d9d2511b4a0a Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Wed, 12 Mar 2025 15:35:04 +0100 Subject: [PATCH 08/28] Add SCT cache hack, fix minor bugs --- core/src/Ribasim.jl | 5 +++-- core/src/model.jl | 18 ++++++++++-------- core/src/parameter.jl | 2 +- core/src/solve.jl | 2 +- core/src/util.jl | 7 +++++-- core/src/write.jl | 3 ++- core/test/validation_test.jl | 6 +++--- 7 files changed, 25 insertions(+), 18 deletions(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index a0f534dc9..4bf9fa7ab 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ 
-17,7 +17,6 @@ module Ribasim # Requirements for automatic differentiation using ADTypes: AutoForwardDiff, AutoFiniteDiff using DifferentiationInterface: AutoSparse, Constant, Cache, prepare_jacobian, jacobian! -using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # Algorithms for solving ODEs. using OrdinaryDiffEqCore: OrdinaryDiffEqCore, get_du, AbstractNLSolver @@ -41,7 +40,9 @@ using SciMLBase: # Automatically detecting the sparsity pattern of the Jacobian of water_balance! # through operator overloading -using SparseConnectivityTracer: GradientTracer, TracerSparsityDetector +using SparseConnectivityTracer: + GradientTracer, TracerSparsityDetector, IndexSetGradientPattern +using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # For efficient sparse computations using SparseArrays: SparseMatrixCSC, spzeros diff --git a/core/src/model.jl b/core/src/model.jl index 449b21bfa..0a1a72c3e 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -32,15 +32,17 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) (; p_non_diff, diff_cache, p_mutable) = p backend = get_ad_type(solver) - # if solver.sparse - # backend = AutoSparse( - # backend; - # sparsity_detector = TracerSparsityDetector(), - # coloring_algorithm = GreedyColoringAlgorithm(), - # ) - # end + if solver.sparse + backend = AutoSparse( + backend; + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm(), + ) + end t = 0.0 + diff_cache_SCT = + similar(diff_cache, GradientTracer{IndexSetGradientPattern{Int64, BitSet}}) # Activate all nodes to catch all possible state dependencies p_mutable.all_nodes_active = true @@ -50,7 +52,7 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) backend, u, Constant(p_non_diff), - Cache(diff_cache), + Cache(diff_cache_SCT), Constant(p_mutable), Constant(t), ) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 2a8f3abec..844357c6c 100644 
--- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -702,7 +702,7 @@ end @enumx DiffCacheType flow_rate_pump flow_rate_outlet basin_level @kwdef struct DiffCacheRef - type::DiffCacheType.T = DiffCacheType.pump_flow_rate + type::DiffCacheType.T = DiffCacheType.flow_rate_pump idx::Int = 0 from_du::Bool = false end diff --git a/core/src/solve.jl b/core/src/solve.jl index 5be325ca5..4f93f2fe0 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -103,7 +103,7 @@ function formulate_storages!( (; p_non_diff, diff_cache, p_mutable) = p (; basin, flow_boundary, flow_to_storage, cache_ranges) = p_non_diff (; tprev) = p_mutable - current_storage = view(diff_cache, cache_ranges.current_level) + current_storage = view(diff_cache, cache_ranges.current_storage) # Current storage: initial condition + # total inflows and outflows since the start # of the simulation diff --git a/core/src/util.jl b/core/src/util.jl index a220a8f0e..65e22ae53 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -419,8 +419,11 @@ function get_continuous_control_type(graph::MetaGraph, node_id::Vector{NodeID}) control_inneighbors = collect(inneighbor_labels_type(graph, id, LinkType.control)) if length(control_inneighbors) == 1 control_inneighbor = only(control_inneighbors) - continuous_control_type[id.idx] = - get(control_type_mapping, control_inneighbor.type, ContinuousControl.None) + continuous_control_type[id.idx] = get( + control_type_mapping, + control_inneighbor.type, + ContinuousControlType.None, + ) elseif length(control_inneighbors) > 1 error("$id has more than 1 control inneighbors.") end diff --git a/core/src/write.jl b/core/src/write.jl index acaf423ea..28ac78eb0 100644 --- a/core/src/write.jl +++ b/core/src/write.jl @@ -216,7 +216,8 @@ function flow_table( } (; config, saved, integrator) = model (; t, saveval) = saved.flow - (; p_non_diff) = integrator.p + (; p) = integrator + (; p_non_diff) = p (; graph) = p_non_diff (; flow_links) = graph[] diff --git 
a/core/test/validation_test.jl b/core/test/validation_test.jl index 073cf97f2..23f227a5f 100644 --- a/core/test/validation_test.jl +++ b/core/test/validation_test.jl @@ -189,12 +189,12 @@ end cfg = Ribasim.Config(toml_path) db_path = Ribasim.database_path(cfg) db = SQLite.DB(db_path) - p = Ribasim.Parameters(db, cfg) + (; p_non_diff) = Ribasim.Parameters(db, cfg) close(db) logger = TestLogger() with_logger(logger) do - @test !Ribasim.valid_discrete_control(p, cfg) + @test !Ribasim.valid_discrete_control(p_non_diff, cfg) end @test length(logger.logs) == 5 @@ -217,7 +217,7 @@ end @testitem "Pump/outlet flow rate sign validation" begin using Logging - using Ribasim: NodeID, NodeType, ControlStateUpdate, ParameterUpdate, cache + using Ribasim: NodeID, NodeType, ControlStateUpdate, ParameterUpdate using DataInterpolations: LinearInterpolation logger = TestLogger() From 828178ae7be42d8cd4cba8c2df8e3a154b8a876e Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Wed, 12 Mar 2025 19:28:21 +0100 Subject: [PATCH 09/28] Fix many tests --- core/src/Ribasim.jl | 9 +++- core/src/bmi.jl | 17 ++++--- core/src/callback.jl | 14 +++--- core/src/logging.jl | 4 +- core/src/model.jl | 54 ++++++++++++++++------ core/src/parameter.jl | 11 +++-- core/src/read.jl | 25 ++++++++-- core/src/solve.jl | 21 ++++++--- core/src/util.jl | 17 ++++--- core/src/validation.jl | 10 ++-- core/test/control_test.jl | 4 +- core/test/run_models_test.jl | 90 +++++++++++++++++++++--------------- core/test/utils_test.jl | 19 +++----- core/test/validation_test.jl | 50 +++++++++----------- 14 files changed, 209 insertions(+), 136 deletions(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index 4bf9fa7ab..105f08369 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -16,7 +16,14 @@ module Ribasim # Requirements for automatic differentiation using ADTypes: AutoForwardDiff, AutoFiniteDiff -using DifferentiationInterface: AutoSparse, Constant, Cache, prepare_jacobian, jacobian! 
+using DifferentiationInterface: + AutoSparse, + Constant, + Cache, + prepare_jacobian, + jacobian!, + prepare_derivative, + derivative! # Algorithms for solving ODEs. using OrdinaryDiffEqCore: OrdinaryDiffEqCore, get_du, AbstractNLSolver diff --git a/core/src/bmi.jl b/core/src/bmi.jl index e358ead75..5925a281b 100644 --- a/core/src/bmi.jl +++ b/core/src/bmi.jl @@ -40,22 +40,25 @@ This uses a typeassert to ensure that the return type annotation doesn't create """ function BMI.get_value_ptr(model::Model, name::String)::Vector{Float64} (; u, p) = model.integrator - (; infiltration, user_demand_inflow) = p.state_ranges + (; p_non_diff, diff_cache) = p + (; state_ranges, cache_ranges, basin) = p_non_diff + (; infiltration, user_demand_inflow) = state_ranges + (; current_storage, current_level) = cache_ranges if name == "basin.storage" - p.basin.current_properties.current_storage[u]::Vector{Float64} + unsafe_array(view(diff_cache, current_storage))::Vector{Float64} elseif name == "basin.level" - p.basin.current_properties.current_level[u]::Vector{Float64} + unsafe_array(view(diff_cache, current_level))::Vector{Float64} elseif name == "basin.infiltration" - p.basin.vertical_flux.infiltration::Vector{Float64} + basin.vertical_flux.infiltration::Vector{Float64} elseif name == "basin.drainage" - p.basin.vertical_flux.drainage::Vector{Float64} + basin.vertical_flux.drainage::Vector{Float64} elseif name == "basin.cumulative_infiltration" unsafe_array(view(u, infiltration))::Vector{Float64} elseif name == "basin.cumulative_drainage" - p.basin.cumulative_drainage::Vector{Float64} + basin.cumulative_drainage::Vector{Float64} elseif name == "basin.subgrid_level" - p.subgrid.level::Vector{Float64} + subgrid.level::Vector{Float64} elseif name == "user_demand.demand" vec(p.user_demand.demand)::Vector{Float64} elseif name == "user_demand.cumulative_inflow" diff --git a/core/src/callback.jl b/core/src/callback.jl index 9c4de57ce..740f38d53 100644 --- a/core/src/callback.jl +++ 
b/core/src/callback.jl @@ -487,7 +487,7 @@ function check_negative_storage(u, t, integrator)::Nothing end if errors - t_datetime = datetime_since(integrator.t, integrator.p.starttime) + t_datetime = datetime_since(integrator.t, p_non_diff.starttime) error("Negative storages found at $t_datetime.") end return nothing @@ -556,7 +556,7 @@ function set_new_control_state!( truth_state::Vector{Bool}, )::Nothing (; p) = integrator - (; discrete_control) = p + (; discrete_control) = p.p_non_diff # Get the control state corresponding to the new truth state, # if one is defined @@ -593,11 +593,11 @@ Get a value for a condition. Currently supports getting levels from basins and f from flow boundaries. """ function get_value(subvariable::SubVariable, p::Parameters, du::AbstractVector, t::Float64) - (; flow_boundary, level_boundary, basin) = p - (; listen_node_id, look_ahead, variable, variable_ref) = subvariable + (; flow_boundary, level_boundary, basin) = p.p_non_diff + (; listen_node_id, look_ahead, variable, diff_cache_ref) = subvariable - if !iszero(variable_ref.idx) - return get_value(variable_ref, du) + if !iszero(diff_cache_ref.idx) + return get_value(diff_cache_ref, p, du) end if variable == "level" @@ -651,7 +651,7 @@ function get_allocation_model(p::Parameters, subnetwork_id::Int32)::AllocationMo end function set_control_params!(p::Parameters, node_id::NodeID, control_state::String)::Nothing - (; discrete_control, allocation) = p + (; discrete_control) = p.p_non_diff (; control_mappings) = discrete_control control_state_update = control_mappings[node_id.type][(node_id, control_state)] (; active, scalar_update, itp_update) = control_state_update diff --git a/core/src/logging.jl b/core/src/logging.jl index 267f8671c..791337a48 100644 --- a/core/src/logging.jl +++ b/core/src/logging.jl @@ -29,7 +29,7 @@ end function log_bottlenecks(model; converged::Bool) (; cache, p, u) = model.integrator - (; basin) = p + (; p_non_diff) = p level = converged ? 
LoggingExtras.Info : LoggingExtras.Warn @@ -41,7 +41,7 @@ function log_bottlenecks(model; converged::Bool) max_errors = 5 # Iterate over the errors in descending order for i in sortperm(flow_error; rev = true) - node_id = Symbol(p.node_id[i]) + node_id = Symbol(p_non_diff.node_id[i]) error = flow_error[i] isnan(error) && continue # NaN are sorted as largest # Stop reporting errors if they are too small or too many diff --git a/core/src/model.jl b/core/src/model.jl index 0a1a72c3e..350be55d7 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -28,28 +28,34 @@ struct Model{T} end end -function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) +""" +Get the Jacobian evaluation function via DifferentiationInterface.jl. +The time derivative is also supplied in case a Rosenbrock method is used. +""" +function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) (; p_non_diff, diff_cache, p_mutable) = p backend = get_ad_type(solver) - if solver.sparse - backend = AutoSparse( + backend_jac = if solver.sparse + AutoSparse( backend; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm(), ) + else + backend end t = 0.0 diff_cache_SCT = - similar(diff_cache, GradientTracer{IndexSetGradientPattern{Int64, BitSet}}) + zeros(GradientTracer{IndexSetGradientPattern{Int64, BitSet}}, length(diff_cache)) # Activate all nodes to catch all possible state dependencies p_mutable.all_nodes_active = true - prep = prepare_jacobian( + jac_prep = prepare_jacobian( water_balance!, du, - backend, + backend_jac, u, Constant(p_non_diff), Cache(diff_cache_SCT), @@ -58,15 +64,14 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) ) p_mutable.all_nodes_active = false - #jac_prototype = solver.sparse ? sparsity_pattern(prep) : nothing - jac_prototype = nothing + jac_prototype = solver.sparse ? 
sparsity_pattern(jac_prep) : nothing jac(J, u, p, t) = jacobian!( water_balance!, du, J, - prep, - backend, + jac_prep, + backend_jac, u, Constant(p.p_non_diff), Cache(diff_cache), @@ -74,7 +79,30 @@ function get_jac_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) Constant(t), ) - return jac_prototype, jac + tgrad_prep = prepare_derivative( + water_balance!, + du, + backend, + t, + Constant(u), + Constant(p_non_diff), + Cache(diff_cache), + Constant(p_mutable), + ) + tgrad(dT, u, p, t) = derivative!( + water_balance!, + du, + dT, + tgrad_prep, + backend, + t, + Constant(u), + Constant(p.p_non_diff), + Cache(p.diff_cache), + Constant(p.p_mutable), + ) + + return jac_prototype, jac, tgrad end function Model(config_path::AbstractString)::Model @@ -207,8 +235,8 @@ function Model(config::Config)::Model tstops = sort(unique(vcat(tstops...))) adaptive, dt = convert_dt(config.solver.dt) - jac_prototype, jac = get_jac_eval(du0, u0, parameters, config.solver) - RHS = ODEFunction(water_balance!; jac_prototype, jac) + jac_prototype, jac, tgrad = get_diff_eval(du0, u0, parameters, config.solver) + RHS = ODEFunction(water_balance!; jac_prototype, jac, tgrad) prob = ODEProblem(RHS, u0, timespan, parameters) @debug "Setup ODEProblem." 
diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 844357c6c..19bb017f1 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -323,10 +323,11 @@ end """ The parameter update associated with a certain control state for discrete control """ -@kwdef struct ControlStateUpdate{T <: AbstractInterpolation} +@kwdef struct ControlStateUpdate active::ParameterUpdate{Bool} scalar_update::Vector{ParameterUpdate{Float64}} = ParameterUpdate{Float64}[] - itp_update::Vector{ParameterUpdate{T}} = ParameterUpdate{ScalarInterpolation}[] + itp_update::Vector{ParameterUpdate{ScalarInterpolation}} = + ParameterUpdate{ScalarInterpolation}[] end """ @@ -724,7 +725,7 @@ end @kwdef struct SubVariable listen_node_id::NodeID - variable_ref::DiffCacheRef + diff_cache_ref::DiffCacheRef variable::String weight::Float64 look_ahead::Float64 @@ -1007,11 +1008,11 @@ function get_value(ref::DiffCacheRef, p::Parameters, du::Vector) if ref.from_du du[ref.idx] else - get_cache_view(p.diff_cache, ref.type, p.non_diff.cache_ranges)[ref.idx] + get_cache_view(p.diff_cache, ref.type, p.p_non_diff.cache_ranges)[ref.idx] end end function set_value!(ref::DiffCacheRef, p::Parameters, value) @assert !ref.from_du - get_cache_view(p.diff_cache, ref.type, p.non_diff.cache_ranges)[ref.idx] = value + get_cache_view(p.diff_cache, ref.type, p.p_non_diff.cache_ranges)[ref.idx] = value end diff --git a/core/src/read.jl b/core/src/read.jl index 4829805dc..3559cf0c6 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -743,6 +743,14 @@ function Pump(db::DB, config::Config, graph::MetaGraph)::Pump (; node_id) = parsed_parameters continuous_control_type = get_continuous_control_type(graph, node_id) + if !valid_flow_rates( + node_id, + parsed_parameters.flow_rate, + parsed_parameters.control_mapping, + ) + error("Invalid pump flow_rates found.") + end + return Pump(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), @@ -793,6 +801,14 @@ function Outlet(db::DB, config::Config, 
graph::MetaGraph)::Outlet (; node_id) = parsed_parameters continuous_control_type = get_continuous_control_type(graph, node_id) + if !valid_flow_rates( + node_id, + parsed_parameters.flow_rate, + parsed_parameters.control_mapping, + ) + error("Invalid pump flow_rates found.") + end + return Outlet(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), @@ -1038,14 +1054,14 @@ function CompoundVariable( for row in variables_compound_variable listen_node_id = NodeID(row.listen_node_id, node_ids_all) # Placeholder until actual ref is known - variable_ref = DiffCacheRef() + diff_cache_ref = DiffCacheRef() variable = row.variable # Default to weight = 1.0 if not specified weight = coalesce(row.weight, 1.0) # Default to look_ahead = 0.0 if not specified look_ahead = coalesce(row.look_ahead, 0.0) subvariable = - SubVariable(listen_node_id, variable_ref, variable, weight, look_ahead) + SubVariable(listen_node_id, diff_cache_ref, variable, weight, look_ahead) push!(subvariables, subvariable) end @@ -1824,6 +1840,7 @@ function Parameters(db::DB, config::Config)::Parameters nodes.pid_control.node_id, fill("flow_rate", length(node_id)), state_ranges, + graph, ) p_non_diff = ParametersNonDiff(; @@ -1844,8 +1861,8 @@ function Parameters(db::DB, config::Config)::Parameters ) collect_control_mappings!(p_non_diff) - set_listen_variable_refs!(p_non_diff) - set_discrete_controlled_variable_refs!(p_non_diff) + set_listen_diff_cache_refs!(p_non_diff) + set_discrete_controlled_diff_cache_refs!(p_non_diff) # Allocation data structures if config.allocation.use_allocation diff --git a/core/src/solve.jl b/core/src/solve.jl index 4f93f2fe0..fe9a8193e 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -1,11 +1,20 @@ -# Wrapper method used by OrdinaryDiffEq; the other method is used -# for Jacobian evaluation with DifferentiationInterface.jl -water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing = - water_balance!(du, u, p.p_non_diff, p.diff_cache, p.p_mutable, t) 
- """ The right hand side function of the system of ODEs set up by Ribasim. """ +water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing = + water_balance!(du, u, p.p_non_diff, p.diff_cache, p.p_mutable, t) + +# Method with `t` as second argument parsable by DifferentiationInterface.jl for time derivative computation +water_balance!( + du::Vector, + t::Number, + u::Vector, + p_non_diff::ParametersNonDiff, + diff_cache::Vector, + p_mutable::ParametersMutable, +) = water_balance!(du, u, p_non_diff, diff_cache, p_mutable, t) + +# Method with separate parameter parsable by DifferentiationInterface.jl for Jacobian computation function water_balance!( du::Vector, u::Vector, @@ -262,7 +271,7 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) end # Set flow_rate - set_value!(pid_control.target_ref[i], flow_rate, du) + set_value!(pid_control.target_ref[i], p, flow_rate) end return nothing end diff --git a/core/src/util.jl b/core/src/util.jl index 65e22ae53..09245eb77 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -532,7 +532,7 @@ end """ Get the reference to a parameter """ -function get_variable_ref( +function get_diff_cache_ref( node_id::NodeID, variable::String, state_ranges::StateRanges; @@ -575,7 +575,7 @@ end """ Set references to all variables that are listened to by discrete/continuous control """ -function set_listen_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing +function set_listen_diff_cache_refs!(p_non_diff::ParametersNonDiff)::Nothing (; discrete_control, continuous_control, state_ranges) = p_non_diff compound_variable_sets = [discrete_control.compound_variables..., continuous_control.compound_variable] @@ -585,13 +585,13 @@ function set_listen_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing for compound_variable in compound_variables (; subvariables) = compound_variable for (j, subvariable) in enumerate(subvariables) - ref, error = get_variable_ref( + ref, error = get_diff_cache_ref( 
subvariable.listen_node_id, subvariable.variable, state_ranges, ) if !error - subvariables[j] = @set subvariable.variable_ref = ref + subvariables[j] = @set subvariable.diff_cache_ref = ref end errors |= error end @@ -607,7 +607,7 @@ end """ Set references to all variables that are controlled by discrete control """ -function set_discrete_controlled_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing +function set_discrete_controlled_diff_cache_refs!(p_non_diff::ParametersNonDiff)::Nothing for nodetype in propertynames(p_non_diff) node = getfield(p_non_diff, nodetype) if node isa AbstractParameterNode && hasfield(typeof(node), :control_mapping) @@ -648,10 +648,13 @@ function set_target_ref!( node_id::Vector{NodeID}, controlled_variable::Vector{String}, state_ranges::StateRanges, + graph::MetaGraph, ) errors = false for (i, (id, variable)) in enumerate(zip(node_id, controlled_variable)) - ref, error = get_variable_ref(id, variable, state_ranges; listen = false) + controlled_node_id = only(outneighbor_labels_type(graph, id, LinkType.control)) + ref, error = + get_diff_cache_ref(controlled_node_id, variable, state_ranges; listen = false) target_ref[i] = ref errors |= error end @@ -760,7 +763,7 @@ end # Custom overloads reduction_factor(x::GradientTracer, ::Real) = x -low_storage_factor_resistance_node(storage, q::GradientTracer, inflow_id, outflow_id) = q +low_storage_factor_resistance_node(::Parameters, q::GradientTracer, ::NodeID, ::NodeID) = q relaxed_root(x::GradientTracer, threshold::Real) = x get_level_from_storage(basin::Basin, state_idx::Int, storage::GradientTracer) = storage diff --git a/core/src/validation.jl b/core/src/validation.jl index 773d48d97..eb032f22e 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -267,8 +267,8 @@ Test whether static or discrete controlled flow rates are indeed non-negative. 
""" function valid_flow_rates( node_id::Vector{NodeID}, - flow_rate::Vector, - control_mapping::Dict, + flow_rate::Vector{ScalarInterpolation}, + control_mapping::Dict{Tuple{NodeID, String}, <:ControlStateUpdate}, )::Bool errors = false @@ -286,7 +286,7 @@ function valid_flow_rates( if flow_rate_ < 0.0 errors = true control_state = key[2] - @error "$id_controlled flow rates must be non-negative, found $flow_rate_ for control state '$control_state'." + @error "Negative flow rate(s) for $id_controlled, control state '$control_state' found." end end @@ -294,9 +294,9 @@ function valid_flow_rates( if id in ids_controlled continue end - if flow_rate_ < 0.0 + if minimum(flow_rate_.u) < 0.0 errors = true - @error "$id flow rates must be non-negative, found $flow_rate_." + @error "Negative flow rate(s) for $id found." end end diff --git a/core/test/control_test.jl b/core/test/control_test.jl index eb1aa5cd1..2f13a73cf 100644 --- a/core/test/control_test.jl +++ b/core/test/control_test.jl @@ -233,14 +233,14 @@ end @test compound_variable.subvariables[1] == SubVariable(; listen_node_id = NodeID(:FlowBoundary, 2, p), - variable_ref = compound_variable.subvariables[1].variable_ref, + diff_cache_ref = compound_variable.subvariables[1].diff_cache_ref, variable = "flow_rate", weight = 0.5, look_ahead = 0.0, ) @test compound_variable.subvariables[2] == SubVariable(; listen_node_id = NodeID(:FlowBoundary, 3, p), - variable_ref = compound_variable.subvariables[2].variable_ref, + diff_cache_ref = compound_variable.subvariables[2].diff_cache_ref, variable = "flow_rate", weight = 0.5, look_ahead = 0.0, diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 1069b414c..32c75b84c 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -20,10 +20,10 @@ model = Ribasim.run(config) @test model isa Ribasim.Model @test successful_retcode(model) - (; p) = model.integrator + (; p_non_diff) = model.integrator.p - @test p.node_id == [0, 6, 6] - @test 
p.state_ranges == + @test p_non_diff.node_id == [0, 6, 6] + @test p_non_diff.state_ranges == StateRanges(; tabulated_rating_curve = 1:1, evaporation = 2:2, infiltration = 3:3) @test !ispath(control_path) @@ -93,7 +93,7 @@ # t0 has no flow, 2 flow links @test nrow(flow) == (nsaved - 1) * 2 @test nrow(basin) == nsaved - 1 - @test nrow(subgrid) == nsaved * length(p.subgrid.level) + @test nrow(subgrid) == nsaved * length(p_non_diff.subgrid.level) end @testset "Results values" begin @@ -116,12 +116,12 @@ # The exporter interpolates 1:1 for three subgrid elements, but shifted by 1.0 meter. basin_level = basin.level[1] - @test length(p.subgrid.level) == 3 - @test diff(p.subgrid.level) ≈ [-1.0, 2.0] + @test length(p_non_diff.subgrid.level) == 3 + @test diff(p_non_diff.subgrid.level) ≈ [-1.0, 2.0] @test subgrid.subgrid_id[1:3] == [11, 22, 33] @test subgrid.subgrid_level[1:3] ≈ [basin_level, basin_level - 1.0, basin_level + 1.0] - @test subgrid.subgrid_level[(end - 2):end] == p.subgrid.level + @test subgrid.subgrid_level[(end - 2):end] == p_non_diff.subgrid.level end end @@ -133,8 +133,10 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test model isa Ribasim.Model - (; basin, state_ranges) = model.integrator.p - @test basin.current_properties.current_storage[Float64[]] ≈ [1000] + (; p_non_diff, diff_cache) = model.integrator.p + (; basin, state_ranges, cache_ranges) = p_non_diff + current_storage = view(diff_cache, cache_ranges.current_storage) + @test current_storage ≈ [1000] @test basin.vertical_flux.precipitation == [0.0] @test basin.vertical_flux.drainage == [0.0] du = get_du(model.integrator) @@ -158,12 +160,15 @@ end (; integrator) = model du = get_du(integrator) (; u, p, t) = integrator + (; p_non_diff, diff_cache) = p + (; basin, state_ranges, cache_ranges) = p_non_diff + Ribasim.water_balance!(du, u, p, t) - stor = integrator.p.basin.current_properties.current_storage[du] - prec = p.basin.vertical_flux.precipitation - evap = view(du, 
p.state_ranges.evaporation) - drng = p.basin.vertical_flux.drainage - infl = view(du, p.state_ranges.infiltration) + stor = view(diff_cache, cache_ranges.current_storage) + prec = basin.vertical_flux.precipitation + evap = view(du, state_ranges.evaporation) + drng = basin.vertical_flux.drainage + infl = view(du, state_ranges.infiltration) # The dynamic data has missings, but these are not set. @test prec == [0.0] @test evap == [0.0] @@ -207,19 +212,21 @@ end (; integrator) = model (; p, alg) = integrator + (; p_non_diff, diff_cache) = p @test p isa Ribasim.Parameters - @test isconcretetype(typeof(p)) - @test all(isconcretetype, fieldtypes(typeof(p))) - @test p.node_id == [4, 5, 8, 7, 10, 12, 2, 1, 3, 6, 9, 1, 3, 6, 9] + @test isconcretetype(typeof(p_non_diff)) + @test all(isconcretetype, fieldtypes(typeof(p_non_diff))) + @test p_non_diff.node_id == [4, 5, 8, 7, 10, 12, 2, 1, 3, 6, 9, 1, 3, 6, 9] @test alg isa QNDF @test alg.step_limiter! == Ribasim.limit_flow! @test successful_retcode(model) @test length(model.integrator.sol) == 2 # start and end - @test model.integrator.p.basin.current_properties.current_storage[Float64[]] ≈ - Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = Sys.isapple() atol = 1.5 + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + @test current_storage ≈ Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = + Sys.isapple() atol = 1.5 @test length(logger.logs) > 10 @test logger.logs[1].level == Debug @@ -257,11 +264,12 @@ end @test model isa Ribasim.Model @test successful_retcode(model) @test allunique(Ribasim.tsaves(model)) - du = get_du(model.integrator) - precipitation = model.integrator.p.basin.vertical_flux.precipitation + (; p_non_diff, diff_cache) = model.integrator.p + precipitation = p_non_diff.basin.vertical_flux.precipitation @test length(precipitation) == 4 - @test model.integrator.p.basin.current_properties.current_storage[du] ≈ - Float32[698.6895, 698.143, 420.57407, 1334.486] atol = 2.0 
skip = Sys.isapple() + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + @test current_storage ≈ Float32[698.6895, 698.143, 420.57407, 1334.486] atol = 2.0 skip = + Sys.isapple() end @testitem "Allocation example model" begin @@ -315,9 +323,10 @@ end model = Ribasim.run(toml_path) @test model isa Ribasim.Model @test successful_retcode(model) - @test model.integrator.p.basin.current_properties.current_storage[Float64[]] ≈ - Float32[368.31558, 365.68442] skip = Sys.isapple() - (; tabulated_rating_curve) = model.integrator.p + (; p_non_diff, diff_cache) = model.integrator.p + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + @test current_storage ≈ Float32[368.31558, 365.68442] skip = Sys.isapple() + (; tabulated_rating_curve) = p_non_diff # The first node is static, the first interpolation object always applies index_itp1 = tabulated_rating_curve.current_interpolation_index[1] @test only(index_itp1.u) == 1 @@ -343,8 +352,7 @@ end model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; level_boundary, outlet) = p + (; level_boundary, outlet) = model.integrator.p.p_non_diff (; level) = level_boundary level = level[1] @@ -381,23 +389,25 @@ end (; integrator) = model (; u, p, t, sol) = integrator - current_storage = p.basin.current_properties.current_storage[Float64[]] + (; p_non_diff, diff_cache) = p + (; p_non_diff, diff_cache) = model.integrator.p + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) day = 86400.0 @test only(current_storage) ≈ 1000.0 # constant UserDemand withdraws to 0.9m or 900m3 due to min level = 0.9 BMI.update_until(model, 150day) - formulate_storages!(current_storage, u, p, t) + formulate_storages!(u, p, t) @test only(current_storage) ≈ 900 atol = 5 # dynamic UserDemand withdraws to 0.5m or 500m3 due to min level = 0.5 BMI.update_until(model, 220day) - formulate_storages!(current_storage, u, p, t) + formulate_storages!(u, p, t) 
@test only(current_storage) ≈ 500 atol = 1 # Trasient return factor flow = DataFrame(Ribasim.flow_table(model)) - return_factor_itp = model.integrator.p.user_demand.return_factor[3] + return_factor_itp = p_non_diff.user_demand.return_factor[3] flow_in = filter([:from_node_id, :to_node_id] => (from, to) -> (from, to) == (1, 4), flow) flow_out = @@ -477,7 +487,9 @@ end du = get_du(model.integrator) (; p, t) = model.integrator - h_actual = p.basin.current_properties.current_level[du][1:50] + (; p_non_diff, diff_cache) = p + current_level = view(diff_cache, p_non_diff.cache_ranges.current_level) + h_actual = current_level[1:50] x = collect(10.0:20.0:990.0) h_expected = standard_step_method(x, 5.0, 1.0, 0.04, h_actual[end], 1.0e-6) @@ -487,13 +499,17 @@ end # https://www.hec.usace.army.mil/confluence/rasdocs/ras1dtechref/latest/theoretical-basis-for-one-dimensional-and-two-dimensional-hydrodynamic-calculations/1d-steady-flow-water-surface-profiles/friction-loss-evaluation @test all(isapprox.(h_expected, h_actual; atol = 0.02)) # Test for conservation of mass, flow at the beginning == flow at the end - @test Ribasim.get_flow(du, p, t, (NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))) ≈ - 5.0 atol = 0.001 skip = Sys.isapple() @test Ribasim.get_flow( du, p, t, - (NodeID(:ManningResistance, 101, p), NodeID(:Basin, 102, p)), + (NodeID(:FlowBoundary, 1, p_non_diff), NodeID(:Basin, 2, p_non_diff)), + ) ≈ 5.0 atol = 0.001 skip = Sys.isapple() + @test Ribasim.get_flow( + du, + p, + t, + (NodeID(:ManningResistance, 101, p_non_diff), NodeID(:Basin, 102, p_non_diff)), ) ≈ 5.0 atol = 0.001 skip = Sys.isapple() end @@ -632,5 +648,5 @@ end basin_level = copy(BMI.get_value_ptr(model, "basin.level")) basin_level[2] += 1 @test basin_level ≈ df.subgrid_level[(end - 1):end] - @test basin_level ≈ model.integrator.p.subgrid.level + @test basin_level ≈ model.integrator.p.p_non_diff.subgrid.level end diff --git a/core/test/utils_test.jl b/core/test/utils_test.jl index 44feb0e10..707fb8cb2 
100644 --- a/core/test/utils_test.jl +++ b/core/test/utils_test.jl @@ -30,11 +30,6 @@ end concentration_time = StructVector{Ribasim.BasinConcentrationV1}(undef, 0), ) - (; current_level, current_area) = basin.current_properties - - current_level[Float64[]] .= [2.0, 3.0] - current_area[Float64[]] .= [2.0, 3.0] - @test Ribasim.basin_levels(basin, 2)[1] === 4.0 @test Ribasim.basin_bottom(basin, NodeID(:Basin, 5, 1))[2] === 0.0 @test Ribasim.basin_bottom(basin, NodeID(:Basin, 7, 2))[2] === 4.0 @@ -264,7 +259,7 @@ end p = Ribasim.Parameters(db, config) close(db) t0 = 0.0 - u0 = Ribasim.build_state_vector(p) + u0 = Ribasim.build_state_vector(p.p_non_diff) du0 = copy(u0) jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) @@ -284,8 +279,9 @@ end db = SQLite.DB(db_path) p = Ribasim.Parameters(db, config) + (; p_non_diff) = p close(db) - u0 = Ribasim.build_state_vector(p) + u0 = Ribasim.build_state_vector(p_non_diff) du0 = copy(u0) jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) @@ -365,7 +361,7 @@ end end @testitem "Node types" begin - using Ribasim: nodetypes, NodeType, Parameters, AbstractParameterNode, snake_case + using Ribasim: nodetypes, NodeType, ParametersNonDiff, AbstractParameterNode, snake_case @test Set(nodetypes) == Set([ :Basin, @@ -390,7 +386,7 @@ end # It has a struct which is added to Parameters T = getproperty(Ribasim, nodetype) @test T <: AbstractParameterNode - @test hasfield(Parameters, snake_case(nodetype)) + @test hasfield(ParametersNonDiff, snake_case(nodetype)) end end end @@ -400,9 +396,8 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml") @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; p) = model.integrator - n_basins = length(p.basin.node_id) - (; flow_to_storage, state_ranges) = p + (; basin, flow_to_storage, state_ranges) = model.integrator.p.p_non_diff + n_basins = length(basin.node_id) @test flow_to_storage[:, state_ranges.evaporation] == -I @test 
flow_to_storage[:, state_ranges.infiltration] == -I diff --git a/core/test/validation_test.jl b/core/test/validation_test.jl index 23f227a5f..11547a5bd 100644 --- a/core/test/validation_test.jl +++ b/core/test/validation_test.jl @@ -217,51 +217,45 @@ end @testitem "Pump/outlet flow rate sign validation" begin using Logging - using Ribasim: NodeID, NodeType, ControlStateUpdate, ParameterUpdate + using Ribasim: NodeID, NodeType, ControlStateUpdate, ParameterUpdate, valid_flow_rates using DataInterpolations: LinearInterpolation logger = TestLogger() + flow_rate = [LinearInterpolation([-1.0, 2.0], [0.0, 1.0])] with_logger(logger) do - flow_rate = cache(1) - flow_rate[Float64[]] .= -1 - @test_throws "Invalid Outlet flow rate(s)." Ribasim.Outlet(; - node_id = [NodeID(:Outlet, 1, 1)], - flow_rate, - ) + node_id = [NodeID(:Outlet, 1, 1)] + control_mapping = Dict{Tuple{NodeID, String}, ControlStateUpdate}() + @test !valid_flow_rates(node_id, flow_rate, control_mapping) end @test length(logger.logs) == 1 @test logger.logs[1].level == Error - @test logger.logs[1].message == "Outlet #1 flow rates must be non-negative, found -1.0." + @test logger.logs[1].message == "Negative flow rate(s) for Outlet #1 found." logger = TestLogger() with_logger(logger) do - flow_rate_cache = cache(1) - flow_rate_cache[Float64[]] .= -1 - @test_throws "Invalid Pump flow rate(s)." 
Ribasim.Pump(; - node_id = [NodeID(:Pump, 1, 1)], - flow_rate_cache, - control_mapping = Dict( - (NodeID(:Pump, 1, 1), "foo") => ControlStateUpdate(; - active = ParameterUpdate(:active, true), - itp_update = [ - ParameterUpdate( - :flow_rate, - LinearInterpolation([-1.0, -1.0], [0.0, 1.0]), - ), - ], - ), + node_id = [NodeID(:Pump, 1, 1)] + control_mapping = Dict( + (NodeID(:Pump, 1, 1), "foo") => ControlStateUpdate(; + active = ParameterUpdate(:active, true), + itp_update = [ + ParameterUpdate( + :flow_rate, + LinearInterpolation([-1.0, -1.0], [0.0, 1.0]), + ), + ], ), ) + @test !valid_flow_rates(node_id, flow_rate, control_mapping) end # Only the invalid control state flow_rate yields an error @test length(logger.logs) == 1 @test logger.logs[1].level == Error @test logger.logs[1].message == - "Pump #1 flow rates must be non-negative, found -1.0 for control state 'foo'." + "Negative flow rate(s) for Pump #1, control state 'foo' found." end @testitem "Link type validation" begin @@ -380,9 +374,9 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - parameters = model.integrator.p + (; p_non_diff) = model.integrator.p - (; graph, tabulated_rating_curve, basin) = parameters + (; graph, tabulated_rating_curve, basin) = p_non_diff tabulated_rating_curve.interpolations[1].t[1] = invalid_level logger = TestLogger() @@ -411,9 +405,9 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - parameters = model.integrator.p + (; p_non_diff) = model.integrator.p - (; graph, outlet, basin) = parameters + (; graph, outlet, basin) = p_non_diff outlet.min_upstream_level[1] = LinearInterpolation(fill(invalid_level, 2), zeros(2)) logger = TestLogger() From de0dbb7c75b3d6bafbebbe0c1d5dd44317d90faf Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 07:58:19 +0100 Subject: [PATCH 10/28] Pass most tests --- core/src/allocation_optim.jl | 104 +++++-------- core/src/bmi.jl | 4 +- core/src/callback.jl | 12 +- core/src/graph.jl | 8 +- 
core/src/solve.jl | 16 +- core/src/util.jl | 14 +- core/src/write.jl | 2 +- core/test/allocation_test.jl | 282 ++++++++++++++++++----------------- core/test/bmi_test.jl | 2 +- core/test/control_test.jl | 50 +++---- core/test/equations_test.jl | 28 ++-- core/test/io_test.jl | 11 +- core/test/time_test.jl | 11 +- core/test/utils_test.jl | 4 +- 14 files changed, 275 insertions(+), 273 deletions(-) diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index be4a0623d..1d5ea2e18 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -29,12 +29,12 @@ Set the objective for the given demand priority. """ function set_objective_demand_priority!( allocation_model::AllocationModel, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::Float64, demand_priority_idx::Int, )::Nothing (; problem, subnetwork_id, capacity) = allocation_model + (; p_non_diff) = p (; graph, user_demand, flow_demand, allocation, basin) = p_non_diff (; node_id, demand_reduced) = user_demand (; main_network_connections, subnetwork_demands) = allocation @@ -92,7 +92,7 @@ function set_objective_demand_priority!( basin_demand_priority_idx = get_external_demand_priority_idx(p_non_diff, node_id) d = basin_demand_priority_idx == demand_priority_idx ? 
- get_basin_demand(allocation_model, u, p, t, node_id) : 0.0 + get_basin_demand(allocation_model, p, t, node_id) : 0.0 basin.demand[node_id.idx] = d F_ld = F_basin_in[node_id] add_objective_term!(ex, d, F_ld) @@ -327,17 +327,13 @@ Get several variables associated with a basin: node does not exist) - The index of the basin """ -function get_basin_data( - allocation_model::AllocationModel, - p::Parameters, - u::Vector, - node_id::NodeID, -) - (; graph, basin) = p +function get_basin_data(allocation_model::AllocationModel, p::Parameters, node_id::NodeID) + (; p_non_diff, diff_cache) = p + (; graph, cache_ranges) = p_non_diff (; Δt_allocation, subnetwork_id) = allocation_model @assert node_id.type == NodeType.Basin - influx = mean_input_flows_subnetwork(p, subnetwork_id)[(node_id, node_id)] - storage_basin = basin.current_properties.current_storage[u][node_id.idx] + influx = mean_input_flows_subnetwork(p_non_diff, subnetwork_id)[(node_id, node_id)] + storage_basin = view(diff_cache, cache_ranges.current_storage)[node_id.idx] control_inneighbors = inneighbor_labels_type(graph, node_id, LinkType.control) if isempty(control_inneighbors) level_demand_idx = 0 @@ -357,15 +353,15 @@ Storages are converted to flows by dividing by the allocation timestep. 
""" function get_basin_capacity( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, node_id::NodeID, )::Float64 - (; level_demand) = p + (; p_non_diff) = p + (; level_demand, basin) = p_non_diff @assert node_id.type == NodeType.Basin storage_basin, Δt_allocation, influx, level_demand_idx, basin_idx = - get_basin_data(allocation_model, p, u, node_id) + get_basin_data(allocation_model, p, node_id) if iszero(level_demand_idx) return 0.0 else @@ -373,7 +369,7 @@ function get_basin_capacity( if isinf(level_max) storage_max = Inf else - storage_max = get_storage_from_level(p.basin, basin_idx, level_max) + storage_max = get_storage_from_level(basin, basin_idx, level_max) end return max(0.0, (storage_basin - storage_max) / Δt_allocation + influx) end @@ -387,20 +383,19 @@ Storages are converted to flows by dividing by the allocation timestep. """ function get_basin_demand( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, node_id::NodeID, )::Float64 - (; level_demand) = p + (; level_demand, basin) = p.p_non_diff @assert node_id.type == NodeType.Basin storage_basin, Δt_allocation, influx, level_demand_idx, basin_idx = - get_basin_data(allocation_model, p, u, node_id) + get_basin_data(allocation_model, p, node_id) if iszero(level_demand_idx) return 0.0 else level_min = level_demand.min_level[level_demand_idx](t) - storage_min = get_storage_from_level(p.basin, basin_idx, level_min) + storage_min = get_storage_from_level(basin, basin_idx, level_min) return max(0.0, (storage_min - storage_basin) / Δt_allocation - influx) end end @@ -411,8 +406,7 @@ vertical fluxes + the disk of storage above the maximum level / Δt_allocation """ function set_initial_capacities_basin!( allocation_model::AllocationModel, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::Float64, )::Nothing (; problem, sources) = allocation_model @@ -421,7 +415,7 @@ function set_initial_capacities_basin!( for node_id in only(constraints_outflow.axes) 
source = sources[(node_id, node_id)] @assert source.type == AllocationSourceType.level_demand - source.capacity = get_basin_capacity(allocation_model, u, p_non_diff, t, node_id) + source.capacity = get_basin_capacity(allocation_model, p, t, node_id) end return nothing end @@ -459,19 +453,18 @@ Set the initial demand of each basin in the subnetwork as """ function set_initial_demands_level!( allocation_model::AllocationModel, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::Float64, )::Nothing (; subnetwork_id, problem) = allocation_model - (; graph, basin) = p_non_diff + (; graph, basin) = p.p_non_diff (; demand) = basin node_ids_level_demand = only(problem[:basin_outflow].axes) for id in node_ids_level_demand if graph[id].subnetwork_id == subnetwork_id - demand[id.idx] = get_basin_demand(allocation_model, u, p_non_diff, t, id) + demand[id.idx] = get_basin_demand(allocation_model, p, t, id) end end @@ -652,7 +645,8 @@ function save_demands_and_allocations!( elseif node_id.type == NodeType.Basin && has_external_demand(graph, node_id, :level_demand)[1] # Basins with level demand - basin_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + basin_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, node_id) if demand_priority_idx == 1 || basin_demand_priority_idx == demand_priority_idx has_demand = true @@ -877,12 +871,12 @@ Solve the allocation problem for a single (demand_priority, source_priority) pai function optimize_per_source!( allocation_model::AllocationModel, demand_priority_idx::Integer, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::AbstractFloat, optimization_type::OptimizationType.T, )::Nothing (; problem, sources, subnetwork_id, flow) = allocation_model + (; p_non_diff) = p (; demand_priorities_all) = p_non_diff.allocation F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] @@ -906,13 +900,7 @@ function optimize_per_source!( # of an existing objective function because 
this is not supported for # quadratic terms: # https://jump.dev/JuMP.jl/v1.16/manual/objective/#Modify-an-objective-coefficient - set_objective_demand_priority!( - allocation_model, - u, - p_non_diff, - t, - demand_priority_idx, - ) + set_objective_demand_priority!(allocation_model, p, t, demand_priority_idx) # Set only the capacity of the current source to nonzero set_source_capacity!(allocation_model, source, optimization_type) @@ -982,13 +970,13 @@ end function optimize_demand_priority!( allocation_model::AllocationModel, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::Float64, demand_priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing (; flow) = allocation_model + (; p_non_diff) = p (; basin, allocation) = p_non_diff (; demand_priorities_all) = allocation @@ -1009,14 +997,7 @@ function optimize_demand_priority!( ) # Solve the allocation problem for this demand priority per source - optimize_per_source!( - allocation_model, - demand_priority_idx, - u, - p_non_diff, - t, - optimization_type, - ) + optimize_per_source!(allocation_model, demand_priority_idx, p, t, optimization_type) # Assign the allocations to the UserDemand or subnetwork for this demand priority assign_allocations!( @@ -1045,13 +1026,13 @@ Set the initial capacities and demands which are reduced by usage. 
""" function set_initial_values!( allocation_model::AllocationModel, - u::Vector, - p_non_diff::ParametersNonDiff, + p::Parameters, t::Float64, )::Nothing + (; p_non_diff) = p set_initial_capacities_source!(allocation_model, p_non_diff) set_initial_capacities_link!(allocation_model, p_non_diff) - set_initial_capacities_basin!(allocation_model, u, p_non_diff, t) + set_initial_capacities_basin!(allocation_model, p, t) set_initial_capacities_buffer!(allocation_model) set_initial_capacities_returnflow!(allocation_model, p_non_diff) @@ -1060,7 +1041,7 @@ function set_initial_values!( end set_initial_demands_user!(allocation_model, p_non_diff, t) - set_initial_demands_level!(allocation_model, u, p_non_diff, t) + set_initial_demands_level!(allocation_model, p, t) set_initial_demands_flow!(allocation_model, p_non_diff, t) return nothing end @@ -1091,11 +1072,11 @@ Update the allocation optimization problem for the given subnetwork with the pro and flows, solve the allocation problem and assign the results to the UserDemand. 
""" function collect_demands!( - p_non_diff::ParametersNonDiff, + p::Parameters, allocation_model::AllocationModel, t::Float64, - u::Vector, )::Nothing + (; p_non_diff) = p (; allocation) = p_non_diff (; subnetwork_id) = allocation_model (; demand_priorities_all, subnetwork_demands, main_network_connections) = allocation @@ -1103,14 +1084,13 @@ function collect_demands!( ## Find internal sources optimization_type = OptimizationType.internal_sources set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type) - set_initial_values!(allocation_model, u, p_non_diff, t) + set_initial_values!(allocation_model, p, t) # Loop over demand priorities for demand_priority_idx in eachindex(demand_priorities_all) optimize_demand_priority!( allocation_model, - u, - p_non_diff, + p, t, demand_priority_idx, optimization_type, @@ -1139,8 +1119,7 @@ function collect_demands!( for demand_priority_idx in eachindex(demand_priorities_all) optimize_demand_priority!( allocation_model, - u, - p_non_diff, + p, t, demand_priority_idx, optimization_type, @@ -1149,24 +1128,23 @@ function collect_demands!( end function allocate_demands!( - p_non_diff::ParametersNonDiff, + p::Parameters, allocation_model::AllocationModel, t::Float64, - u::Vector, )::Nothing optimization_type = OptimizationType.allocate + (; p_non_diff) = p (; demand_priorities_all) = p_non_diff.allocation set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type) - set_initial_values!(allocation_model, u, p_non_diff, t) + set_initial_values!(allocation_model, p, t) # Loop over the demand priorities for demand_priority_idx in eachindex(demand_priorities_all) optimize_demand_priority!( allocation_model, - u, - p_non_diff, + p, t, demand_priority_idx, optimization_type, diff --git a/core/src/bmi.jl b/core/src/bmi.jl index 5925a281b..78bab8bae 100644 --- a/core/src/bmi.jl +++ b/core/src/bmi.jl @@ -41,7 +41,7 @@ This uses a typeassert to ensure that the return type annotation doesn't create 
function BMI.get_value_ptr(model::Model, name::String)::Vector{Float64} (; u, p) = model.integrator (; p_non_diff, diff_cache) = p - (; state_ranges, cache_ranges, basin) = p_non_diff + (; state_ranges, cache_ranges, basin, user_demand, subgrid) = p_non_diff (; infiltration, user_demand_inflow) = state_ranges (; current_storage, current_level) = cache_ranges @@ -60,7 +60,7 @@ function BMI.get_value_ptr(model::Model, name::String)::Vector{Float64} elseif name == "basin.subgrid_level" subgrid.level::Vector{Float64} elseif name == "user_demand.demand" - vec(p.user_demand.demand)::Vector{Float64} + vec(user_demand.demand)::Vector{Float64} elseif name == "user_demand.cumulative_inflow" unsafe_array(view(u, user_demand_inflow))::Vector{Float64} else diff --git a/core/src/callback.jl b/core/src/callback.jl index 740f38d53..bfe91d67c 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -639,9 +639,11 @@ function compound_variable_value(compound_variable::CompoundVariable, p, du, t) return value end -function get_allocation_model(p::Parameters, subnetwork_id::Int32)::AllocationModel - (; allocation) = p - (; subnetwork_ids, allocation_models) = allocation +function get_allocation_model( + p_non_diff::ParametersNonDiff, + subnetwork_id::Int32, +)::AllocationModel + (; subnetwork_ids, allocation_models) = p_non_diff.allocation idx = findsorted(subnetwork_ids, subnetwork_id) if isnothing(idx) error("Invalid allocation network ID $subnetwork_id.") @@ -826,7 +828,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present, collect demands of subnetworks if has_main_network(allocation) for allocation_model in Iterators.drop(allocation_models, 1) - collect_demands!(p_non_diff, allocation_model, t, u) + collect_demands!(p, allocation_model, t) end end @@ -834,7 +836,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present this is solved first, # which provides allocation to the subnetworks for allocation_model in 
allocation_models - allocate_demands!(p_non_diff, allocation_model, t, u) + allocate_demands!(p, allocation_model, t) end # Reset the mean flows diff --git a/core/src/graph.jl b/core/src/graph.jl index c461fa844..672dd5180 100644 --- a/core/src/graph.jl +++ b/core/src/graph.jl @@ -232,12 +232,12 @@ from the parameters, but integrated/averaged FlowBoundary flows must be provided """ function get_flow( flow::Vector, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Number, link::Tuple{NodeID, NodeID}; boundary_flow = nothing, ) - (; flow_boundary) = p.p_non_diff + (; flow_boundary) = p_non_diff from_id = link[1] if from_id.type == NodeType.FlowBoundary if boundary_flow === nothing @@ -247,13 +247,13 @@ function get_flow( boundary_flow[from_id.idx] end else - flow[get_state_index(p.p_non_diff.state_ranges, link)] + flow[get_state_index(p_non_diff.state_ranges, link)] end end function get_influx(du::Vector, id::NodeID, p::Parameters) @assert id.type == NodeType.Basin - (; basin, state_ranges) = p.non_diff + (; basin, state_ranges) = p.p_non_diff (; vertical_flux) = basin du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) diff --git a/core/src/solve.jl b/core/src/solve.jl index fe9a8193e..823f4ce34 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -263,7 +263,8 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) if !iszero(K_d) dlevel_demand = derivative(target[i], t) - dstorage_listened_basin_old = formulate_dstorage(du, p, t, listened_node_id) + dstorage_listened_basin_old = + formulate_dstorage(du, p_non_diff, t, listened_node_id) # The expression below is the solution to an implicit equation for # dstorage_listened_basin. This equation results from the fact that if the derivative # term in the PID controller is used, the controlled pump flow rate depends on itself. @@ -279,18 +280,23 @@ end """ Formulate the time derivative of the storage in a single Basin. 
""" -function formulate_dstorage(du::Vector, p::Parameters, t::Number, node_id::NodeID) - (; basin, state_ranges) = p +function formulate_dstorage( + du::Vector, + p_non_diff::ParametersNonDiff, + t::Number, + node_id::NodeID, +) + (; basin, state_ranges) = p_non_diff (; inflow_ids, outflow_ids, vertical_flux) = basin du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) @assert node_id.type == NodeType.Basin dstorage = 0.0 for inflow_id in inflow_ids[node_id.idx] - dstorage += get_flow(du, p, t, (inflow_id, node_id)) + dstorage += get_flow(du, p_non_diff, t, (inflow_id, node_id)) end for outflow_id in outflow_ids[node_id.idx] - dstorage -= get_flow(du, p, t, (node_id, outflow_id)) + dstorage -= get_flow(du, p_non_diff, t, (node_id, outflow_id)) end fixed_area = basin_areas(basin, node_id.idx)[end] diff --git a/core/src/util.jl b/core/src/util.jl index 09245eb77..9a33e09b6 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -389,8 +389,11 @@ function get_all_demand_priorities(db::DB, config::Config;)::Vector{Int32} end end -function get_external_demand_priority_idx(p::Parameters, node_id::NodeID)::Int - (; graph, level_demand, flow_demand, allocation) = p +function get_external_demand_priority_idx( + p_non_diff::ParametersNonDiff, + node_id::NodeID, +)::Int + (; graph, level_demand, flow_demand, allocation) = p_non_diff inneighbor_control_ids = inneighbor_labels_type(graph, node_id, LinkType.control) if isempty(inneighbor_control_ids) return 0 @@ -478,7 +481,8 @@ as input. 
Therefore we set the instantaneous flows as the mean flows as allocati """ function set_initial_allocation_mean_flows!(integrator)::Nothing (; u, p, t) = integrator - (; allocation) = p.p_non_diff + (; p_non_diff) = p + (; allocation) = p_non_diff (; mean_input_flows, mean_realized_flows, allocation_models) = allocation (; Δt_allocation) = allocation_models[1] @@ -493,7 +497,7 @@ function set_initial_allocation_mean_flows!(integrator)::Nothing if link[1] == link[2] q = get_influx(du, link[1], p) else - q = get_flow(du, p, t, link) + q = get_flow(du, p_non_diff, t, link) end # Multiply by Δt_allocation as averaging divides by this factor # in update_allocation! @@ -507,7 +511,7 @@ function set_initial_allocation_mean_flows!(integrator)::Nothing if link[1] == link[2] mean_realized_flows[link] = -u[link[1].idx] else - q = get_flow(du, p, t, link) + q = get_flow(du, p_non_diff, t, link) mean_realized_flows[link] = q * Δt_allocation end end diff --git a/core/src/write.jl b/core/src/write.jl index 28ac78eb0..10855fe75 100644 --- a/core/src/write.jl +++ b/core/src/write.jl @@ -242,7 +242,7 @@ function flow_table( for (j, cvec) in enumerate(saveval) (; flow, flow_boundary) = cvec flow_rate[i + (j - 1) * nflow] = - get_flow(flow, p, 0.0, link; boundary_flow = flow_boundary) + get_flow(flow, p_non_diff, 0.0, link; boundary_flow = flow_boundary) end end diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 94ddccde1..9c770185e 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -6,28 +6,33 @@ toml_path = normpath(@__DIR__, "../../generated_testmodels/subnetwork/ribasim.toml") @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - (; graph, allocation) = p - - allocation.mean_input_flows[1][(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] = - 4.5 - allocation_model = p.allocation.allocation_models[1] + (; p) = model.integrator + (; p_non_diff) = p + (; graph, allocation, user_demand) = 
p_non_diff + + allocation.mean_input_flows[1][( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] = 4.5 + allocation_model = allocation.allocation_models[1] (; flow) = allocation_model - u = Float64[] t = 0.0 - Ribasim.allocate_demands!(p, allocation_model, t, u) + Ribasim.allocate_demands!(p, allocation_model, t) # Last demand priority (= 2) flows - @test flow[(NodeID(:Basin, 2, p), NodeID(:Pump, 5, p))] ≈ 0.0 - @test flow[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 10, p))] ≈ 0.5 - @test flow[(NodeID(:Basin, 8, p), NodeID(:UserDemand, 12, p))] ≈ 3.0 rtol = 1e-5 - @test flow[(NodeID(:UserDemand, 12, p), NodeID(:Basin, 8, p))] ≈ 1.0 rtol = 1e-5 - @test flow[(NodeID(:Basin, 6, p), NodeID(:Outlet, 7, p))] ≈ 2.0 rtol = 1e-5 - @test flow[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] ≈ 0.5 - @test flow[(NodeID(:Basin, 6, p), NodeID(:UserDemand, 11, p))] ≈ 0.0 - - (; allocated) = p.user_demand + @test flow[(NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 5, p_non_diff))] ≈ 0.0 + @test flow[(NodeID(:Basin, 2, p_non_diff), NodeID(:UserDemand, 10, p_non_diff))] ≈ 0.5 + @test flow[(NodeID(:Basin, 8, p_non_diff), NodeID(:UserDemand, 12, p_non_diff))] ≈ 3.0 rtol = + 1e-5 + @test flow[(NodeID(:UserDemand, 12, p_non_diff), NodeID(:Basin, 8, p_non_diff))] ≈ 1.0 rtol = + 1e-5 + @test flow[(NodeID(:Basin, 6, p_non_diff), NodeID(:Outlet, 7, p_non_diff))] ≈ 2.0 rtol = + 1e-5 + @test flow[(NodeID(:FlowBoundary, 1, p_non_diff), NodeID(:Basin, 2, p_non_diff))] ≈ 0.5 + @test flow[(NodeID(:Basin, 6, p_non_diff), NodeID(:UserDemand, 11, p_non_diff))] ≈ 0.0 + + (; allocated) = user_demand @test allocated[1, :] ≈ [0.0, 0.5] @test allocated[2, :] ≈ [4.0, 0.0] rtol = 1e-5 @test allocated[3, :] ≈ [0.0, 3.0] atol = 1e-5 @@ -45,17 +50,18 @@ end model = Ribasim.run(toml_path) @test successful_retcode(model) - (; u, p, t) = model.integrator - (; user_demand) = p - allocation_model = p.allocation.allocation_models[1] - Ribasim.set_initial_values!(allocation_model, u, 
p, t) - Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, 1) + (; p, t) = model.integrator + (; p_non_diff) = p + (; user_demand, allocation) = p_non_diff + allocation_model = allocation.allocation_models[1] + Ribasim.set_initial_values!(allocation_model, p, t) + Ribasim.set_objective_demand_priority!(allocation_model, p, t, 1) objective = JuMP.objective_function(allocation_model.problem) @test objective isa JuMP.QuadExpr # Quadratic expression F = allocation_model.problem[:F] - to_user_5 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 5, p))] - to_user_6 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 6, p))] + to_user_5 = F[(NodeID(:Basin, 4, p_non_diff), NodeID(:UserDemand, 5, p_non_diff))] + to_user_6 = F[(NodeID(:Basin, 4, p_non_diff), NodeID(:UserDemand, 6, p_non_diff))] @test objective.aff.constant ≈ sum(user_demand.demand) @test objective.aff.terms[to_user_5] ≈ -2.0 @@ -76,34 +82,37 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - (; allocation, graph) = p + (; p_non_diff) = model.integrator.p + (; allocation, graph) = p_non_diff (; main_network_connections, subnetwork_ids, allocation_models) = allocation @test Ribasim.has_main_network(allocation) @test Ribasim.is_main_network(first(subnetwork_ids)) # Connections from main network to subnetworks @test isempty(main_network_connections[1]) - @test only(main_network_connections[3]) == (NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)) - @test only(main_network_connections[5]) == (NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)) - @test only(main_network_connections[7]) == (NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)) + @test only(main_network_connections[3]) == + (NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff)) + @test only(main_network_connections[5]) == + (NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff)) + @test only(main_network_connections[7]) == + (NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff)) # main-sub connections 
are part of main network allocation network - allocation_model_main_network = Ribasim.get_allocation_model(p, Int32(1)) + allocation_model_main_network = Ribasim.get_allocation_model(p_non_diff, Int32(1)) @test [ - (NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)), - (NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)), - (NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)), + (NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff)), + (NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff)), + (NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff)), ] ⊆ keys(allocation_model_main_network.capacity.data) # In each subnetwork, the connection from the main network to the subnetwork is # interpreted as a source - @test Ribasim.get_allocation_model(p, Int32(3)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] - @test Ribasim.get_allocation_model(p, Int32(5)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] - @test Ribasim.get_allocation_model(p, Int32(7)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(3)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(5)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(7)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff))] end @testitem "Allocation with main network optimization problem" begin @@ -120,7 +129,8 @@ end model = Ribasim.Model(toml_path) (; p) = model.integrator - (; allocation, user_demand, graph, basin) = p + (; p_non_diff) = p + (; allocation, user_demand, graph, basin) = p_non_diff (; allocation_models, subnetwork_demands, @@ -133,56 +143,65 @@ end # 
Collecting demands u = Float64[] for allocation_model in allocation_models[2:end] - Ribasim.collect_demands!(p, allocation_model, t, u) + Ribasim.collect_demands!(p, allocation_model, t) end # See the difference between these values here and in # "subnetworks_with_sources" - @test subnetwork_demands[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] ≈ [4.0, 4.0, 0.0] atol = - 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] ≈ - [0.001, 0.0, 0.0] atol = 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))][1:2] ≈ - [0.001, 0.002] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + )] ≈ [4.0, 4.0, 0.0] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + )] ≈ [0.001, 0.0, 0.0] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + )][1:2] ≈ [0.001, 0.002] atol = 1e-4 # Solving for the main network, containing subnetworks as UserDemands allocation_model = allocation_models[1] (; problem) = allocation_model - main_source = - allocation_model.sources[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] + main_source = allocation_model.sources[( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] main_source.capacity_reduced = 4.5 - Ribasim.optimize_demand_priority!( - allocation_model, - u, - p, - t, - 1, - OptimizationType.allocate, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 1, OptimizationType.allocate) # Main network objective function F = problem[:F] objective = JuMP.objective_function(problem) objective_links = keys(objective.terms) - F_1 = F[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] - F_2 = F[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] - F_3 = F[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))] + F_1 = F[(NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff))] + F_2 = F[(NodeID(:Basin, 6, 
p_non_diff), NodeID(:Pump, 24, p_non_diff))] + F_3 = F[(NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff))] @test JuMP.UnorderedPair(F_1, F_1) ∈ objective_links @test JuMP.UnorderedPair(F_2, F_2) ∈ objective_links @test JuMP.UnorderedPair(F_3, F_3) ∈ objective_links # Running full allocation algorithm (; Δt_allocation) = allocation_models[1] - mean_input_flows[1][(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] = - 4.5 * Δt_allocation + mean_input_flows[1][( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] = 4.5 * Δt_allocation Ribasim.update_allocation!(model.integrator) - @test subnetwork_allocateds[NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)] ≈ - [4.0, 0.49775, 0.0] atol = 1e-4 - @test subnetwork_allocateds[NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)] ≈ - [0.001, 0.0, 0.0] rtol = 1e-3 - @test subnetwork_allocateds[NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)] ≈ - [0.001, 0.00024888, 0.0] rtol = 1e-3 + @test subnetwork_allocateds[ + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + ] ≈ [4.0, 0.49775, 0.0] atol = 1e-4 + @test subnetwork_allocateds[ + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + ] ≈ [0.001, 0.0, 0.0] rtol = 1e-3 + @test subnetwork_allocateds[ + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + ] ≈ [0.001, 0.00024888, 0.0] rtol = 1e-3 # Test for existence of links in allocation flow record allocation_flow = DataFrame(record_flow) @@ -190,8 +209,11 @@ end allocation_flow, [:from_node_type, :from_node_id, :to_node_type, :to_node_id] => ByRow( - (a, b, c, d) -> - haskey(graph, NodeID(Symbol(a), b, p), NodeID(Symbol(c), d, p)), + (a, b, c, d) -> haskey( + graph, + NodeID(Symbol(a), b, p_non_diff), + NodeID(Symbol(c), d, p_non_diff), + ), ) => :link_exists, ) @test all(allocation_flow.link_exists) @@ -213,9 +235,9 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - - (; allocation, user_demand, graph, basin) = p + (; p) 
= model.integrator + (; p_non_diff) = p + (; allocation, user_demand, graph, basin) = p_non_diff (; allocation_models, subnetwork_demands, @@ -226,30 +248,40 @@ end t = 0.0 # Set flows of sources in subnetworks - mean_input_flows[2][(NodeID(:FlowBoundary, 58, p), NodeID(:Basin, 16, p))] = 1.0 - mean_input_flows[4][(NodeID(:FlowBoundary, 59, p), NodeID(:Basin, 44, p))] = 1e-3 + mean_input_flows[2][( + NodeID(:FlowBoundary, 58, p_non_diff), + NodeID(:Basin, 16, p_non_diff), + )] = 1.0 + mean_input_flows[4][( + NodeID(:FlowBoundary, 59, p_non_diff), + NodeID(:Basin, 44, p_non_diff), + )] = 1e-3 # Collecting demands - u = Float64[] for allocation_model in allocation_models[2:end] - Ribasim.collect_demands!(p, allocation_model, t, u) + Ribasim.collect_demands!(p, allocation_model, t) end # See the difference between these values here and in # "allocation with main network optimization problem", internal sources # lower the subnetwork demands - @test subnetwork_demands[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] ≈ [4.0, 4.0, 0.0] rtol = - 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] ≈ - [0.001, 0.0, 0.0] rtol = 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))][1:2] ≈ - [0.001, 0.001] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + )] ≈ [4.0, 4.0, 0.0] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + )] ≈ [0.001, 0.0, 0.0] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + )][1:2] ≈ [0.001, 0.001] rtol = 1e-4 model = Ribasim.run(toml_path) (; u, p, t) = model.integrator - (; current_storage) = p.basin.current_properties - du = get_du(model.integrator) - Ribasim.formulate_storages!(current_storage, u, p, t) + Ribasim.formulate_storages!(u, p, t) + current_storage = view(p.diff_cache, p.p_non_diff.cache_ranges.current_storage) @test 
current_storage ≈ Float32[ 1.0346908f6, @@ -291,12 +323,14 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/level_demand/ribasim.toml") @test ispath(toml_path) model = Ribasim.Model(toml_path) - - p = model.integrator.p - (; user_demand, graph, allocation, basin, level_demand) = p + (; p_non_diff) = model.integrator.p + (; user_demand, graph, allocation, basin, level_demand) = p_non_diff # Initial "integrated" vertical flux - @test allocation.mean_input_flows[1][(NodeID(:Basin, 2, p), NodeID(:Basin, 2, p))] ≈ 1e2 + @test allocation.mean_input_flows[1][( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] ≈ 1e2 Ribasim.solve!(model) @@ -308,9 +342,12 @@ end ϕ = 1e-3 # precipitation q = Ribasim.get_flow( du, - p, + p_non_diff, 0.0, - (Ribasim.NodeID(:FlowBoundary, 1, p), Ribasim.NodeID(:Basin, 2, p)), + ( + Ribasim.NodeID(:FlowBoundary, 1, p_non_diff), + Ribasim.NodeID(:Basin, 2, p_non_diff), + ), ) A = Ribasim.basin_areas(basin, 1)[1] l_max = level_demand.max_level[1](0) @@ -403,7 +440,8 @@ end @test ispath(toml_path) model = Ribasim.Model(toml_path) (; p) = model.integrator - (; graph, allocation, flow_demand, user_demand, level_boundary) = p + (; p_non_diff) = p + (; graph, allocation, flow_demand, user_demand, level_boundary) = p_non_diff # Test has_external_demand @test !any( @@ -412,7 +450,7 @@ end ) @test Ribasim.has_external_demand( graph, - NodeID(:TabulatedRatingCurve, 2, p), + NodeID(:TabulatedRatingCurve, 2, p_non_diff), :flow_demand, )[1] @@ -424,7 +462,7 @@ end F_flow_buffer_in = problem[:F_flow_buffer_in] F_flow_buffer_out = problem[:F_flow_buffer_out] - node_id_with_flow_demand = NodeID(:TabulatedRatingCurve, 2, p) + node_id_with_flow_demand = NodeID(:TabulatedRatingCurve, 2, p_non_diff) # Test flow conservation constraint containing flow buffer constraint_with_flow_buffer = @@ -436,21 +474,15 @@ end F_flow_buffer_out[node_id_with_flow_demand] t = 0.0 - (; u) = model.integrator optimization_type = 
OptimizationType.internal_sources - Ribasim.set_initial_values!(allocation_model, u, p, t) - sources[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)].capacity_reduced = - 2e-3 + Ribasim.set_initial_values!(allocation_model, p, t) + sources[( + NodeID(:LevelBoundary, 1, p_non_diff), + node_id_with_flow_demand, + )].capacity_reduced = 2e-3 # Priority 1 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 1, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 1, optimization_type) objective = JuMP.objective_function(problem) @test JuMP.UnorderedPair( F_flow_buffer_in[node_id_with_flow_demand], @@ -461,50 +493,30 @@ end @test flow_demand.demand[1] ≈ flow_demand.demand_itp[1](t) - 0.001 rtol = 1e-3 ## Priority 2 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 2, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 2, optimization_type) # No demand left @test flow_demand.demand[1] < 1e-10 # Allocated @test JuMP.value(only(F_flow_buffer_in)) ≈ only(flow_demand.demand) atol = 1e-10 ## Priority 3 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 3, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 3, optimization_type) # The flow from the source is used up in previous demand priorities - @test flow[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)] ≈ 0 atol = 1e-10 + @test flow[(NodeID(:LevelBoundary, 1, p_non_diff), node_id_with_flow_demand)] ≈ 0 atol = + 1e-10 # So flow from the flow buffer is used for UserDemand #4 - @test flow[(node_id_with_flow_demand, NodeID(:Basin, 3, p))] ≈ 0.001 - @test flow[(NodeID(:Basin, 3, p), NodeID(:UserDemand, 4, p))] ≈ 0.001 + @test flow[(node_id_with_flow_demand, NodeID(:Basin, 3, p_non_diff))] ≈ 0.001 + @test flow[(NodeID(:Basin, 3, p_non_diff), NodeID(:UserDemand, 4, p_non_diff))] ≈ 0.001 # No flow 
coming from level boundary @test JuMP.value(F[(only(level_boundary.node_id), node_id_with_flow_demand)]) ≈ 0 atol = 1e-10 ## Priority 4 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 4, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 4, optimization_type) # Realized flow demand model = Ribasim.run(toml_path) - record_demand = DataFrame(model.integrator.p.allocation.record_demand) + record_demand = DataFrame(model.integrator.p.p_non_diff.allocation.record_demand) df_rating_curve_2 = record_demand[record_demand.node_id .== 2, :] @test all(df_rating_curve_2.realized .≈ 0.002) diff --git a/core/test/bmi_test.jl b/core/test/bmi_test.jl index 37f377b5c..faefecd7f 100644 --- a/core/test/bmi_test.jl +++ b/core/test/bmi_test.jl @@ -89,7 +89,7 @@ end demand = BMI.get_value_ptr(model, "user_demand.demand") inflow = BMI.get_value_ptr(model, "user_demand.cumulative_inflow") # One year in seconds - year = model.integrator.p.user_demand.demand_itp[2][1].t[2] + year = model.integrator.p.p_non_diff.user_demand.demand_itp[2][1].t[2] demand_start = 1e-3 slope = 1e-3 / year day = 86400.0 diff --git a/core/test/control_test.jl b/core/test/control_test.jl index 2f13a73cf..fdaa95b94 100644 --- a/core/test/control_test.jl +++ b/core/test/control_test.jl @@ -9,16 +9,18 @@ normpath(@__DIR__, "../../generated_testmodels/pump_discrete_control/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, graph, state_ranges) = p + (; p_non_diff) = model.integrator.p + (; discrete_control, pump, graph, state_ranges) = p_non_diff # Control input(flow rates) - pump_control_mapping = p.pump.control_mapping + pump_control_mapping = pump.control_mapping @test unique( - only(pump_control_mapping[(NodeID(:Pump, 4, p), "off")].itp_update).value.u, + only( + pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "off")].itp_update, + ).value.u, ) == [0] @test unique( - 
only(pump_control_mapping[(NodeID(:Pump, 4, p), "on")].itp_update).value.u, + only(pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "on")].itp_update).value.u, ) == [1.0e-5] logic_mapping::Vector{Dict{Vector{Bool}, String}} = [ @@ -73,8 +75,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/flow_condition/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, flow_boundary) = p + (; discrete_control, flow_boundary) = model.integrator.p.p_non_diff Δt = discrete_control.compound_variables[1][1].subvariables[1].look_ahead @@ -97,8 +98,7 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, level_boundary) = p + (; discrete_control, level_boundary) = model.integrator.p.p_non_diff Δt = discrete_control.compound_variables[1][1].subvariables[1].look_ahead @@ -118,8 +118,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/pid_control/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; basin, pid_control, flow_boundary) = p + (; basin, pid_control, flow_boundary) = model.integrator.p.p_non_diff level = Ribasim.get_storages_and_levels(model).level[1, :] t = Ribasim.tsaves(model) @@ -162,7 +161,7 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; discrete_control, tabulated_rating_curve) = model.integrator.p + (; discrete_control, tabulated_rating_curve) = model.integrator.p.p_non_diff (; current_interpolation_index, interpolations) = tabulated_rating_curve index_high, index_low = 1, 2 @@ -194,18 +193,18 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; discrete_control, pid_control) = p + (; p_non_diff) = model.integrator.p + (; discrete_control, pid_control) = p_non_diff t = Ribasim.tsaves(model) level = Ribasim.get_storages_and_levels(model).level[1, :] target_high = pid_control.control_mapping[( - 
NodeID(:PidControl, 6, p), + NodeID(:PidControl, 6, p_non_diff), "target_high", )].itp_update[1].value.u[1] target_low = pid_control.control_mapping[( - NodeID(:PidControl, 6, p), + NodeID(:PidControl, 6, p_non_diff), "target_low", )].itp_update[1].value.u[1] @@ -225,21 +224,21 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; discrete_control) = p + (; p_non_diff) = model.integrator.p + (; discrete_control) = p_non_diff (; compound_variables, record) = discrete_control compound_variable = only(only(compound_variables)) @test compound_variable.subvariables[1] == SubVariable(; - listen_node_id = NodeID(:FlowBoundary, 2, p), + listen_node_id = NodeID(:FlowBoundary, 2, p_non_diff), diff_cache_ref = compound_variable.subvariables[1].diff_cache_ref, variable = "flow_rate", weight = 0.5, look_ahead = 0.0, ) @test compound_variable.subvariables[2] == SubVariable(; - listen_node_id = NodeID(:FlowBoundary, 3, p), + listen_node_id = NodeID(:FlowBoundary, 3, p_non_diff), diff_cache_ref = compound_variable.subvariables[2].diff_cache_ref, variable = "flow_rate", weight = 0.5, @@ -260,12 +259,13 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; record) = p.discrete_control + (; p_non_diff) = model.integrator.p + (; starttime, discrete_control) = p_non_diff + (; record) = discrete_control @test record.truth_state == ["T", "F"] @test record.control_state == ["On", "Off"] - t_switch = Ribasim.datetime_since(record.time[2], p.starttime) + t_switch = Ribasim.datetime_since(record.time[2], starttime) flow_table = DataFrame(Ribasim.flow_table(model)) @test all(filter(:time => time -> time <= t_switch, flow_table).flow_rate .> -1e-12) @test all( @@ -317,7 +317,7 @@ end flow_link_0 = filter(:link_id => id -> id == 0, flow_data) t = Ribasim.seconds_since.(flow_link_0.time, model.config.starttime) itp = - 
model.integrator.p.basin.concentration_data.concentration_external[1]["concentration_external.kryptonite"] + model.integrator.p.p_non_diff.basin.concentration_data.concentration_external[1]["concentration_external.kryptonite"] concentration = itp.(t) threshold = 0.5 above_threshold = concentration .> threshold @@ -332,7 +332,7 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) - (; record, compound_variables) = model.integrator.p.discrete_control + (; record, compound_variables) = model.integrator.p.p_non_diff.discrete_control itp = compound_variables[1][1].greater_than[1] @test itp.extrapolation_left == Periodic diff --git a/core/test/equations_test.jl b/core/test/equations_test.jl index 89b5c4b1c..cd3cf68e5 100644 --- a/core/test/equations_test.jl +++ b/core/test/equations_test.jl @@ -23,15 +23,15 @@ @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - (; p) = model.integrator + (; level_boundary, basin, linear_resistance) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage = Ribasim.get_storages_and_levels(model).storage[1, :] - A = Ribasim.basin_areas(p.basin, 1)[2] # needs to be constant + A = Ribasim.basin_areas(basin, 1)[2] # needs to be constant u0 = A * 10.0 - L = p.level_boundary.level[1].u[1] - R = p.linear_resistance.resistance[1] - Q_max = p.linear_resistance.max_flow_rate[1] + L = level_boundary.level[1].u[1] + R = linear_resistance.resistance[1] + Q_max = linear_resistance.max_flow_rate[1] # derivation in https://github.com/Deltares/Ribasim/pull/1100#issuecomment-1934799342 t_shift = (u0 - A * (L + R * Q_max)) / Q_max @@ -55,11 +55,11 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p + (; basin) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage = Ribasim.get_storages_and_levels(model).storage[1, :] - basin_area = Ribasim.basin_areas(p.basin, 1)[2] + basin_area = Ribasim.basin_areas(basin, 1)[2] storage_min 
= 50.005 α = 24 * 60 * 60 storage_analytic = @@ -90,15 +90,14 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; manning_resistance) = p + (; manning_resistance, basin) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage_both = Ribasim.get_storages_and_levels(model).storage storage = storage_both[1, :] storage_min = 50.005 level_min = 1.0 - basin_area = Ribasim.basin_areas(p.basin, 1)[2] + basin_area = Ribasim.basin_areas(basin, 1)[2] level = @. level_min + (storage - storage_min) / basin_area C = sum(storage_both[:, 1]) Λ = 2 * level_min + (C - 2 * storage_min) / basin_area @@ -127,8 +126,7 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; basin, pid_control) = p + (; basin, pid_control) = model.integrator.p.p_non_diff storage = Ribasim.get_storages_and_levels(model).storage[:] t = Ribasim.tsaves(model) @@ -178,11 +176,11 @@ end @test config.solver.dt === model.integrator.dt Ribasim.solve!(model) @test successful_retcode(model) - p = model.integrator.p - (; flow_boundary, pump) = p + (; p_non_diff, diff_cache) = model.integrator.p + (; flow_boundary, pump, cache_ranges) = p_non_diff q_boundary = flow_boundary.flow_rate[1].u[1] - pump_flow_rate = pump.flow_rate_cache[Float64[]] + pump_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) q_pump = pump_flow_rate[1] storage_both = get_storages_and_levels(model).storage diff --git a/core/test/io_test.jl b/core/test/io_test.jl index b3b4eede9..7678c18a2 100644 --- a/core/test/io_test.jl +++ b/core/test/io_test.jl @@ -137,10 +137,11 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - storage1_begin = - copy(model.integrator.p.basin.current_properties.current_storage[Float64[]]) + (; p_non_diff, diff_cache) = model.integrator.p + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + storage1_begin = 
copy(current_storage) solve!(model) - storage1_end = model.integrator.p.basin.current_properties.current_storage[Float64[]] + storage1_end = current_storage @test storage1_begin != storage1_end # copy state results to input @@ -156,6 +157,8 @@ end end model = Ribasim.Model(toml_path) - storage2_begin = model.integrator.p.basin.current_properties.current_storage[Float64[]] + (; p_non_diff, diff_cache) = model.integrator.p + current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + storage2_begin = current_storage @test storage1_end ≈ storage2_begin end diff --git a/core/test/time_test.jl b/core/test/time_test.jl index 913f45e8c..5a29cc9ef 100644 --- a/core/test/time_test.jl +++ b/core/test/time_test.jl @@ -31,8 +31,7 @@ end @test ispath(toml_path) config = Ribasim.Config(toml_path; solver_saveat = 0) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; basin) = p + (; basin) = model.integrator.p.p_non_diff n_basin = length(basin.node_id) basin_table = DataFrame(Ribasim.basin_table(model)) @@ -64,7 +63,7 @@ end solver_algorithm = "Euler", ) model = Ribasim.Model(config) - (; basin) = model.integrator.p + (; basin) = model.integrator.p.p_non_diff starting_precipitation = basin.vertical_flux.precipitation[1] * Ribasim.basin_areas(basin, 1)[end] BMI.update_until(model, saveat) @@ -95,7 +94,7 @@ end @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; level_boundary, flow_boundary, basin) = model.integrator.p + (; level_boundary, flow_boundary, basin) = model.integrator.p.p_non_diff function test_extrapolation(itp) @test itp.extrapolation_left == Periodic @@ -107,7 +106,7 @@ end test_extrapolation(flow_boundary.flow_rate[1]) end -@testitem "transient_pump_weir" begin +@testitem "transient_pump_outlet" begin using DataFrames: DataFrame toml_path = @@ -119,6 +118,6 @@ end @test all(isapprox.(storage[1, 2:end], storage[1, end]; rtol = 1e-4)) t_end = model.integrator.t - flow_rate_end = model.integrator.p.pump.flow_rate[1].u[end] + 
flow_rate_end = model.integrator.p.p_non_diff.pump.flow_rate[1].u[end] @test storage[2, end] ≈ storage[2, 1] + 0.5 * flow_rate_end * t_end end diff --git a/core/test/utils_test.jl b/core/test/utils_test.jl index 707fb8cb2..3e763eccd 100644 --- a/core/test/utils_test.jl +++ b/core/test/utils_test.jl @@ -261,7 +261,7 @@ end t0 = 0.0 u0 = Ribasim.build_state_vector(p.p_non_diff) du0 = copy(u0) - jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) + jac_prototype, _, _ = Ribasim.get_diff_eval(du0, u0, p, config.solver) # rows, cols, _ = findnz(jac_prototype) #! format: off @@ -283,7 +283,7 @@ end close(db) u0 = Ribasim.build_state_vector(p_non_diff) du0 = copy(u0) - jac_prototype, _ = Ribasim.get_jac_eval(du0, u0, p, config.solver) + jac_prototype, _, _ = Ribasim.get_diff_eval(du0, u0, p, config.solver) #! format: off rows_expected = [1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2] From 17e2da877aa79c2ea4e1997ac5729a9135858ae1 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 15:56:05 +0100 Subject: [PATCH 11/28] Fix more tests --- Manifest.toml | 8 +++--- core/src/Ribasim.jl | 2 +- core/src/allocation_optim.jl | 3 ++- core/src/callback.jl | 5 ++-- core/src/model.jl | 8 +++--- core/src/parameter.jl | 3 ++- core/src/read.jl | 18 +++++-------- core/src/util.jl | 26 +++++++++++------- core/src/validation.jl | 2 +- core/test/allocation_test.jl | 51 +++++++++++++++++++++--------------- core/test/bmi_test.jl | 2 +- core/test/control_test.jl | 10 ++++--- core/test/run_models_test.jl | 4 +-- core/test/validation_test.jl | 2 +- 14 files changed, 79 insertions(+), 65 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 382bc81a8..30ff8a495 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -547,9 +547,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "258fa016b2d03f19e4d0d1cd8e30c84907af1528" +git-tree-sha1 = 
"479214d2988a837e6d21ac38afdcb03cb2d0994e" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.42" +version = "0.6.43" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -2291,9 +2291,9 @@ version = "1.11.0" [[deps.SparseConnectivityTracer]] deps = ["ADTypes", "DocStringExtensions", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"] -git-tree-sha1 = "6651f4663027f3b30a31429d257185f56a571184" +git-tree-sha1 = "e60ba2f27bf0c85cc20ac326a158182630032bb6" uuid = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" -version = "0.6.13" +version = "0.6.14" [deps.SparseConnectivityTracer.extensions] SparseConnectivityTracerDataInterpolationsExt = "DataInterpolations" diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index 105f08369..c12db52e3 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -48,7 +48,7 @@ using SciMLBase: # Automatically detecting the sparsity pattern of the Jacobian of water_balance! # through operator overloading using SparseConnectivityTracer: - GradientTracer, TracerSparsityDetector, IndexSetGradientPattern + GradientTracer, TracerSparsityDetector, IndexSetGradientPattern, jacobian_buffer using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # For efficient sparse computations diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index 1d5ea2e18..bcf369ce1 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -669,7 +669,8 @@ function save_demands_and_allocations!( has_external_demand(graph, node_id, :flow_demand) if has_demand # Full demand, not the possibly reduced demand - flow_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + flow_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, node_id) demand = demand_priority_idx == flow_demand_priority_idx ? 
flow_demand.demand[flow_demand_node_id.idx,] : 0.0 diff --git a/core/src/callback.jl b/core/src/callback.jl index bfe91d67c..9bb429462 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -656,10 +656,11 @@ function set_control_params!(p::Parameters, node_id::NodeID, control_state::Stri (; discrete_control) = p.p_non_diff (; control_mappings) = discrete_control control_state_update = control_mappings[node_id.type][(node_id, control_state)] - (; active, scalar_update, itp_update) = control_state_update + (; active, scalar_update, itp_update_linear, itp_update_lookup) = control_state_update apply_parameter_update!(active) apply_parameter_update!.(scalar_update) - apply_parameter_update!.(itp_update) + apply_parameter_update!.(itp_update_linear) + apply_parameter_update!.(itp_update_lookup) return nothing end diff --git a/core/src/model.jl b/core/src/model.jl index 350be55d7..676afaa65 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -35,18 +35,16 @@ The time derivative is also supplied in case a Rosenbrock method is used. 
function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) (; p_non_diff, diff_cache, p_mutable) = p backend = get_ad_type(solver) + sparsity_detector = TracerSparsityDetector() backend_jac = if solver.sparse - AutoSparse( - backend; - sparsity_detector = TracerSparsityDetector(), - coloring_algorithm = GreedyColoringAlgorithm(), - ) + AutoSparse(backend; sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) else backend end t = 0.0 + # diff_cache_SCT = jacobian_buffer(diff_cache, sparsity_detector) diff_cache_SCT = zeros(GradientTracer{IndexSetGradientPattern{Int64, BitSet}}, length(diff_cache)) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 19bb017f1..b624c7387 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -326,8 +326,9 @@ The parameter update associated with a certain control state for discrete contro @kwdef struct ControlStateUpdate active::ParameterUpdate{Bool} scalar_update::Vector{ParameterUpdate{Float64}} = ParameterUpdate{Float64}[] - itp_update::Vector{ParameterUpdate{ScalarInterpolation}} = + itp_update_linear::Vector{ParameterUpdate{ScalarInterpolation}} = ParameterUpdate{ScalarInterpolation}[] + itp_update_lookup::Vector{ParameterUpdate{IndexLookup}} = ParameterUpdate{IndexLookup}[] end """ diff --git a/core/src/read.jl b/core/src/read.jl index 3559cf0c6..1003e25ee 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -498,10 +498,11 @@ function TabulatedRatingCurve( if !ismissing(control_state) # let control swap out the static lookup object index_lookup = static_lookup(interpolation_index) - control_mapping[(node_id, control_state)] = ControlStateUpdate( - ParameterUpdate(:active, is_active), - ParameterUpdate{Float64}[], - [ParameterUpdate(:current_interpolation_index, index_lookup)], + control_mapping[(node_id, control_state)] = ControlStateUpdate(; + active = ParameterUpdate(:active, is_active), + itp_update_lookup = [ + ParameterUpdate(:current_interpolation_index, 
index_lookup), + ], ) end push!(interpolations, interpolation) @@ -1220,12 +1221,7 @@ function continuous_control_compound_variables(db::DB, config::Config, ids) variable_data = filter(row -> row.node_id == id, data) push!( compound_variables, - CompoundVariable( - variable_data, - NodeType.ContinuousControl, - node_ids_all, - placeholder_vector, - ), + CompoundVariable(variable_data, NodeType.ContinuousControl, node_ids_all), ) end compound_variables @@ -1862,7 +1858,7 @@ function Parameters(db::DB, config::Config)::Parameters collect_control_mappings!(p_non_diff) set_listen_diff_cache_refs!(p_non_diff) - set_discrete_controlled_diff_cache_refs!(p_non_diff) + set_discrete_controlled_variable_refs!(p_non_diff) # Allocation data structures if config.allocation.use_allocation diff --git a/core/src/util.jl b/core/src/util.jl index 9a33e09b6..6a323e589 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -611,7 +611,7 @@ end """ Set references to all variables that are controlled by discrete control """ -function set_discrete_controlled_diff_cache_refs!(p_non_diff::ParametersNonDiff)::Nothing +function set_discrete_controlled_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing for nodetype in propertynames(p_non_diff) node = getfield(p_non_diff, nodetype) if node isa AbstractParameterNode && hasfield(typeof(node), :control_mapping) @@ -619,21 +619,27 @@ function set_discrete_controlled_diff_cache_refs!(p_non_diff::ParametersNonDiff) node.control_mapping for ((node_id, control_state), control_state_update) in control_mapping - (; scalar_update, itp_update) = control_state_update + (; scalar_update, itp_update_linear, itp_update_lookup) = + control_state_update # References to scalar parameters for (i, parameter_update) in enumerate(scalar_update) field = getfield(node, parameter_update.name) - if field isa Cache - field = field[Float64[]] - end scalar_update[i] = @set parameter_update.ref = Ref(field, node_id.idx) end - # References to interpolation parameters - 
for (i, parameter_update) in enumerate(itp_update) + # References to linear interpolation parameters + for (i, parameter_update) in enumerate(itp_update_linear) + field = getfield(node, parameter_update.name) + itp_update_linear[i] = + @set parameter_update.ref = Ref(field, node_id.idx) + end + + # References to index interpolation parameters + for (i, parameter_update) in enumerate(itp_update_lookup) field = getfield(node, parameter_update.name) - itp_update[i] = @set parameter_update.ref = Ref(field, node_id.idx) + itp_update_lookup[i] = + @set parameter_update.ref = Ref(field, node_id.idx) end # Reference to 'active' parameter if it exists @@ -714,12 +720,12 @@ function add_control_state!( end # This is a not so great way to get a concrete type, # which is used as a ControlStateUpdate type parameter. - itp_update = if isempty(itp_update) + itp_update_linear = if isempty(itp_update) ParameterUpdate{ScalarInterpolation}[] else [x for x in itp_update] end - control_state_update = ControlStateUpdate(; active, scalar_update, itp_update) + control_state_update = ControlStateUpdate(; active, scalar_update, itp_update_linear) if add_control_state control_mapping[(node_id, control_state)] = control_state_update diff --git a/core/src/validation.jl b/core/src/validation.jl index eb032f22e..fcdef5c1b 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -279,7 +279,7 @@ function valid_flow_rates( for (key, control_state_update) in pairs(control_mapping) id_controlled = key[1] push!(ids_controlled, id_controlled) - flow_rate_update = only(control_state_update.itp_update) + flow_rate_update = only(control_state_update.itp_update_linear) @assert flow_rate_update.name == :flow_rate flow_rate_ = minimum(flow_rate_update.value.u) diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 9c770185e..0029dad97 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -301,7 +301,7 @@ end 5.619053, 10419.156, 4.057502, - ] + ] 
rtol = 1e-3 # The output should only contain data for the demand_priority for which # a node has a demand @@ -468,8 +468,8 @@ end constraint_with_flow_buffer = JuMP.constraint_object(problem[:flow_conservation][node_id_with_flow_demand]) @test constraint_with_flow_buffer.func == - F[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)] - - F[(node_id_with_flow_demand, NodeID(:Basin, 3, p))] - + F[(NodeID(:LevelBoundary, 1, p_non_diff), node_id_with_flow_demand)] - + F[(node_id_with_flow_demand, NodeID(:Basin, 3, p_non_diff))] - F_flow_buffer_in[node_id_with_flow_demand] + F_flow_buffer_out[node_id_with_flow_demand] @@ -569,12 +569,16 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; p) = model.integrator + (; p_non_diff) = model.integrator.p + (; allocation) = p_non_diff # Test for pump max flow capacity constraint - (; problem) = p.allocation.allocation_models[1] + (; problem) = allocation.allocation_models[1] constraint = JuMP.constraint_object( - problem[:capacity][(NodeID(:Basin, 1, p), NodeID(:LinearResistance, 2, p))], + problem[:capacity][( + NodeID(:Basin, 1, p_non_diff), + NodeID(:LinearResistance, 2, p_non_diff), + )], ) @test constraint.set.upper == 2.0 end @@ -588,7 +592,7 @@ end normpath(@__DIR__, "../../generated_testmodels/fair_distribution/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - (; user_demand, graph) = model.integrator.p + (; user_demand, graph) = model.integrator.p.p_non_diff data_allocation = DataFrame(Ribasim.allocation_table(model)) fractions = Vector{Float64}[] @@ -612,22 +616,27 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/level_demand/ribasim.toml") model = Ribasim.Model(toml_path) (; p) = model.integrator + (; p_non_diff) = p t = 0.0 - u = model.integrator.u demand_priority_idx = 2 - allocation_model = first(p.allocation.allocation_models) - Ribasim.set_initial_values!(allocation_model, u, p, t) - Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, 
demand_priority_idx) + allocation_model = first(p_non_diff.allocation.allocation_models) + Ribasim.set_initial_values!(allocation_model, p, t) + Ribasim.set_objective_demand_priority!(allocation_model, p, t, demand_priority_idx) Ribasim.allocate_to_users_from_connected_basin!( allocation_model, - p, + p_non_diff, demand_priority_idx, ) flow_data = allocation_model.flow.data - @test flow_data[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] == 0.0 - @test flow_data[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 3, p))] == 0.0015 - @test flow_data[(NodeID(:UserDemand, 3, p), NodeID(:Basin, 5, p))] == 0.0 + @test flow_data[( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] == 0.0 + @test flow_data[(NodeID(:Basin, 2, p_non_diff), NodeID(:UserDemand, 3, p_non_diff))] == + 0.0015 + @test flow_data[(NodeID(:UserDemand, 3, p_non_diff), NodeID(:Basin, 5, p_non_diff))] == + 0.0 end @testitem "level_demand_without_max_level" begin @@ -638,16 +647,16 @@ end @test ispath(toml_path) model = Ribasim.Model(toml_path) (; p, u, t) = model.integrator - (; allocation_models) = p.allocation - (; basin, level_demand, graph) = p + (; basin, level_demand, graph, allocation) = p.p_non_diff + (; allocation_models) = allocation level_demand.max_level[1].u .= Inf level_demand.max_level[2].u .= Inf # Given a max_level of Inf, the basin capacity is 0.0 because it is not possible for the basin level to be > Inf - @test Ribasim.get_basin_capacity(allocation_models[1], u, p, t, basin.node_id[1]) == 0.0 - @test Ribasim.get_basin_capacity(allocation_models[1], u, p, t, basin.node_id[2]) == 0.0 - @test Ribasim.get_basin_capacity(allocation_models[2], u, p, t, basin.node_id[3]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[1], p, t, basin.node_id[1]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[1], p, t, basin.node_id[2]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[2], p, t, basin.node_id[3]) == 0.0 end @testitem "cyclic_demand" 
begin @@ -656,7 +665,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/cyclic_demand/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - (; level_demand, user_demand, flow_demand) = model.integrator.p + (; level_demand, user_demand, flow_demand) = model.integrator.p.p_non_diff function test_extrapolation(itp) @test itp.extrapolation_left == Periodic diff --git a/core/test/bmi_test.jl b/core/test/bmi_test.jl index faefecd7f..961552073 100644 --- a/core/test/bmi_test.jl +++ b/core/test/bmi_test.jl @@ -50,7 +50,7 @@ end BMI.update_until(model, 86400.0) storage = BMI.get_value_ptr(model, "basin.storage") # get_value_ptr does not copy - @test storage0 === storage != ones(4) + @test storage0 == storage != ones(4) end @testitem "get_value_ptr_all_values" begin diff --git a/core/test/control_test.jl b/core/test/control_test.jl index fdaa95b94..fd71cfb43 100644 --- a/core/test/control_test.jl +++ b/core/test/control_test.jl @@ -16,11 +16,13 @@ pump_control_mapping = pump.control_mapping @test unique( only( - pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "off")].itp_update, + pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "off")].itp_update_linear, ).value.u, ) == [0] @test unique( - only(pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "on")].itp_update).value.u, + only( + pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "on")].itp_update_linear, + ).value.u, ) == [1.0e-5] logic_mapping::Vector{Dict{Vector{Bool}, String}} = [ @@ -202,11 +204,11 @@ end target_high = pid_control.control_mapping[( NodeID(:PidControl, 6, p_non_diff), "target_high", - )].itp_update[1].value.u[1] + )].itp_update_linear[1].value.u[1] target_low = pid_control.control_mapping[( NodeID(:PidControl, 6, p_non_diff), "target_low", - )].itp_update[1].value.u[1] + )].itp_update_linear[1].value.u[1] t_target_jump = discrete_control.record.time[2] t_idx_target_jump = searchsortedlast(t, t_target_jump) diff --git a/core/test/run_models_test.jl 
b/core/test/run_models_test.jl index 32c75b84c..7bda1a098 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -501,13 +501,13 @@ end # Test for conservation of mass, flow at the beginning == flow at the end @test Ribasim.get_flow( du, - p, + p_non_diff, t, (NodeID(:FlowBoundary, 1, p_non_diff), NodeID(:Basin, 2, p_non_diff)), ) ≈ 5.0 atol = 0.001 skip = Sys.isapple() @test Ribasim.get_flow( du, - p, + p_non_diff, t, (NodeID(:ManningResistance, 101, p_non_diff), NodeID(:Basin, 102, p_non_diff)), ) ≈ 5.0 atol = 0.001 skip = Sys.isapple() diff --git a/core/test/validation_test.jl b/core/test/validation_test.jl index 11547a5bd..095cf6c1d 100644 --- a/core/test/validation_test.jl +++ b/core/test/validation_test.jl @@ -240,7 +240,7 @@ end control_mapping = Dict( (NodeID(:Pump, 1, 1), "foo") => ControlStateUpdate(; active = ParameterUpdate(:active, true), - itp_update = [ + itp_update_linear = [ ParameterUpdate( :flow_rate, LinearInterpolation([-1.0, -1.0], [0.0, 1.0]), From eb31535207e6ed3c152c63ba65081b57b3185735 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 15:59:46 +0100 Subject: [PATCH 12/28] Replace weight function by Returns --- core/src/graph.jl | 2 +- core/src/parameter.jl | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/core/src/graph.jl b/core/src/graph.jl index 6083655ed..ffec554ec 100644 --- a/core/src/graph.jl +++ b/core/src/graph.jl @@ -39,7 +39,7 @@ function create_graph(db::DB, config::Config)::MetaGraph vertex_data_type = NodeMetadata, edge_data_type = LinkMetadata, graph_data = nothing, - weight_function, + Returns(1.0), ) for row in node_rows node_id = NodeID(row.node_type, row.node_id, node_table) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 315400f30..5de8750f0 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -906,10 +906,6 @@ end current_interpolation_index::Vector{IndexLookup} end -# This function is the same as the MetaGraphsNext default 
anonymous function, -# but has a stable type for the type alias below. -weight_function(::Any) = 1.0 - """ The metadata of the graph (the fields of the NamedTuple) can be accessed e.g. using graph[].flow. @@ -931,7 +927,7 @@ const ModelGraph = MetaGraph{ flow_links::Vector{LinkMetadata}, saveat::Float64, }, - typeof(weight_function), + Returns{Float64}, Float64, } From 19391786d1b24fa498651c422232d09f85397370 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 16:04:36 +0100 Subject: [PATCH 13/28] Replace weight function by Returns properly --- core/src/graph.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/graph.jl b/core/src/graph.jl index ffec554ec..836497847 100644 --- a/core/src/graph.jl +++ b/core/src/graph.jl @@ -39,7 +39,7 @@ function create_graph(db::DB, config::Config)::MetaGraph vertex_data_type = NodeMetadata, edge_data_type = LinkMetadata, graph_data = nothing, - Returns(1.0), + weight_function = Returns(1.0), ) for row in node_rows node_id = NodeID(row.node_type, row.node_id, node_table) From 23cf2cd7c41a245abfce229f7b3686ad694a7ea5 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 16:24:39 +0100 Subject: [PATCH 14/28] Avoid accessing undefined cache entries --- core/src/model.jl | 4 +--- core/src/solve.jl | 8 ++------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/core/src/model.jl b/core/src/model.jl index 676afaa65..7d3845851 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -44,9 +44,7 @@ function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) end t = 0.0 - # diff_cache_SCT = jacobian_buffer(diff_cache, sparsity_detector) - diff_cache_SCT = - zeros(GradientTracer{IndexSetGradientPattern{Int64, BitSet}}, length(diff_cache)) + diff_cache_SCT = jacobian_buffer(diff_cache, sparsity_detector) # Activate all nodes to catch all possible state dependencies p_mutable.all_nodes_active = true diff --git a/core/src/solve.jl b/core/src/solve.jl index 
823f4ce34..8a86a4dd8 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -563,7 +563,6 @@ function formulate_flow!( inflow_link, outflow_link, active, - flow_rate_from_cache, flow_rate_itp, min_flow_rate, max_flow_rate, @@ -575,7 +574,6 @@ function formulate_flow!( pump.inflow_link, pump.outflow_link, pump.active, - cache_flow_rate, pump.flow_rate, pump.min_flow_rate, pump.max_flow_rate, @@ -591,7 +589,7 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - flow_rate_from_cache + cache_flow_rate[id.idx] end inflow_id = inflow_link.link[1] @@ -630,7 +628,6 @@ function formulate_flow!( inflow_link, outflow_link, active, - flow_rate_from_cache, flow_rate_itp, min_flow_rate, max_flow_rate, @@ -642,7 +639,6 @@ function formulate_flow!( outlet.inflow_link, outlet.outflow_link, outlet.active, - cache_flow_rate, outlet.flow_rate, outlet.min_flow_rate, outlet.max_flow_rate, @@ -658,7 +654,7 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - flow_rate_from_cache + cache_flow_rate[id.idx] end inflow_id = inflow_link.link[1] From 151c552cf8a6cdf6437e3dada7b682c8b8643945 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Thu, 13 Mar 2025 19:01:07 +0100 Subject: [PATCH 15/28] We no longer need to explicitly create the Jacobian buffer for SCT --- Manifest.toml | 9 +++++++-- core/src/Ribasim.jl | 2 +- core/src/model.jl | 3 +-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 30ff8a495..886e82b3b 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -547,9 +547,12 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "479214d2988a837e6d21ac38afdcb03cb2d0994e" +git-tree-sha1 = "5c14e3a9e56bc55cc7360833107a0c1b508d76b8" +repo-rev = "main" +repo-subdir = "DifferentiationInterface" +repo-url = 
"https://github.com/JuliaDiff/DifferentiationInterface.jl.git" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.43" +version = "0.6.45" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -564,6 +567,7 @@ version = "0.6.43" DifferentiationInterfacePolyesterForwardDiffExt = "PolyesterForwardDiff" DifferentiationInterfaceReverseDiffExt = ["ReverseDiff", "DiffResults"] DifferentiationInterfaceSparseArraysExt = "SparseArrays" + DifferentiationInterfaceSparseConnectivityTracerExt = "SparseConnectivityTracer" DifferentiationInterfaceSparseMatrixColoringsExt = "SparseMatrixColorings" DifferentiationInterfaceStaticArraysExt = "StaticArrays" DifferentiationInterfaceSymbolicsExt = "Symbolics" @@ -585,6 +589,7 @@ version = "0.6.43" PolyesterForwardDiff = "98d1487c-24ca-40b6-b7ab-df2af84e126b" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index c12db52e3..105f08369 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -48,7 +48,7 @@ using SciMLBase: # Automatically detecting the sparsity pattern of the Jacobian of water_balance! 
# through operator overloading using SparseConnectivityTracer: - GradientTracer, TracerSparsityDetector, IndexSetGradientPattern, jacobian_buffer + GradientTracer, TracerSparsityDetector, IndexSetGradientPattern using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern # For efficient sparse computations diff --git a/core/src/model.jl b/core/src/model.jl index 7d3845851..8b0c7f504 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -44,7 +44,6 @@ function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) end t = 0.0 - diff_cache_SCT = jacobian_buffer(diff_cache, sparsity_detector) # Activate all nodes to catch all possible state dependencies p_mutable.all_nodes_active = true @@ -54,7 +53,7 @@ function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) backend_jac, u, Constant(p_non_diff), - Cache(diff_cache_SCT), + Cache(diff_cache), Constant(p_mutable), Constant(t), ) From 23b844b62e10a8960470f0566fedc712dc54e50f Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Fri, 14 Mar 2025 15:36:05 +0100 Subject: [PATCH 16/28] Use DI prep_cache branch --- Manifest.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 886e82b3b..70359ef4f 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -547,12 +547,12 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "5c14e3a9e56bc55cc7360833107a0c1b508d76b8" -repo-rev = "main" +git-tree-sha1 = "a523c3c7068b02b964c22e10a86791d90fcb16a2" +repo-rev = "gd/prep_cache" repo-subdir = "DifferentiationInterface" repo-url = "https://github.com/JuliaDiff/DifferentiationInterface.jl.git" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.45" +version = "0.6.46" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" From a77042eb163247bad282e2689ca009ae31b4f218 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 17 
Mar 2025 09:03:09 +0100 Subject: [PATCH 17/28] Update DI --- Manifest.toml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 70359ef4f..d6f6858ab 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -547,12 +547,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "a523c3c7068b02b964c22e10a86791d90fcb16a2" -repo-rev = "gd/prep_cache" -repo-subdir = "DifferentiationInterface" -repo-url = "https://github.com/JuliaDiff/DifferentiationInterface.jl.git" +git-tree-sha1 = "3999a7934cc723719e79ff7db33160b141297444" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.46" +version = "0.6.44" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -564,7 +561,7 @@ version = "0.6.46" DifferentiationInterfaceForwardDiffExt = ["ForwardDiff", "DiffResults"] DifferentiationInterfaceGTPSAExt = "GTPSA" DifferentiationInterfaceMooncakeExt = "Mooncake" - DifferentiationInterfacePolyesterForwardDiffExt = "PolyesterForwardDiff" + DifferentiationInterfacePolyesterForwardDiffExt = ["PolyesterForwardDiff", "ForwardDiff", "DiffResults"] DifferentiationInterfaceReverseDiffExt = ["ReverseDiff", "DiffResults"] DifferentiationInterfaceSparseArraysExt = "SparseArrays" DifferentiationInterfaceSparseConnectivityTracerExt = "SparseConnectivityTracer" From 193035a7c3bfdbe4af5e62837552a6fe2a4976c9 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 17 Mar 2025 15:43:04 +0100 Subject: [PATCH 18/28] Fix last bugs --- core/src/parameter.jl | 2 +- core/src/read.jl | 20 ++++++++------------ core/src/solve.jl | 8 ++++---- core/src/util.jl | 3 ++- core/test/equations_test.jl | 6 ++---- 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index 5de8750f0..f12374a5e 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -783,7 +783,7 @@ end node_id::Vector{NodeID} 
compound_variable::Vector{CompoundVariable} controlled_variable::Vector{String} - target_ref::Vector{DiffCacheRef} + target_ref::Vector{DiffCacheRef} = Vector{DiffCacheRef}(undef, length(node_id)) func::Vector{ScalarInterpolation} end diff --git a/core/src/read.jl b/core/src/read.jl index 1003e25ee..c2afb52e6 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -1237,20 +1237,11 @@ function ContinuousControl(db::DB, config::Config)::ContinuousControl func, controlled_variable, errors = continuous_control_functions(db, config, ids) compound_variable = continuous_control_compound_variables(db, config, ids) - # References to the controlled parameters, filled in later when they are known - target_refs = DiffCacheRef[] - if errors error("Errors encountered when parsing ContinuousControl data.") end - return ContinuousControl( - node_id, - compound_variable, - controlled_variable, - target_refs, - func, - ) + return ContinuousControl(; node_id, compound_variable, controlled_variable, func) end function PidControl(db::DB, config::Config)::PidControl @@ -1838,6 +1829,13 @@ function Parameters(db::DB, config::Config)::Parameters state_ranges, graph, ) + set_target_ref!( + nodes.continuous_control.target_ref, + nodes.continuous_control.node_id, + nodes.continuous_control.controlled_variable, + state_ranges, + graph, + ) p_non_diff = ParametersNonDiff(; config.starttime, @@ -1865,8 +1863,6 @@ function Parameters(db::DB, config::Config)::Parameters initialize_allocation!(p_non_diff, db, config) end - # TODO: Set and check pump and outlet flow rates - return Parameters(; p_non_diff) end diff --git a/core/src/solve.jl b/core/src/solve.jl index 8a86a4dd8..9e5f2c998 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -57,12 +57,12 @@ function water_balance!( return nothing end -function formulate_continuous_control!(du, p, t)::Nothing +function formulate_continuous_control!(du::Vector, p::Parameters, t::Number)::Nothing (; compound_variable, target_ref, func) = 
p.p_non_diff.continuous_control for (cvar, ref, func_) in zip(compound_variable, target_ref, func) value = compound_variable_value(cvar, p, du, t) - set_value!(ref, func_(value), du) + set_value!(ref, p, func_(value)) end return nothing end @@ -227,7 +227,7 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) set_error!(pid_control, p, t) - for (i, id) in enumerate(node_id) + for (i, _) in enumerate(node_id) if !(active[i] || all_nodes_active) du_integral[i] = 0.0 u_integral[i] = 0.0 @@ -620,7 +620,7 @@ function formulate_flow!( (; state_ranges, cache_ranges) = p_non_diff du_outlet = view(du, state_ranges.outlet) - cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) + cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_outlet) all_nodes_active = p_mutable.all_nodes_active[] for ( diff --git a/core/src/util.jl b/core/src/util.jl index 6a323e589..b7a036358 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -659,7 +659,7 @@ function set_target_ref!( controlled_variable::Vector{String}, state_ranges::StateRanges, graph::MetaGraph, -) +)::Nothing errors = false for (i, (id, variable)) in enumerate(zip(node_id, controlled_variable)) controlled_node_id = only(outneighbor_labels_type(graph, id, LinkType.control)) @@ -672,6 +672,7 @@ function set_target_ref!( if errors error("Errors encountered when setting continuously controlled variable refs.") end + return nothing end """ diff --git a/core/test/equations_test.jl b/core/test/equations_test.jl index cd3cf68e5..fa60fa7db 100644 --- a/core/test/equations_test.jl +++ b/core/test/equations_test.jl @@ -176,13 +176,11 @@ end @test config.solver.dt === model.integrator.dt Ribasim.solve!(model) @test successful_retcode(model) - (; p_non_diff, diff_cache) = model.integrator.p + (; p_non_diff) = model.integrator.p (; flow_boundary, pump, cache_ranges) = p_non_diff q_boundary = flow_boundary.flow_rate[1].u[1] - pump_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) - 
q_pump = pump_flow_rate[1] - + q_pump = pump.flow_rate[1].u[1] storage_both = get_storages_and_levels(model).storage t = tsaves(model) tspan = model.integrator.sol.prob.tspan From 7c7401bbebbbcb38cc6c45c31919012419c749c8 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 17 Mar 2025 16:09:18 +0100 Subject: [PATCH 19/28] Nits --- core/src/Ribasim.jl | 1 - core/test/run_models_test.jl | 1 - 2 files changed, 2 deletions(-) diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl index 105f08369..603a862bf 100644 --- a/core/src/Ribasim.jl +++ b/core/src/Ribasim.jl @@ -15,7 +15,6 @@ For more granular access, see: module Ribasim # Requirements for automatic differentiation -using ADTypes: AutoForwardDiff, AutoFiniteDiff using DifferentiationInterface: AutoSparse, Constant, diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 7bda1a098..31f25a7b1 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -421,7 +421,6 @@ end end @testitem "ManningResistance" begin - using PreallocationTools: get_tmp using SciMLBase: successful_retcode using OrdinaryDiffEqCore: get_du using Ribasim: NodeID From 9321621181bfd40183be5f0b7148d0bf7b25c70a Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Mon, 17 Mar 2025 16:11:10 +0100 Subject: [PATCH 20/28] Remove PreallocationTools dep --- Manifest.toml | 4 ++-- Project.toml | 1 - core/Project.toml | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index d6f6858ab..28f7158a3 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.11.3" manifest_format = "2.0" -project_hash = "f1f6d1cb011ce63d93f72a0bbd984e2839b32e42" +project_hash = "08df9cf37f934d9c54c355f731e7bb06c158c106" [[deps.ADTypes]] git-tree-sha1 = "fb97701c117c8162e84dfcf80215caa904aef44f" @@ -2085,7 +2085,7 @@ weakdeps = ["Distributed"] DistributedExt = "Distributed" [[deps.Ribasim]] -deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", 
"CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", "Dates", "DiffEqBase", "DiffEqCallbacks", "DifferentiationInterface", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "PreallocationTools", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"] +deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", "CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", "Dates", "DiffEqBase", "DiffEqCallbacks", "DifferentiationInterface", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"] path = "core" uuid = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" version = "2025.2.0" diff --git a/Project.toml b/Project.toml index 264c52b1b..4dbb98580 100644 --- a/Project.toml +++ b/Project.toml @@ -51,7 +51,6 @@ OrdinaryDiffEqSDIRK = "2d112036-d095-4a1e-ab9a-08536f3ecdbf" OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a" OteraEngine = "b2d7f28f-acd6-4007-8b26-bc27716e5513" PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" -PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" Revise = "295af30f-e4ad-537b-8983-00126c2a3abe" Ribasim = 
"aac5e3d9-0b8f-4d4f-8241-b1a7a9632635" SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" diff --git a/core/Project.toml b/core/Project.toml index fb3d937e5..9127b14c1 100644 --- a/core/Project.toml +++ b/core/Project.toml @@ -38,7 +38,6 @@ OrdinaryDiffEqNonlinearSolve = "127b3ac7-2247-4354-8eb6-78cf4e7c58e8" OrdinaryDiffEqRosenbrock = "43230ef6-c299-4910-a778-202eb28ce4ce" OrdinaryDiffEqSDIRK = "2d112036-d095-4a1e-ab9a-08536f3ecdbf" OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a" -PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -87,7 +86,6 @@ OrdinaryDiffEqNonlinearSolve = "1.1" OrdinaryDiffEqRosenbrock = "1.1" OrdinaryDiffEqSDIRK = "1.1" OrdinaryDiffEqTsit5 = "1.1" -PreallocationTools = "0.4" SQLite = "1.5.1" SciMLBase = "2.36" SparseArrays = "1" From 93aa7a2b5c90a4d5292af724f6dda2e332bec743 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Tue, 18 Mar 2025 10:54:53 +0100 Subject: [PATCH 21/28] Small docs fix --- docs/dev/callstacks.qmd | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/dev/callstacks.qmd b/docs/dev/callstacks.qmd index 2c9fac73e..e142e572f 100644 --- a/docs/dev/callstacks.qmd +++ b/docs/dev/callstacks.qmd @@ -78,8 +78,9 @@ config = Ribasim.Config(toml_path) db_path = Ribasim.database_path(config) db = SQLite.DB(db_path) p = Ribasim.Parameters(db, config) -empty!(p.allocation.subnetwork_ids) -empty!(p.allocation.main_network_connections) +(; allocation) = p.p_non_diff +empty!(allocation.subnetwork_ids) +empty!(allocation.main_network_connections) graph, verts = tracecall((Ribasim,), Ribasim.initialize_allocation!, (p, db, config)) plot_graph(graph) ``` From f96feb587d6c58a7b7eb386c9751ed9b3109b30d Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Tue, 18 Mar 2025 12:37:42 +0100 Subject: [PATCH 22/28] Actually fix the call stacks in the docs 
this time --- docs/dev/callstacks.qmd | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/dev/callstacks.qmd b/docs/dev/callstacks.qmd index e142e572f..32578cfc5 100644 --- a/docs/dev/callstacks.qmd +++ b/docs/dev/callstacks.qmd @@ -77,11 +77,12 @@ toml_path = normpath( config = Ribasim.Config(toml_path) db_path = Ribasim.database_path(config) db = SQLite.DB(db_path) -p = Ribasim.Parameters(db, config) -(; allocation) = p.p_non_diff +(; p_non_diff) = Ribasim.Parameters(db, config) +(; allocation) = p_non_diff empty!(allocation.subnetwork_ids) empty!(allocation.main_network_connections) -graph, verts = tracecall((Ribasim,), Ribasim.initialize_allocation!, (p, db, config)) +graph, verts = + tracecall((Ribasim,), Ribasim.initialize_allocation!, (p_non_diff, db, config)) plot_graph(graph) ``` @@ -107,7 +108,7 @@ toml_path = normpath(@__DIR__, "../../generated_testmodels/pump_discrete_control/ribasim.toml") model = Ribasim.Model(toml_path) (; u, t) = model.integrator -model.integrator.p.basin.storage0 .= [0.1, 100.0] +model.integrator.p.p_non_diff.basin.storage0 .= [0.1, 100.0] graph, verts = tracecall((Ribasim,), Ribasim.apply_discrete_control!, (u, t, model.integrator)) plot_graph(graph; prune_from = [:water_balance!], max_depth = 3) From 2a6df87c005b781de71a0d66c9936dc16c3f0507 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Tue, 18 Mar 2025 16:06:07 +0100 Subject: [PATCH 23/28] Fix validation docs --- docs/reference/validation.qmd | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/reference/validation.qmd b/docs/reference/validation.qmd index ec4a03fdc..f0005d227 100644 --- a/docs/reference/validation.qmd +++ b/docs/reference/validation.qmd @@ -16,7 +16,8 @@ using MarkdownTables node_names_snake_case = Symbol[] node_names_camel_case = Symbol[] -for (node_name, node_type) in zip(fieldnames(Ribasim.Parameters), fieldtypes(Ribasim.Parameters)) +for (node_name, node_type) in + 
zip(fieldnames(Ribasim.ParametersNonDiff), fieldtypes(Ribasim.ParametersNonDiff)) if node_type <: Ribasim.AbstractParameterNode push!(node_names_snake_case, node_name) push!(node_names_camel_case, nameof(node_type)) @@ -27,13 +28,14 @@ function to_symbol(b::Bool)::String return b ? "✓" : "x" end - df = DataFrame() df[!, :downstream] = node_names_snake_case for node_name in node_names_snake_case - df[!, node_name] = - [(to_symbol(node_name_ in Ribasim.neighbortypes(node_name))) for node_name_ in node_names_snake_case] + df[!, node_name] = [ + (to_symbol(node_name_ in Ribasim.neighbortypes(node_name))) for + node_name_ in node_names_snake_case + ] end markdown_table(df) @@ -70,11 +72,9 @@ for node_name in node_names_camel_case push!(control_in_max, unbounded(bounds_control.in_max)) push!(control_out_min, string(bounds_control.out_min)) push!(control_out_max, unbounded(bounds_control.out_max)) - end -df = DataFrame( - ; +df = DataFrame(; node_type = node_names_snake_case, flow_in_min, flow_in_max, From 46735b03711cdfb790ba2ebe1b00d67bfa633e15 Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Tue, 18 Mar 2025 16:39:16 +0100 Subject: [PATCH 24/28] Fix discrete control table + type instability --- core/src/util.jl | 2 +- docs/reference/node/discrete-control.qmd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/util.jl b/core/src/util.jl index b7a036358..15cb17b02 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -52,7 +52,7 @@ end """ Compute the level of a basin given its storage. 
""" -function get_level_from_storage(basin::Basin, state_idx::Int, storage) +function get_level_from_storage(basin::Basin, state_idx::Int, storage::T)::T where {T} storage_to_level = basin.storage_to_level[state_idx] if storage >= 0 return storage_to_level(storage) diff --git a/docs/reference/node/discrete-control.qmd b/docs/reference/node/discrete-control.qmd index e0f2fbed8..96dbc014a 100644 --- a/docs/reference/node/discrete-control.qmd +++ b/docs/reference/node/discrete-control.qmd @@ -14,7 +14,7 @@ using MarkdownTables node_names = Symbol[] controllable_parameters = String[] -for node_type in fieldtypes(Ribasim.Parameters) +for node_type in fieldtypes(Ribasim.ParametersNonDiff) if node_type <: Ribasim.AbstractParameterNode node_name = nameof(node_type) controllable_fields = Ribasim.controllablefields(node_name) From b550ed28be93c10a62da2374d92cd910f1975e6d Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Wed, 19 Mar 2025 10:23:17 +0100 Subject: [PATCH 25/28] Fix allocation problem docs example --- docs/concept/allocation.qmd | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/concept/allocation.qmd b/docs/concept/allocation.qmd index 9d3305165..66533deaf 100644 --- a/docs/concept/allocation.qmd +++ b/docs/concept/allocation.qmd @@ -244,13 +244,14 @@ using SQLite toml_path = normpath(@__DIR__, "../../generated_testmodels/allocation_example/ribasim.toml") model = Ribasim.Model(toml_path) -(; u, p, t) = model.integrator +(; p, t) = model.integrator +(; allocation) = p.p_non_diff -allocation_model = p.allocation.allocation_models[1] +allocation_model = allocation.allocation_models[1] priority_idx = 1 -Ribasim.set_initial_values!(allocation_model, u, p, t) -Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, priority_idx) +Ribasim.set_initial_values!(allocation_model, p, t) +Ribasim.set_objective_demand_priority!(allocation_model, p, t, priority_idx) -println(p.allocation.allocation_models[1].problem) 
+println(allocation.allocation_models[1].problem) ``` From 78e049752c215c5ee323a352ece80d4079ec4dfc Mon Sep 17 00:00:00 2001 From: Bart de Koning Date: Wed, 19 Mar 2025 11:50:13 +0100 Subject: [PATCH 26/28] Use NamedTuple DiffCache --- Manifest.toml | 6 +-- core/src/allocation_optim.jl | 4 +- core/src/bmi.jl | 7 ++- core/src/callback.jl | 39 +++++----------- core/src/model.jl | 8 ++-- core/src/parameter.jl | 81 ++++++++++++++------------------ core/src/read.jl | 2 - core/src/solve.jl | 91 +++++++++++++----------------------- core/src/util.jl | 11 ++--- core/src/write.jl | 3 +- core/test/allocation_test.jl | 2 +- core/test/equations_test.jl | 2 +- core/test/io_test.jl | 4 +- core/test/run_models_test.jl | 29 +++++------- 14 files changed, 111 insertions(+), 178 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 28f7158a3..7f56837a2 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.11.3" manifest_format = "2.0" -project_hash = "08df9cf37f934d9c54c355f731e7bb06c158c106" +project_hash = "f7fa41237a3c78d42148159bc809f1a1d9367f72" [[deps.ADTypes]] git-tree-sha1 = "fb97701c117c8162e84dfcf80215caa904aef44f" @@ -547,9 +547,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "3999a7934cc723719e79ff7db33160b141297444" +git-tree-sha1 = "69a55d601c88c9340d8d9ea82ff40f6c677eac10" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.44" +version = "0.6.46" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index bcf369ce1..52544d78c 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -329,11 +329,11 @@ Get several variables associated with a basin: """ function get_basin_data(allocation_model::AllocationModel, p::Parameters, node_id::NodeID) (; p_non_diff, diff_cache) = p - (; graph, cache_ranges) = p_non_diff + (; 
graph) = p_non_diff (; Δt_allocation, subnetwork_id) = allocation_model @assert node_id.type == NodeType.Basin influx = mean_input_flows_subnetwork(p_non_diff, subnetwork_id)[(node_id, node_id)] - storage_basin = view(diff_cache, cache_ranges.current_storage)[node_id.idx] + storage_basin = diff_cache.current_storage[node_id.idx] control_inneighbors = inneighbor_labels_type(graph, node_id, LinkType.control) if isempty(control_inneighbors) level_demand_idx = 0 diff --git a/core/src/bmi.jl b/core/src/bmi.jl index 78bab8bae..fb33e483d 100644 --- a/core/src/bmi.jl +++ b/core/src/bmi.jl @@ -41,14 +41,13 @@ This uses a typeassert to ensure that the return type annotation doesn't create function BMI.get_value_ptr(model::Model, name::String)::Vector{Float64} (; u, p) = model.integrator (; p_non_diff, diff_cache) = p - (; state_ranges, cache_ranges, basin, user_demand, subgrid) = p_non_diff + (; state_ranges, basin, user_demand, subgrid) = p_non_diff (; infiltration, user_demand_inflow) = state_ranges - (; current_storage, current_level) = cache_ranges if name == "basin.storage" - unsafe_array(view(diff_cache, current_storage))::Vector{Float64} + diff_cache.current_storage elseif name == "basin.level" - unsafe_array(view(diff_cache, current_level))::Vector{Float64} + diff_cache.current_level elseif name == "basin.infiltration" basin.vertical_flux.infiltration::Vector{Float64} elseif name == "basin.drainage" diff --git a/core/src/callback.jl b/core/src/callback.jl index 9bb429462..e8e999ab9 100644 --- a/core/src/callback.jl +++ b/core/src/callback.jl @@ -178,7 +178,8 @@ end function update_concentrations!(u, t, integrator)::Nothing (; uprev, p, tprev, dt) = integrator (; p_non_diff, diff_cache) = p - (; basin, flow_boundary, state_ranges, cache_ranges) = p_non_diff + (; current_storage, current_level) = diff_cache + (; basin, flow_boundary, state_ranges) = p_non_diff (; vertical_flux, concentration_data) = basin (; evaporate_mass, cumulative_in, concentration_state, 
concentration, mass) = concentration_data @@ -253,9 +254,6 @@ function update_concentrations!(u, t, integrator)::Nothing end # Update the Basin concentrations again based on the removed mass - current_storage = view(diff_cache, cache_ranges.current_storage) - current_level = view(diff_cache, cache_ranges.current_level) - concentration_state .= mass ./ current_storage basin.storage_prev .= current_storage basin.level_prev .= current_level @@ -304,13 +302,9 @@ end Save the storages and levels at the latest t. """ function save_basin_state(u, t, integrator) - (; p) = integrator - (; p_non_diff, diff_cache) = p - (; cache_ranges) = p_non_diff - current_storage = view(diff_cache, cache_ranges.current_storage) - current_level = view(diff_cache, cache_ranges.current_level) + (; current_storage, current_level) = integrator.p.diff_cache du = get_du(integrator) - water_balance!(du, u, p, t) + water_balance!(du, u, integrator.p, t) SavedBasinState(; storage = copy(current_storage), level = copy(current_level), t) end @@ -395,16 +389,9 @@ function check_water_balance_error!( )::Nothing (; u, p, t) = integrator (; p_non_diff, diff_cache) = p - (; - basin, - water_balance_abstol, - water_balance_reltol, - state_ranges, - cache_ranges, - starttime, - ) = p_non_diff + (; basin, water_balance_abstol, water_balance_reltol, state_ranges, starttime) = + p_non_diff errors = false - current_storage = view(diff_cache, cache_ranges.current_storage) # The initial storage is irrelevant for the storage rate and can only cause # floating point truncation errors @@ -430,7 +417,7 @@ function check_water_balance_error!( saved_flow.drainage, evaporation, infiltration, - current_storage, + diff_cache.current_storage, basin.Δstorage_prev_saveat, basin.node_id, ) @@ -456,7 +443,7 @@ function check_water_balance_error!( error("Too large water balance error(s) detected at t = $t") end - @. basin.Δstorage_prev_saveat = current_storage + @. 
basin.Δstorage_prev_saveat = diff_cache.current_storage return nothing end @@ -474,13 +461,12 @@ end function check_negative_storage(u, t, integrator)::Nothing (; p) = integrator (; p_non_diff, diff_cache) = p - (; basin, cache_ranges) = p_non_diff - current_storage = view(diff_cache, cache_ranges.current_storage) + (; basin) = p_non_diff set_current_basin_properties!(u, p, t) errors = false for id in basin.node_id - if current_storage[id.idx] < 0 + if diff_cache.current_storage[id.idx] < 0 @error "Negative storage detected in $id" errors = true end @@ -680,7 +666,6 @@ end function update_subgrid_level!(integrator)::Nothing (; p, t) = integrator (; p_non_diff, diff_cache) = p - basin_level = view(diff_cache, p_non_diff.cache_ranges.current_level) subgrid = p_non_diff.subgrid # First update the all the subgrids with static h(h) relations @@ -689,7 +674,7 @@ function update_subgrid_level!(integrator)::Nothing subgrid.basin_index_static, subgrid.interpolations_static, ) - subgrid.level[level_index] = hh_itp(basin_level[basin_index]) + subgrid.level[level_index] = hh_itp(diff_cache.current_level[basin_index]) end # Then update the subgrids with dynamic h(h) relations for (level_index, basin_index, lookup) in zip( @@ -699,7 +684,7 @@ function update_subgrid_level!(integrator)::Nothing ) itp_index = lookup(t) hh_itp = subgrid.interpolations_time[itp_index] - subgrid.level[level_index] = hh_itp(basin_level[basin_index]) + subgrid.level[level_index] = hh_itp(diff_cache.current_level[basin_index]) end end diff --git a/core/src/model.jl b/core/src/model.jl index 8b0c7f504..786b905ff 100644 --- a/core/src/model.jl +++ b/core/src/model.jl @@ -55,7 +55,8 @@ function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) Constant(p_non_diff), Cache(diff_cache), Constant(p_mutable), - Constant(t), + Constant(t); + strict = Val(true), ) p_mutable.all_nodes_active = false @@ -82,7 +83,8 @@ function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver) 
Constant(u), Constant(p_non_diff), Cache(diff_cache), - Constant(p_mutable), + Constant(p_mutable); + strict = Val(true), ) tgrad(dT, u, p, t) = derivative!( water_balance!, @@ -223,7 +225,7 @@ function Model(config::Config)::Model # Previous level is used to estimate the minimum level that was attained during a time step # in limit_flow! - p_non_diff.basin.level_prev .= view(diff_cache, p_non_diff.cache_ranges.current_level) + p_non_diff.basin.level_prev .= diff_cache.current_level saveat = convert_saveat(config.solver.saveat, t_end) saveat isa Float64 && push!(tstops, range(0, t_end; step = saveat)) diff --git a/core/src/parameter.jl b/core/src/parameter.jl index f12374a5e..4da71500e 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -672,34 +672,17 @@ node_id: node ID of the Terminal node node_id::Vector{NodeID} end -""" -Collection of ranges that cover all the components of the cache vector `diff_cache`." - -It is used to create views of `diff_cache`, which allows `diff_cache` to be a vector -in stead of a custom struct which is compatible with more automatic differentiation backends. -""" -@kwdef struct CacheRanges - current_storage::UnitRange{Int64} = 1:0 - # Low storage factor for reducing flows out of drying basins - # given the current storages - current_low_storage_factor::UnitRange{Int64} = 1:0 - current_level::UnitRange{Int64} = 1:0 - current_area::UnitRange{Int64} = 1:0 - current_cumulative_precipitation::UnitRange{Int64} = 1:0 - current_cumulative_drainage::UnitRange{Int64} = 1:0 - flow_rate_pump::UnitRange{Int64} = 1:0 - flow_rate_outlet::UnitRange{Int64} = 1:0 - error_pid_control::UnitRange{Int64} = 1:0 -end - -function CacheRanges(nodes::NamedTuple) - n_basin = length(nodes.basin.node_id) - n_pump = length(nodes.pump.node_id) - n_outlet = length(nodes.outlet.node_id) - n_pid_control = length(nodes.pid_control.node_id) - lengths = [fill(n_basin, 6)..., n_pump, n_outlet, n_pid_control] - CacheRanges(ranges(lengths)...) 
-end +const DiffCache{T} = @NamedTuple{ + current_storage::Vector{T}, + current_low_storage_factor::Vector{T}, + current_level::Vector{T}, + current_area::Vector{T}, + current_cumulative_precipitation::Vector{T}, + current_cumulative_drainage::Vector{T}, + flow_rate_pump::Vector{T}, + flow_rate_outlet::Vector{T}, + error_pid_control::Vector{T}, +} where {T} @enumx DiffCacheType flow_rate_pump flow_rate_outlet basin_level @@ -709,19 +692,14 @@ end from_du::Bool = false end -function get_cache_view( - diff_cache::Vector, - type::DiffCacheType.T, - cache_ranges::CacheRanges, -) - cache_range = if type == DiffCacheType.flow_rate_pump - cache_ranges.flow_rate_pump +function get_cache_vector(diff_cache::DiffCache, type::DiffCacheType.T) + if type == DiffCacheType.flow_rate_pump + diff_cache.flow_rate_pump elseif type == DiffCacheType.flow_rate_outlet - cache_ranges.flow_rate_outlet + diff_cache.flow_rate_outlet else # type == DiffCacheType.basin_level - cache_ranges.current_level + diff_cache.current_level end - view(diff_cache, cache_range) end @kwdef struct SubVariable @@ -990,18 +968,27 @@ end node_id::Vector{NodeID} = NodeID[] # Range per states or cache component state_ranges::StateRanges = StateRanges() - cache_ranges::CacheRanges = CacheRanges() end -length_cache(p_non_diff::ParametersNonDiff) = - 6 * length(p_non_diff.basin.node_id) + - length(p_non_diff.pump.node_id) + - length(p_non_diff.outlet.node_id) + - length(p_non_diff.pid_control.node_id) +function DiffCache(p_non_diff::ParametersNonDiff) + (; basin, pump, outlet, pid_control) = p_non_diff + n_basin = length(basin.node_id) + return (; + current_storage = zeros(n_basin), + current_low_storage_factor = zeros(n_basin), + current_level = zeros(n_basin), + current_area = zeros(n_basin), + current_cumulative_precipitation = zeros(n_basin), + current_cumulative_drainage = zeros(n_basin), + flow_rate_pump = zeros(length(pump.node_id)), + flow_rate_outlet = zeros(length(outlet.node_id)), + error_pid_control = 
zeros(length(pid_control.node_id)), + ) +end @kwdef struct Parameters{C1, C2, C3, C4, C5, T} p_non_diff::ParametersNonDiff{C1, C2, C3, C4, C5} - diff_cache::Vector{T} = zeros(length_cache(p_non_diff)) + diff_cache::DiffCache{T} = DiffCache(p_non_diff) p_mutable::ParametersMutable = ParametersMutable() end @@ -1009,11 +996,11 @@ function get_value(ref::DiffCacheRef, p::Parameters, du::Vector) if ref.from_du du[ref.idx] else - get_cache_view(p.diff_cache, ref.type, p.p_non_diff.cache_ranges)[ref.idx] + get_cache_vector(p.diff_cache, ref.type)[ref.idx] end end function set_value!(ref::DiffCacheRef, p::Parameters, value) @assert !ref.from_du - get_cache_view(p.diff_cache, ref.type, p.p_non_diff.cache_ranges)[ref.idx] = value + get_cache_vector(p.diff_cache, ref.type)[ref.idx] = value end diff --git a/core/src/read.jl b/core/src/read.jl index c2afb52e6..5af36b919 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -1818,7 +1818,6 @@ function Parameters(db::DB, config::Config)::Parameters node_id = reduce(vcat, u_ids) n_states = length(node_id) state_ranges = StateRanges(u_ids) - cache_ranges = CacheRanges(nodes) flow_to_storage = build_flow_to_storage(state_ranges, n_states, basin, connector_nodes) state_inflow_link, state_outflow_link = get_state_flow_links(graph, nodes) @@ -1851,7 +1850,6 @@ function Parameters(db::DB, config::Config)::Parameters u_prev_saveat = zeros(n_states), node_id, state_ranges, - cache_ranges, ) collect_control_mappings!(p_non_diff) diff --git a/core/src/solve.jl b/core/src/solve.jl index 9e5f2c998..ba14e87db 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -10,7 +10,7 @@ water_balance!( t::Number, u::Vector, p_non_diff::ParametersNonDiff, - diff_cache::Vector, + diff_cache::DiffCache, p_mutable::ParametersMutable, ) = water_balance!(du, u, p_non_diff, diff_cache, p_mutable, t) @@ -19,7 +19,7 @@ function water_balance!( du::Vector, u::Vector, p_non_diff::ParametersNonDiff, - diff_cache::Vector, + diff_cache::DiffCache, 
p_mutable::ParametersMutable, t::Number, )::Nothing @@ -73,33 +73,28 @@ state u and the time t. """ function set_current_basin_properties!(u::Vector, p::Parameters, t::Number)::Nothing (; p_non_diff, diff_cache, p_mutable) = p - (; basin, cache_ranges) = p_non_diff + (; basin) = p_non_diff (; node_id, cumulative_precipitation, cumulative_drainage, vertical_flux) = basin - current_storage = view(diff_cache, cache_ranges.current_storage) - current_low_storage_factor = view(diff_cache, cache_ranges.current_low_storage_factor) - current_level = view(diff_cache, cache_ranges.current_level) - current_area = view(diff_cache, cache_ranges.current_area) - current_cumulative_precipitation = - view(diff_cache, cache_ranges.current_cumulative_precipitation) - current_cumulative_drainage = view(diff_cache, cache_ranges.current_cumulative_drainage) # The exact cumulative precipitation and drainage up to the t of this water_balance call dt = t - p_mutable.tprev for node_id in node_id fixed_area = basin_areas(basin, node_id.idx)[end] - current_cumulative_precipitation[node_id.idx] = + diff_cache.current_cumulative_precipitation[node_id.idx] = cumulative_precipitation[node_id.idx] + fixed_area * vertical_flux.precipitation[node_id.idx] * dt end - @. current_cumulative_drainage = cumulative_drainage + dt * vertical_flux.drainage + @. 
diff_cache.current_cumulative_drainage = + cumulative_drainage + dt * vertical_flux.drainage formulate_storages!(u, p, t) - for (id, s) in zip(basin.node_id, current_storage) + for (id, s) in zip(basin.node_id, diff_cache.current_storage) i = id.idx - current_low_storage_factor[i] = reduction_factor(s, LOW_STORAGE_THRESHOLD) - current_level[i] = get_level_from_storage(basin, i, s) - current_area[i] = basin.level_to_area[i](current_level[i]) + diff_cache.current_low_storage_factor[i] = + reduction_factor(s, LOW_STORAGE_THRESHOLD) + diff_cache.current_level[i] = get_level_from_storage(basin, i, s) + diff_cache.current_area[i] = basin.level_to_area[i](diff_cache.current_level[i]) end end @@ -110,36 +105,23 @@ function formulate_storages!( add_initial_storage::Bool = true, )::Nothing (; p_non_diff, diff_cache, p_mutable) = p - (; basin, flow_boundary, flow_to_storage, cache_ranges) = p_non_diff + (; basin, flow_boundary, flow_to_storage) = p_non_diff (; tprev) = p_mutable - current_storage = view(diff_cache, cache_ranges.current_storage) # Current storage: initial condition + # total inflows and outflows since the start # of the simulation if add_initial_storage - current_storage .= basin.storage0 + diff_cache.current_storage .= basin.storage0 else - current_storage .= 0.0 + diff_cache.current_storage .= 0.0 end - mul!(current_storage, flow_to_storage, u, 1, 1) - formulate_storage!(diff_cache, cache_ranges) - formulate_storage!(current_storage, tprev, t, flow_boundary) + mul!(diff_cache.current_storage, flow_to_storage, u, 1, 1) + diff_cache.current_storage .+= diff_cache.current_cumulative_precipitation + diff_cache.current_storage .+= diff_cache.current_cumulative_drainage + formulate_storage!(diff_cache.current_storage, tprev, t, flow_boundary) return nothing end -""" -The storage contributions of the forcings that are not part of the state. 
-""" -function formulate_storage!(diff_cache::Vector, cache_ranges::CacheRanges) - current_storage = view(diff_cache, cache_ranges.current_storage) - current_cumulative_precipitation = - view(diff_cache, cache_ranges.current_cumulative_precipitation) - current_cumulative_drainage = view(diff_cache, cache_ranges.current_cumulative_drainage) - - current_storage .+= current_cumulative_precipitation - current_storage .+= current_cumulative_drainage -end - """ Formulate storage contributions of flow boundaries. """ @@ -174,10 +156,9 @@ Currently at less than 0.1 m. """ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing (; p_non_diff, diff_cache) = p - (; basin, state_ranges, cache_ranges) = p_non_diff + (; basin, state_ranges) = p_non_diff (; vertical_flux) = basin - current_level = view(diff_cache, cache_ranges.current_level) - current_area = view(diff_cache, cache_ranges.current_area) + (; current_level, current_area) = diff_cache du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) @@ -200,28 +181,24 @@ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing end function set_error!(pid_control::PidControl, p::Parameters, t::Number) - (; p_non_diff, diff_cache) = p - (; cache_ranges) = p_non_diff + (; diff_cache) = p (; listen_node_id, target) = pid_control - current_level = view(diff_cache, cache_ranges.current_level) - error = view(diff_cache, cache_ranges.error_pid_control) for i in eachindex(listen_node_id) listened_node_id = listen_node_id[i] @assert listened_node_id.type == NodeType.Basin lazy"Listen node $listened_node_id is not a Basin." 
- error[i] = target[i](t) - current_level[listened_node_id.idx] + diff_cache.error_pid_control[i] = + target[i](t) - diff_cache.current_level[listened_node_id.idx] end end function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing (; p_non_diff, diff_cache, p_mutable) = p - (; state_ranges, cache_ranges, pid_control) = p_non_diff + (; state_ranges, pid_control) = p_non_diff (; node_id, active, target, listen_node_id) = pid_control du_integral = view(du, state_ranges.integral) u_integral = view(u, state_ranges.integral) - current_area = view(diff_cache, cache_ranges.current_area) - error = view(diff_cache, cache_ranges.error_pid_control) all_nodes_active = p_mutable.all_nodes_active[] @@ -234,7 +211,7 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) continue end - du_integral[i] = error[i] + du_integral[i] = diff_cache.error_pid_control[i] listened_node_id = listen_node_id[i] @@ -247,14 +224,14 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) if !iszero(K_d) # dlevel/dstorage = 1/area # TODO: replace by DataInterpolations.derivative(storage_to_level, storage) - area = current_area[listened_node_id.idx] + area = diff_cache.current_area[listened_node_id.idx] D = 1.0 - K_d / area else D = 1.0 end if !iszero(K_p) - flow_rate += K_p * error[i] / D + flow_rate += K_p * diff_cache.error_pid_control[i] / D end if !iszero(K_i) @@ -552,10 +529,9 @@ function formulate_flow!( continuous_control_type_::ContinuousControlType.T, )::Nothing (; p_non_diff, diff_cache, p_mutable) = p - (; state_ranges, cache_ranges) = p_non_diff + (; state_ranges) = p_non_diff du_pump = view(du, state_ranges.pump) - cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_pump) all_nodes_active = p_mutable.all_nodes_active[] for ( @@ -589,7 +565,7 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - cache_flow_rate[id.idx] + 
diff_cache.flow_rate_pump[id.idx] end inflow_id = inflow_link.link[1] @@ -617,10 +593,9 @@ function formulate_flow!( continuous_control_type_::ContinuousControlType.T, )::Nothing (; p_non_diff, diff_cache, p_mutable) = p - (; state_ranges, cache_ranges) = p_non_diff + (; state_ranges) = p_non_diff du_outlet = view(du, state_ranges.outlet) - cache_flow_rate = view(diff_cache, cache_ranges.flow_rate_outlet) all_nodes_active = p_mutable.all_nodes_active[] for ( @@ -654,7 +629,7 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - cache_flow_rate[id.idx] + diff_cache.flow_rate_outlet[id.idx] end inflow_id = inflow_link.link[1] @@ -719,8 +694,8 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb basin, allocation, state_ranges, - cache_ranges, ) = p_non_diff + (; current_storage, current_level) = diff_cache u_tabulated_rating_curve = view(u, state_ranges.tabulated_rating_curve) u_pump = view(u, state_ranges.pump) @@ -743,8 +718,6 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb # of reduction factors du = get_du(integrator) set_current_basin_properties!(du, p, t) - current_storage = view(diff_cache, cache_ranges.current_storage) - current_level = view(diff_cache, cache_ranges.current_level) # TabulatedRatingCurve flow is in [0, ∞) and can be inactive for (id, active) in zip(tabulated_rating_curve.node_id, tabulated_rating_curve.active) diff --git a/core/src/util.jl b/core/src/util.jl index 15cb17b02..ec499f23c 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -160,10 +160,8 @@ du: tells ForwardDiff whether this call is for differentiation or not """ function get_level(p::Parameters, node_id::NodeID, t::Number)::Number (; p_non_diff, diff_cache) = p - (; cache_ranges) = p_non_diff if node_id.type == NodeType.Basin - current_level = view(diff_cache, cache_ranges.current_level) - current_level[node_id.idx] + 
diff_cache.current_level[node_id.idx] elseif node_id.type == NodeType.LevelBoundary p_non_diff.level_boundary.level[node_id.idx](t) elseif node_id.type == NodeType.Terminal @@ -301,9 +299,7 @@ end # SparseConnectivityTracer overloads function get_low_storage_factor(p::Parameters, id::NodeID) - (; p_non_diff, diff_cache) = p - (; cache_ranges) = p_non_diff - current_low_storage_factor = view(diff_cache, cache_ranges.current_low_storage_factor) + (; current_low_storage_factor) = p.diff_cache if id.type == NodeType.Basin current_low_storage_factor[id.idx] else @@ -1008,8 +1004,7 @@ end Check whether any storages are negative given the state u. """ function isoutofdomain(u, p, t) - (; p_non_diff, diff_cache) = p - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + (; current_storage) = p.diff_cache formulate_storages!(u, p, t) any(<(0), current_storage) end diff --git a/core/src/write.jl b/core/src/write.jl index 10855fe75..fbb2b480d 100644 --- a/core/src/write.jl +++ b/core/src/write.jl @@ -102,8 +102,7 @@ function basin_state_table( model::Model, )::@NamedTuple{node_id::Vector{Int32}, level::Vector{Float64}} (; u, p, t) = model.integrator - (; p_non_diff, diff_cache) = p - current_level = view(diff_cache, p_non_diff.cache_ranges.current_level) + (; current_level) = p.diff_cache # ensure the levels are up-to-date set_current_basin_properties!(u, p, t) diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 0029dad97..a7e2bc67f 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -281,7 +281,7 @@ end model = Ribasim.run(toml_path) (; u, p, t) = model.integrator Ribasim.formulate_storages!(u, p, t) - current_storage = view(p.diff_cache, p.p_non_diff.cache_ranges.current_storage) + (; current_storage) = p.diff_cache @test current_storage ≈ Float32[ 1.0346908f6, diff --git a/core/test/equations_test.jl b/core/test/equations_test.jl index fa60fa7db..7c98a32ea 100644 --- 
a/core/test/equations_test.jl +++ b/core/test/equations_test.jl @@ -177,7 +177,7 @@ end Ribasim.solve!(model) @test successful_retcode(model) (; p_non_diff) = model.integrator.p - (; flow_boundary, pump, cache_ranges) = p_non_diff + (; flow_boundary, pump) = p_non_diff q_boundary = flow_boundary.flow_rate[1].u[1] q_pump = pump.flow_rate[1].u[1] diff --git a/core/test/io_test.jl b/core/test/io_test.jl index 7678c18a2..bd929edcf 100644 --- a/core/test/io_test.jl +++ b/core/test/io_test.jl @@ -138,7 +138,7 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) (; p_non_diff, diff_cache) = model.integrator.p - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + (; current_storage) = diff_cache storage1_begin = copy(current_storage) solve!(model) storage1_end = current_storage @@ -158,7 +158,7 @@ end model = Ribasim.Model(toml_path) (; p_non_diff, diff_cache) = model.integrator.p - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) + (; current_storage) = diff_cache storage2_begin = current_storage @test storage1_end ≈ storage2_begin end diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 31f25a7b1..630f9ca53 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -134,9 +134,8 @@ end model = Ribasim.run(toml_path) @test model isa Ribasim.Model (; p_non_diff, diff_cache) = model.integrator.p - (; basin, state_ranges, cache_ranges) = p_non_diff - current_storage = view(diff_cache, cache_ranges.current_storage) - @test current_storage ≈ [1000] + (; basin, state_ranges) = p_non_diff + @test diff_cache.current_storage ≈ [1000] @test basin.vertical_flux.precipitation == [0.0] @test basin.vertical_flux.drainage == [0.0] du = get_du(model.integrator) @@ -161,10 +160,10 @@ end du = get_du(integrator) (; u, p, t) = integrator (; p_non_diff, diff_cache) = p - (; basin, state_ranges, cache_ranges) = p_non_diff + (; basin, state_ranges) = p_non_diff 
Ribasim.water_balance!(du, u, p, t) - stor = view(diff_cache, cache_ranges.current_storage) + stor = diff_cache.current_storage prec = basin.vertical_flux.precipitation evap = view(du, state_ranges.evaporation) drng = basin.vertical_flux.drainage @@ -224,8 +223,7 @@ end @test successful_retcode(model) @test length(model.integrator.sol) == 2 # start and end - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) - @test current_storage ≈ Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = + @test diff_cache.current_storage ≈ Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = Sys.isapple() atol = 1.5 @test length(logger.logs) > 10 @@ -267,9 +265,8 @@ end (; p_non_diff, diff_cache) = model.integrator.p precipitation = p_non_diff.basin.vertical_flux.precipitation @test length(precipitation) == 4 - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) - @test current_storage ≈ Float32[698.6895, 698.143, 420.57407, 1334.486] atol = 2.0 skip = - Sys.isapple() + @test diff_cache.current_storage ≈ Float32[698.6895, 698.143, 420.57407, 1334.486] atol = + 2.0 skip = Sys.isapple() end @testitem "Allocation example model" begin @@ -324,8 +321,7 @@ end @test model isa Ribasim.Model @test successful_retcode(model) (; p_non_diff, diff_cache) = model.integrator.p - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) - @test current_storage ≈ Float32[368.31558, 365.68442] skip = Sys.isapple() + @test diff_cache.current_storage ≈ Float32[368.31558, 365.68442] skip = Sys.isapple() (; tabulated_rating_curve) = p_non_diff # The first node is static, the first interpolation object always applies index_itp1 = tabulated_rating_curve.current_interpolation_index[1] @@ -391,19 +387,18 @@ end (; u, p, t, sol) = integrator (; p_non_diff, diff_cache) = p (; p_non_diff, diff_cache) = model.integrator.p - current_storage = view(diff_cache, p_non_diff.cache_ranges.current_storage) day = 86400.0 - @test 
only(current_storage) ≈ 1000.0 + @test only(diff_cache.current_storage) ≈ 1000.0 # constant UserDemand withdraws to 0.9m or 900m3 due to min level = 0.9 BMI.update_until(model, 150day) formulate_storages!(u, p, t) - @test only(current_storage) ≈ 900 atol = 5 + @test only(diff_cache.current_storage) ≈ 900 atol = 5 # dynamic UserDemand withdraws to 0.5m or 500m3 due to min level = 0.5 BMI.update_until(model, 220day) formulate_storages!(u, p, t) - @test only(current_storage) ≈ 500 atol = 1 + @test only(diff_cache.current_storage) ≈ 500 atol = 1 # Trasient return factor flow = DataFrame(Ribasim.flow_table(model)) @@ -487,7 +482,7 @@ end du = get_du(model.integrator) (; p, t) = model.integrator (; p_non_diff, diff_cache) = p - current_level = view(diff_cache, p_non_diff.cache_ranges.current_level) + (; current_level) = diff_cache h_actual = current_level[1:50] x = collect(10.0:20.0:990.0) h_expected = standard_step_method(x, 5.0, 1.0, 0.04, h_actual[end], 1.0e-6) From 8735c4e03decc103f427c2507e162490f32b6108 Mon Sep 17 00:00:00 2001 From: Martijn Visser Date: Fri, 21 Mar 2025 11:40:06 +0100 Subject: [PATCH 27/28] Use latest version of the new dependencies --- Manifest.toml | 10 +++++----- core/Project.toml | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 7f56837a2..a2408748d 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.11.3" manifest_format = "2.0" -project_hash = "f7fa41237a3c78d42148159bc809f1a1d9367f72" +project_hash = "08df9cf37f934d9c54c355f731e7bb06c158c106" [[deps.ADTypes]] git-tree-sha1 = "fb97701c117c8162e84dfcf80215caa904aef44f" @@ -547,9 +547,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "69a55d601c88c9340d8d9ea82ff40f6c677eac10" +git-tree-sha1 = "cf6dcb4b21bdd893fd45be70791d6dc89ca16506" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.6.46" +version = "0.6.48" 
[deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -2333,9 +2333,9 @@ version = "2.23.1" [[deps.SparseMatrixColorings]] deps = ["ADTypes", "DataStructures", "DocStringExtensions", "LinearAlgebra", "Random", "SparseArrays"] -git-tree-sha1 = "97092c0a40d6033b7da27ea15bcf75fd5b446254" +git-tree-sha1 = "e0ae9189392572abe85bc9fd4ce35e772b1e1e10" uuid = "0a514795-09f3-496d-8182-132a7b665d35" -version = "0.4.13" +version = "0.4.14" weakdeps = ["Colors"] [deps.SparseMatrixColorings.extensions] diff --git a/core/Project.toml b/core/Project.toml index 9127b14c1..1a2838465 100644 --- a/core/Project.toml +++ b/core/Project.toml @@ -64,7 +64,7 @@ DataStructures = "0.18" Dates = "1" DiffEqBase = "6.155" DiffEqCallbacks = "3.6, 4" -DifferentiationInterface = "0.6.42" +DifferentiationInterface = "0.6.48" EnumX = "1.0" FiniteDiff = "2.21" Graphs = "1.9" @@ -90,7 +90,7 @@ SQLite = "1.5.1" SciMLBase = "2.36" SparseArrays = "1" SparseConnectivityTracer = "0.6.8" -SparseMatrixColorings = "0.4.13" +SparseMatrixColorings = "0.4.14" Statistics = "1" StructArrays = "0.6.13, 0.7" TOML = "1" From 8afa4bbb93fb43847149f271cc07cb184546719c Mon Sep 17 00:00:00 2001 From: Martijn Visser Date: Fri, 21 Mar 2025 12:52:11 +0100 Subject: [PATCH 28/28] Add `model.to_fews(region_dir)` (#2161) This adds a function `model.to_fews(region_dir)` that converts the network and results to files that Delft-FEWS can directly handle. It is marked as experimental for now. @gijsber is working on a Delft-FEWS configuration that can be used to visualize model results, to complement our existing tools. We'll likely add this configuration to this monorepo since it is generic. #2159 also pertains to this work. 
What is especially nice is the spatio-temporal support of Delft-FEWS, so we can make visualizations like this: ![image](https://github.com/user-attachments/assets/2e61bf82-0d7d-4558-a645-755d7e763b74) In theory we can support similar functionality with QGIS, but looking at the plots in https://github.com/Deltares/Ribasim/pull/1369 this would likely need work in QGIS itself. So this is really a quick win to be able to inspect models better. --------- Co-authored-by: Maarten Pronk --- python/ribasim/ribasim/model.py | 166 ++++++++++++++++++++++++----- python/ribasim/ribasim/utils.py | 20 ++-- python/ribasim/tests/test_model.py | 39 ++++++- 3 files changed, 190 insertions(+), 35 deletions(-) diff --git a/python/ribasim/ribasim/model.py b/python/ribasim/ribasim/model.py index df28aa10e..1b59416b9 100644 --- a/python/ribasim/ribasim/model.py +++ b/python/ribasim/ribasim/model.py @@ -1,6 +1,7 @@ import datetime import logging import shutil +import warnings from collections.abc import Generator from os import PathLike from pathlib import Path @@ -15,6 +16,7 @@ from pydantic import ( DirectoryPath, Field, + FilePath, PrivateAttr, field_serializer, model_validator, @@ -57,11 +59,11 @@ from ribasim.utils import ( MissingOptionalModule, UsedIDs, + _add_cf_attributes, _concat, _link_lookup, _node_lookup, _node_lookup_numpy, - _time_in_ns, ) from ribasim.validation import control_link_neighbor_amount, flow_link_neighbor_amount @@ -461,6 +463,55 @@ def _reset_contextvar(self) -> "Model": context_file_loading.set({}) return self + @property + def toml_path(self) -> FilePath: + """ + Get the path to the TOML file if it exists. + + Raises + ------ + FileNotFoundError + If the model has not been written to disk. + + Returns + ------- + FilePath + The path to the TOML file. 
+        """
+        if self.filepath is None:
+            raise FileNotFoundError("Model must be written to disk.")
+        return FilePath(self.filepath)
+
+    @property
+    def results_path(self) -> DirectoryPath:
+        """
+        Get the path to the results directory if it exists.
+
+        This checks for the presence of required result files in the directory.
+
+        Raises
+        ------
+        FileNotFoundError
+            If any of the required result files are missing.
+
+        Returns
+        -------
+        DirectoryPath
+            The path to the results directory.
+        """
+        toml_path = self.toml_path
+        results_dir = DirectoryPath(toml_path.parent / self.results_dir)
+        # This only checks results that are always written.
+        # Some results like allocation_flow.arrow are optional.
+        filenames = ["basin_state.arrow", "basin.arrow", "flow.arrow"]
+        for filename in filenames:
+            if not (results_dir / filename).is_file():
+                raise FileNotFoundError(
+                    f"Cannot find {filename} in '{results_dir}', "
+                    "perhaps the model needs to be run first."
+                )
+        return results_dir
+
     def plot_control_listen(self, ax):
         """Plot the implicit listen links of the model."""
         df_listen_link = pd.DataFrame(
@@ -645,29 +696,11 @@ def to_xugrid(self, add_flow: bool = False, add_allocation: bool = False):
 
         return uds
 
-    def _checked_toml_path(self) -> Path:
-        toml_path = self.filepath
-        if toml_path is None:
-            raise FileNotFoundError("Model must be written to disk to add results.")
-        return toml_path
-
     def _add_flow(self, uds, node_lookup):
-        toml_path = self._checked_toml_path()
-
-        results_path = toml_path.parent / self.results_dir
-        basin_path = results_path / "basin.arrow"
-        flow_path = results_path / "flow.arrow"
-
-        if not basin_path.is_file() or not flow_path.is_file():
-            raise FileNotFoundError(
-                f"Cannot find results in '{results_path}', "
-                "perhaps the model needs to be run first."
- ) - + basin_path = self.results_path / "basin.arrow" + flow_path = self.results_path / "flow.arrow" basin_df = pd.read_feather(basin_path, dtype_backend="pyarrow") flow_df = pd.read_feather(flow_path, dtype_backend="pyarrow") - _time_in_ns(basin_df) - _time_in_ns(flow_df) # add the xugrid dimension indices to the dataframes link_dim = uds.grid.edge_dimension @@ -691,10 +724,7 @@ def _add_flow(self, uds, node_lookup): return uds def _add_allocation(self, uds): - toml_path = self._checked_toml_path() - - results_path = toml_path.parent / self.results_dir - alloc_flow_path = results_path / "allocation_flow.arrow" + alloc_flow_path = self.results_path / "allocation_flow.arrow" if not alloc_flow_path.is_file(): raise FileNotFoundError( @@ -713,7 +743,6 @@ def _add_allocation(self, uds): ], dtype_backend="pyarrow", ) - _time_in_ns(alloc_flow_df) # add the xugrid link dimension index to the dataframe link_dim = uds.grid.edge_dimension @@ -738,3 +767,88 @@ def _add_allocation(self, uds): uds[varname] = da return uds + + def to_fews( + self, + region_home: str | PathLike[str], + add_network: bool = True, + add_results: bool = True, + ) -> None: + """ + Write the model network and results into files used by Delft-FEWS. + + ** Warning: This method is experimental and is likely to change. ** + + To run this method, the model needs to be written to disk, and have results. + The Node, Link and Basin / area tables are written to shapefiles in the REGION_HOME/Config directory. + The results are written to NetCDF files in the REGION_HOME/Modules directory. + The netCDF files are NetCDF4 with CF-conventions. + + Parameters + ---------- + region_home: str | PathLike[str] + Path to the Delft-FEWS REGION_HOME directory. + add_network: bool, optional + Write shapefiles representing the network, enabled by default. + add_results: bool, optional + Write the results to NetCDF files, enabled by default. 
+ """ + region_home = DirectoryPath(region_home) + if add_network: + self._network_to_fews(region_home) + if add_results: + self._results_to_fews(region_home) + + def _network_to_fews(self, region_home: DirectoryPath) -> None: + """Write the Node and Link tables to shapefiles for use in Delft-FEWS.""" + df_link = self.link.df + df_node = self.node_table().df + assert df_link is not None + assert df_node is not None + + df_basin_area = self.basin.area.df + if df_basin_area is None: + # Fall back to the Basin points if the area polygons are not set + df_basin_area = df_node[df_node["node_type"] == "Basin"] + + network_dir = region_home / "Config/MapLayerFiles/{ModelId}" + network_dir.mkdir(parents=True, exist_ok=True) + link_path = network_dir / "{ModelId}Links.shp" + node_path = network_dir / "{ModelId}Nodes.shp" + basin_area_path = network_dir / "{ModelId}Areas.shp" + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Normalized/laundered field name", RuntimeWarning + ) + warnings.filterwarnings( + "ignore", + "Column names longer than 10 characters will be truncated when saved to ESRI Shapefile.", + UserWarning, + ) + df_link.to_file(link_path) + df_node.to_file(node_path) + df_basin_area.to_file(basin_area_path) + + def _results_to_fews(self, region_home: DirectoryPath) -> None: + """Convert the model results to NetCDF with CF-conventions for importing into Delft-FEWS.""" + # Delft-FEWS doesn't support our UGRID from `model.to_xugrid` yet, + # so we convert Arrow to regular CF-NetCDF4. 
+ + basin_path = self.results_path / "basin.arrow" + flow_path = self.results_path / "flow.arrow" + + basin_df = pd.read_feather(basin_path) + flow_df = pd.read_feather(flow_path) + + ds_basin = _add_cf_attributes( + basin_df.set_index(["time", "node_id"]).to_xarray(), "node_id" + ) + ds_flow = _add_cf_attributes( + flow_df.set_index(["time", "link_id"]).to_xarray(), "link_id" + ) + + results_dir = region_home / "Modules/ribasim/{ModelId}/work/results" + results_dir.mkdir(parents=True, exist_ok=True) + ds_basin.to_netcdf(results_dir / "basin.nc") + ds_flow.to_netcdf(results_dir / "flow.nc") diff --git a/python/ribasim/ribasim/utils.py b/python/ribasim/ribasim/utils.py index 757b88157..ae611cdf8 100644 --- a/python/ribasim/ribasim/utils.py +++ b/python/ribasim/ribasim/utils.py @@ -66,12 +66,6 @@ def _link_lookup(uds) -> Series[Int32]: ) -def _time_in_ns(df) -> None: - """Convert the time column to datetime64[ns] dtype.""" - # datetime64[ms] gives trouble; https://github.com/pydata/xarray/issues/6318 - df["time"] = df["time"].astype("datetime64[ns]") - - def _concat(dfs, **kwargs): """Concatenate DataFrames with a warning filter.""" with catch_warnings(): @@ -86,6 +80,20 @@ def _concat(dfs, **kwargs): return pd.concat(dfs, **kwargs) +def _add_cf_attributes(ds, timeseries_id): + """Add CF attributes to an xarray.Dataset.""" + ds.attrs.update( + { + "Conventions": "CF-1.8", + "title": "Ribasim model results", + "references": "https://ribasim.org", + } + ) + ds["time"].attrs.update({"standard_name": "time", "axis": "T"}) + ds[timeseries_id].attrs.update({"cf_role": "timeseries_id"}) + return ds + + class UsedIDs(BaseModel): """A helper class to manage globally unique node IDs. 
diff --git a/python/ribasim/tests/test_model.py b/python/ribasim/tests/test_model.py index e1e55ad61..24678cc30 100644 --- a/python/ribasim/tests/test_model.py +++ b/python/ribasim/tests/test_model.py @@ -75,6 +75,16 @@ def test_exclude_unset(basic): assert d["solver"]["saveat"] == 86400.0 +def test_toml_path(basic): + with pytest.raises(FileNotFoundError, match="Model must be written to disk"): + basic.toml_path + + +def test_results_path(basic): + with pytest.raises(FileNotFoundError, match="Model must be written to disk"): + basic.results_path + + def test_invalid_node_id(): with pytest.raises( ValueError, @@ -191,7 +201,7 @@ def test_indexing(basic): "model", [basic_model(), outlet_model(), pid_control_equation_model(), trivial_model()], ) -def test_xugrid(model, tmp_path): +def test_to_xugrid(model, tmp_path): uds = model.to_xugrid(add_flow=False) assert isinstance(uds, xugrid.UgridDataset) assert uds.grid.edge_dimension == "ribasim_nEdges" @@ -206,14 +216,37 @@ def test_xugrid(model, tmp_path): model.to_xugrid(add_flow=True) model.write(tmp_path / "ribasim.toml") - with pytest.raises(FileNotFoundError, match="Cannot find results"): + with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"): model.to_xugrid(add_flow=True) - with pytest.raises(FileNotFoundError, match="or allocation is not used"): + with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"): model.to_xugrid(add_flow=False, add_allocation=True) with pytest.raises(ValueError, match="Cannot add both allocation and flow results"): model.to_xugrid(add_flow=True, add_allocation=True) +@pytest.mark.parametrize( + "model", + [basic_model(), outlet_model(), pid_control_equation_model(), trivial_model()], +) +def test_to_fews(model, tmp_path): + region_home = tmp_path + network_dir = region_home / "Config/MapLayerFiles/{ModelId}" + + with pytest.raises(FileNotFoundError, match="Model must be written to disk"): + model.to_fews(region_home) + + model.write(tmp_path / 
"model/ribasim.toml") + model.to_fews(region_home, add_results=False) + assert (network_dir / "{ModelId}Links.dbf").is_file() + assert (network_dir / "{ModelId}Links.shp").is_file() + assert (network_dir / "{ModelId}Nodes.dbf").is_file() + assert (network_dir / "{ModelId}Nodes.shp").is_file() + + # Cannot test results=True without results + with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"): + model.to_fews(region_home, add_results=True) + + def test_to_crs(bucket: Model): model = bucket