diff --git a/Manifest.toml b/Manifest.toml
index 723041056..a2408748d 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -2,7 +2,7 @@

 julia_version = "1.11.3"
 manifest_format = "2.0"
-project_hash = "7591f8aefbf83678f96ece6aa98d0232f3c083ff"
+project_hash = "08df9cf37f934d9c54c355f731e7bb06c158c106"

 [[deps.ADTypes]]
 git-tree-sha1 = "fb97701c117c8162e84dfcf80215caa904aef44f"
@@ -547,9 +547,9 @@ version = "1.15.1"

 [[deps.DifferentiationInterface]]
 deps = ["ADTypes", "LinearAlgebra"]
-git-tree-sha1 = "258fa016b2d03f19e4d0d1cd8e30c84907af1528"
+git-tree-sha1 = "cf6dcb4b21bdd893fd45be70791d6dc89ca16506"
 uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
-version = "0.6.42"
+version = "0.6.48"

     [deps.DifferentiationInterface.extensions]
     DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore"
@@ -561,9 +561,10 @@ version = "0.6.42"
     DifferentiationInterfaceForwardDiffExt = ["ForwardDiff", "DiffResults"]
     DifferentiationInterfaceGTPSAExt = "GTPSA"
     DifferentiationInterfaceMooncakeExt = "Mooncake"
-    DifferentiationInterfacePolyesterForwardDiffExt = "PolyesterForwardDiff"
+    DifferentiationInterfacePolyesterForwardDiffExt = ["PolyesterForwardDiff", "ForwardDiff", "DiffResults"]
     DifferentiationInterfaceReverseDiffExt = ["ReverseDiff", "DiffResults"]
     DifferentiationInterfaceSparseArraysExt = "SparseArrays"
+    DifferentiationInterfaceSparseConnectivityTracerExt = "SparseConnectivityTracer"
     DifferentiationInterfaceSparseMatrixColoringsExt = "SparseMatrixColorings"
     DifferentiationInterfaceStaticArraysExt = "StaticArrays"
     DifferentiationInterfaceSymbolicsExt = "Symbolics"
@@ -585,6 +586,7 @@ version = "0.6.42"
     PolyesterForwardDiff = "98d1487c-24ca-40b6-b7ab-df2af84e126b"
     ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
     SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+    SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
     SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
     StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
     Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
@@ -2083,7 +2085,7 @@ weakdeps = ["Distributed"]
    DistributedExt = "Distributed"

 [[deps.Ribasim]]
-deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", "CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", "Dates", "DiffEqBase", "DiffEqCallbacks", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "PreallocationTools", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"]
+deps = ["ADTypes", "Accessors", "Arrow", "BasicModelInterface", "CodecZstd", "Configurations", "DBInterface", "DataInterpolations", "DataStructures", "Dates", "DiffEqBase", "DiffEqCallbacks", "DifferentiationInterface", "EnumX", "FiniteDiff", "Graphs", "HiGHS", "IterTools", "JuMP", "Legolas", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoggingExtras", "MetaGraphsNext", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqTsit5", "SQLite", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings", "StructArrays", "Tables", "TerminalLoggers", "TranscodingStreams"]
 path = "core"
 uuid = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635"
 version = "2025.2.0"
@@ -2291,9 +2293,9 @@ version = "1.11.0"

 [[deps.SparseConnectivityTracer]]
 deps = ["ADTypes", "DocStringExtensions", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"]
-git-tree-sha1 = "6651f4663027f3b30a31429d257185f56a571184"
+git-tree-sha1 = "e60ba2f27bf0c85cc20ac326a158182630032bb6"
 uuid = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
-version = "0.6.13"
+version = "0.6.14"

     [deps.SparseConnectivityTracer.extensions]
     SparseConnectivityTracerDataInterpolationsExt = "DataInterpolations"
@@ -2331,9 +2333,9 @@ version = "2.23.1"

 [[deps.SparseMatrixColorings]]
 deps = ["ADTypes", "DataStructures", "DocStringExtensions", "LinearAlgebra", "Random", "SparseArrays"]
-git-tree-sha1 = "97092c0a40d6033b7da27ea15bcf75fd5b446254"
+git-tree-sha1 = "e0ae9189392572abe85bc9fd4ce35e772b1e1e10"
 uuid = "0a514795-09f3-496d-8182-132a7b665d35"
-version = "0.4.13"
+version = "0.4.14"
 weakdeps = ["Colors"]

     [deps.SparseMatrixColorings.extensions]
diff --git a/Project.toml b/Project.toml
index 7dc910f42..4dbb98580 100644
--- a/Project.toml
+++ b/Project.toml
@@ -18,6 +18,7 @@ DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
 DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
 DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def"
+DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
 EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
 FindFirstFunctions = "64ca27bc-2ba2-4a57-88aa-44e436879224"
 FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
@@ -50,13 +51,13 @@ OrdinaryDiffEqSDIRK = "2d112036-d095-4a1e-ab9a-08536f3ecdbf"
 OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a"
 OteraEngine = "b2d7f28f-acd6-4007-8b26-bc27716e5513"
 PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
-PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46"
 Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
 Ribasim = "aac5e3d9-0b8f-4d4f-8241-b1a7a9632635"
 SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
+SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
 StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
 TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
 Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
diff --git a/core/Project.toml b/core/Project.toml
index 95420bc24..1a2838465 100644
--- a/core/Project.toml
+++ b/core/Project.toml
@@ -17,6 +17,7 @@ DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
 DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
 DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def"
+DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
 EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
 FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
 Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
@@ -37,11 +38,11 @@ OrdinaryDiffEqNonlinearSolve = "127b3ac7-2247-4354-8eb6-78cf4e7c58e8"
 OrdinaryDiffEqRosenbrock = "43230ef6-c299-4910-a778-202eb28ce4ce"
 OrdinaryDiffEqSDIRK = "2d112036-d095-4a1e-ab9a-08536f3ecdbf"
 OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a"
-PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46"
 SQLite = "0aa819cd-b072-5ff4-a722-6bc24af294d9"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
+SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
 StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
 Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
 TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
@@ -63,6 +64,7 @@ DataStructures = "0.18"
 Dates = "1"
 DiffEqBase = "6.155"
 DiffEqCallbacks = "3.6, 4"
+DifferentiationInterface = "0.6.48"
 EnumX = "1.0"
 FiniteDiff = "2.21"
 Graphs = "1.9"
@@ -84,11 +86,11 @@ OrdinaryDiffEqNonlinearSolve = "1.1"
 OrdinaryDiffEqRosenbrock = "1.1"
 OrdinaryDiffEqSDIRK = "1.1"
 OrdinaryDiffEqTsit5 = "1.1"
-PreallocationTools = "0.4"
 SQLite = "1.5.1"
 SciMLBase = "2.36"
 SparseArrays = "1"
 SparseConnectivityTracer = "0.6.8"
+SparseMatrixColorings = "0.4.14"
 Statistics = "1"
 StructArrays = "0.6.13, 0.7"
 TOML = "1"
diff --git a/core/src/Ribasim.jl b/core/src/Ribasim.jl
index bdade9934..603a862bf 100644
--- a/core/src/Ribasim.jl
+++ b/core/src/Ribasim.jl
@@ -14,6 +14,16 @@ For more granular access, see:
 """
 module Ribasim

+# Requirements for automatic differentiation
+using DifferentiationInterface:
+    AutoSparse,
+    Constant,
+    Cache,
+    prepare_jacobian,
+    jacobian!,
+    prepare_derivative,
+    derivative!
+
 # Algorithms for solving ODEs.
 using OrdinaryDiffEqCore: OrdinaryDiffEqCore, get_du, AbstractNLSolver
 using DiffEqBase: DiffEqBase, calculate_residuals!
@@ -36,7 +46,9 @@ using SciMLBase:

 # Automatically detecting the sparsity pattern of the Jacobian of water_balance!
 # through operator overloading
-using SparseConnectivityTracer: TracerSparsityDetector, jacobian_sparsity, GradientTracer
+using SparseConnectivityTracer:
+    GradientTracer, TracerSparsityDetector, IndexSetGradientPattern
+using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern

 # For efficient sparse computations
 using SparseArrays: SparseMatrixCSC, spzeros
@@ -44,15 +56,6 @@ using SparseArrays: SparseMatrixCSC, spzeros
 # Linear algebra
 using LinearAlgebra: mul!

-# PreallocationTools is used because the RHS function (water_balance!) gets called with different input types
-# for u, du:
-# - Float64 for normal calls
-# - Dual numbers for automatic differentiation with ForwardDiff
-# - GradientTracer for automatic Jacobian sparsity detection with SparseConnectivityTracer
-# The computations inside the rhs go trough preallocated arrays of the required type which are created by LazyBufferCache.
-# Retrieving a cache from a LazyBufferCache looks like indexing: https://docs.sciml.ai/PreallocationTools/stable/#LazyBufferCache
-using PreallocationTools: LazyBufferCache
-
 # Interpolation functionality, used for e.g.
 # basin profiles and TabulatedRatingCurve. See also the node
 # references in the docs.
@@ -65,7 +68,8 @@ using DataInterpolations:
     integral,
     AbstractInterpolation,
     ExtrapolationType
-using DataInterpolations.ExtrapolationType: Constant, Periodic, Extension, Linear
+using DataInterpolations.ExtrapolationType:
+    Constant as ConstantExtrapolation, Periodic, Extension, Linear

 # Modeling language for Mathematical Optimization.
 # Used for allocation, see the docs: https://ribasim.org/dev/allocation.html
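Note on the removed PreallocationTools comment above: `LazyBufferCache` existed so that `water_balance!` could run with `Float64`, ForwardDiff `Dual`, or `GradientTracer` element types. DifferentiationInterface covers the same need with its `Cache` context, which the preparation step re-allocates at the right element type. A minimal sketch of that pattern with a toy function (my own example, not Ribasim code; assumes DifferentiationInterface ≥ 0.6 and ForwardDiff are installed):

```julia
using DifferentiationInterface: AutoForwardDiff, Cache, prepare_jacobian, jacobian!
import ForwardDiff

# In-place function with a scratch array `c`; DifferentiationInterface
# promotes `c` to the active element type (e.g. Dual) behind the scenes,
# replacing the LazyBufferCache indexing idiom.
function f!(y, x, c)
    c .= 2 .* x      # intermediate result lives in the cache
    y .= c .+ x
    return nothing
end

x, y, c = rand(3), zeros(3), zeros(3)
backend = AutoForwardDiff()
prep = prepare_jacobian(f!, y, backend, x, Cache(c))
J = zeros(3, 3)
jacobian!(f!, y, J, prep, backend, x, Cache(c))  # J == 3I since y = 3x
```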
""" function get_subnetwork_capacity( - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} - (; graph) = p + (; graph) = p_non_diff node_ids_subnetwork = graph[].node_ids[subnetwork_id] dict = Dict{Tuple{NodeID, NodeID}, Float64}() @@ -28,7 +28,7 @@ function get_subnetwork_capacity( # Find flow constraints for this link if is_flow_constraining(id_src.type) - node_src = getfield(p, graph[id_src].type) + node_src = getfield(p_non_diff, graph[id_src].type) max_flow_rate = node_src.max_flow_rate[id_src.idx] capacity_node_src = @@ -37,7 +37,7 @@ function get_subnetwork_capacity( capacity_link = min(capacity_link, capacity_node_src) end if is_flow_constraining(id_dst.type) - node_dst = getfield(p, graph[id_dst].type) + node_dst = getfield(p_non_diff, graph[id_dst].type) max_flow_rate = node_dst.max_flow_rate[id_dst.idx] capacity_node_dst = @@ -72,10 +72,10 @@ and subnetwork allocation network (defined by their capacity objects). """ function add_subnetwork_connections!( capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; allocation) = p + (; allocation) = p_non_diff (; main_network_connections) = allocation # Add the connections to the main network @@ -100,11 +100,11 @@ dictionary wrapper. The keys of this dictionary define the which links are used in the allocation optimization problem. """ function get_capacity( - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}} - capacity = get_subnetwork_capacity(p, subnetwork_id) - add_subnetwork_connections!(capacity, p, subnetwork_id) + capacity = get_subnetwork_capacity(p_non_diff, subnetwork_id) + add_subnetwork_connections!(capacity, p_non_diff, subnetwork_id) return capacity end @@ -176,10 +176,10 @@ flow over link <= link capacity function add_constraints_capacity!( problem::JuMP.Model, capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; main_network_connections) = p.allocation + (; main_network_connections) = p_non_diff.allocation main_network_source_links = main_network_connections[subnetwork_id] F = problem[:F] @@ -291,10 +291,10 @@ sum(flows out of basin) == sum(flows into basin) + flow from storage and vertica """ function add_constraints_conservation_node!( problem::JuMP.Model, - p::Parameters, + p_non_diff::ParametersNonDiff, subnetwork_id::Int32, )::Nothing - (; graph) = p + (; graph) = p_non_diff F = problem[:F] F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] @@ -405,7 +405,7 @@ end Construct the allocation problem for the current subnetwork as a JuMP model. 
""" function allocation_problem( - p::Parameters, + p_non_diff::ParametersNonDiff, sources::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource}, capacity::JuMP.Containers.SparseAxisArray{Float64, 2, Tuple{NodeID, NodeID}}, subnetwork_id::Int32, @@ -427,8 +427,8 @@ function allocation_problem( add_variables_flow_buffer!(problem, sources) # Add constraints to problem - add_constraints_conservation_node!(problem, p, subnetwork_id) - add_constraints_capacity!(problem, capacity, p, subnetwork_id) + add_constraints_conservation_node!(problem, p_non_diff, subnetwork_id) + add_constraints_capacity!(problem, capacity, p_non_diff, subnetwork_id) add_constraints_boundary_source!(problem, sources) add_constraints_main_network_source!(problem, sources) add_constraints_user_source!(problem, sources) @@ -443,17 +443,17 @@ Construct the JuMP.jl problem for allocation. """ function AllocationModel( subnetwork_id::Int32, - p::Parameters, + p_non_diff::ParametersNonDiff, sources::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource}, Δt_allocation::Float64, )::AllocationModel - capacity = get_capacity(p, subnetwork_id) + capacity = get_capacity(p_non_diff, subnetwork_id) source_priorities = unique(source.source_priority for source in values(sources)) sources = OrderedDict( link => source for (link, source) in sources if source.subnetwork_id == subnetwork_id ) - problem = allocation_problem(p, sources, capacity, subnetwork_id) + problem = allocation_problem(p_non_diff, sources, capacity, subnetwork_id) flow = JuMP.Containers.SparseAxisArray(Dict(only(problem[:F].axes) .=> 0.0)) return AllocationModel(; diff --git a/core/src/allocation_optim.jl b/core/src/allocation_optim.jl index a6ec614c9..52544d78c 100644 --- a/core/src/allocation_optim.jl +++ b/core/src/allocation_optim.jl @@ -29,13 +29,13 @@ Set the objective for the given demand priority. """ function set_objective_demand_priority!( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, demand_priority_idx::Int, )::Nothing (; problem, subnetwork_id, capacity) = allocation_model - (; graph, user_demand, flow_demand, allocation, basin) = p + (; p_non_diff) = p + (; graph, user_demand, flow_demand, allocation, basin) = p_non_diff (; node_id, demand_reduced) = user_demand (; main_network_connections, subnetwork_demands) = allocation F = problem[:F] @@ -74,7 +74,8 @@ function set_objective_demand_priority!( has_external_demand(graph, to_node_id, :flow_demand) # FlowDemand if has_demand - flow_demand_priority_idx = get_external_demand_priority_idx(p, to_node_id) + flow_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, to_node_id) d = demand_priority_idx == flow_demand_priority_idx ? flow_demand.demand[demand_node_id.idx] : 0.0 @@ -88,10 +89,10 @@ function set_objective_demand_priority!( # Terms for LevelDemand nodes F_basin_in = problem[:F_basin_in] for node_id in only(F_basin_in.axes) - basin_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + basin_demand_priority_idx = get_external_demand_priority_idx(p_non_diff, node_id) d = basin_demand_priority_idx == demand_priority_idx ? 
-            get_basin_demand(allocation_model, u, p, t, node_id) : 0.0
+            get_basin_demand(allocation_model, p, t, node_id) : 0.0
         basin.demand[node_id.idx] = d
         F_ld = F_basin_in[node_id]
         add_objective_term!(ex, d, F_ld)
@@ -108,12 +109,12 @@ Assign the allocations to the UserDemand or subnetwork as determined by the solu
 """
 function assign_allocations!(
     allocation_model::AllocationModel,
-    p::Parameters,
+    p_non_diff::ParametersNonDiff,
     demand_priority_idx::Int,
     optimization_type::OptimizationType.T,
 )::Nothing
     (; subnetwork_id, capacity, flow) = allocation_model
-    (; graph, user_demand, allocation) = p
+    (; graph, user_demand, allocation) = p_non_diff
     (; subnetwork_demands, subnetwork_allocateds, main_network_connections) = allocation
     main_network_source_links = main_network_connections[subnetwork_id]
     for link in keys(capacity.data)
@@ -163,11 +164,11 @@ allocate: the total flow allocated to this inlet from the main network
 """
 function set_initial_capacities_inlet!(
     allocation_model::AllocationModel,
-    p::Parameters,
+    p_non_diff::ParametersNonDiff,
     optimization_type::OptimizationType.T,
 )::Nothing
     (; sources) = allocation_model
-    (; allocation) = p
+    (; allocation) = p_non_diff
     (; subnetwork_id) = allocation_model
     (; subnetwork_allocateds, main_network_connections) = allocation

@@ -197,12 +198,12 @@ as the average flow over the last Δt_allocation of the source in the physical l
 """
 function set_initial_capacities_source!(
     allocation_model::AllocationModel,
-    p::Parameters,
+    p_non_diff::ParametersNonDiff,
 )::Nothing
     (; sources) = allocation_model
     (; subnetwork_id) = allocation_model

-    mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p, subnetwork_id)
+    mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p_non_diff, subnetwork_id)

     for link in keys(mean_input_flows_subnetwork_)
         source = sources[link]
@@ -242,11 +243,11 @@ user return flow and flow demand buffers.
""" function increase_source_capacities!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::AbstractFloat, )::Nothing (; problem, sources) = allocation_model - (; user_demand) = p + (; user_demand) = p_non_diff for source in values(sources) (; link) = source @@ -274,9 +275,9 @@ the smallest max_flow_rate of a node on this link """ function set_initial_capacities_link!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, )::Nothing - (; main_network_connections) = p.allocation + (; main_network_connections) = p_non_diff.allocation (; problem, capacity, subnetwork_id) = allocation_model constraints_capacity = problem[:capacity] main_network_source_links = main_network_connections[subnetwork_id] @@ -326,17 +327,13 @@ Get several variables associated with a basin: node does not exist) - The index of the basin """ -function get_basin_data( - allocation_model::AllocationModel, - p::Parameters, - u::Vector, - node_id::NodeID, -) - (; graph, basin) = p +function get_basin_data(allocation_model::AllocationModel, p::Parameters, node_id::NodeID) + (; p_non_diff, diff_cache) = p + (; graph) = p_non_diff (; Δt_allocation, subnetwork_id) = allocation_model @assert node_id.type == NodeType.Basin - influx = mean_input_flows_subnetwork(p, subnetwork_id)[(node_id, node_id)] - storage_basin = basin.current_properties.current_storage[u][node_id.idx] + influx = mean_input_flows_subnetwork(p_non_diff, subnetwork_id)[(node_id, node_id)] + storage_basin = diff_cache.current_storage[node_id.idx] control_inneighbors = inneighbor_labels_type(graph, node_id, LinkType.control) if isempty(control_inneighbors) level_demand_idx = 0 @@ -356,15 +353,15 @@ Storages are converted to flows by dividing by the allocation timestep. """ function get_basin_capacity( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, node_id::NodeID, )::Float64 - (; level_demand) = p + (; p_non_diff) = p + (; level_demand, basin) = p_non_diff @assert node_id.type == NodeType.Basin storage_basin, Δt_allocation, influx, level_demand_idx, basin_idx = - get_basin_data(allocation_model, p, u, node_id) + get_basin_data(allocation_model, p, node_id) if iszero(level_demand_idx) return 0.0 else @@ -372,7 +369,7 @@ function get_basin_capacity( if isinf(level_max) storage_max = Inf else - storage_max = get_storage_from_level(p.basin, basin_idx, level_max) + storage_max = get_storage_from_level(basin, basin_idx, level_max) end return max(0.0, (storage_basin - storage_max) / Δt_allocation + influx) end @@ -386,20 +383,19 @@ Storages are converted to flows by dividing by the allocation timestep. 
""" function get_basin_demand( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, node_id::NodeID, )::Float64 - (; level_demand) = p + (; level_demand, basin) = p.p_non_diff @assert node_id.type == NodeType.Basin storage_basin, Δt_allocation, influx, level_demand_idx, basin_idx = - get_basin_data(allocation_model, p, u, node_id) + get_basin_data(allocation_model, p, node_id) if iszero(level_demand_idx) return 0.0 else level_min = level_demand.min_level[level_demand_idx](t) - storage_min = get_storage_from_level(p.basin, basin_idx, level_min) + storage_min = get_storage_from_level(basin, basin_idx, level_min) return max(0.0, (storage_min - storage_basin) / Δt_allocation - influx) end end @@ -410,7 +406,6 @@ vertical fluxes + the disk of storage above the maximum level / Δt_allocation """ function set_initial_capacities_basin!( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, )::Nothing @@ -420,7 +415,7 @@ function set_initial_capacities_basin!( for node_id in only(constraints_outflow.axes) source = sources[(node_id, node_id)] @assert source.type == AllocationSourceType.level_demand - source.capacity = get_basin_capacity(allocation_model, u, p, t, node_id) + source.capacity = get_basin_capacity(allocation_model, p, t, node_id) end return nothing end @@ -431,11 +426,11 @@ by either a coupled model or a timeseries """ function set_initial_demands_user!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing (; subnetwork_id) = allocation_model - (; graph, user_demand, allocation) = p + (; graph, user_demand, allocation) = p_non_diff (; node_id, demand_from_timeseries, demand_itp, demand, demand_reduced) = user_demand # Read the demand from the interpolated timeseries @@ -458,19 +453,18 @@ Set the initial demand of each basin in the subnetwork as """ function set_initial_demands_level!( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, )::Nothing (; subnetwork_id, problem) = allocation_model - (; graph, basin) = p + (; graph, basin) = p.p_non_diff (; demand) = basin node_ids_level_demand = only(problem[:basin_outflow].axes) for id in node_ids_level_demand if graph[id].subnetwork_id == subnetwork_id - demand[id.idx] = get_basin_demand(allocation_model, u, p, t, id) + demand[id.idx] = get_basin_demand(allocation_model, p, t, id) end end @@ -482,10 +476,10 @@ Set the initial capacities of the UserDemand return flow sources to 0. 
""" function set_initial_capacities_returnflow!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, )::Nothing (; problem, sources) = allocation_model - (; user_demand) = p + (; user_demand) = p_non_diff constraints_outflow = problem[:source_user] for node_id in only(constraints_outflow.axes) @@ -502,12 +496,12 @@ from the total flow demand (which will be used at the demand priority of the flo """ function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, demand_priority_idx::Int, user_demand::UserDemand, )::Nothing (; problem, subnetwork_id) = allocation_model - (; graph) = p + (; graph) = p_non_diff (; node_id, demand_reduced) = user_demand F = problem[:F] @@ -532,11 +526,11 @@ to obtain the reduced demand used for goal programming function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, ::Int, ::LevelDemand, )::Nothing - (; graph, basin) = p + (; graph, basin) = p_non_diff (; demand) = basin (; subnetwork_id, problem) = allocation_model F_basin_in = problem[:F_basin_in] @@ -557,10 +551,10 @@ interpolated value from the given timeseries. """ function set_initial_demands_flow!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, )::Nothing - (; flow_demand, graph) = p + (; flow_demand, graph) = p_non_diff (; subnetwork_id) = allocation_model for node_id in flow_demand.node_id @@ -578,11 +572,11 @@ Flow from any demand priority counts. """ function reduce_demands!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, ::Int, flow_demand::FlowDemand, )::Nothing - (; graph) = p + (; graph) = p_non_diff (; problem, subnetwork_id) = allocation_model F = problem[:F] @@ -625,12 +619,12 @@ Save the demands and allocated flows for UserDemand and Basin. Note: Basin supply (negative demand) is only saved for the first demand priority. """ function save_demands_and_allocations!( - p::Parameters, + p_non_diff::ParametersNonDiff, allocation_model::AllocationModel, t::Float64, demand_priority_idx::Int, )::Nothing - (; graph, allocation, user_demand, flow_demand, basin) = p + (; graph, allocation, user_demand, flow_demand, basin) = p_non_diff (; record_demand, demand_priorities_all, mean_realized_flows) = allocation (; subnetwork_id, sources, flow) = allocation_model node_ids = graph[].node_ids[subnetwork_id] @@ -651,7 +645,8 @@ function save_demands_and_allocations!( elseif node_id.type == NodeType.Basin && has_external_demand(graph, node_id, :level_demand)[1] # Basins with level demand - basin_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + basin_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, node_id) if demand_priority_idx == 1 || basin_demand_priority_idx == demand_priority_idx has_demand = true @@ -674,7 +669,8 @@ function save_demands_and_allocations!( has_external_demand(graph, node_id, :flow_demand) if has_demand # Full demand, not the possibly reduced demand - flow_demand_priority_idx = get_external_demand_priority_idx(p, node_id) + flow_demand_priority_idx = + get_external_demand_priority_idx(p_non_diff, node_id) demand = demand_priority_idx == flow_demand_priority_idx ? flow_demand.demand[flow_demand_node_id.idx,] : 0.0 @@ -702,14 +698,14 @@ end Save the allocation flows per basin and physical link. 
""" function save_allocation_flows!( - p::Parameters, + p_non_diff::ParametersNonDiff, t::Float64, allocation_model::AllocationModel, demand_priority::Int32, optimization_type::OptimizationType.T, )::Nothing (; flow, subnetwork_id, sources) = allocation_model - (; allocation, graph) = p + (; allocation, graph) = p_non_diff (; record_flow) = allocation links_allocation = keys(flow.data) @@ -784,11 +780,11 @@ end function allocate_to_users_from_connected_basin!( allocation_model::AllocationModel, - p::Parameters, + p_non_diff::ParametersNonDiff, demand_priority_idx::Int, )::Nothing (; flow, problem, sources) = allocation_model - (; graph, user_demand) = p + (; graph, user_demand) = p_non_diff # Get all UserDemand nodes from this subnetwork node_ids_user_demand = only(problem[:source_user].axes) @@ -876,13 +872,13 @@ Solve the allocation problem for a single (demand_priority, source_priority) pai function optimize_per_source!( allocation_model::AllocationModel, demand_priority_idx::Integer, - u::Vector, p::Parameters, t::AbstractFloat, optimization_type::OptimizationType.T, )::Nothing (; problem, sources, subnetwork_id, flow) = allocation_model - (; demand_priorities_all) = p.allocation + (; p_non_diff) = p + (; demand_priorities_all) = p_non_diff.allocation F_basin_in = problem[:F_basin_in] F_basin_out = problem[:F_basin_out] @@ -905,7 +901,7 @@ function optimize_per_source!( # of an existing objective function because this is not supported for # quadratic terms: # https://jump.dev/JuMP.jl/v1.16/manual/objective/#Modify-an-objective-coefficient - set_objective_demand_priority!(allocation_model, u, p, t, demand_priority_idx) + set_objective_demand_priority!(allocation_model, p, t, demand_priority_idx) # Set only the capacity of the current source to nonzero set_source_capacity!(allocation_model, source, optimization_type) @@ -925,15 +921,20 @@ function optimize_per_source!( end # Adjust capacities for the optimization for the next source - increase_source_capacities!(allocation_model, p, t) + increase_source_capacities!(allocation_model, p_non_diff, t) reduce_source_capacity!(problem, source) reduce_link_capacities!(allocation_model) # Adjust demands for next optimization (in case of internal_sources -> collect_demands) - for parameter in propertynames(p) - demand_node = getfield(p, parameter) + for parameter in propertynames(p_non_diff) + demand_node = getfield(p_non_diff, parameter) if demand_node isa AbstractDemandNode - reduce_demands!(allocation_model, p, demand_priority_idx, demand_node) + reduce_demands!( + allocation_model, + p_non_diff, + demand_priority_idx, + demand_node, + ) end end @@ -947,7 +948,7 @@ function optimize_per_source!( end # Adjust allocated flow to basins - increase_allocateds!(p.basin, problem) + increase_allocateds!(p_non_diff.basin, problem) end return nothing end @@ -970,14 +971,14 @@ end function optimize_demand_priority!( allocation_model::AllocationModel, - u::Vector, p::Parameters, t::Float64, demand_priority_idx::Int, optimization_type::OptimizationType.T, )::Nothing (; flow) = allocation_model - (; basin, allocation) = p + (; p_non_diff) = p + (; basin, allocation) = p_non_diff (; demand_priorities_all) = allocation # Start the values of the flows at this demand priority at 0.0 @@ -990,20 +991,29 @@ function optimize_demand_priority!( # Allocate to UserDemand nodes from the directly connected basin # This happens outside the JuMP optimization - allocate_to_users_from_connected_basin!(allocation_model, p, demand_priority_idx) + 
+    allocate_to_users_from_connected_basin!(
+        allocation_model,
+        p_non_diff,
+        demand_priority_idx,
+    )

     # Solve the allocation problem for this demand priority per source
-    optimize_per_source!(allocation_model, demand_priority_idx, u, p, t, optimization_type)
+    optimize_per_source!(allocation_model, demand_priority_idx, p, t, optimization_type)

     # Assign the allocations to the UserDemand or subnetwork for this demand priority
-    assign_allocations!(allocation_model, p, demand_priority_idx, optimization_type)
+    assign_allocations!(
+        allocation_model,
+        p_non_diff,
+        demand_priority_idx,
+        optimization_type,
+    )

     # Save the demands and allocated flows for all nodes that have these
-    save_demands_and_allocations!(p, allocation_model, t, demand_priority_idx)
+    save_demands_and_allocations!(p_non_diff, allocation_model, t, demand_priority_idx)

     # Save the flows over all links in the subnetwork
     save_allocation_flows!(
-        p,
+        p_non_diff,
         t,
         allocation_model,
         demand_priorities_all[demand_priority_idx],
@@ -1017,23 +1027,23 @@ Set the initial capacities and demands which are reduced by usage.
 """
 function set_initial_values!(
     allocation_model::AllocationModel,
-    u::Vector,
     p::Parameters,
     t::Float64,
 )::Nothing
-    set_initial_capacities_source!(allocation_model, p)
-    set_initial_capacities_link!(allocation_model, p)
-    set_initial_capacities_basin!(allocation_model, u, p, t)
+    (; p_non_diff) = p
+    set_initial_capacities_source!(allocation_model, p_non_diff)
+    set_initial_capacities_link!(allocation_model, p_non_diff)
+    set_initial_capacities_basin!(allocation_model, p, t)
     set_initial_capacities_buffer!(allocation_model)
-    set_initial_capacities_returnflow!(allocation_model, p)
+    set_initial_capacities_returnflow!(allocation_model, p_non_diff)

     for source in values(allocation_model.sources)
         source.capacity_reduced = source.capacity
     end

-    set_initial_demands_user!(allocation_model, p, t)
-    set_initial_demands_level!(allocation_model, u, p, t)
-    set_initial_demands_flow!(allocation_model, p, t)
+    set_initial_demands_user!(allocation_model, p_non_diff, t)
+    set_initial_demands_level!(allocation_model, p, t)
+    set_initial_demands_flow!(allocation_model, p_non_diff, t)

     return nothing
 end
@@ -1066,22 +1076,21 @@ function collect_demands!(
     p::Parameters,
     allocation_model::AllocationModel,
     t::Float64,
-    u::Vector,
 )::Nothing
-    (; allocation) = p
+    (; p_non_diff) = p
+    (; allocation) = p_non_diff
     (; subnetwork_id) = allocation_model
     (; demand_priorities_all, subnetwork_demands, main_network_connections) = allocation

     ## Find internal sources
     optimization_type = OptimizationType.internal_sources
-    set_initial_capacities_inlet!(allocation_model, p, optimization_type)
-    set_initial_values!(allocation_model, u, p, t)
+    set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type)
+    set_initial_values!(allocation_model, p, t)

     # Loop over demand priorities
     for demand_priority_idx in eachindex(demand_priorities_all)
         optimize_demand_priority!(
             allocation_model,
-            u,
             p,
             t,
             demand_priority_idx,
@@ -1101,7 +1110,7 @@ function collect_demands!(
         end
     end

-    set_initial_capacities_inlet!(allocation_model, p, optimization_type)
+    set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type)

     # When collecting demands, only flow should be available
     # from the main to subnetwork connections
@@ -1111,7 +1120,6 @@ function collect_demands!(
     for demand_priority_idx in eachindex(demand_priorities_all)
         optimize_demand_priority!(
             allocation_model,
-            u,
             p,
             t,
             demand_priority_idx,
@@ -1124,20 +1132,19 @@
 function allocate_demands!(
     p::Parameters,
     allocation_model::AllocationModel,
     t::Float64,
-    u::Vector,
 )::Nothing
     optimization_type = OptimizationType.allocate
-    (; demand_priorities_all) = p.allocation
+    (; p_non_diff) = p
+    (; demand_priorities_all) = p_non_diff.allocation

-    set_initial_capacities_inlet!(allocation_model, p, optimization_type)
+    set_initial_capacities_inlet!(allocation_model, p_non_diff, optimization_type)

-    set_initial_values!(allocation_model, u, p, t)
+    set_initial_values!(allocation_model, p, t)

     # Loop over the demand priorities
     for demand_priority_idx in eachindex(demand_priorities_all)
         optimize_demand_priority!(
             allocation_model,
-            u,
             p,
             t,
             demand_priority_idx,
diff --git a/core/src/bmi.jl b/core/src/bmi.jl
index e358ead75..fb33e483d 100644
--- a/core/src/bmi.jl
+++ b/core/src/bmi.jl
@@ -40,24 +40,26 @@ This uses a typeassert to ensure that the return type annotation doesn't create
 """
 function BMI.get_value_ptr(model::Model, name::String)::Vector{Float64}
     (; u, p) = model.integrator
-    (; infiltration, user_demand_inflow) = p.state_ranges
+    (; p_non_diff, diff_cache) = p
+    (; state_ranges, basin, user_demand, subgrid) = p_non_diff
+    (; infiltration, user_demand_inflow) = state_ranges
     if name == "basin.storage"
-        p.basin.current_properties.current_storage[u]::Vector{Float64}
+        diff_cache.current_storage
     elseif name == "basin.level"
-        p.basin.current_properties.current_level[u]::Vector{Float64}
+        diff_cache.current_level
     elseif name == "basin.infiltration"
-        p.basin.vertical_flux.infiltration::Vector{Float64}
+        basin.vertical_flux.infiltration::Vector{Float64}
     elseif name == "basin.drainage"
-        p.basin.vertical_flux.drainage::Vector{Float64}
+        basin.vertical_flux.drainage::Vector{Float64}
     elseif name == "basin.cumulative_infiltration"
         unsafe_array(view(u, infiltration))::Vector{Float64}
     elseif name == "basin.cumulative_drainage"
-        p.basin.cumulative_drainage::Vector{Float64}
+        basin.cumulative_drainage::Vector{Float64}
     elseif name == "basin.subgrid_level"
-        p.subgrid.level::Vector{Float64}
+        subgrid.level::Vector{Float64}
     elseif name == "user_demand.demand"
-        vec(p.user_demand.demand)::Vector{Float64}
+        vec(user_demand.demand)::Vector{Float64}
     elseif name == "user_demand.cumulative_inflow"
         unsafe_array(view(u, user_demand_inflow))::Vector{Float64}
     else
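For context, a sketch of how these pointers are consumed from the BMI side (the TOML path is hypothetical; `get_value_ptr` hands back a live array, so it tracks the simulation without copying):

```julia
import BasicModelInterface as BMI
using Ribasim

model = BMI.initialize(Ribasim.Model, "path/to/ribasim.toml")  # hypothetical path
storage = BMI.get_value_ptr(model, "basin.storage")

BMI.update_until(model, 86400.0)  # advance one day
storage  # reflects the current basin storages; no copy was made
```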
diff --git a/core/src/callback.jl b/core/src/callback.jl
index 75cd5e87c..e8e999ab9 100644
--- a/core/src/callback.jl
+++ b/core/src/callback.jl
@@ -5,12 +5,11 @@ are combined to a CallbackSet that goes to the integrator.
 Returns the CallbackSet and the SavedValues for flow.
 """
 function create_callbacks(
-    parameters::Parameters,
+    p_non_diff::ParametersNonDiff,
     config::Config,
-    u0::Vector,
     saveat,
 )::Tuple{CallbackSet, SavedResults}
-    (; starttime, basin, flow_boundary, level_boundary, user_demand) = parameters
+    (; starttime, basin, flow_boundary, level_boundary, user_demand) = p_non_diff
     callbacks = SciMLBase.DECallback[]

     # Check for negative storage
@@ -112,12 +111,13 @@ Specifically, we first use all the inflows to update the mass of the Basins, rec
 the Basin concentration(s) and then remove the mass that is being lost to the outflows.
 """
 function update_cumulative_flows!(u, t, integrator)::Nothing
+    (; p_non_diff, p_mutable) = integrator.p
     (; p, tprev, dt) = integrator
-    (; basin, flow_boundary, allocation) = p
+    (; basin, flow_boundary, allocation) = p_non_diff
     (; vertical_flux) = basin

     # Update tprev
-    p.tprev = t
+    p_mutable.tprev = t

     # Update cumulative forcings which are integrated exactly
     @. basin.cumulative_drainage += vertical_flux.drainage * dt
@@ -149,7 +149,8 @@ function update_cumulative_flows!(u, t, integrator)::Nothing

     # Update realized flows for allocation input
     for subnetwork_id in allocation.subnetwork_ids
-        mean_input_flows_subnetwork_ = mean_input_flows_subnetwork(p, subnetwork_id)
+        mean_input_flows_subnetwork_ =
+            mean_input_flows_subnetwork(p_non_diff, subnetwork_id)
         for link in keys(mean_input_flows_subnetwork_)
             mean_input_flows_subnetwork_[link] += flow_update_on_link(integrator, link)
         end
@@ -176,7 +177,9 @@ end

 function update_concentrations!(u, t, integrator)::Nothing
     (; uprev, p, tprev, dt) = integrator
-    (; basin, flow_boundary, state_ranges) = p
+    (; p_non_diff, diff_cache) = p
+    (; current_storage, current_level) = diff_cache
+    (; basin, flow_boundary, state_ranges) = p_non_diff
     (; vertical_flux, concentration_data) = basin
     (; evaporate_mass, cumulative_in, concentration_state, concentration, mass) =
         concentration_data
@@ -251,9 +254,9 @@ function update_concentrations!(u, t, integrator)::Nothing
     end

     # Update the Basin concentrations again based on the removed mass
-    concentration_state .= mass ./ basin.current_properties.current_storage[u]
-    basin.storage_prev .= basin.current_properties.current_storage[u]
-    basin.level_prev .= basin.current_properties.current_level[u]
+    concentration_state .= mass ./ current_storage
+    basin.storage_prev .= current_storage
+    basin.level_prev .= current_level
     return nothing
 end
@@ -267,7 +270,7 @@ function flow_update_on_link(
     link_src::Tuple{NodeID, NodeID},
 )::Float64
     (; u, uprev, p, t, tprev, dt) = integrator
-    (; basin, flow_boundary, state_ranges) = p
+    (; basin, flow_boundary, state_ranges) = p.p_non_diff
     (; vertical_flux) = basin

     u_evaporation = view(u, state_ranges.evaporation)
@@ -290,7 +293,7 @@ function flow_update_on_link(
             0.0
         end
     else
-        flow_idx = get_state_index(p.state_ranges, link_src)
+        flow_idx = get_state_index(state_ranges, link_src)
         u[flow_idx] - uprev[flow_idx]
     end
 end
@@ -299,12 +302,9 @@ end
 Save the storages and levels at the latest t.
 """
 function save_basin_state(u, t, integrator)
-    (; p) = integrator
-    (; basin) = p
+    (; current_storage, current_level) = integrator.p.diff_cache
     du = get_du(integrator)
-    current_storage = basin.current_properties.current_storage[du]
-    current_level = basin.current_properties.current_level[du]
-    water_balance!(du, u, p, t)
+    water_balance!(du, u, integrator.p, t)
     SavedBasinState(; storage = copy(current_storage), level = copy(current_level), t)
 end
""" function save_flow(u, t, integrator) (; p) = integrator - (; basin, state_inflow_link, state_outflow_link, flow_boundary, u_prev_saveat) = p + (; basin, state_inflow_link, state_outflow_link, flow_boundary, u_prev_saveat) = + p.p_non_diff Δt = get_Δt(integrator) flow_mean = (u - u_prev_saveat) / Δt @@ -387,13 +388,14 @@ function check_water_balance_error!( Δt::Float64, )::Nothing (; u, p, t) = integrator - (; basin, water_balance_abstol, water_balance_reltol, state_ranges) = p + (; p_non_diff, diff_cache) = p + (; basin, water_balance_abstol, water_balance_reltol, state_ranges, starttime) = + p_non_diff errors = false - current_storage = basin.current_properties.current_storage[u] # The initial storage is irrelevant for the storage rate and can only cause # floating point truncation errors - formulate_storages!(current_storage, u, u, p, t; add_initial_storage = false) + formulate_storages!(u, p, t; add_initial_storage = false) evaporation = view(saved_flow.flow, state_ranges.evaporation) infiltration = view(saved_flow.flow, state_ranges.infiltration) @@ -415,7 +417,7 @@ function check_water_balance_error!( saved_flow.drainage, evaporation, infiltration, - current_storage, + diff_cache.current_storage, basin.Δstorage_prev_saveat, basin.node_id, ) @@ -437,11 +439,11 @@ function check_water_balance_error!( saved_flow.relative_error[id.idx] = relative_error end if errors - t = datetime_since(t, p.starttime) + t = datetime_since(t, starttime) error("Too large water balance error(s) detected at t = $t") end - @. basin.Δstorage_prev_saveat = current_storage + @. basin.Δstorage_prev_saveat = diff_cache.current_storage return nothing end @@ -457,23 +459,21 @@ function save_solver_stats(u, t, integrator) end function check_negative_storage(u, t, integrator)::Nothing - (; basin) = integrator.p - (; node_id, current_properties) = basin - (; current_storage) = current_properties - du = get_du(integrator) - set_current_basin_properties!(du, u, integrator.p, t) - current_storage = current_storage[du] + (; p) = integrator + (; p_non_diff, diff_cache) = p + (; basin) = p_non_diff + set_current_basin_properties!(u, p, t) errors = false - for id in node_id - if current_storage[id.idx] < 0 + for id in basin.node_id + if diff_cache.current_storage[id.idx] < 0 @error "Negative storage detected in $id" errors = true end end if errors - t_datetime = datetime_since(integrator.t, integrator.p.starttime) + t_datetime = datetime_since(integrator.t, p_non_diff.starttime) error("Negative storages found at $t_datetime.") end return nothing @@ -493,7 +493,7 @@ Apply the discrete control logic. There's somewhat of a complex structure: """ function apply_discrete_control!(u, t, integrator)::Nothing (; p) = integrator - (; discrete_control) = p + (; discrete_control) = p.p_non_diff (; node_id, truth_state, compound_variables) = discrete_control du = get_du(integrator) water_balance!(du, u, p, t) @@ -542,7 +542,7 @@ function set_new_control_state!( truth_state::Vector{Bool}, )::Nothing (; p) = integrator - (; discrete_control) = p + (; discrete_control) = p.p_non_diff # Get the control state corresponding to the new truth state, # if one is defined @@ -579,11 +579,11 @@ Get a value for a condition. Currently supports getting levels from basins and f from flow boundaries. 
""" function get_value(subvariable::SubVariable, p::Parameters, du::AbstractVector, t::Float64) - (; flow_boundary, level_boundary, basin) = p - (; listen_node_id, look_ahead, variable, variable_ref) = subvariable + (; flow_boundary, level_boundary, basin) = p.p_non_diff + (; listen_node_id, look_ahead, variable, diff_cache_ref) = subvariable - if !iszero(variable_ref.idx) - return get_value(variable_ref, du) + if !iszero(diff_cache_ref.idx) + return get_value(diff_cache_ref, p, du) end if variable == "level" @@ -625,9 +625,11 @@ function compound_variable_value(compound_variable::CompoundVariable, p, du, t) return value end -function get_allocation_model(p::Parameters, subnetwork_id::Int32)::AllocationModel - (; allocation) = p - (; subnetwork_ids, allocation_models) = allocation +function get_allocation_model( + p_non_diff::ParametersNonDiff, + subnetwork_id::Int32, +)::AllocationModel + (; subnetwork_ids, allocation_models) = p_non_diff.allocation idx = findsorted(subnetwork_ids, subnetwork_id) if isnothing(idx) error("Invalid allocation network ID $subnetwork_id.") @@ -637,13 +639,14 @@ function get_allocation_model(p::Parameters, subnetwork_id::Int32)::AllocationMo end function set_control_params!(p::Parameters, node_id::NodeID, control_state::String)::Nothing - (; discrete_control, allocation) = p + (; discrete_control) = p.p_non_diff (; control_mappings) = discrete_control control_state_update = control_mappings[node_id.type][(node_id, control_state)] - (; active, scalar_update, itp_update) = control_state_update + (; active, scalar_update, itp_update_linear, itp_update_lookup) = control_state_update apply_parameter_update!(active) apply_parameter_update!.(scalar_update) - apply_parameter_update!.(itp_update) + apply_parameter_update!.(itp_update_linear) + apply_parameter_update!.(itp_update_lookup) return nothing end @@ -662,9 +665,8 @@ end function update_subgrid_level!(integrator)::Nothing (; p, t) = integrator - du = get_du(integrator) - basin_level = p.basin.current_properties.current_level[du] - subgrid = integrator.p.subgrid + (; p_non_diff, diff_cache) = p + subgrid = p_non_diff.subgrid # First update the all the subgrids with static h(h) relations for (level_index, basin_index, hh_itp) in zip( @@ -672,7 +674,7 @@ function update_subgrid_level!(integrator)::Nothing subgrid.basin_index_static, subgrid.interpolations_static, ) - subgrid.level[level_index] = hh_itp(basin_level[basin_index]) + subgrid.level[level_index] = hh_itp(diff_cache.current_level[basin_index]) end # Then update the subgrids with dynamic h(h) relations for (level_index, basin_index, lookup) in zip( @@ -682,14 +684,14 @@ function update_subgrid_level!(integrator)::Nothing ) itp_index = lookup(t) hh_itp = subgrid.interpolations_time[itp_index] - subgrid.level[level_index] = hh_itp(basin_level[basin_index]) + subgrid.level[level_index] = hh_itp(diff_cache.current_level[basin_index]) end end "Interpolate the levels and save them to SavedValues" function save_subgrid_level(u, t, integrator) update_subgrid_level!(integrator) - return copy(integrator.p.subgrid.level) + return copy(integrator.p.p_non_diff.subgrid.level) end "Update one current vertical flux from an interpolation at time t." @@ -715,7 +717,7 @@ in the ConstantInterpolations we use, failing the vertical_flux_means test. 
""" function update_basin!(integrator)::Nothing (; p, t) = integrator - (; basin) = p + (; basin) = p.p_non_diff update_basin!(basin, t) return nothing @@ -736,11 +738,11 @@ end "Load updates from 'Basin / concentration' into the parameters" function update_basin_conc!(integrator)::Nothing - (; p) = integrator - (; basin) = p + (; p_non_diff) = integrator.p + (; basin, starttime) = p_non_diff (; node_id, concentration_data, concentration_time) = basin (; concentration, substances) = concentration_data - t = datetime_since(integrator.t, integrator.p.starttime) + t = datetime_since(integrator.t, starttime) rows = searchsorted(concentration_time.time, t) timeblock = view(concentration_time, rows) @@ -756,12 +758,12 @@ end "Load updates from 'concentration' tables into the parameters" function update_conc!(integrator, parameter, nodetype)::Nothing - (; p) = integrator - node = getproperty(p, parameter) - (; basin) = p + (; p_non_diff) = integrator.p + (; basin, starttime) = p_non_diff + node = getproperty(p_non_diff, parameter) (; node_id, concentration, concentration_time) = node (; substances) = basin.concentration_data - t = datetime_since(integrator.t, integrator.p.starttime) + t = datetime_since(integrator.t, starttime) rows = searchsorted(concentration_time.time, t) timeblock = view(concentration_time, rows) @@ -783,14 +785,12 @@ update_userd_conc!(integrator)::Nothing = "Solve the allocation problem for all demands and assign allocated abstractions." function update_allocation!(integrator)::Nothing (; p, t, u) = integrator - (; allocation, basin) = p - (; current_storage) = basin.current_properties + (; p_non_diff) = p + (; allocation) = p_non_diff (; allocation_models, mean_input_flows, mean_realized_flows) = allocation # Make sure current storages are up to date - du = get_du(integrator) - current_storage = current_storage[du] - formulate_storages!(current_storage, du, u, p, t) + formulate_storages!(u, p, t) # Don't run the allocation algorithm if allocation is not active # (Specifically for running Ribasim via the BMI) @@ -814,7 +814,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present, collect demands of subnetworks if has_main_network(allocation) for allocation_model in Iterators.drop(allocation_models, 1) - collect_demands!(p, allocation_model, t, u) + collect_demands!(p, allocation_model, t) end end @@ -822,7 +822,7 @@ function update_allocation!(integrator)::Nothing # If a main network is present this is solved first, # which provides allocation to the subnetworks for allocation_model in allocation_models - allocate_demands!(p, allocation_model, t, u) + allocate_demands!(p, allocation_model, t) end # Reset the mean flows diff --git a/core/src/concentration.jl b/core/src/concentration.jl index 4cea74033..a6d470283 100644 --- a/core/src/concentration.jl +++ b/core/src/concentration.jl @@ -3,7 +3,7 @@ Process mass updates for UserDemand separately as the inflow and outflow are decoupled in the states """ function mass_updates_user_demand!(integrator::DEIntegrator)::Nothing - (; basin, user_demand) = integrator.p + (; basin, user_demand) = integrator.p.p_non_diff (; concentration_state, mass) = basin.concentration_data @views for (inflow_link, outflow_link) in @@ -35,7 +35,8 @@ end Process all mass inflows to basins """ function mass_inflows_basin!(integrator::DEIntegrator)::Nothing - (; basin, state_inflow_link, state_outflow_link, level_boundary) = integrator.p + (; basin, state_inflow_link, state_outflow_link, level_boundary) = + 
+        integrator.p.p_non_diff
     (; cumulative_in, concentration_state, mass) = basin.concentration_data

     for (inflow_link, outflow_link) in zip(state_inflow_link, state_outflow_link)
@@ -92,7 +93,7 @@ end
 Process all mass outflows from Basins
 """
 function mass_outflows_basin!(integrator::DEIntegrator)::Nothing
-    (; state_inflow_link, state_outflow_link, basin) = integrator.p
+    (; state_inflow_link, state_outflow_link, basin) = integrator.p.p_non_diff
     (; mass, concentration_state) = basin.concentration_data

     @views for (inflow_link, outflow_link) in zip(state_inflow_link, state_outflow_link)
diff --git a/core/src/config.jl b/core/src/config.jl
index 9c1415d49..38a9aaa6a 100644
--- a/core/src/config.jl
+++ b/core/src/config.jl
@@ -8,12 +8,12 @@ Ribasim.config is a submodule mainly to avoid name clashes between the configura
 """
 module config

+using ADTypes: AutoForwardDiff, AutoFiniteDiff
 using Configurations: Configurations, @option, from_toml, @type_alias
 using DataStructures: DefaultDict
 using Dates: DateTime
 using Logging: LogLevel, Debug, Info, Warn, Error
 using ..Ribasim: Ribasim, isnode, nodetype
-using ADTypes: AutoForwardDiff, AutoFiniteDiff
 using OrdinaryDiffEqCore: OrdinaryDiffEqAlgorithm, OrdinaryDiffEqNewtonAdaptiveAlgorithm
 using OrdinaryDiffEqNonlinearSolve: NLNewton
 using OrdinaryDiffEqLowOrderRK: Euler, RK4
@@ -25,6 +25,7 @@ using OrdinaryDiffEqRosenbrock: Rosenbrock23, Rodas4P, Rodas5P
 export Config, Solver, Results, Logging, Toml
 export algorithm,
     camel_case,
+    get_ad_type,
     snake_case,
     input_path,
     database_path,
@@ -290,6 +291,9 @@ function function_accepts_kwarg(f, kwarg)::Bool
     return false
 end

+get_ad_type(solver::Solver) =
+    solver.autodiff ? AutoForwardDiff(; tag = :Ribasim) : AutoFiniteDiff()
+
 "Create an OrdinaryDiffEqAlgorithm from solver config"
 function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm
     algotype = get(algorithms, solver.algorithm, nothing)
@@ -311,7 +315,7 @@ function algorithm(solver::Solver; u0 = [])::OrdinaryDiffEqAlgorithm
     end

     if function_accepts_kwarg(algotype, :autodiff)
-        kwargs[:autodiff] = get_ad_type(solver)
+        kwargs[:autodiff] = get_ad_type(solver)
     end

     algotype(; kwargs...)
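Side note on `get_ad_type`: centralizing the backend choice means the solver algorithm and the Jacobian/time-derivative preparation in `get_diff_eval` (model.jl, below) agree on the AD backend, including the custom ForwardDiff tag. A standalone sketch of the two branches (my example; only ADTypes is assumed):

```julia
using ADTypes: AutoForwardDiff, AutoFiniteDiff

autodiff = true  # stand-in for solver.autodiff from the TOML config
backend = autodiff ? AutoForwardDiff(; tag = :Ribasim) : AutoFiniteDiff()
# The :Ribasim tag keeps the ForwardDiff Dual types consistent between the
# ODE solver's internal differentiation and the prepared Jacobian.
```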
diff --git a/core/src/graph.jl b/core/src/graph.jl
index 6d34ac32f..836497847 100644
--- a/core/src/graph.jl
+++ b/core/src/graph.jl
@@ -39,7 +39,7 @@ function create_graph(db::DB, config::Config)::MetaGraph
         vertex_data_type = NodeMetadata,
         edge_data_type = LinkMetadata,
         graph_data = nothing,
-        weight_function,
+        weight_function = Returns(1.0),
     )
     for row in node_rows
         node_id = NodeID(row.node_type, row.node_id, node_table)
@@ -233,12 +233,12 @@ from the parameters, but integrated/averaged FlowBoundary flows must be provided
 """
 function get_flow(
     flow::Vector,
-    p::Parameters,
+    p_non_diff::ParametersNonDiff,
     t::Number,
     link::Tuple{NodeID, NodeID};
     boundary_flow = nothing,
 )
-    (; flow_boundary) = p
+    (; flow_boundary) = p_non_diff
     from_id = link[1]
     if from_id.type == NodeType.FlowBoundary
         if boundary_flow === nothing
@@ -248,13 +248,13 @@ function get_flow(
             boundary_flow[from_id.idx]
         end
     else
-        flow[get_state_index(p.state_ranges, link)]
+        flow[get_state_index(p_non_diff.state_ranges, link)]
     end
 end

 function get_influx(du::Vector, id::NodeID, p::Parameters)
     @assert id.type == NodeType.Basin
-    (; basin, state_ranges) = p
+    (; basin, state_ranges) = p.p_non_diff
     (; vertical_flux) = basin
     du_evaporation = view(du, state_ranges.evaporation)
     du_infiltration = view(du, state_ranges.infiltration)
diff --git a/core/src/logging.jl b/core/src/logging.jl
index 267f8671c..791337a48 100644
--- a/core/src/logging.jl
+++ b/core/src/logging.jl
@@ -29,7 +29,7 @@ end

 function log_bottlenecks(model; converged::Bool)
     (; cache, p, u) = model.integrator
-    (; basin) = p
+    (; p_non_diff) = p

     level = converged ? LoggingExtras.Info : LoggingExtras.Warn

@@ -41,7 +41,7 @@ function log_bottlenecks(model; converged::Bool)
     max_errors = 5
     # Iterate over the errors in descending order
     for i in sortperm(flow_error; rev = true)
-        node_id = Symbol(p.node_id[i])
+        node_id = Symbol(p_non_diff.node_id[i])
         error = flow_error[i]
         isnan(error) && continue # NaN are sorted as largest
         # Stop reporting errors if they are too small or too many
diff --git a/core/src/model.jl b/core/src/model.jl
index 5c9cf54ba..786b905ff 100644
--- a/core/src/model.jl
+++ b/core/src/model.jl
@@ -28,6 +28,80 @@ struct Model{T}
     end
 end

+"""
+Get the Jacobian evaluation function via DifferentiationInterface.jl.
+The time derivative is also supplied in case a Rosenbrock method is used.
+"""
+function get_diff_eval(du::Vector, u::Vector, p::Parameters, solver::Solver)
+    (; p_non_diff, diff_cache, p_mutable) = p
+    backend = get_ad_type(solver)
+    sparsity_detector = TracerSparsityDetector()
+
+    backend_jac = if solver.sparse
+        AutoSparse(backend; sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm())
+    else
+        backend
+    end
+
+    t = 0.0
+
+    # Activate all nodes to catch all possible state dependencies
+    p_mutable.all_nodes_active = true
+    jac_prep = prepare_jacobian(
+        water_balance!,
+        du,
+        backend_jac,
+        u,
+        Constant(p_non_diff),
+        Cache(diff_cache),
+        Constant(p_mutable),
+        Constant(t);
+        strict = Val(true),
+    )
+    p_mutable.all_nodes_active = false
+
+    jac_prototype = solver.sparse ? sparsity_pattern(jac_prep) : nothing
+
+    jac(J, u, p, t) = jacobian!(
+        water_balance!,
+        du,
+        J,
+        jac_prep,
+        backend_jac,
+        u,
+        Constant(p.p_non_diff),
+        Cache(diff_cache),
+        Constant(p.p_mutable),
+        Constant(t),
+    )
+
+    tgrad_prep = prepare_derivative(
+        water_balance!,
+        du,
+        backend,
+        t,
+        Constant(u),
+        Constant(p_non_diff),
+        Cache(diff_cache),
+        Constant(p_mutable);
+        strict = Val(true),
+    )
+    tgrad(dT, u, p, t) = derivative!(
+        water_balance!,
+        du,
+        dT,
+        tgrad_prep,
+        backend,
+        t,
+        Constant(u),
+        Constant(p.p_non_diff),
+        Cache(p.diff_cache),
+        Constant(p.p_mutable),
+    )
+
+    return jac_prototype, jac, tgrad
+end
+
 function Model(config_path::AbstractString)::Model
     config = Config(config_path)
     if !valid_config(config)
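For orientation, the pattern `get_diff_eval` uses, reduced to a standalone toy (my own example; assumes DifferentiationInterface, SparseConnectivityTracer, SparseMatrixColorings and ForwardDiff are loaded):

```julia
using DifferentiationInterface:
    AutoForwardDiff, AutoSparse, Cache, Constant, prepare_jacobian, jacobian!
using SparseConnectivityTracer: TracerSparsityDetector
using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern
import ForwardDiff

# Toy RHS with the same call shape as water_balance!: state, constant
# parameters, a scratch cache, and a constant time.
function rhs!(du, u, params, cache, t)
    cache .= u .* params
    du[1] = cache[1]
    du[2] = cache[1] + cache[2]  # only actual dependencies enter the pattern
    return nothing
end

u, du, cache, params = rand(2), zeros(2), zeros(2), [2.0, 3.0]
backend = AutoSparse(
    AutoForwardDiff();
    sparsity_detector = TracerSparsityDetector(),
    coloring_algorithm = GreedyColoringAlgorithm(),
)
prep = prepare_jacobian(
    rhs!, du, backend, u, Constant(params), Cache(cache), Constant(0.0)
)
jac_prototype = sparsity_pattern(prep)   # 2×2 lower-triangular pattern
J = similar(jac_prototype, Float64)
jacobian!(rhs!, du, J, prep, backend, u, Constant(params), Cache(cache), Constant(0.0))
```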
- if config.allocation.use_allocation && is_active(parameters.allocation) + if config.allocation.use_allocation && is_active(p_non_diff.allocation) set_initial_allocation_mean_flows!(integrator) end diff --git a/core/src/parameter.jl b/core/src/parameter.jl index eaff84bd5..4da71500e 100644 --- a/core/src/parameter.jl +++ b/core/src/parameter.jl @@ -151,20 +151,6 @@ const ScalarInterpolation = LinearInterpolation{ const IndexLookup = ConstantInterpolation{Vector{Int64}, Vector{Float64}, Vector{Float64}, Int64, (1,)} -set_zero!(v) = v .= zero(eltype(v)) -const Cache = LazyBufferCache{Returns{Int}, typeof(set_zero!)} - -""" -Cache for in place computations within water_balance!, with different eltypes -for different situations: -- Symbolics.Num for Jacobian sparsity detection -- ForwardDiff.Dual for automatic differentiation -- Float64 for normal calls - -The caches are always initialized with zeros -""" -cache(len::Int)::Cache = LazyBufferCache(Returns(len); initializer! = set_zero!) - @eval @enumx AllocationSourceType $(fieldnames(Ribasim.config.SourcePriority)...) # Support creating a AllocationSourceTuple enum instance from a symbol @@ -337,10 +323,12 @@ end """ The parameter update associated with a certain control state for discrete control """ -@kwdef struct ControlStateUpdate{T <: AbstractInterpolation} +@kwdef struct ControlStateUpdate active::ParameterUpdate{Bool} scalar_update::Vector{ParameterUpdate{Float64}} = ParameterUpdate{Float64}[] - itp_update::Vector{ParameterUpdate{T}} = ParameterUpdate{ScalarInterpolation}[] + itp_update_linear::Vector{ParameterUpdate{ScalarInterpolation}} = + ParameterUpdate{ScalarInterpolation}[] + itp_update_lookup::Vector{ParameterUpdate{IndexLookup}} = ParameterUpdate{IndexLookup}[] end """ @@ -384,23 +372,6 @@ abstract type AbstractParameterNode end abstract type AbstractDemandNode <: AbstractParameterNode end -""" -Caches of current basin properties -""" -struct CurrentBasinProperties - current_storage::Cache - # Low storage factor for reducing flows out of drying basins - # given the current storages - current_low_storage_factor::Cache - current_level::Cache - current_area::Cache - current_cumulative_precipitation::Cache - current_cumulative_drainage::Cache - function CurrentBasinProperties(n) - new((cache(n) for _ in 1:6)...) - end -end - @kwdef struct ConcentrationData # Config setting to enable/disable evaporation of mass evaporate_mass::Bool = true @@ -472,8 +443,6 @@ of vectors or Arrow Tables, and is added to avoid type instabilities. 
cumulative_drainage::Vector{Float64} = zeros(length(node_id)) cumulative_precipitation_saveat::Vector{Float64} = zeros(length(node_id)) cumulative_drainage_saveat::Vector{Float64} = zeros(length(node_id)) - # Cache this to avoid recomputation - current_properties::CurrentBasinProperties = CurrentBasinProperties(length(node_id)) # Discrete values for interpolation storage_to_level::Vector{ LinearInterpolationIntInv{ @@ -643,7 +612,6 @@ inflow_link: incoming flow link metadata outflow_link: outgoing flow link metadata The ID of the source node is always the ID of the Pump node active: whether this node is active and thus contributes flow -flow_rate_cache: target flow rate flow_rate: timeseries for transient flow data if available min_flow_rate: The minimal flow rate of the pump max_flow_rate: The maximum flow rate of the pump @@ -657,7 +625,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl inflow_link::Vector{LinkMetadata} = [] outflow_link::Vector{LinkMetadata} = [] active::Vector{Bool} = fill(true, length(node_id)) - flow_rate_cache::Cache = cache(length(node_id)) flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] min_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] max_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] @@ -666,40 +633,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} continuous_control_type::Vector{ContinuousControlType.T} = fill(ContinuousControlType.None, length(node_id)) - - function Pump( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - if valid_flow_rates(node_id, flow_rate[Float64[]], control_mapping) - return new( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - else - error("Invalid Pump flow rate(s).") - end - end end """ @@ -709,7 +642,6 @@ inflow_link: incoming flow link metadata. outflow_link: outgoing flow link metadata. 
The ID of the source node is always the ID of the Outlet node active: whether this node is active and thus contributes flow -flow_rate_cache: target flow rate flow_rate: timeseries for transient flow data if available min_flow_rate: The minimal flow rate of the outlet max_flow_rate: The maximum flow rate of the outlet @@ -723,7 +655,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl inflow_link::Vector{LinkMetadata} = [] outflow_link::Vector{LinkMetadata} = [] active::Vector{Bool} = fill(true, length(node_id)) - flow_rate_cache::Cache = cache(length(node_id)) flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] min_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] max_flow_rate::Vector{ScalarInterpolation} = ScalarInterpolation[] @@ -732,40 +663,6 @@ continuous_control_type: one of None, ContinuousControl, PidControl control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} = Dict() continuous_control_type::Vector{ContinuousControlType.T} = fill(ContinuousControlType.None, length(node_id)) - - function Outlet( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - if valid_flow_rates(node_id, flow_rate[Float64[]], control_mapping) - return new( - node_id, - inflow_link, - outflow_link, - active, - flow_rate_cache, - flow_rate, - min_flow_rate, - max_flow_rate, - min_upstream_level, - max_downstream_level, - control_mapping, - continuous_control_type, - ) - else - error("Invalid Outlet flow rate(s).") - end - end end """ @@ -775,30 +672,39 @@ node_id: node ID of the Terminal node node_id::Vector{NodeID} end -""" -A variant on `Base.Ref` where the source array is a vector that is possibly wrapped in a ForwardDiff.LazyBufferCache, -or a reference to the state derivative vector du. -Retrieve value with get_value(ref::PreallocationRef, val) where `val` determines the return type. -""" -struct PreallocationRef - vector::Cache - idx::Int - from_du::Bool - function PreallocationRef(vector::Cache, idx::Int; from_du = false) - new(vector, idx, from_du) +const DiffCache{T} = @NamedTuple{ + current_storage::Vector{T}, + current_low_storage_factor::Vector{T}, + current_level::Vector{T}, + current_area::Vector{T}, + current_cumulative_precipitation::Vector{T}, + current_cumulative_drainage::Vector{T}, + flow_rate_pump::Vector{T}, + flow_rate_outlet::Vector{T}, + error_pid_control::Vector{T}, +} where {T} + +@enumx DiffCacheType flow_rate_pump flow_rate_outlet basin_level + +@kwdef struct DiffCacheRef + type::DiffCacheType.T = DiffCacheType.flow_rate_pump + idx::Int = 0 + from_du::Bool = false +end + +function get_cache_vector(diff_cache::DiffCache, type::DiffCacheType.T) + if type == DiffCacheType.flow_rate_pump + diff_cache.flow_rate_pump + elseif type == DiffCacheType.flow_rate_outlet + diff_cache.flow_rate_outlet + else # type == DiffCacheType.basin_level + diff_cache.current_level end end -get_value(ref::PreallocationRef, du) = ref.from_du ? 
du[ref.idx] : ref.vector[du][ref.idx] - -function set_value!(ref::PreallocationRef, value, du)::Nothing - ref.vector[du][ref.idx] = value - return nothing -end - @kwdef struct SubVariable listen_node_id::NodeID - variable_ref::PreallocationRef + diff_cache_ref::DiffCacheRef variable::String weight::Float64 look_ahead::Float64 @@ -855,7 +761,7 @@ end node_id::Vector{NodeID} compound_variable::Vector{CompoundVariable} controlled_variable::Vector{String} - target_ref::Vector{PreallocationRef} + target_ref::Vector{DiffCacheRef} = Vector{DiffCacheRef}(undef, length(node_id)) func::Vector{ScalarInterpolation} end @@ -871,20 +777,17 @@ target_ref: reference to the controlled flow_rate value proportional: proportionality coefficient error integral: proportionality coefficient error integral derivative: proportionality coefficient error derivative -error: the current error; basin_target - current_level -dictionary from (node_id, control_state) to target flow rate +control_mapping: dictionary from (node_id, control_state) to target flow rate """ @kwdef struct PidControl <: AbstractParameterNode node_id::Vector{NodeID} active::Vector{Bool} listen_node_id::Vector{NodeID} target::Vector{ScalarInterpolation} - target_ref::Vector{PreallocationRef} + target_ref::Vector{DiffCacheRef} = Vector{DiffCacheRef}(undef, length(node_id)) proportional::Vector{ScalarInterpolation} integral::Vector{ScalarInterpolation} derivative::Vector{ScalarInterpolation} - error::Cache = cache(length(node_id)) - controlled_basins::Vector{NodeID} control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} end @@ -981,10 +884,6 @@ end current_interpolation_index::Vector{IndexLookup} end -# This function is the same as the MetaGraphsNext default anonymous function, -# but has a stable type for the type alias below. -weight_function(::Any) = 1.0 - """ The metadata of the graph (the fields of the NamedTuple) can be accessed e.g. using graph[].flow. @@ -1006,7 +905,7 @@ const ModelGraph = MetaGraph{ flow_links::Vector{LinkMetadata}, saveat::Float64, }, - typeof(weight_function), + Returns{Float64}, Float64, } @@ -1028,57 +927,80 @@ It is used to create views of `u`, and an low-latency alternative to making `u` integral::UnitRange{Int64} = 1:0 end -function StateRanges(u_ids::NamedTuple)::StateRanges - lengths = map(length, u_ids) - # from the lengths of the components - # construct [1:n_pump, (n_pump+1):(n_pump+n_outlet)] - # which are used to create views into the data array - bounds = pushfirst!(cumsum(lengths), 0) - ranges = [range(p[1] + 1, p[2]) for p in IterTools.partition(bounds, 2, 1)] - # standardize empty ranges to 1:0 for easier testing - replace!(x -> isempty(x) ? (1:0) : x, ranges) - return StateRanges(ranges...) -end +StateRanges(u_ids::NamedTuple) = StateRanges(ranges(map(length, collect(u_ids)))...) 
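The one-liner above delegates to a `ranges` helper that is not visible in this diff (presumably added elsewhere in this changeset); a plausible sketch, reproducing the logic of the removed method (cumulative bounds per component, empty ranges standardized to 1:0), is:

function ranges(lengths::Vector{Int})::Vector{UnitRange{Int64}}
    # From the component lengths construct
    # [1:n_pump, (n_pump + 1):(n_pump + n_outlet), ...],
    # which are used to create views into the state vector.
    bounds = pushfirst!(cumsum(lengths), 0)
    ranges_ = [(bounds[i] + 1):bounds[i + 1] for i in 1:(length(bounds) - 1)]
    # Standardize empty ranges to 1:0 for easier testing.
    replace!(x -> isempty(x) ? (1:0) : x, ranges_)
    return ranges_
end

ranges([2, 0, 3])  # [1:2, 1:0, 3:5]

The `DiffCacheRef` indirection introduced above can be illustrated the same way: a ref stores only an enum tag and an index, no array of a fixed element type, so the same ref resolves against a `DiffCache` of Float64 for regular calls or of dual/tracer numbers during differentiation (DifferentiationInterface's `Cache` context rebuilds the cache with the appropriate eltype). Using the definitions from this hunk, with made-up values:

diff_cache = (;
    current_storage = zeros(3),
    current_low_storage_factor = zeros(3),
    current_level = [1.2, 0.8, 2.0],
    current_area = zeros(3),
    current_cumulative_precipitation = zeros(3),
    current_cumulative_drainage = zeros(3),
    flow_rate_pump = [0.0, 2.5],
    flow_rate_outlet = zeros(0),
    error_pid_control = zeros(1),
)
ref = DiffCacheRef(; type = DiffCacheType.flow_rate_pump, idx = 2)
get_cache_vector(diff_cache, ref.type)[ref.idx]  # 2.5
# A ref with from_du = true instead indexes straight into du,
# as in get_value(ref, p, du) defined further below.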
-@kwdef mutable struct Parameters{C3, C4, C6, C7, C8} - const starttime::DateTime - const graph::ModelGraph - const allocation::Allocation - const basin::Basin{C3, C4} - const linear_resistance::LinearResistance - const manning_resistance::ManningResistance - const tabulated_rating_curve::TabulatedRatingCurve - const level_boundary::LevelBoundary{C6} - const flow_boundary::FlowBoundary{C7} - const pump::Pump - const outlet::Outlet - const terminal::Terminal - const discrete_control::DiscreteControl - const continuous_control::ContinuousControl - const pid_control::PidControl - const user_demand::UserDemand{C8} - const level_demand::LevelDemand - const flow_demand::FlowDemand - const subgrid::Subgrid - # Per state the in- and outflow links associated with that state (if they exist) - const state_inflow_link::Vector{LinkMetadata} = LinkMetadata[] - const state_outflow_link::Vector{LinkMetadata} = LinkMetadata[] +@kwdef mutable struct ParametersMutable all_nodes_active::Bool = false tprev::Float64 = 0.0 +end + +@kwdef struct ParametersNonDiff{C1, C2, C3, C4, C5} + starttime::DateTime + graph::ModelGraph + allocation::Allocation + basin::Basin{C1, C2} + linear_resistance::LinearResistance + manning_resistance::ManningResistance + tabulated_rating_curve::TabulatedRatingCurve + level_boundary::LevelBoundary{C3} + flow_boundary::FlowBoundary{C4} + pump::Pump + outlet::Outlet + terminal::Terminal + discrete_control::DiscreteControl + continuous_control::ContinuousControl + pid_control::PidControl + user_demand::UserDemand{C5} + level_demand::LevelDemand + flow_demand::FlowDemand + subgrid::Subgrid + # Per state the in- and outflow links associated with that state (if they exist) + state_inflow_link::Vector{LinkMetadata} = LinkMetadata[] + state_outflow_link::Vector{LinkMetadata} = LinkMetadata[] # Sparse matrix for combining flows into storages - const flow_to_storage::SparseMatrixCSC{Float64, Int64} = spzeros(1, 1) + flow_to_storage::SparseMatrixCSC{Float64, Int64} = spzeros(1, 1) # Water balance tolerances - const water_balance_abstol::Float64 - const water_balance_reltol::Float64 + water_balance_abstol::Float64 + water_balance_reltol::Float64 # State at previous saveat - const u_prev_saveat::Vector{Float64} = Float64[] + u_prev_saveat::Vector{Float64} = Float64[] # Node ID associated with each state - const node_id::Vector{NodeID} = NodeID[] - # Range per states component - const state_ranges::StateRanges = StateRanges() + node_id::Vector{NodeID} = NodeID[] + # Range per states or cache component + state_ranges::StateRanges = StateRanges() +end + +function DiffCache(p_non_diff::ParametersNonDiff) + (; basin, pump, outlet, pid_control) = p_non_diff + n_basin = length(basin.node_id) + return (; + current_storage = zeros(n_basin), + current_low_storage_factor = zeros(n_basin), + current_level = zeros(n_basin), + current_area = zeros(n_basin), + current_cumulative_precipitation = zeros(n_basin), + current_cumulative_drainage = zeros(n_basin), + flow_rate_pump = zeros(length(pump.node_id)), + flow_rate_outlet = zeros(length(outlet.node_id)), + error_pid_control = zeros(length(pid_control.node_id)), + ) +end + +@kwdef struct Parameters{C1, C2, C3, C4, C5, T} + p_non_diff::ParametersNonDiff{C1, C2, C3, C4, C5} + diff_cache::DiffCache{T} = DiffCache(p_non_diff) + p_mutable::ParametersMutable = ParametersMutable() +end + +function get_value(ref::DiffCacheRef, p::Parameters, du::Vector) + if ref.from_du + du[ref.idx] + else + get_cache_vector(p.diff_cache, ref.type)[ref.idx] + end end -# To opt-out of 
type checking for ForwardDiff -function DiffEqBase.anyeltypedual(::Parameters, ::Type{Val{counter}}) where {counter} - Any +function set_value!(ref::DiffCacheRef, p::Parameters, value) + @assert !ref.from_du + get_cache_vector(p.diff_cache, ref.type)[ref.idx] = value end diff --git a/core/src/read.jl b/core/src/read.jl index f5ccaa575..5af36b919 100644 --- a/core/src/read.jl +++ b/core/src/read.jl @@ -195,7 +195,7 @@ function parse_static_and_time( [val, val], trivial_timespan; cache_parameters = true, - extrapolation = Constant, + extrapolation = ConstantExtrapolation, ) end getfield(out, parameter_name)[node_id.idx] = val @@ -253,11 +253,11 @@ const conservative_nodetypes = Set{NodeType.T}([ ]) function get_allocation_sources_in_order!( - p::Parameters, + p_non_diff::ParametersNonDiff, db::DB, config::Config, )::OrderedDict{Tuple{NodeID, NodeID}, AllocationSource} - (; graph, user_demand, allocation) = p + (; graph, user_demand, allocation) = p_non_diff (; subnetwork_demands, subnetwork_allocateds, @@ -296,7 +296,7 @@ function get_allocation_sources_in_order!( # node which connects to multiple basins # - One source node can imply multiple sources in the case of a LevelBoundary with multiple # neighbors - node_id = NodeID(Symbol(row.node_type), row.node_id, p) + node_id = NodeID(Symbol(row.node_type), row.node_id, p_non_diff) links = Tuple{NodeID, NodeID}[] if !ismissing(row.subnetwork_id) # E.g. :manning_resistance @@ -337,7 +337,7 @@ function get_allocation_sources_in_order!( elseif row.subnetwork_id != 1 # Not in the main network for main_network_id in filter!( id -> graph[id].subnetwork_id == 1, # Connects to the main network - collect(inflow_ids(p.graph, node_id)), + collect(inflow_ids(graph, node_id)), ) is_source = true source_priority = default_source_priority_dict[:subnetwork_inlet] @@ -381,8 +381,12 @@ function get_allocation_sources_in_order!( ) end -function initialize_allocation!(p::Parameters, db::DB, config::Config)::Nothing - (; graph, allocation) = p +function initialize_allocation!( + p_non_diff::ParametersNonDiff, + db::DB, + config::Config, +)::Nothing + (; graph, allocation) = p_non_diff (; subnetwork_ids, allocation_models, main_network_connections) = allocation subnetwork_ids_ = sort(collect(keys(graph[].node_ids))) @@ -400,12 +404,12 @@ function initialize_allocation!(p::Parameters, db::DB, config::Config)::Nothing main_network_connections[subnetwork_id] = Tuple{NodeID, NodeID}[] end - sources = get_allocation_sources_in_order!(p, db, config) + sources = get_allocation_sources_in_order!(p_non_diff, db, config) for subnetwork_id in subnetwork_ids_ push!( allocation_models, - AllocationModel(subnetwork_id, p, sources, config.allocation.timestep), + AllocationModel(subnetwork_id, p_non_diff, sources, config.allocation.timestep), ) end return nothing @@ -494,10 +498,11 @@ function TabulatedRatingCurve( if !ismissing(control_state) # let control swap out the static lookup object index_lookup = static_lookup(interpolation_index) - control_mapping[(node_id, control_state)] = ControlStateUpdate( - ParameterUpdate(:active, is_active), - ParameterUpdate{Float64}[], - [ParameterUpdate(:current_interpolation_index, index_lookup)], + control_mapping[(node_id, control_state)] = ControlStateUpdate(; + active = ParameterUpdate(:active, is_active), + itp_update_lookup = [ + ParameterUpdate(:current_interpolation_index, index_lookup), + ], ) end push!(interpolations, interpolation) @@ -737,23 +742,28 @@ function Pump(db::DB, config::Config, graph::MetaGraph)::Pump end (; node_id) = 
parsed_parameters + continuous_control_type = get_continuous_control_type(graph, node_id) - # If flow rate is set by PidControl or ContinuousControl, it is part of the AD Jacobian computations - flow_rate_cache = cache(length(node_id)) - flow_rate_cache[Float64[]] .= [itp(0) for itp in parsed_parameters.flow_rate] + if !valid_flow_rates( + node_id, + parsed_parameters.flow_rate, + parsed_parameters.control_mapping, + ) + error("Invalid pump flow_rates found.") + end return Pump(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), outflow_link = outflow_link.(Ref(graph), node_id), parsed_parameters.active, - flow_rate_cache, parsed_parameters.flow_rate, parsed_parameters.min_flow_rate, parsed_parameters.max_flow_rate, parsed_parameters.min_upstream_level, parsed_parameters.max_downstream_level, parsed_parameters.control_mapping, + continuous_control_type, ) end @@ -789,30 +799,29 @@ function Outlet(db::DB, config::Config, graph::MetaGraph)::Outlet error("Errors occurred when parsing Outlet data.") end - node_id = - NodeID.( - NodeType.Outlet, - parsed_parameters.node_id, - eachindex(parsed_parameters.node_id), - ) + (; node_id) = parsed_parameters + continuous_control_type = get_continuous_control_type(graph, node_id) - # If flow rate is set by PidControl or ContinuousControl, it is part of the AD Jacobian computations - flow_rate_cache = cache(length(node_id)) - flow_rate_cache[Float64[], length(node_id)] .= - [itp(0) for itp in parsed_parameters.flow_rate] + if !valid_flow_rates( + node_id, + parsed_parameters.flow_rate, + parsed_parameters.control_mapping, + ) + error("Invalid outlet flow_rates found.") + end return Outlet(; node_id, inflow_link = inflow_link.(Ref(graph), node_id), outflow_link = outflow_link.(Ref(graph), node_id), parsed_parameters.active, - flow_rate_cache, parsed_parameters.flow_rate, parsed_parameters.min_flow_rate, parsed_parameters.max_flow_rate, parsed_parameters.control_mapping, parsed_parameters.min_upstream_level, parsed_parameters.max_downstream_level, + continuous_control_type, ) end @@ -955,7 +964,7 @@ function Basin(db::DB, config::Config, graph::MetaGraph)::Basin LinearInterpolation.( area, level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Extension, cache_parameters = true, ) @@ -1030,8 +1039,7 @@ References to listened parameters are added later.
function CompoundVariable( variables_compound_variable, node_type::NodeType.T, - node_ids_all::Vector{NodeID}, - placeholder_vector; + node_ids_all::Vector{NodeID}; conditions_compound_variable = nothing, starttime = nothing, cyclic_time = false, @@ -1047,14 +1055,14 @@ function CompoundVariable( for row in variables_compound_variable listen_node_id = NodeID(row.listen_node_id, node_ids_all) # Placeholder until actual ref is known - variable_ref = PreallocationRef(placeholder_vector, 0) + diff_cache_ref = DiffCacheRef() variable = row.variable # Default to weight = 1.0 if not specified weight = coalesce(row.weight, 1.0) # Default to look_ahead = 0.0 if not specified look_ahead = coalesce(row.look_ahead, 0.0) subvariable = - SubVariable(listen_node_id, variable_ref, variable, weight, look_ahead) + SubVariable(listen_node_id, diff_cache_ref, variable, weight, look_ahead) push!(subvariables, subvariable) end @@ -1071,8 +1079,6 @@ end function parse_variables_and_conditions(ids::Vector{Int32}, db::DB, config::Config) condition = load_structvector(db, config, DiscreteControlConditionV1) compound_variable = load_structvector(db, config, DiscreteControlVariableV1) - - placeholder_vector = cache(1) compound_variables = Vector{CompoundVariable}[] cyclic_times = get_cyclic_time(db, "DiscreteControl") errors = false @@ -1114,8 +1120,7 @@ function parse_variables_and_conditions(ids::Vector{Int32}, db::DB, config::Conf CompoundVariable( variables_compound_variable, NodeType.DiscreteControl, - node_ids_all, - placeholder_vector; + node_ids_all; conditions_compound_variable, config.starttime, cyclic_time, @@ -1206,7 +1211,6 @@ function continuous_control_functions(db, config, ids) end function continuous_control_compound_variables(db::DB, config::Config, ids) - placeholder_vector = cache(1) node_ids_all = get_node_ids(db) data = load_structvector(db, config, ContinuousControlVariableV1) @@ -1217,18 +1221,13 @@ function continuous_control_compound_variables(db::DB, config::Config, ids) variable_data = filter(row -> row.node_id == id, data) push!( compound_variables, - CompoundVariable( - variable_data, - NodeType.ContinuousControl, - node_ids_all, - placeholder_vector, - ), + CompoundVariable(variable_data, NodeType.ContinuousControl, node_ids_all), ) end compound_variables end -function ContinuousControl(db::DB, config::Config, graph::MetaGraph)::ContinuousControl +function ContinuousControl(db::DB, config::Config)::ContinuousControl compound_variable = load_structvector(db, config, ContinuousControlVariableV1) node_id = get_node_ids(db, NodeType.ContinuousControl) @@ -1238,27 +1237,18 @@ function ContinuousControl(db::DB, config::Config, graph::MetaGraph)::Continuous func, controlled_variable, errors = continuous_control_functions(db, config, ids) compound_variable = continuous_control_compound_variables(db, config, ids) - # References to the controlled parameters, filled in later when they are known - target_refs = PreallocationRef[] - if errors error("Errors encountered when parsing ContinuousControl data.") end - return ContinuousControl( - node_id, - compound_variable, - controlled_variable, - target_refs, - func, - ) + return ContinuousControl(; node_id, compound_variable, controlled_variable, func) end -function PidControl(db::DB, config::Config, graph::MetaGraph)::PidControl +function PidControl(db::DB, config::Config)::PidControl static = load_structvector(db, config, PidControlStaticV1) time = load_structvector(db, config, PidControlTimeV1) - _, _, node_ids, valid = static_and_time_node_ids(db, 
static, time, NodeType.PidControl) + _, _, node_id, valid = static_and_time_node_ids(db, static, time, NodeType.PidControl) if !valid error("Problems encountered when parsing PidControl static and time node IDs.") @@ -1272,34 +1262,17 @@ function PidControl(db::DB, config::Config, graph::MetaGraph)::PidControl error("Errors occurred when parsing PidControl data.") end - pid_error = cache(length(node_ids)) - target_ref = PreallocationRef[] - - controlled_basins = Set{NodeID}() - for id in node_ids - controlled_node = only(outneighbor_labels_type(graph, id, LinkType.control)) - for id_inout in inoutflow_ids(graph, controlled_node) - if id_inout.type == NodeType.Basin - push!(controlled_basins, id_inout) - end - end - end - controlled_basins = collect(controlled_basins) - all_node_ids = get_node_ids(db) listen_node_id = NodeID.(parsed_parameters.listen_node_id, Ref(all_node_ids)) return PidControl(; - node_id = node_ids, + node_id, parsed_parameters.active, listen_node_id, parsed_parameters.target, - target_ref, parsed_parameters.proportional, parsed_parameters.integral, parsed_parameters.derivative, - error = pid_error, - controlled_basins, parsed_parameters.control_mapping, ) end @@ -1324,7 +1297,7 @@ function user_demand_static!( return_factor[user_demand_idx] = LinearInterpolation( fill(first_row.return_factor, 2), return_factor_old.t; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) min_level[user_demand_idx] = first_row.min_level @@ -1337,7 +1310,7 @@ function user_demand_static!( demand_itp[user_demand_idx][demand_priority_idx] = LinearInterpolation( fill(demand_row, 2), demand_itp_old.t; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) demand[user_demand_idx, demand_priority_idx] = demand_row @@ -1557,7 +1530,7 @@ function push_constant_interpolation!( itp = ConstantInterpolation( output, input; - extrapolation = cyclic_time ? Periodic : Constant, + extrapolation = cyclic_time ? 
Periodic : ConstantExtrapolation, cache_parameters = true, ) push!(constant_interpolations, itp) @@ -1568,7 +1541,7 @@ function static_lookup(lookup_index::Int)::IndexLookup return ConstantInterpolation( [lookup_index], [0.0]; - extrapolation = Constant, + extrapolation = ConstantExtrapolation, cache_parameters = true, ) end @@ -1607,7 +1580,7 @@ function Subgrid(db::DB, config::Config, basin::Basin)::Subgrid hh_itp = LinearInterpolation( subgrid_level, basin_level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) @@ -1656,7 +1629,7 @@ function Subgrid(db::DB, config::Config, basin::Basin)::Subgrid hh_itp = LinearInterpolation( subgrid_level, basin_level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) @@ -1815,8 +1788,8 @@ function Parameters(db::DB, config::Config)::Parameters outlet = Outlet(db, config, graph), terminal = Terminal(db, config), discrete_control = DiscreteControl(db, config, graph), - continuous_control = ContinuousControl(db, config, graph), - pid_control = PidControl(db, config, graph), + continuous_control = ContinuousControl(db, config), + pid_control = PidControl(db, config), user_demand = UserDemand(db, config, graph), level_demand = LevelDemand(db, config), flow_demand = FlowDemand(db, config), @@ -1848,46 +1821,47 @@ function Parameters(db::DB, config::Config)::Parameters flow_to_storage = build_flow_to_storage(state_ranges, n_states, basin, connector_nodes) state_inflow_link, state_outflow_link = get_state_flow_links(graph, nodes) - p = Parameters(; + set_target_ref!( + nodes.pid_control.target_ref, + nodes.pid_control.node_id, + fill("flow_rate", length(node_id)), + state_ranges, + graph, + ) + set_target_ref!( + nodes.continuous_control.target_ref, + nodes.continuous_control.node_id, + nodes.continuous_control.controlled_variable, + state_ranges, + graph, + ) + + p_non_diff = ParametersNonDiff(; config.starttime, graph, allocation, - basin, - nodes.linear_resistance, - nodes.manning_resistance, - nodes.tabulated_rating_curve, - nodes.level_boundary, - nodes.flow_boundary, - nodes.pump, - nodes.outlet, - nodes.terminal, - nodes.discrete_control, - nodes.continuous_control, - nodes.pid_control, - nodes.user_demand, - nodes.level_demand, - nodes.flow_demand, + nodes..., subgrid, state_inflow_link, state_outflow_link, flow_to_storage, config.solver.water_balance_abstol, config.solver.water_balance_reltol, + u_prev_saveat = zeros(n_states), node_id, state_ranges, ) - collect_control_mappings!(p) - set_continuous_control_type!(p) - set_listen_variable_refs!(p) - set_discrete_controlled_variable_refs!(p) - set_continuously_controlled_variable_refs!(p) + collect_control_mappings!(p_non_diff) + set_listen_diff_cache_refs!(p_non_diff) + set_discrete_controlled_variable_refs!(p_non_diff) # Allocation data structures if config.allocation.use_allocation - initialize_allocation!(p, db, config) + initialize_allocation!(p_non_diff, db, config) end - return p + + return Parameters(; p_non_diff) end function get_node_ids_int32(db::DB, node_type)::Vector{Int32} diff --git a/core/src/solve.jl b/core/src/solve.jl index bcb23b8c7..ba14e87db 100644 --- a/core/src/solve.jl +++ b/core/src/solve.jl @@ -1,18 +1,34 @@ """ The right hand side function of the system of ODEs set up by Ribasim. 
""" -function water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing - (; current_storage, current_low_storage_factor, current_level) = - p.basin.current_properties +water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing = + water_balance!(du, u, p.p_non_diff, p.diff_cache, p.p_mutable, t) + +# Method with `t` as second argument parsable by DifferentiationInterface.jl for time derivative computation +water_balance!( + du::Vector, + t::Number, + u::Vector, + p_non_diff::ParametersNonDiff, + diff_cache::DiffCache, + p_mutable::ParametersMutable, +) = water_balance!(du, u, p_non_diff, diff_cache, p_mutable, t) + +# Method with separate parameter parsable by DifferentiationInterface.jl for Jacobian computation +function water_balance!( + du::Vector, + u::Vector, + p_non_diff::ParametersNonDiff, + diff_cache::DiffCache, + p_mutable::ParametersMutable, + t::Number, +)::Nothing + p = Parameters(p_non_diff, diff_cache, p_mutable) du .= 0.0 # Ensures current_* vectors are current - set_current_basin_properties!(du, u, p, t) - - current_storage = current_storage[du] - current_low_storage_factor = current_low_storage_factor[du] - current_level = current_level[du] + set_current_basin_properties!(u, p, t) # Notes on the ordering of these formulations: # - Continuous control can depend on flows (which are not continuously controlled themselves), @@ -24,43 +40,29 @@ function water_balance!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothin update_vertical_flux!(du, p) # Formulate intermediate flows (non continuously controlled) - formulate_flows!(du, p, t, current_low_storage_factor, current_level) + formulate_flows!(du, p, t) # Compute continuous control formulate_continuous_control!(du, p, t) # Formulate intermediate flows (controlled by ContinuousControl) - formulate_flows!( - du, - p, - t, - current_low_storage_factor, - current_level; - continuous_control_type = ContinuousControlType.Continuous, - ) + formulate_flows!(du, p, t; continuous_control_type = ContinuousControlType.Continuous) # Compute PID control formulate_pid_control!(du, u, p, t) # Formulate intermediate flow (controlled by PID control) - formulate_flows!( - du, - p, - t, - current_low_storage_factor, - current_level; - continuous_control_type = ContinuousControlType.PID, - ) + formulate_flows!(du, p, t; continuous_control_type = ContinuousControlType.PID) return nothing end -function formulate_continuous_control!(du, p, t)::Nothing - (; compound_variable, target_ref, func) = p.continuous_control +function formulate_continuous_control!(du::Vector, p::Parameters, t::Number)::Nothing + (; compound_variable, target_ref, func) = p.p_non_diff.continuous_control for (cvar, ref, func_) in zip(compound_variable, target_ref, func) value = compound_variable_value(cvar, p, du, t) - set_value!(ref, func_(value), du) + set_value!(ref, p, func_(value)) end return nothing end @@ -69,87 +71,57 @@ end Compute the storages, levels and areas of all Basins given the state u and the time t. 
""" -function set_current_basin_properties!( - du::Vector, - u::Vector, - p::Parameters, - t::Number, -)::Nothing - (; basin) = p - (; current_properties, cumulative_precipitation, cumulative_drainage, vertical_flux) = - basin - (; - current_storage, - current_low_storage_factor, - current_level, - current_area, - current_cumulative_precipitation, - current_cumulative_drainage, - ) = current_properties - - current_storage = current_storage[du] - current_low_storage_factor = current_low_storage_factor[du] - current_level = current_level[du] - current_area = current_area[du] - current_cumulative_precipitation = current_cumulative_precipitation[du] - current_cumulative_drainage = current_cumulative_drainage[du] +function set_current_basin_properties!(u::Vector, p::Parameters, t::Number)::Nothing + (; p_non_diff, diff_cache, p_mutable) = p + (; basin) = p_non_diff + (; node_id, cumulative_precipitation, cumulative_drainage, vertical_flux) = basin # The exact cumulative precipitation and drainage up to the t of this water_balance call - dt = t - p.tprev - for node_id in basin.node_id + dt = t - p_mutable.tprev + for node_id in node_id fixed_area = basin_areas(basin, node_id.idx)[end] - current_cumulative_precipitation[node_id.idx] = + diff_cache.current_cumulative_precipitation[node_id.idx] = cumulative_precipitation[node_id.idx] + fixed_area * vertical_flux.precipitation[node_id.idx] * dt end - @. current_cumulative_drainage = cumulative_drainage + dt * vertical_flux.drainage + @. diff_cache.current_cumulative_drainage = + cumulative_drainage + dt * vertical_flux.drainage - formulate_storages!(current_storage, du, u, p, t) + formulate_storages!(u, p, t) - for (id, s) in zip(basin.node_id, current_storage) + for (id, s) in zip(basin.node_id, diff_cache.current_storage) i = id.idx - current_low_storage_factor[i] = reduction_factor(s, LOW_STORAGE_THRESHOLD) - current_level[i] = get_level_from_storage(basin, i, s) - current_area[i] = basin.level_to_area[i](current_level[i]) + diff_cache.current_low_storage_factor[i] = + reduction_factor(s, LOW_STORAGE_THRESHOLD) + diff_cache.current_level[i] = get_level_from_storage(basin, i, s) + diff_cache.current_area[i] = basin.level_to_area[i](diff_cache.current_level[i]) end end function formulate_storages!( - current_storage::AbstractVector, - du::Vector, u::Vector, p::Parameters, t::Number; add_initial_storage::Bool = true, )::Nothing - (; basin, flow_boundary, tprev, flow_to_storage) = p + (; p_non_diff, diff_cache, p_mutable) = p + (; basin, flow_boundary, flow_to_storage) = p_non_diff + (; tprev) = p_mutable # Current storage: initial condition + # total inflows and outflows since the start # of the simulation if add_initial_storage - current_storage .= basin.storage0 + diff_cache.current_storage .= basin.storage0 else - current_storage .= 0.0 + diff_cache.current_storage .= 0.0 end - mul!(current_storage, flow_to_storage, u, 1, 1) - formulate_storage!(current_storage, basin, du) - formulate_storage!(current_storage, tprev, t, flow_boundary) + mul!(diff_cache.current_storage, flow_to_storage, u, 1, 1) + diff_cache.current_storage .+= diff_cache.current_cumulative_precipitation + diff_cache.current_storage .+= diff_cache.current_cumulative_drainage + formulate_storage!(diff_cache.current_storage, tprev, t, flow_boundary) return nothing end -""" -The storage contributions of the forcings that are not part of the state. 
-""" -function formulate_storage!(current_storage::AbstractVector, basin::Basin, du::Vector) - (; current_cumulative_precipitation, current_cumulative_drainage) = - basin.current_properties - - current_cumulative_precipitation = current_cumulative_precipitation[du] - current_cumulative_drainage = current_cumulative_drainage[du] - current_storage .+= current_cumulative_precipitation - current_storage .+= current_cumulative_drainage -end - """ Formulate storage contributions of flow boundaries. """ @@ -183,11 +155,10 @@ Smoothly let the evaporation flux go to 0 when at small water depths Currently at less than 0.1 m. """ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing - (; basin, state_ranges) = p - (; vertical_flux, current_properties) = basin - (; current_level, current_area) = current_properties - current_level = current_level[du] - current_area = current_area[du] + (; p_non_diff, diff_cache) = p + (; basin, state_ranges) = p_non_diff + (; vertical_flux) = basin + (; current_level, current_area) = diff_cache du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) @@ -209,40 +180,38 @@ function update_vertical_flux!(du::Vector, p::Parameters)::Nothing return nothing end -function set_error!(pid_control::PidControl, p::Parameters, du::Vector, t::Number) - (; basin) = p - (; listen_node_id, target, error) = pid_control - error = error[du] - current_level = basin.current_properties.current_level[du] +function set_error!(pid_control::PidControl, p::Parameters, t::Number) + (; diff_cache) = p + (; listen_node_id, target) = pid_control for i in eachindex(listen_node_id) listened_node_id = listen_node_id[i] @assert listened_node_id.type == NodeType.Basin lazy"Listen node $listened_node_id is not a Basin." 
- error[i] = target[i](t) - current_level[listened_node_id.idx] + diff_cache.error_pid_control[i] = + target[i](t) - diff_cache.current_level[listened_node_id.idx] end end function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number)::Nothing - (; basin, state_ranges, pid_control) = p - (; node_id, active, target, listen_node_id, error) = pid_control - (; current_area) = basin.current_properties + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges, pid_control) = p_non_diff + (; node_id, active, target, listen_node_id) = pid_control + du_integral = view(du, state_ranges.integral) u_integral = view(u, state_ranges.integral) - current_area = current_area[du] - error = error[du] - all_nodes_active = p.all_nodes_active[] + all_nodes_active = p_mutable.all_nodes_active[] - set_error!(pid_control, p, du, t) + set_error!(pid_control, p, t) - for (i, id) in enumerate(node_id) + for (i, _) in enumerate(node_id) if !(active[i] || all_nodes_active) du_integral[i] = 0.0 u_integral[i] = 0.0 continue end - du_integral[i] = error[i] + du_integral[i] = diff_cache.error_pid_control[i] listened_node_id = listen_node_id[i] @@ -255,14 +224,14 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) if !iszero(K_d) # dlevel/dstorage = 1/area # TODO: replace by DataInterpolations.derivative(storage_to_level, storage) - area = current_area[listened_node_id.idx] + area = diff_cache.current_area[listened_node_id.idx] D = 1.0 - K_d / area else D = 1.0 end if !iszero(K_p) - flow_rate += K_p * error[i] / D + flow_rate += K_p * diff_cache.error_pid_control[i] / D end if !iszero(K_i) @@ -271,7 +240,8 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) if !iszero(K_d) dlevel_demand = derivative(target[i], t) - dstorage_listened_basin_old = formulate_dstorage(du, p, t, listened_node_id) + dstorage_listened_basin_old = + formulate_dstorage(du, p_non_diff, t, listened_node_id) # The expression below is the solution to an implicit equation for # dstorage_listened_basin. This equation results from the fact that if the derivative # term in the PID controller is used, the controlled pump flow rate depends on itself. @@ -279,7 +249,7 @@ function formulate_pid_control!(du::Vector, u::Vector, p::Parameters, t::Number) end # Set flow_rate - set_value!(pid_control.target_ref[i], flow_rate, du) + set_value!(pid_control.target_ref[i], p, flow_rate) end return nothing end @@ -287,18 +257,23 @@ end """ Formulate the time derivative of the storage in a single Basin. 
""" -function formulate_dstorage(du::Vector, p::Parameters, t::Number, node_id::NodeID) - (; basin, state_ranges) = p +function formulate_dstorage( + du::Vector, + p_non_diff::ParametersNonDiff, + t::Number, + node_id::NodeID, +) + (; basin, state_ranges) = p_non_diff (; inflow_ids, outflow_ids, vertical_flux) = basin du_evaporation = view(du, state_ranges.evaporation) du_infiltration = view(du, state_ranges.infiltration) @assert node_id.type == NodeType.Basin dstorage = 0.0 for inflow_id in inflow_ids[node_id.idx] - dstorage += get_flow(du, p, t, (inflow_id, node_id)) + dstorage += get_flow(du, p_non_diff, t, (inflow_id, node_id)) end for outflow_id in outflow_ids[node_id.idx] - dstorage -= get_flow(du, p, t, (node_id, outflow_id)) + dstorage -= get_flow(du, p_non_diff, t, (node_id, outflow_id)) end fixed_area = basin_areas(basin, node_id.idx)[end] @@ -315,11 +290,10 @@ function formulate_flow!( user_demand::UserDemand, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - (; allocation, state_ranges) = p - all_nodes_active = p.all_nodes_active[] + (; p_non_diff) = p + (; allocation, state_ranges) = p_non_diff + all_nodes_active = p.p_mutable.all_nodes_active[] du_user_demand_inflow = view(du, state_ranges.user_demand_inflow) du_user_demand_outflow = view(du, state_ranges.user_demand_outflow) @@ -352,12 +326,12 @@ function formulate_flow!( # Smoothly let abstraction go to 0 as the source basin dries out inflow_id = inflow_link.link[1] - factor_basin = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor_basin = get_low_storage_factor(p, inflow_id) q *= factor_basin # Smoothly let abstraction go to 0 as the source basin # level reaches its minimum level - source_level = get_level(p, inflow_id, t, current_level) + source_level = get_level(p, inflow_id, t) Δsource_level = source_level - min_level factor_level = reduction_factor(Δsource_level, USER_DEMAND_MIN_LEVEL_THRESHOLD) q *= factor_level @@ -372,12 +346,14 @@ function formulate_flow!( linear_resistance::LinearResistance, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, p_mutable) = p + (; state_ranges) = p_non_diff + all_nodes_active = p_mutable.all_nodes_active[] (; node_id, active, resistance, max_flow_rate) = linear_resistance - du_linear_resistance = view(du, p.state_ranges.linear_resistance) + + du_linear_resistance = view(du, state_ranges.linear_resistance) + for id in node_id inflow_link = linear_resistance.inflow_link[id.idx] outflow_link = linear_resistance.outflow_link[id.idx] @@ -386,16 +362,11 @@ function formulate_flow!( outflow_id = outflow_link.link[2] if (active[id.idx] || all_nodes_active) - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) q_unlimited = (h_a - h_b) / resistance[id.idx] q = clamp(q_unlimited, -max_flow_rate[id.idx], max_flow_rate[id.idx]) - q *= low_storage_factor_resistance_node( - current_low_storage_factor, - q, - inflow_id, - outflow_id, - ) + q *= low_storage_factor_resistance_node(p, q, inflow_id, outflow_id) du_linear_resistance[id.idx] = q end end @@ -407,14 +378,14 @@ function formulate_flow!( tabulated_rating_curve::TabulatedRatingCurve, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, 
p_mutable) = p + (; state_ranges) = p_non_diff + all_nodes_active = p_mutable.all_nodes_active[] (; node_id, active, interpolations, current_interpolation_index) = tabulated_rating_curve - du_tabulated_rating_curve = view(du, p.state_ranges.tabulated_rating_curve) + du_tabulated_rating_curve = view(du, state_ranges.tabulated_rating_curve) for id in node_id inflow_link = tabulated_rating_curve.inflow_link[id.idx] @@ -423,12 +394,12 @@ function formulate_flow!( outflow_id = outflow_link.link[2] max_downstream_level = tabulated_rating_curve.max_downstream_level[id.idx] - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) Δh = h_a - h_b if active[id.idx] || all_nodes_active - factor = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor = get_low_storage_factor(p, inflow_id) interpolation_index = current_interpolation_index[id.idx](t) qh = interpolations[interpolation_index] q = factor * qh(h_a) @@ -487,9 +458,9 @@ function formulate_flow!( manning_resistance::ManningResistance, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, )::Nothing + (; p_non_diff, p_mutable) = p + (; state_ranges) = p_non_diff (; node_id, active, @@ -501,9 +472,9 @@ function formulate_flow!( downstream_bottom, ) = manning_resistance - du_manning_resistance = view(du, p.state_ranges.manning_resistance) + du_manning_resistance = view(du, state_ranges.manning_resistance) - all_nodes_active = p.all_nodes_active[] + all_nodes_active = p_mutable.all_nodes_active[] for id in node_id inflow_link = manning_resistance.inflow_link[id.idx] outflow_link = manning_resistance.outflow_link[id.idx] @@ -515,8 +486,8 @@ function formulate_flow!( continue end - h_a = get_level(p, inflow_id, t, current_level) - h_b = get_level(p, outflow_id, t, current_level) + h_a = get_level(p, inflow_id, t) + h_b = get_level(p, outflow_id, t) bottom_a = upstream_bottom[id.idx] bottom_b = downstream_bottom[id.idx] @@ -544,12 +515,7 @@ function formulate_flow!( Δh = h_a - h_b q = A / n * ∛(R_h^2) * relaxed_root(Δh / L, 1e-5) - q *= low_storage_factor_resistance_node( - current_low_storage_factor, - q, - inflow_id, - outflow_id, - ) + q *= low_storage_factor_resistance_node(p, q, inflow_id, outflow_id) du_manning_resistance[id.idx] = q end return nothing @@ -560,18 +526,19 @@ function formulate_flow!( pump::Pump, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, continuous_control_type_::ContinuousControlType.T, )::Nothing - du_pump = view(du, p.state_ranges.pump) - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges) = p_non_diff + + du_pump = view(du, state_ranges.pump) + + all_nodes_active = p_mutable.all_nodes_active[] for ( id, inflow_link, outflow_link, active, - flow_rate_from_cache, flow_rate_itp, min_flow_rate, max_flow_rate, @@ -583,7 +550,6 @@ function formulate_flow!( pump.inflow_link, pump.outflow_link, pump.active, - pump.flow_rate_cache[du], pump.flow_rate, pump.min_flow_rate, pump.max_flow_rate, @@ -599,15 +565,15 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - flow_rate_from_cache + diff_cache.flow_rate_pump[id.idx] end inflow_id = inflow_link.link[1] outflow_id = outflow_link.link[2] - src_level = get_level(p, inflow_id, t, current_level) - dst_level = get_level(p, outflow_id, t, current_level) + src_level 
= get_level(p, inflow_id, t) + dst_level = get_level(p, outflow_id, t) - factor = get_low_storage_factor(current_low_storage_factor, inflow_id) + factor = get_low_storage_factor(p, inflow_id) q = flow_rate * factor q *= reduction_factor(src_level - min_upstream_level(t), 0.02) @@ -624,18 +590,19 @@ function formulate_flow!( outlet::Outlet, p::Parameters, t::Number, - current_low_storage_factor::Vector, - current_level::Vector, continuous_control_type_::ContinuousControlType.T, )::Nothing - du_outlet = view(du, p.state_ranges.outlet) - all_nodes_active = p.all_nodes_active[] + (; p_non_diff, diff_cache, p_mutable) = p + (; state_ranges) = p_non_diff + + du_outlet = view(du, state_ranges.outlet) + + all_nodes_active = p_mutable.all_nodes_active[] for ( id, inflow_link, outflow_link, active, - flow_rate_from_cache, flow_rate_itp, min_flow_rate, max_flow_rate, @@ -647,7 +614,6 @@ function formulate_flow!( outlet.inflow_link, outlet.outflow_link, outlet.active, - outlet.flow_rate_cache[du], outlet.flow_rate, outlet.min_flow_rate, outlet.max_flow_rate, @@ -663,16 +629,16 @@ function formulate_flow!( flow_rate = if continuous_control_type == ContinuousControlType.None flow_rate_itp(t) else - flow_rate_from_cache + diff_cache.flow_rate_outlet[id.idx] end inflow_id = inflow_link.link[1] outflow_id = outflow_link.link[2] - src_level = get_level(p, inflow_id, t, current_level) - dst_level = get_level(p, outflow_id, t, current_level) + src_level = get_level(p, inflow_id, t) + dst_level = get_level(p, outflow_id, t) q = flow_rate - q *= get_low_storage_factor(current_low_storage_factor, inflow_id) + q *= get_low_storage_factor(p, inflow_id) # No flow of outlet if source level is lower than target level Δlevel = src_level - dst_level @@ -689,9 +655,7 @@ end function formulate_flows!( du::AbstractVector, p::Parameters, - t::Number, - current_low_storage_factor::Vector, - current_level::Vector; + t::Number; continuous_control_type::ContinuousControlType.T = ContinuousControlType.None, )::Nothing (; @@ -701,53 +665,16 @@ function formulate_flows!( pump, outlet, user_demand, - ) = p + ) = p.p_non_diff - formulate_flow!( - du, - pump, - p, - t, - current_low_storage_factor, - current_level, - continuous_control_type, - ) - formulate_flow!( - du, - outlet, - p, - t, - current_low_storage_factor, - current_level, - continuous_control_type, - ) + formulate_flow!(du, pump, p, t, continuous_control_type) + formulate_flow!(du, outlet, p, t, continuous_control_type) if continuous_control_type == ContinuousControlType.None - formulate_flow!( - du, - linear_resistance, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!( - du, - manning_resistance, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!( - du, - tabulated_rating_curve, - p, - t, - current_low_storage_factor, - current_level, - ) - formulate_flow!(du, user_demand, p, t, current_low_storage_factor, current_level) + formulate_flow!(du, linear_resistance, p, t) + formulate_flow!(du, manning_resistance, p, t) + formulate_flow!(du, tabulated_rating_curve, p, t) + formulate_flow!(du, user_demand, p, t) end end @@ -757,6 +684,7 @@ flow rates for the last time step if these flow rate bounds are known. 
""" function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Number)::Nothing (; uprev, dt) = integrator + (; p_non_diff, diff_cache) = p (; pump, outlet, @@ -766,7 +694,8 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb basin, allocation, state_ranges, - ) = p + ) = p_non_diff + (; current_storage, current_level) = diff_cache u_tabulated_rating_curve = view(u, state_ranges.tabulated_rating_curve) u_pump = view(u, state_ranges.pump) @@ -788,9 +717,7 @@ function limit_flow!(u::Vector, integrator::DEIntegrator, p::Parameters, t::Numb # storage and level attained in the last time step to estimate whether there was an effect # of reduction factors du = get_du(integrator) - set_current_basin_properties!(du, u, p, t) - current_storage = basin.current_properties.current_storage[u] - current_level = basin.current_properties.current_level[u] + set_current_basin_properties!(du, p, t) # TabulatedRatingCurve flow is in [0, ∞) and can be inactive for (id, active) in zip(tabulated_rating_curve.node_id, tabulated_rating_curve.active) diff --git a/core/src/util.jl b/core/src/util.jl index 7ab5fd345..ec499f23c 100644 --- a/core/src/util.jl +++ b/core/src/util.jl @@ -52,7 +52,7 @@ end """ Compute the level of a basin given its storage. """ -function get_level_from_storage(basin::Basin, state_idx::Int, storage) +function get_level_from_storage(basin::Basin, state_idx::Int, storage::T)::T where {T} storage_to_level = basin.storage_to_level[state_idx] if storage >= 0 return storage_to_level(storage) @@ -84,7 +84,7 @@ function get_scalar_interpolation( return interpolation_type( parameter, times; - extrapolation = cyclic_time ? Periodic : Constant, + extrapolation = cyclic_time ? Periodic : ConstantExtrapolation, cache_parameters = true, ) end @@ -125,7 +125,7 @@ function qh_interpolation( return LinearInterpolation( flow_rate, level; - extrapolation_left = Constant, + extrapolation_left = ConstantExtrapolation, extrapolation_right = Linear, cache_parameters = true, ) @@ -158,11 +158,12 @@ Get the current water level of a node ID. The ID can belong to either a Basin or a LevelBoundary. du: tells ForwardDiff whether this call is for differentiation or not """ -function get_level(p::Parameters, node_id::NodeID, t::Number, current_level::Vector)::Number +function get_level(p::Parameters, node_id::NodeID, t::Number)::Number + (; p_non_diff, diff_cache) = p if node_id.type == NodeType.Basin - current_level[node_id.idx] + diff_cache.current_level[node_id.idx] elseif node_id.type == NodeType.LevelBoundary - p.level_boundary.level[node_id.idx](t) + p_non_diff.level_boundary.level[node_id.idx](t) elseif node_id.type == NodeType.Terminal # Terminal is like a bottomless pit. # A level at -Inf ensures we don't hit `max_downstream_level` reduction factors. @@ -295,11 +296,15 @@ function reduction_factor(x::T, threshold::Real)::T where {T <: Real} end end -function get_low_storage_factor( - current_low_storage_factor::Vector{T}, - id::NodeID, -)::T where {T} - return id.type == NodeType.Basin ? current_low_storage_factor[id.idx] : one(T) +# SparseConnectivityTracer overloads + +function get_low_storage_factor(p::Parameters, id::NodeID) + (; current_low_storage_factor) = p.diff_cache + if id.type == NodeType.Basin + current_low_storage_factor[id.idx] + else + one(eltype(current_low_storage_factor)) + end end """ @@ -307,15 +312,15 @@ For resistance nodes, give a reduction factor based on the upstream node as defined by the flow direction. 
""" function low_storage_factor_resistance_node( - current_low_storage_factor, - q, - inflow_id, - outflow_id, + p::Parameters, + q::Number, + inflow_id::NodeID, + outflow_id::NodeID, ) if q > 0 - get_low_storage_factor(current_low_storage_factor, inflow_id) + get_low_storage_factor(p, inflow_id) else - get_low_storage_factor(current_low_storage_factor, outflow_id) + get_low_storage_factor(p, outflow_id) end end @@ -380,8 +385,11 @@ function get_all_demand_priorities(db::DB, config::Config;)::Vector{Int32} end end -function get_external_demand_priority_idx(p::Parameters, node_id::NodeID)::Int - (; graph, level_demand, flow_demand, allocation) = p +function get_external_demand_priority_idx( + p_non_diff::ParametersNonDiff, + node_id::NodeID, +)::Int + (; graph, level_demand, flow_demand, allocation) = p_non_diff inneighbor_control_ids = inneighbor_labels_type(graph, node_id, LinkType.control) if isempty(inneighbor_control_ids) return 0 @@ -399,48 +407,27 @@ function get_external_demand_priority_idx(p::Parameters, node_id::NodeID)::Int return findsorted(allocation.demand_priorities_all, demand_priority) end -""" -Set continuous_control_type for those pumps and outlets that are controlled by either -PidControl or ContinuousControl -""" -function set_continuous_control_type!(p::Parameters)::Nothing - (; continuous_control, pid_control) = p - errors = false - - errors = set_continuous_control_type!( - p, - continuous_control.node_id, - ContinuousControlType.Continuous, - ) - errors |= - set_continuous_control_type!(p, pid_control.node_id, ContinuousControlType.PID) - - if errors - error("Errors occurred when parsing ContinuousControl and PidControl connectivity") - end - return nothing -end - -function set_continuous_control_type!( - p::Parameters, - node_id::Vector{NodeID}, - continuous_control_type::ContinuousControlType.T, -)::Bool - (; graph, pump, outlet) = p - errors = false +const control_type_mapping = Dict{NodeType.T, ContinuousControlType.T}( + NodeType.PidControl => ContinuousControlType.PID, + NodeType.ContinuousControl => ContinuousControlType.Continuous, +) +function get_continuous_control_type(graph::MetaGraph, node_id::Vector{NodeID}) + continuous_control_type = fill(ContinuousControlType.None, length(node_id)) for id in node_id - id_controlled = only(outneighbor_labels_type(graph, id, LinkType.control)) - if id_controlled.type == NodeType.Pump - pump.continuous_control_type[id_controlled.idx] = continuous_control_type - elseif id_controlled.type == NodeType.Outlet - outlet.continuous_control_type[id_controlled.idx] = continuous_control_type - else - errors = true - @error "Only Pump and Outlet can be controlled by PidController, got $id_controlled" + control_inneighbors = collect(inneighbor_labels_type(graph, id, LinkType.control)) + if length(control_inneighbors) == 1 + control_inneighbor = only(control_inneighbors) + continuous_control_type[id.idx] = get( + control_type_mapping, + control_inneighbor.type, + ContinuousControlType.None, + ) + elseif length(control_inneighbors) > 1 + error("$id has more than 1 control inneighbors.") end end - return errors + return continuous_control_type end function has_external_demand( @@ -457,23 +444,12 @@ function has_external_demand( return false, nothing end -function Base.get( - constraints::JuMP.Containers.DenseAxisArray, - node_id::NodeID, -)::Union{JuMP.ConstraintRef, Nothing} - if node_id in only(constraints.axes) - constraints[node_id] - else - nothing - end -end - """ Get the time interval between (flow) saves """ function 
get_Δt(integrator)::Float64 (; p, t, dt) = integrator - (; saveat) = p.graph[] + (; saveat) = p.p_non_diff.graph[] if iszero(saveat) dt elseif isinf(saveat) @@ -501,7 +477,8 @@ as input. Therefore we set the instantaneous flows as the mean flows as allocati """ function set_initial_allocation_mean_flows!(integrator)::Nothing (; u, p, t) = integrator - (; allocation, graph) = p + (; p_non_diff) = p + (; allocation) = p_non_diff (; mean_input_flows, mean_realized_flows, allocation_models) = allocation (; Δt_allocation) = allocation_models[1] @@ -516,7 +493,7 @@ function set_initial_allocation_mean_flows!(integrator)::Nothing if link[1] == link[2] q = get_influx(du, link[1], p) else - q = get_flow(du, p, t, link) + q = get_flow(du, p_non_diff, t, link) end # Multiply by Δt_allocation as averaging divides by this factor # in update_allocation! @@ -530,7 +507,7 @@ function set_initial_allocation_mean_flows!(integrator)::Nothing if link[1] == link[2] mean_realized_flows[link] = -u[link[1].idx] else - q = get_flow(du, p, t, link) + q = get_flow(du, p_non_diff, t, link) mean_realized_flows[link] = q * Δt_allocation end end @@ -545,9 +522,9 @@ function convert_truth_state(boolean_vector)::String String(UInt8.(ifelse.(boolean_vector, 'T', 'F'))) end -function NodeID(type::Symbol, value::Integer, p::Parameters)::NodeID +function NodeID(type::Symbol, value::Integer, p_non_diff::ParametersNonDiff)::NodeID node_type = NodeType.T(type) - node = getfield(p, snake_case(type)) + node = getfield(p_non_diff, snake_case(type)) idx = searchsortedfirst(node.node_id, NodeID(node_type, value, 0)) return NodeID(node_type, value, idx) end @@ -555,38 +532,42 @@ end """ Get the reference to a parameter """ -function get_variable_ref( - p::Parameters, +function get_diff_cache_ref( node_id::NodeID, - variable::String; + variable::String, + state_ranges::StateRanges; listen::Bool = true, -)::Tuple{PreallocationRef, Bool} - (; basin) = p +)::Tuple{DiffCacheRef, Bool} errors = false - # Only built here because it is needed to obtain indices - u = build_state_vector(p) ref = if node_id.type == NodeType.Basin && variable == "level" - PreallocationRef(basin.current_properties.current_level, node_id.idx) + DiffCacheRef(; type = DiffCacheType.basin_level, node_id.idx) elseif variable == "flow_rate" && node_id.type != NodeType.FlowBoundary if listen if node_id.type ∉ conservative_nodetypes errors = true @error "Cannot listen to flow_rate of $node_id, the node type must be one of $conservative_nodetypes." - Ref(Float64[], 0) + DiffCacheRef() else # Index in the state vector (inflow) - flow_idx = get_state_index(p.state_ranges, node_id) - PreallocationRef(cache(1), flow_idx; from_du = true) + idx = get_state_index(state_ranges, node_id) + DiffCacheRef(; idx, from_du = true) end else - node = getfield(p, snake_case(node_id)) - PreallocationRef(node.flow_rate_cache, node_id.idx) + type = if node_id.type == NodeType.Pump + DiffCacheType.flow_rate_pump + elseif node_id.type == NodeType.Outlet + DiffCacheType.flow_rate_outlet + else + errors = true + @error "Cannot set the flow rate of $node_id."
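+ # arbitrary fallback so `ref` has a concrete type; `errors` is set above, so this ref is reported as invalid rather than used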
+ DiffCacheType.flow_rate_pump + end + DiffCacheRef(; type, node_id.idx) end else # Placeholder to obtain correct type - PreallocationRef(cache(1), 0) + DiffCacheRef() end return ref, errors end @@ -594,8 +575,8 @@ end """ Set references to all variables that are listened to by discrete/continuous control """ -function set_listen_variable_refs!(p::Parameters)::Nothing - (; discrete_control, continuous_control) = p +function set_listen_diff_cache_refs!(p_non_diff::ParametersNonDiff)::Nothing + (; discrete_control, continuous_control, state_ranges) = p_non_diff compound_variable_sets = [discrete_control.compound_variables..., continuous_control.compound_variable] errors = false @@ -604,10 +585,13 @@ function set_listen_variable_refs!(p::Parameters)::Nothing for compound_variable in compound_variables (; subvariables) = compound_variable for (j, subvariable) in enumerate(subvariables) - ref, error = - get_variable_ref(p, subvariable.listen_node_id, subvariable.variable) + ref, error = get_diff_cache_ref( + subvariable.listen_node_id, + subvariable.variable, + state_ranges, + ) if !error - subvariables[j] = @set subvariable.variable_ref = ref + subvariables[j] = @set subvariable.diff_cache_ref = ref end errors |= error end @@ -623,29 +607,35 @@ end """ Set references to all variables that are controlled by discrete control """ -function set_discrete_controlled_variable_refs!(p::Parameters)::Nothing - for nodetype in propertynames(p) - node = getfield(p, nodetype) +function set_discrete_controlled_variable_refs!(p_non_diff::ParametersNonDiff)::Nothing + for nodetype in propertynames(p_non_diff) + node = getfield(p_non_diff, nodetype) if node isa AbstractParameterNode && hasfield(typeof(node), :control_mapping) control_mapping::Dict{Tuple{NodeID, String}, ControlStateUpdate} = node.control_mapping for ((node_id, control_state), control_state_update) in control_mapping - (; scalar_update, itp_update) = control_state_update + (; scalar_update, itp_update_linear, itp_update_lookup) = + control_state_update # References to scalar parameters for (i, parameter_update) in enumerate(scalar_update) field = getfield(node, parameter_update.name) - if field isa Cache - field = field[Float64[]] - end scalar_update[i] = @set parameter_update.ref = Ref(field, node_id.idx) end - # References to interpolation parameters - for (i, parameter_update) in enumerate(itp_update) + # References to linear interpolation parameters + for (i, parameter_update) in enumerate(itp_update_linear) field = getfield(node, parameter_update.name) - itp_update[i] = @set parameter_update.ref = Ref(field, node_id.idx) + itp_update_linear[i] = + @set parameter_update.ref = Ref(field, node_id.idx) + end + + # References to index interpolation parameters + for (i, parameter_update) in enumerate(itp_update_lookup) + field = getfield(node, parameter_update.name) + itp_update_lookup[i] = + @set parameter_update.ref = Ref(field, node_id.idx) end # Reference to 'active' parameter if it exists @@ -659,20 +649,20 @@ function set_discrete_controlled_variable_refs!(p::Parameters)::Nothing return nothing end -function set_continuously_controlled_variable_refs!(p::Parameters)::Nothing - (; continuous_control, pid_control, graph) = p +function set_target_ref!( + target_ref::Vector{DiffCacheRef}, + node_id::Vector{NodeID}, + controlled_variable::Vector{String}, + state_ranges::StateRanges, + graph::MetaGraph, +)::Nothing errors = false - for (node, controlled_variable) in ( - (continuous_control, continuous_control.controlled_variable), - (pid_control, 
fill("flow_rate", length(pid_control.node_id))), - ) - for (id, controlled_variable) in zip(node.node_id, controlled_variable) - controlled_node_id = only(outneighbor_labels_type(graph, id, LinkType.control)) - ref, error = - get_variable_ref(p, controlled_node_id, controlled_variable; listen = false) - push!(node.target_ref, ref) - errors |= error - end + for (i, (id, variable)) in enumerate(zip(node_id, controlled_variable)) + controlled_node_id = only(outneighbor_labels_type(graph, id, LinkType.control)) + ref, error = + get_diff_cache_ref(controlled_node_id, variable, state_ranges; listen = false) + target_ref[i] = ref + errors |= error end if errors @@ -727,12 +717,12 @@ function add_control_state!( end # This is a not so great way to get a concrete type, # which is used as a ControlStateUpdate type parameter. - itp_update = if isempty(itp_update) + itp_update_linear = if isempty(itp_update) ParameterUpdate{ScalarInterpolation}[] else [x for x in itp_update] end - control_state_update = ControlStateUpdate(; active, scalar_update, itp_update) + control_state_update = ControlStateUpdate(; active, scalar_update, itp_update_linear) if add_control_state control_mapping[(node_id, control_state)] = control_state_update @@ -744,12 +734,12 @@ end Collect the control mappings of all controllable nodes in the DiscreteControl object for easy access """ -function collect_control_mappings!(p)::Nothing - (; control_mappings) = p.discrete_control +function collect_control_mappings!(p_non_diff::ParametersNonDiff)::Nothing + (; control_mappings) = p_non_diff.discrete_control for node_type in instances(NodeType.T) node_type == NodeType.Terminal && continue - node = getfield(p, snake_case(node_type)) + node = getfield(p_non_diff, snake_case(node_type)) if hasfield(typeof(node), :control_mapping) control_mappings[node_type] = node.control_mapping end @@ -778,24 +768,11 @@ function relaxed_root(x, threshold) end end -function get_jac_prototype(du0, u0, p, t0) - p.all_nodes_active = true - jac_prototype = jacobian_sparsity( - (du, u) -> water_balance!(du, u, p, t0), - du0, - u0, - TracerSparsityDetector(), - ) - p.all_nodes_active = false - jac_prototype -end - # Custom overloads -reduction_factor(x::GradientTracer, threshold::Real) = x -low_storage_factor_resistance_node(storage, q::GradientTracer, inflow_id, outflow_id) = q +reduction_factor(x::GradientTracer, ::Real) = x +low_storage_factor_resistance_node(::Parameters, q::GradientTracer, ::NodeID, ::NodeID) = q relaxed_root(x::GradientTracer, threshold::Real) = x get_level_from_storage(basin::Basin, state_idx::Int, storage::GradientTracer) = storage -stop_declining_negative_storage!(du, u::Vector{<:GradientTracer}) = nothing @kwdef struct MonitoredBackTracking{B, V} linesearch::B = BackTracking() @@ -863,7 +840,7 @@ function OrdinaryDiffEqNonlinearSolve.relax!( end "Create a NamedTuple of the node IDs per state component in the state order" -function state_node_ids(p::Union{Parameters, NamedTuple})::NamedTuple +function state_node_ids(p::Union{ParametersNonDiff, NamedTuple})::NamedTuple (; tabulated_rating_curve = p.tabulated_rating_curve.node_id, pump = p.pump.node_id, @@ -878,15 +855,15 @@ function state_node_ids(p::Union{Parameters, NamedTuple})::NamedTuple ) end -function build_state_vector(p::Parameters) +function build_state_vector(p_non_diff::ParametersNonDiff) # It is assumed that the horizontal flow states come first in - # p.state_inflow_link and p.state_outflow_link - u_ids = state_node_ids(p) - state_ranges = p.state_ranges - u = 
zeros(length(p.node_id)) - # Ensure p.node_id, p.state_ranges and u have the same length and order + # p_non_diff.state_inflow_link and p_non_diff.state_outflow_link + u_ids = state_node_ids(p_non_diff) + state_ranges = p_non_diff.state_ranges + u = zeros(length(p_non_diff.node_id)) + # Ensure p_non_diff.node_id, p_non_diff.state_ranges and u have the same length and order ranges = (getproperty(state_ranges, x) for x in propertynames(state_ranges)) - @assert length(u) == length(p.node_id) == mapreduce(length, +, ranges) + @assert length(u) == length(p_non_diff.node_id) == mapreduce(length, +, ranges) @assert keys(u_ids) == fieldnames(StateRanges) return u end @@ -1027,9 +1004,8 @@ end Check whether any storages are negative given the state u. """ function isoutofdomain(u, p, t) - (; current_storage) = p.basin.current_properties - current_storage = current_storage[u] - formulate_storages!(current_storage, u, u, p, t) + (; current_storage) = p.diff_cache + formulate_storages!(u, p, t) any(<(0), current_storage) end @@ -1048,7 +1024,7 @@ estimating the lowest storage achieved over the last time step. To make sure it is an underestimate of the minimum, 2LOW_STORAGE_THRESHOLD is subtracted from this lowest storage. This is done to not be too strict in clamping the flow in the limiter """ -function min_low_storage_factor(storage_now::Vector{T}, storage_prev, id) where {T} +function min_low_storage_factor(storage_now::AbstractVector{T}, storage_prev, id) where {T} if id.type == NodeType.Basin reduction_factor( min(storage_now[id.idx], storage_prev[id.idx]) - 2LOW_STORAGE_THRESHOLD, @@ -1066,7 +1042,7 @@ it is an underestimate of the minimum, 2USER_DEMAND_MIN_LEVEL_THRESHOLD is subtr This is done to not be too strict in clamping the flow in the limiter """ function min_low_user_demand_level_factor( - level_now::Vector{T}, + level_now::AbstractVector{T}, level_prev, min_level, id_user_demand, @@ -1083,8 +1059,8 @@ function min_low_user_demand_level_factor( end end -function mean_input_flows_subnetwork(p::Parameters, subnetwork_id::Int32) - (; mean_input_flows, subnetwork_ids) = p.allocation +function mean_input_flows_subnetwork(p_non_diff::ParametersNonDiff, subnetwork_id::Int32) + (; mean_input_flows, subnetwork_ids) = p_non_diff.allocation subnetwork_idx = searchsortedfirst(subnetwork_ids, subnetwork_id) return mean_input_flows[subnetwork_idx] end @@ -1146,3 +1122,14 @@ function get_timeseries_tstops( return tstops end + +function ranges(lengths::Vector{<:Integer}) + # from the lengths of the components + # construct [1:n_pump, (n_pump+1):(n_pump+n_outlet)] + # which are used to create views into the data array + bounds = pushfirst!(cumsum(lengths), 0) + ranges = [range(p[1] + 1, p[2]) for p in IterTools.partition(bounds, 2, 1)] + # standardize empty ranges to 1:0 for easier testing + replace!(x -> isempty(x) ? 
(1:0) : x, ranges) + return ranges +end diff --git a/core/src/validation.jl b/core/src/validation.jl index 413aece6e..fcdef5c1b 100644 --- a/core/src/validation.jl +++ b/core/src/validation.jl @@ -92,14 +92,6 @@ controllablefields(::Val{:PidControl}) = Set((:active, :target, :proportional, :integral, :derivative)) controllablefields(nodetype) = Set{Symbol}() -function variable_names(s::Any) - filter(x -> !(x in (:node_id, :control_state)), fieldnames(s)) -end -function variable_nt(s::Any) - names = variable_names(typeof(s)) - NamedTuple{names}((getfield(s, x) for x in names)) -end - "Get the right sort by function (by in `sort(x; by)`) given the Schema" function sort_by end # Not using any fallbacks to avoid forgetting to add the correct sorting. @@ -275,8 +267,8 @@ Test whether static or discrete controlled flow rates are indeed non-negative. """ function valid_flow_rates( node_id::Vector{NodeID}, - flow_rate::Vector, - control_mapping::Dict, + flow_rate::Vector{ScalarInterpolation}, + control_mapping::Dict{Tuple{NodeID, String}, <:ControlStateUpdate}, )::Bool errors = false @@ -287,14 +279,14 @@ function valid_flow_rates( for (key, control_state_update) in pairs(control_mapping) id_controlled = key[1] push!(ids_controlled, id_controlled) - flow_rate_update = only(control_state_update.itp_update) + flow_rate_update = only(control_state_update.itp_update_linear) @assert flow_rate_update.name == :flow_rate flow_rate_ = minimum(flow_rate_update.value.u) if flow_rate_ < 0.0 errors = true control_state = key[2] - @error "$id_controlled flow rates must be non-negative, found $flow_rate_ for control state '$control_state'." + @error "Negative flow rate(s) found for $id_controlled, control state '$control_state'." end end @@ -302,9 +294,9 @@ function valid_flow_rates( if id in ids_controlled continue end - if flow_rate_ < 0.0 + if minimum(flow_rate_.u) < 0.0 errors = true - @error "$id flow rates must be non-negative, found $flow_rate_." + @error "Negative flow rate(s) found for $id." end end @@ -539,7 +531,7 @@ Check: - Whether the supplied truth states have the proper length; - Whether look_ahead is only supplied for condition variables given by a time-series.
""" -function valid_discrete_control(p::Parameters, config::Config)::Bool +function valid_discrete_control(p::ParametersNonDiff, config::Config)::Bool (; discrete_control, graph) = p (; node_id, logic_mapping) = discrete_control diff --git a/core/src/write.jl b/core/src/write.jl index 8b85abfa4..fbb2b480d 100644 --- a/core/src/write.jl +++ b/core/src/write.jl @@ -82,9 +82,9 @@ function get_storages_and_levels( level::Matrix{Float64}, } (; config, integrator, saved) = model - (; p) = integrator + (; p_non_diff) = integrator.p - node_id = p.basin.node_id::Vector{NodeID} + node_id = p_non_diff.basin.node_id::Vector{NodeID} tsteps = datetime_since.(tsaves(model), config.starttime) storage = zeros(length(node_id), length(tsteps)) @@ -101,16 +101,13 @@ end function basin_state_table( model::Model, )::@NamedTuple{node_id::Vector{Int32}, level::Vector{Float64}} - du = get_du(model.integrator) (; u, p, t) = model.integrator + (; current_level) = p.diff_cache # ensure the levels are up-to-date - set_current_basin_properties!(du, u, p, t) + set_current_basin_properties!(u, p, t) - return (; - node_id = Int32.(p.basin.node_id), - level = p.basin.current_properties.current_level[Float64[]], - ) + return (; node_id = Int32.(p.p_non_diff.basin.node_id), level = current_level) end "Create the basin result table from the saved data" @@ -132,7 +129,7 @@ function basin_table( relative_error::Vector{Float64}, } (; saved) = model - (; state_ranges) = model.integrator.p + (; state_ranges) = model.integrator.p.p_non_diff # The last timestep is not included; there is no period over which to compute flows. data = get_storages_and_levels(model) @@ -197,7 +194,7 @@ function solver_stats_table( (; time = datetime_since.( solver_stats.time[1:(end - 1)], - model.integrator.p.starttime, + model.integrator.p.p_non_diff.starttime, ), rhs_calls = diff(solver_stats.rhs_calls), linear_solves = diff(solver_stats.linear_solves), @@ -219,7 +216,8 @@ function flow_table( (; config, saved, integrator) = model (; t, saveval) = saved.flow (; p) = integrator - (; graph) = p + (; p_non_diff) = p + (; graph) = p_non_diff (; flow_links) = graph[] from_node_id = Int32[] @@ -243,7 +241,7 @@ function flow_table( for (j, cvec) in enumerate(saveval) (; flow, flow_boundary) = cvec flow_rate[i + (j - 1) * nflow] = - get_flow(flow, p, 0.0, link; boundary_flow = flow_boundary) + get_flow(flow, p_non_diff, 0.0, link; boundary_flow = flow_boundary) end end @@ -270,8 +268,8 @@ function concentration_table( concentration::Vector{Float64}, } (; saved, integrator) = model - (; p) = integrator - (; basin) = p + (; p_non_diff) = integrator.p + (; basin) = p_non_diff # The last timestep is not included; there is no period over which to compute flows. 
data = get_storages_and_levels(model) @@ -300,7 +298,7 @@ function discrete_control_table( control_state::Vector{String}, } (; config) = model - (; record) = model.integrator.p.discrete_control + (; record) = model.integrator.p.p_non_diff.discrete_control time = datetime_since.(record.time, config.starttime) return (; time, record.control_node_id, record.truth_state, record.control_state) @@ -320,7 +318,7 @@ function allocation_table( realized::Vector{Float64}, } (; config) = model - (; record_demand) = model.integrator.p.allocation + (; record_demand) = model.integrator.p.p_non_diff.allocation time = datetime_since.(record_demand.time, config.starttime) return (; @@ -350,7 +348,7 @@ function allocation_flow_table( optimization_type::Vector{String}, } (; config) = model - (; record_flow) = model.integrator.p.allocation + (; record_flow) = model.integrator.p.p_non_diff.allocation time = datetime_since.(record_flow.time, config.starttime) @@ -377,7 +375,7 @@ function subgrid_level_table( } (; config, saved, integrator) = model (; t, saveval) = saved.subgrid_level - subgrid = integrator.p.subgrid + subgrid = integrator.p.p_non_diff.subgrid nelem = length(subgrid.level) ntsteps = length(t) diff --git a/core/test/allocation_test.jl b/core/test/allocation_test.jl index 000214a85..a7e2bc67f 100644 --- a/core/test/allocation_test.jl +++ b/core/test/allocation_test.jl @@ -6,28 +6,33 @@ toml_path = normpath(@__DIR__, "../../generated_testmodels/subnetwork/ribasim.toml") @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - (; graph, allocation) = p - - allocation.mean_input_flows[1][(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] = - 4.5 - allocation_model = p.allocation.allocation_models[1] + (; p) = model.integrator + (; p_non_diff) = p + (; graph, allocation, user_demand) = p_non_diff + + allocation.mean_input_flows[1][( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] = 4.5 + allocation_model = allocation.allocation_models[1] (; flow) = allocation_model - u = Float64[] t = 0.0 - Ribasim.allocate_demands!(p, allocation_model, t, u) + Ribasim.allocate_demands!(p, allocation_model, t) # Last demand priority (= 2) flows - @test flow[(NodeID(:Basin, 2, p), NodeID(:Pump, 5, p))] ≈ 0.0 - @test flow[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 10, p))] ≈ 0.5 - @test flow[(NodeID(:Basin, 8, p), NodeID(:UserDemand, 12, p))] ≈ 3.0 rtol = 1e-5 - @test flow[(NodeID(:UserDemand, 12, p), NodeID(:Basin, 8, p))] ≈ 1.0 rtol = 1e-5 - @test flow[(NodeID(:Basin, 6, p), NodeID(:Outlet, 7, p))] ≈ 2.0 rtol = 1e-5 - @test flow[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] ≈ 0.5 - @test flow[(NodeID(:Basin, 6, p), NodeID(:UserDemand, 11, p))] ≈ 0.0 - - (; allocated) = p.user_demand + @test flow[(NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 5, p_non_diff))] ≈ 0.0 + @test flow[(NodeID(:Basin, 2, p_non_diff), NodeID(:UserDemand, 10, p_non_diff))] ≈ 0.5 + @test flow[(NodeID(:Basin, 8, p_non_diff), NodeID(:UserDemand, 12, p_non_diff))] ≈ 3.0 rtol = + 1e-5 + @test flow[(NodeID(:UserDemand, 12, p_non_diff), NodeID(:Basin, 8, p_non_diff))] ≈ 1.0 rtol = + 1e-5 + @test flow[(NodeID(:Basin, 6, p_non_diff), NodeID(:Outlet, 7, p_non_diff))] ≈ 2.0 rtol = + 1e-5 + @test flow[(NodeID(:FlowBoundary, 1, p_non_diff), NodeID(:Basin, 2, p_non_diff))] ≈ 0.5 + @test flow[(NodeID(:Basin, 6, p_non_diff), NodeID(:UserDemand, 11, p_non_diff))] ≈ 0.0 + + (; allocated) = user_demand @test allocated[1, :] ≈ [0.0, 0.5] @test allocated[2, :] ≈ [4.0, 0.0] rtol = 1e-5 @test allocated[3, 
:] ≈ [0.0, 3.0] atol = 1e-5 @@ -45,17 +50,18 @@ end model = Ribasim.run(toml_path) @test successful_retcode(model) - (; u, p, t) = model.integrator - (; user_demand) = p - allocation_model = p.allocation.allocation_models[1] - Ribasim.set_initial_values!(allocation_model, u, p, t) - Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, 1) + (; p, t) = model.integrator + (; p_non_diff) = p + (; user_demand, allocation) = p_non_diff + allocation_model = allocation.allocation_models[1] + Ribasim.set_initial_values!(allocation_model, p, t) + Ribasim.set_objective_demand_priority!(allocation_model, p, t, 1) objective = JuMP.objective_function(allocation_model.problem) @test objective isa JuMP.QuadExpr # Quadratic expression F = allocation_model.problem[:F] - to_user_5 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 5, p))] - to_user_6 = F[(NodeID(:Basin, 4, p), NodeID(:UserDemand, 6, p))] + to_user_5 = F[(NodeID(:Basin, 4, p_non_diff), NodeID(:UserDemand, 5, p_non_diff))] + to_user_6 = F[(NodeID(:Basin, 4, p_non_diff), NodeID(:UserDemand, 6, p_non_diff))] @test objective.aff.constant ≈ sum(user_demand.demand) @test objective.aff.terms[to_user_5] ≈ -2.0 @@ -76,34 +82,37 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - (; allocation, graph) = p + (; p_non_diff) = model.integrator.p + (; allocation, graph) = p_non_diff (; main_network_connections, subnetwork_ids, allocation_models) = allocation @test Ribasim.has_main_network(allocation) @test Ribasim.is_main_network(first(subnetwork_ids)) # Connections from main network to subnetworks @test isempty(main_network_connections[1]) - @test only(main_network_connections[3]) == (NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)) - @test only(main_network_connections[5]) == (NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)) - @test only(main_network_connections[7]) == (NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)) + @test only(main_network_connections[3]) == + (NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff)) + @test only(main_network_connections[5]) == + (NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff)) + @test only(main_network_connections[7]) == + (NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff)) # main-sub connections are part of main network allocation network - allocation_model_main_network = Ribasim.get_allocation_model(p, Int32(1)) + allocation_model_main_network = Ribasim.get_allocation_model(p_non_diff, Int32(1)) @test [ - (NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)), - (NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)), - (NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)), + (NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff)), + (NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff)), + (NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff)), ] ⊆ keys(allocation_model_main_network.capacity.data) # In each subnetwork, the connection from the main network to the subnetwork is # interpreted as a source - @test Ribasim.get_allocation_model(p, Int32(3)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] - @test Ribasim.get_allocation_model(p, Int32(5)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] - @test Ribasim.get_allocation_model(p, Int32(7)).problem[:source_main_network].axes[1] == - [(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(3)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 2, p_non_diff), 
NodeID(:Pump, 11, p_non_diff))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(5)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff))] + @test Ribasim.get_allocation_model(p_non_diff, Int32(7)).problem[:source_main_network].axes[1] == + [(NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff))] end @testitem "Allocation with main network optimization problem" begin @@ -120,7 +129,8 @@ end model = Ribasim.Model(toml_path) (; p) = model.integrator - (; allocation, user_demand, graph, basin) = p + (; p_non_diff) = p + (; allocation, user_demand, graph, basin) = p_non_diff (; allocation_models, subnetwork_demands, @@ -133,56 +143,65 @@ end # Collecting demands u = Float64[] for allocation_model in allocation_models[2:end] - Ribasim.collect_demands!(p, allocation_model, t, u) + Ribasim.collect_demands!(p, allocation_model, t) end # See the difference between these values here and in # "subnetworks_with_sources" - @test subnetwork_demands[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] ≈ [4.0, 4.0, 0.0] atol = - 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] ≈ - [0.001, 0.0, 0.0] atol = 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))][1:2] ≈ - [0.001, 0.002] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + )] ≈ [4.0, 4.0, 0.0] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + )] ≈ [0.001, 0.0, 0.0] atol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + )][1:2] ≈ [0.001, 0.002] atol = 1e-4 # Solving for the main network, containing subnetworks as UserDemands allocation_model = allocation_models[1] (; problem) = allocation_model - main_source = - allocation_model.sources[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] + main_source = allocation_model.sources[( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] main_source.capacity_reduced = 4.5 - Ribasim.optimize_demand_priority!( - allocation_model, - u, - p, - t, - 1, - OptimizationType.allocate, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 1, OptimizationType.allocate) # Main network objective function F = problem[:F] objective = JuMP.objective_function(problem) objective_links = keys(objective.terms) - F_1 = F[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] - F_2 = F[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] - F_3 = F[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))] + F_1 = F[(NodeID(:Basin, 2, p_non_diff), NodeID(:Pump, 11, p_non_diff))] + F_2 = F[(NodeID(:Basin, 6, p_non_diff), NodeID(:Pump, 24, p_non_diff))] + F_3 = F[(NodeID(:Basin, 10, p_non_diff), NodeID(:Pump, 38, p_non_diff))] @test JuMP.UnorderedPair(F_1, F_1) ∈ objective_links @test JuMP.UnorderedPair(F_2, F_2) ∈ objective_links @test JuMP.UnorderedPair(F_3, F_3) ∈ objective_links # Running full allocation algorithm (; Δt_allocation) = allocation_models[1] - mean_input_flows[1][(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] = - 4.5 * Δt_allocation + mean_input_flows[1][( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] = 4.5 * Δt_allocation Ribasim.update_allocation!(model.integrator) - @test subnetwork_allocateds[NodeID(:Basin, 2, p), NodeID(:Pump, 11, p)] ≈ - [4.0, 0.49775, 0.0] atol = 1e-4 - @test subnetwork_allocateds[NodeID(:Basin, 6, p), NodeID(:Pump, 24, p)] ≈ - [0.001, 0.0, 0.0] rtol = 1e-3 - 
@test subnetwork_allocateds[NodeID(:Basin, 10, p), NodeID(:Pump, 38, p)] ≈ - [0.001, 0.00024888, 0.0] rtol = 1e-3 + @test subnetwork_allocateds[ + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + ] ≈ [4.0, 0.49775, 0.0] atol = 1e-4 + @test subnetwork_allocateds[ + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + ] ≈ [0.001, 0.0, 0.0] rtol = 1e-3 + @test subnetwork_allocateds[ + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + ] ≈ [0.001, 0.00024888, 0.0] rtol = 1e-3 # Test for existence of links in allocation flow record allocation_flow = DataFrame(record_flow) @@ -190,8 +209,11 @@ end allocation_flow, [:from_node_type, :from_node_id, :to_node_type, :to_node_id] => ByRow( - (a, b, c, d) -> - haskey(graph, NodeID(Symbol(a), b, p), NodeID(Symbol(c), d, p)), + (a, b, c, d) -> haskey( + graph, + NodeID(Symbol(a), b, p_non_diff), + NodeID(Symbol(c), d, p_non_diff), + ), ) => :link_exists, ) @test all(allocation_flow.link_exists) @@ -213,9 +235,9 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - p = model.integrator.p - - (; allocation, user_demand, graph, basin) = p + (; p) = model.integrator + (; p_non_diff) = p + (; allocation, user_demand, graph, basin) = p_non_diff (; allocation_models, subnetwork_demands, @@ -226,31 +248,40 @@ end t = 0.0 # Set flows of sources in subnetworks - mean_input_flows[2][(NodeID(:FlowBoundary, 58, p), NodeID(:Basin, 16, p))] = 1.0 - mean_input_flows[4][(NodeID(:FlowBoundary, 59, p), NodeID(:Basin, 44, p))] = 1e-3 + mean_input_flows[2][( + NodeID(:FlowBoundary, 58, p_non_diff), + NodeID(:Basin, 16, p_non_diff), + )] = 1.0 + mean_input_flows[4][( + NodeID(:FlowBoundary, 59, p_non_diff), + NodeID(:Basin, 44, p_non_diff), + )] = 1e-3 # Collecting demands - u = Float64[] for allocation_model in allocation_models[2:end] - Ribasim.collect_demands!(p, allocation_model, t, u) + Ribasim.collect_demands!(p, allocation_model, t) end # See the difference between these values here and in # "allocation with main network optimization problem", internal sources # lower the subnetwork demands - @test subnetwork_demands[(NodeID(:Basin, 2, p), NodeID(:Pump, 11, p))] ≈ [4.0, 4.0, 0.0] rtol = - 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 6, p), NodeID(:Pump, 24, p))] ≈ - [0.001, 0.0, 0.0] rtol = 1e-4 - @test subnetwork_demands[(NodeID(:Basin, 10, p), NodeID(:Pump, 38, p))][1:2] ≈ - [0.001, 0.001] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Pump, 11, p_non_diff), + )] ≈ [4.0, 4.0, 0.0] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 6, p_non_diff), + NodeID(:Pump, 24, p_non_diff), + )] ≈ [0.001, 0.0, 0.0] rtol = 1e-4 + @test subnetwork_demands[( + NodeID(:Basin, 10, p_non_diff), + NodeID(:Pump, 38, p_non_diff), + )][1:2] ≈ [0.001, 0.001] rtol = 1e-4 model = Ribasim.run(toml_path) (; u, p, t) = model.integrator - (; current_storage) = p.basin.current_properties - current_storage = current_storage[Float64[]] - du = get_du(model.integrator) - Ribasim.formulate_storages!(current_storage, du, u, p, t) + Ribasim.formulate_storages!(u, p, t) + (; current_storage) = p.diff_cache @test current_storage ≈ Float32[ 1.0346908f6, @@ -270,7 +301,7 @@ end 5.619053, 10419.156, 4.057502, - ] + ] rtol = 1e-3 # The output should only contain data for the demand_priority for which # a node has a demand @@ -292,12 +323,14 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/level_demand/ribasim.toml") @test ispath(toml_path) model = Ribasim.Model(toml_path) - - p = 
model.integrator.p - (; user_demand, graph, allocation, basin, level_demand) = p + (; p_non_diff) = model.integrator.p + (; user_demand, graph, allocation, basin, level_demand) = p_non_diff # Initial "integrated" vertical flux - @test allocation.mean_input_flows[1][(NodeID(:Basin, 2, p), NodeID(:Basin, 2, p))] ≈ 1e2 + @test allocation.mean_input_flows[1][( + NodeID(:Basin, 2, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] ≈ 1e2 Ribasim.solve!(model) @@ -309,9 +342,12 @@ end ϕ = 1e-3 # precipitation q = Ribasim.get_flow( du, - p, + p_non_diff, 0.0, - (Ribasim.NodeID(:FlowBoundary, 1, p), Ribasim.NodeID(:Basin, 2, p)), + ( + Ribasim.NodeID(:FlowBoundary, 1, p_non_diff), + Ribasim.NodeID(:Basin, 2, p_non_diff), + ), ) A = Ribasim.basin_areas(basin, 1)[1] l_max = level_demand.max_level[1](0) @@ -404,7 +440,8 @@ end @test ispath(toml_path) model = Ribasim.Model(toml_path) (; p) = model.integrator - (; graph, allocation, flow_demand, user_demand, level_boundary) = p + (; p_non_diff) = p + (; graph, allocation, flow_demand, user_demand, level_boundary) = p_non_diff # Test has_external_demand @test !any( @@ -413,7 +450,7 @@ end ) @test Ribasim.has_external_demand( graph, - NodeID(:TabulatedRatingCurve, 2, p), + NodeID(:TabulatedRatingCurve, 2, p_non_diff), :flow_demand, )[1] @@ -425,33 +462,27 @@ end F_flow_buffer_in = problem[:F_flow_buffer_in] F_flow_buffer_out = problem[:F_flow_buffer_out] - node_id_with_flow_demand = NodeID(:TabulatedRatingCurve, 2, p) + node_id_with_flow_demand = NodeID(:TabulatedRatingCurve, 2, p_non_diff) # Test flow conservation constraint containing flow buffer constraint_with_flow_buffer = JuMP.constraint_object(problem[:flow_conservation][node_id_with_flow_demand]) @test constraint_with_flow_buffer.func == - F[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)] - - F[(node_id_with_flow_demand, NodeID(:Basin, 3, p))] - + F[(NodeID(:LevelBoundary, 1, p_non_diff), node_id_with_flow_demand)] - + F[(node_id_with_flow_demand, NodeID(:Basin, 3, p_non_diff))] - F_flow_buffer_in[node_id_with_flow_demand] + F_flow_buffer_out[node_id_with_flow_demand] t = 0.0 - (; u) = model.integrator optimization_type = OptimizationType.internal_sources - Ribasim.set_initial_values!(allocation_model, u, p, t) - sources[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)].capacity_reduced = - 2e-3 + Ribasim.set_initial_values!(allocation_model, p, t) + sources[( + NodeID(:LevelBoundary, 1, p_non_diff), + node_id_with_flow_demand, + )].capacity_reduced = 2e-3 # Priority 1 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 1, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 1, optimization_type) objective = JuMP.objective_function(problem) @test JuMP.UnorderedPair( F_flow_buffer_in[node_id_with_flow_demand], @@ -462,50 +493,30 @@ end @test flow_demand.demand[1] ≈ flow_demand.demand_itp[1](t) - 0.001 rtol = 1e-3 ## Priority 2 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 2, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 2, optimization_type) # No demand left @test flow_demand.demand[1] < 1e-10 # Allocated @test JuMP.value(only(F_flow_buffer_in)) ≈ only(flow_demand.demand) atol = 1e-10 ## Priority 3 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 3, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 3, optimization_type) # The flow from the source is 
used up in previous demand priorities - @test flow[(NodeID(:LevelBoundary, 1, p), node_id_with_flow_demand)] ≈ 0 atol = 1e-10 + @test flow[(NodeID(:LevelBoundary, 1, p_non_diff), node_id_with_flow_demand)] ≈ 0 atol = + 1e-10 # So flow from the flow buffer is used for UserDemand #4 - @test flow[(node_id_with_flow_demand, NodeID(:Basin, 3, p))] ≈ 0.001 - @test flow[(NodeID(:Basin, 3, p), NodeID(:UserDemand, 4, p))] ≈ 0.001 + @test flow[(node_id_with_flow_demand, NodeID(:Basin, 3, p_non_diff))] ≈ 0.001 + @test flow[(NodeID(:Basin, 3, p_non_diff), NodeID(:UserDemand, 4, p_non_diff))] ≈ 0.001 # No flow coming from level boundary @test JuMP.value(F[(only(level_boundary.node_id), node_id_with_flow_demand)]) ≈ 0 atol = 1e-10 ## Priority 4 - Ribasim.optimize_demand_priority!( - allocation_model, - model.integrator.u, - p, - t, - 4, - optimization_type, - ) + Ribasim.optimize_demand_priority!(allocation_model, p, t, 4, optimization_type) # Realized flow demand model = Ribasim.run(toml_path) - record_demand = DataFrame(model.integrator.p.allocation.record_demand) + record_demand = DataFrame(model.integrator.p.p_non_diff.allocation.record_demand) df_rating_curve_2 = record_demand[record_demand.node_id .== 2, :] @test all(df_rating_curve_2.realized .≈ 0.002) @@ -558,12 +569,16 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; p) = model.integrator + (; p_non_diff) = model.integrator.p + (; allocation) = p_non_diff # Test for pump max flow capacity constraint - (; problem) = p.allocation.allocation_models[1] + (; problem) = allocation.allocation_models[1] constraint = JuMP.constraint_object( - problem[:capacity][(NodeID(:Basin, 1, p), NodeID(:LinearResistance, 2, p))], + problem[:capacity][( + NodeID(:Basin, 1, p_non_diff), + NodeID(:LinearResistance, 2, p_non_diff), + )], ) @test constraint.set.upper == 2.0 end @@ -577,7 +592,7 @@ end normpath(@__DIR__, "../../generated_testmodels/fair_distribution/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - (; user_demand, graph) = model.integrator.p + (; user_demand, graph) = model.integrator.p.p_non_diff data_allocation = DataFrame(Ribasim.allocation_table(model)) fractions = Vector{Float64}[] @@ -601,22 +616,27 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/level_demand/ribasim.toml") model = Ribasim.Model(toml_path) (; p) = model.integrator + (; p_non_diff) = p t = 0.0 - u = model.integrator.u demand_priority_idx = 2 - allocation_model = first(p.allocation.allocation_models) - Ribasim.set_initial_values!(allocation_model, u, p, t) - Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, demand_priority_idx) + allocation_model = first(p_non_diff.allocation.allocation_models) + Ribasim.set_initial_values!(allocation_model, p, t) + Ribasim.set_objective_demand_priority!(allocation_model, p, t, demand_priority_idx) Ribasim.allocate_to_users_from_connected_basin!( allocation_model, - p, + p_non_diff, demand_priority_idx, ) flow_data = allocation_model.flow.data - @test flow_data[(NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))] == 0.0 - @test flow_data[(NodeID(:Basin, 2, p), NodeID(:UserDemand, 3, p))] == 0.0015 - @test flow_data[(NodeID(:UserDemand, 3, p), NodeID(:Basin, 5, p))] == 0.0 + @test flow_data[( + NodeID(:FlowBoundary, 1, p_non_diff), + NodeID(:Basin, 2, p_non_diff), + )] == 0.0 + @test flow_data[(NodeID(:Basin, 2, p_non_diff), NodeID(:UserDemand, 3, p_non_diff))] == + 0.0015 + @test flow_data[(NodeID(:UserDemand, 3, p_non_diff), NodeID(:Basin, 5, p_non_diff))] == + 0.0 end 
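The tests above all use the same destructuring idiom, so here is a minimal sketch of the access pattern this PR introduces; it assumes only the field names visible in this diff (`p_non_diff`, `diff_cache`, `current_storage`), not the full struct definitions:

# Sketch: reading model data and simulation caches after the Parameters split.
(; u, p, t) = model.integrator
(; p_non_diff, diff_cache) = p          # Parameters now wraps both parts
basin_ids = p_non_diff.basin.node_id    # static model structure lives in p_non_diff
Ribasim.formulate_storages!(u, p, t)    # fills the preallocated cache in place
storages = diff_cache.current_storage   # AD-compatible results live in diff_cache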
@testitem "level_demand_without_max_level" begin @@ -627,16 +647,16 @@ end @test ispath(toml_path) model = Ribasim.Model(toml_path) (; p, u, t) = model.integrator - (; allocation_models) = p.allocation - (; basin, level_demand, graph) = p + (; basin, level_demand, graph, allocation) = p.p_non_diff + (; allocation_models) = allocation level_demand.max_level[1].u .= Inf level_demand.max_level[2].u .= Inf # Given a max_level of Inf, the basin capacity is 0.0 because it is not possible for the basin level to be > Inf - @test Ribasim.get_basin_capacity(allocation_models[1], u, p, t, basin.node_id[1]) == 0.0 - @test Ribasim.get_basin_capacity(allocation_models[1], u, p, t, basin.node_id[2]) == 0.0 - @test Ribasim.get_basin_capacity(allocation_models[2], u, p, t, basin.node_id[3]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[1], p, t, basin.node_id[1]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[1], p, t, basin.node_id[2]) == 0.0 + @test Ribasim.get_basin_capacity(allocation_models[2], p, t, basin.node_id[3]) == 0.0 end @testitem "cyclic_demand" begin @@ -645,7 +665,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/cyclic_demand/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - (; level_demand, user_demand, flow_demand) = model.integrator.p + (; level_demand, user_demand, flow_demand) = model.integrator.p.p_non_diff function test_extrapolation(itp) @test itp.extrapolation_left == Periodic diff --git a/core/test/bmi_test.jl b/core/test/bmi_test.jl index 37f377b5c..961552073 100644 --- a/core/test/bmi_test.jl +++ b/core/test/bmi_test.jl @@ -50,7 +50,7 @@ end BMI.update_until(model, 86400.0) storage = BMI.get_value_ptr(model, "basin.storage") # get_value_ptr does not copy - @test storage0 === storage != ones(4) + @test storage0 == storage != ones(4) end @testitem "get_value_ptr_all_values" begin @@ -89,7 +89,7 @@ end demand = BMI.get_value_ptr(model, "user_demand.demand") inflow = BMI.get_value_ptr(model, "user_demand.cumulative_inflow") # One year in seconds - year = model.integrator.p.user_demand.demand_itp[2][1].t[2] + year = model.integrator.p.p_non_diff.user_demand.demand_itp[2][1].t[2] demand_start = 1e-3 slope = 1e-3 / year day = 86400.0 diff --git a/core/test/config_test.jl b/core/test/config_test.jl index 3eccac95a..864b3a8a6 100644 --- a/core/test/config_test.jl +++ b/core/test/config_test.jl @@ -57,7 +57,7 @@ end Solver(; algorithm = "DoesntExist"), ) @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF", autodiff = true))) == - AutoForwardDiff() + AutoForwardDiff(; tag = :Ribasim) @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF", autodiff = false))) == AutoFiniteDiff() @test alg_autodiff(algorithm(Solver(; algorithm = "QNDF"))) == AutoFiniteDiff() diff --git a/core/test/control_test.jl b/core/test/control_test.jl index eb1aa5cd1..fd71cfb43 100644 --- a/core/test/control_test.jl +++ b/core/test/control_test.jl @@ -9,16 +9,20 @@ normpath(@__DIR__, "../../generated_testmodels/pump_discrete_control/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, graph, state_ranges) = p + (; p_non_diff) = model.integrator.p + (; discrete_control, pump, graph, state_ranges) = p_non_diff # Control input(flow rates) - pump_control_mapping = p.pump.control_mapping + pump_control_mapping = pump.control_mapping @test unique( - only(pump_control_mapping[(NodeID(:Pump, 4, p), "off")].itp_update).value.u, + only( + pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), 
"off")].itp_update_linear, + ).value.u, ) == [0] @test unique( - only(pump_control_mapping[(NodeID(:Pump, 4, p), "on")].itp_update).value.u, + only( + pump_control_mapping[(NodeID(:Pump, 4, p_non_diff), "on")].itp_update_linear, + ).value.u, ) == [1.0e-5] logic_mapping::Vector{Dict{Vector{Bool}, String}} = [ @@ -73,8 +77,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/flow_condition/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, flow_boundary) = p + (; discrete_control, flow_boundary) = model.integrator.p.p_non_diff Δt = discrete_control.compound_variables[1][1].subvariables[1].look_ahead @@ -97,8 +100,7 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; discrete_control, level_boundary) = p + (; discrete_control, level_boundary) = model.integrator.p.p_non_diff Δt = discrete_control.compound_variables[1][1].subvariables[1].look_ahead @@ -118,8 +120,7 @@ end toml_path = normpath(@__DIR__, "../../generated_testmodels/pid_control/ribasim.toml") @test ispath(toml_path) model = Ribasim.run(toml_path) - p = model.integrator.p - (; basin, pid_control, flow_boundary) = p + (; basin, pid_control, flow_boundary) = model.integrator.p.p_non_diff level = Ribasim.get_storages_and_levels(model).level[1, :] t = Ribasim.tsaves(model) @@ -162,7 +163,7 @@ end ) @test ispath(toml_path) model = Ribasim.Model(toml_path) - (; discrete_control, tabulated_rating_curve) = model.integrator.p + (; discrete_control, tabulated_rating_curve) = model.integrator.p.p_non_diff (; current_interpolation_index, interpolations) = tabulated_rating_curve index_high, index_low = 1, 2 @@ -194,20 +195,20 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; discrete_control, pid_control) = p + (; p_non_diff) = model.integrator.p + (; discrete_control, pid_control) = p_non_diff t = Ribasim.tsaves(model) level = Ribasim.get_storages_and_levels(model).level[1, :] target_high = pid_control.control_mapping[( - NodeID(:PidControl, 6, p), + NodeID(:PidControl, 6, p_non_diff), "target_high", - )].itp_update[1].value.u[1] + )].itp_update_linear[1].value.u[1] target_low = pid_control.control_mapping[( - NodeID(:PidControl, 6, p), + NodeID(:PidControl, 6, p_non_diff), "target_low", - )].itp_update[1].value.u[1] + )].itp_update_linear[1].value.u[1] t_target_jump = discrete_control.record.time[2] t_idx_target_jump = searchsortedlast(t, t_target_jump) @@ -225,22 +226,22 @@ end ) @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; discrete_control) = p + (; p_non_diff) = model.integrator.p + (; discrete_control) = p_non_diff (; compound_variables, record) = discrete_control compound_variable = only(only(compound_variables)) @test compound_variable.subvariables[1] == SubVariable(; - listen_node_id = NodeID(:FlowBoundary, 2, p), - variable_ref = compound_variable.subvariables[1].variable_ref, + listen_node_id = NodeID(:FlowBoundary, 2, p_non_diff), + diff_cache_ref = compound_variable.subvariables[1].diff_cache_ref, variable = "flow_rate", weight = 0.5, look_ahead = 0.0, ) @test compound_variable.subvariables[2] == SubVariable(; - listen_node_id = NodeID(:FlowBoundary, 3, p), - variable_ref = compound_variable.subvariables[2].variable_ref, + listen_node_id = NodeID(:FlowBoundary, 3, p_non_diff), + diff_cache_ref = compound_variable.subvariables[2].diff_cache_ref, variable = "flow_rate", weight = 0.5, look_ahead = 0.0, @@ -260,12 +261,13 @@ 
end @test ispath(toml_path) model = Ribasim.run(toml_path) - (; p) = model.integrator - (; record) = p.discrete_control + (; p_non_diff) = model.integrator.p + (; starttime, discrete_control) = p_non_diff + (; record) = discrete_control @test record.truth_state == ["T", "F"] @test record.control_state == ["On", "Off"] - t_switch = Ribasim.datetime_since(record.time[2], p.starttime) + t_switch = Ribasim.datetime_since(record.time[2], starttime) flow_table = DataFrame(Ribasim.flow_table(model)) @test all(filter(:time => time -> time <= t_switch, flow_table).flow_rate .> -1e-12) @test all( @@ -317,7 +319,7 @@ end flow_link_0 = filter(:link_id => id -> id == 0, flow_data) t = Ribasim.seconds_since.(flow_link_0.time, model.config.starttime) itp = - model.integrator.p.basin.concentration_data.concentration_external[1]["concentration_external.kryptonite"] + model.integrator.p.p_non_diff.basin.concentration_data.concentration_external[1]["concentration_external.kryptonite"] concentration = itp.(t) threshold = 0.5 above_threshold = concentration .> threshold @@ -332,7 +334,7 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) - (; record, compound_variables) = model.integrator.p.discrete_control + (; record, compound_variables) = model.integrator.p.p_non_diff.discrete_control itp = compound_variables[1][1].greater_than[1] @test itp.extrapolation_left == Periodic diff --git a/core/test/equations_test.jl b/core/test/equations_test.jl index 89b5c4b1c..7c98a32ea 100644 --- a/core/test/equations_test.jl +++ b/core/test/equations_test.jl @@ -23,15 +23,15 @@ @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - (; p) = model.integrator + (; level_boundary, basin, linear_resistance) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage = Ribasim.get_storages_and_levels(model).storage[1, :] - A = Ribasim.basin_areas(p.basin, 1)[2] # needs to be constant + A = Ribasim.basin_areas(basin, 1)[2] # needs to be constant u0 = A * 10.0 - L = p.level_boundary.level[1].u[1] - R = p.linear_resistance.resistance[1] - Q_max = p.linear_resistance.max_flow_rate[1] + L = level_boundary.level[1].u[1] + R = linear_resistance.resistance[1] + Q_max = linear_resistance.max_flow_rate[1] # derivation in https://github.com/Deltares/Ribasim/pull/1100#issuecomment-1934799342 t_shift = (u0 - A * (L + R * Q_max)) / Q_max @@ -55,11 +55,11 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p + (; basin) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage = Ribasim.get_storages_and_levels(model).storage[1, :] - basin_area = Ribasim.basin_areas(p.basin, 1)[2] + basin_area = Ribasim.basin_areas(basin, 1)[2] storage_min = 50.005 α = 24 * 60 * 60 storage_analytic = @@ -90,15 +90,14 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; manning_resistance) = p + (; manning_resistance, basin) = model.integrator.p.p_non_diff t = Ribasim.tsaves(model) storage_both = Ribasim.get_storages_and_levels(model).storage storage = storage_both[1, :] storage_min = 50.005 level_min = 1.0 - basin_area = Ribasim.basin_areas(p.basin, 1)[2] + basin_area = Ribasim.basin_areas(basin, 1)[2] level = @. 
level_min + (storage - storage_min) / basin_area C = sum(storage_both[:, 1]) Λ = 2 * level_min + (C - 2 * storage_min) / basin_area @@ -127,8 +126,7 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; basin, pid_control) = p + (; basin, pid_control) = model.integrator.p.p_non_diff storage = Ribasim.get_storages_and_levels(model).storage[:] t = Ribasim.tsaves(model) @@ -178,13 +176,11 @@ end @test config.solver.dt === model.integrator.dt Ribasim.solve!(model) @test successful_retcode(model) - p = model.integrator.p - (; flow_boundary, pump) = p + (; p_non_diff) = model.integrator.p + (; flow_boundary, pump) = p_non_diff q_boundary = flow_boundary.flow_rate[1].u[1] - pump_flow_rate = pump.flow_rate_cache[Float64[]] - q_pump = pump_flow_rate[1] - + q_pump = pump.flow_rate[1].u[1] storage_both = get_storages_and_levels(model).storage t = tsaves(model) tspan = model.integrator.sol.prob.tspan diff --git a/core/test/io_test.jl b/core/test/io_test.jl index b3b4eede9..bd929edcf 100644 --- a/core/test/io_test.jl +++ b/core/test/io_test.jl @@ -137,10 +137,11 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - storage1_begin = - copy(model.integrator.p.basin.current_properties.current_storage[Float64[]]) + (; p_non_diff, diff_cache) = model.integrator.p + (; current_storage) = diff_cache + storage1_begin = copy(current_storage) solve!(model) - storage1_end = model.integrator.p.basin.current_properties.current_storage[Float64[]] + storage1_end = current_storage @test storage1_begin != storage1_end # copy state results to input @@ -156,6 +157,8 @@ end end model = Ribasim.Model(toml_path) - storage2_begin = model.integrator.p.basin.current_properties.current_storage[Float64[]] + (; p_non_diff, diff_cache) = model.integrator.p + (; current_storage) = diff_cache + storage2_begin = current_storage @test storage1_end ≈ storage2_begin end diff --git a/core/test/run_models_test.jl b/core/test/run_models_test.jl index 3a87f12b5..630f9ca53 100644 --- a/core/test/run_models_test.jl +++ b/core/test/run_models_test.jl @@ -20,10 +20,10 @@ model = Ribasim.run(config) @test model isa Ribasim.Model @test successful_retcode(model) - (; p) = model.integrator + (; p_non_diff) = model.integrator.p - @test p.node_id == [0, 6, 6] - @test p.state_ranges == + @test p_non_diff.node_id == [0, 6, 6] + @test p_non_diff.state_ranges == StateRanges(; tabulated_rating_curve = 1:1, evaporation = 2:2, infiltration = 3:3) @test !ispath(control_path) @@ -93,7 +93,7 @@ # t0 has no flow, 2 flow links @test nrow(flow) == (nsaved - 1) * 2 @test nrow(basin) == nsaved - 1 - @test nrow(subgrid) == nsaved * length(p.subgrid.level) + @test nrow(subgrid) == nsaved * length(p_non_diff.subgrid.level) end @testset "Results values" begin @@ -116,12 +116,12 @@ # The exporter interpolates 1:1 for three subgrid elements, but shifted by 1.0 meter. 
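+ # (subgrid 11 follows the basin level, 22 lies 1.0 m below it, 33 lies 1.0 m above it)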
basin_level = basin.level[1] - @test length(p.subgrid.level) == 3 - @test diff(p.subgrid.level) ≈ [-1.0, 2.0] + @test length(p_non_diff.subgrid.level) == 3 + @test diff(p_non_diff.subgrid.level) ≈ [-1.0, 2.0] @test subgrid.subgrid_id[1:3] == [11, 22, 33] @test subgrid.subgrid_level[1:3] ≈ [basin_level, basin_level - 1.0, basin_level + 1.0] - @test subgrid.subgrid_level[(end - 2):end] == p.subgrid.level + @test subgrid.subgrid_level[(end - 2):end] == p_non_diff.subgrid.level end end @@ -133,8 +133,9 @@ end @test ispath(toml_path) model = Ribasim.run(toml_path) @test model isa Ribasim.Model - (; basin, state_ranges) = model.integrator.p - @test basin.current_properties.current_storage[Float64[]] ≈ [1000] + (; p_non_diff, diff_cache) = model.integrator.p + (; basin, state_ranges) = p_non_diff + @test diff_cache.current_storage ≈ [1000] @test basin.vertical_flux.precipitation == [0.0] @test basin.vertical_flux.drainage == [0.0] du = get_du(model.integrator) @@ -158,12 +159,15 @@ end (; integrator) = model du = get_du(integrator) (; u, p, t) = integrator + (; p_non_diff, diff_cache) = p + (; basin, state_ranges) = p_non_diff + Ribasim.water_balance!(du, u, p, t) - stor = integrator.p.basin.current_properties.current_storage[du] - prec = p.basin.vertical_flux.precipitation - evap = view(du, p.state_ranges.evaporation) - drng = p.basin.vertical_flux.drainage - infl = view(du, p.state_ranges.infiltration) + stor = diff_cache.current_storage + prec = basin.vertical_flux.precipitation + evap = view(du, state_ranges.evaporation) + drng = basin.vertical_flux.drainage + infl = view(du, state_ranges.infiltration) # The dynamic data has missings, but these are not set. @test prec == [0.0] @test evap == [0.0] @@ -207,19 +211,20 @@ end (; integrator) = model (; p, alg) = integrator + (; p_non_diff, diff_cache) = p @test p isa Ribasim.Parameters - @test isconcretetype(typeof(p)) - @test all(isconcretetype, fieldtypes(typeof(p))) - @test p.node_id == [4, 5, 8, 7, 10, 12, 2, 1, 3, 6, 9, 1, 3, 6, 9] + @test isconcretetype(typeof(p_non_diff)) + @test all(isconcretetype, fieldtypes(typeof(p_non_diff))) + @test p_non_diff.node_id == [4, 5, 8, 7, 10, 12, 2, 1, 3, 6, 9, 1, 3, 6, 9] @test alg isa QNDF @test alg.step_limiter! == Ribasim.limit_flow! 
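+ # limit_flow! (see the limit_flow! hunk above) clamps flows using the storage and level attained in the previous time step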
@test successful_retcode(model) @test length(model.integrator.sol) == 2 # start and end - @test model.integrator.p.basin.current_properties.current_storage[Float64[]] ≈ - Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = Sys.isapple() atol = 1.5 + @test diff_cache.current_storage ≈ Float32[804.22156, 803.6474, 495.18243, 1318.3053] skip = + Sys.isapple() atol = 1.5 @test length(logger.logs) > 10 @test logger.logs[1].level == Debug @@ -257,11 +262,11 @@ end @test model isa Ribasim.Model @test successful_retcode(model) @test allunique(Ribasim.tsaves(model)) - du = get_du(model.integrator) - precipitation = model.integrator.p.basin.vertical_flux.precipitation + (; p_non_diff, diff_cache) = model.integrator.p + precipitation = p_non_diff.basin.vertical_flux.precipitation @test length(precipitation) == 4 - @test model.integrator.p.basin.current_properties.current_storage[du] ≈ - Float32[698.6895, 698.143, 420.57407, 1334.486] atol = 2.0 skip = Sys.isapple() + @test diff_cache.current_storage ≈ Float32[698.6895, 698.143, 420.57407, 1334.486] atol = + 2.0 skip = Sys.isapple() end @testitem "Allocation example model" begin @@ -315,9 +320,9 @@ end model = Ribasim.run(toml_path) @test model isa Ribasim.Model @test successful_retcode(model) - @test model.integrator.p.basin.current_properties.current_storage[Float64[]] ≈ - Float32[368.31558, 365.68442] skip = Sys.isapple() - (; tabulated_rating_curve) = model.integrator.p + (; p_non_diff, diff_cache) = model.integrator.p + @test diff_cache.current_storage ≈ Float32[368.31558, 365.68442] skip = Sys.isapple() + (; tabulated_rating_curve) = p_non_diff # The first node is static, the first interpolation object always applies index_itp1 = tabulated_rating_curve.current_interpolation_index[1] @test only(index_itp1.u) == 1 @@ -343,8 +348,7 @@ end model = Ribasim.run(toml_path) @test successful_retcode(model) - p = model.integrator.p - (; level_boundary, outlet) = p + (; level_boundary, outlet) = model.integrator.p.p_non_diff (; level) = level_boundary level = level[1] @@ -381,23 +385,24 @@ end (; integrator) = model (; u, p, t, sol) = integrator - current_storage = p.basin.current_properties.current_storage[Float64[]] + (; p_non_diff, diff_cache) = p + (; p_non_diff, diff_cache) = model.integrator.p day = 86400.0 - @test only(current_storage) ≈ 1000.0 + @test only(diff_cache.current_storage) ≈ 1000.0 # constant UserDemand withdraws to 0.9m or 900m3 due to min level = 0.9 BMI.update_until(model, 150day) - formulate_storages!(current_storage, u, u, p, t) - @test only(current_storage) ≈ 900 atol = 5 + formulate_storages!(u, p, t) + @test only(diff_cache.current_storage) ≈ 900 atol = 5 # dynamic UserDemand withdraws to 0.5m or 500m3 due to min level = 0.5 BMI.update_until(model, 220day) - formulate_storages!(current_storage, u, u, p, t) - @test only(current_storage) ≈ 500 atol = 1 + formulate_storages!(u, p, t) + @test only(diff_cache.current_storage) ≈ 500 atol = 1 # Trasient return factor flow = DataFrame(Ribasim.flow_table(model)) - return_factor_itp = model.integrator.p.user_demand.return_factor[3] + return_factor_itp = p_non_diff.user_demand.return_factor[3] flow_in = filter([:from_node_id, :to_node_id] => (from, to) -> (from, to) == (1, 4), flow) flow_out = @@ -411,7 +416,6 @@ end end @testitem "ManningResistance" begin - using PreallocationTools: get_tmp using SciMLBase: successful_retcode using OrdinaryDiffEqCore: get_du using Ribasim: NodeID @@ -477,7 +481,9 @@ end du = get_du(model.integrator) (; p, t) = model.integrator - h_actual = 
+    (; p_non_diff, diff_cache) = p
+    (; current_level) = diff_cache
+    h_actual = current_level[1:50]

     x = collect(10.0:20.0:990.0)
     h_expected = standard_step_method(x, 5.0, 1.0, 0.04, h_actual[end], 1.0e-6)
@@ -487,13 +493,17 @@ end
     # https://www.hec.usace.army.mil/confluence/rasdocs/ras1dtechref/latest/theoretical-basis-for-one-dimensional-and-two-dimensional-hydrodynamic-calculations/1d-steady-flow-water-surface-profiles/friction-loss-evaluation
     @test all(isapprox.(h_expected, h_actual; atol = 0.02))
     # Test for conservation of mass, flow at the beginning == flow at the end
-    @test Ribasim.get_flow(du, p, t, (NodeID(:FlowBoundary, 1, p), NodeID(:Basin, 2, p))) ≈
-          5.0 atol = 0.001 skip = Sys.isapple()
     @test Ribasim.get_flow(
         du,
-        p,
+        p_non_diff,
+        t,
+        (NodeID(:FlowBoundary, 1, p_non_diff), NodeID(:Basin, 2, p_non_diff)),
+    ) ≈ 5.0 atol = 0.001 skip = Sys.isapple()
+    @test Ribasim.get_flow(
+        du,
+        p_non_diff,
         t,
-        (NodeID(:ManningResistance, 101, p), NodeID(:Basin, 102, p)),
+        (NodeID(:ManningResistance, 101, p_non_diff), NodeID(:Basin, 102, p_non_diff)),
     ) ≈ 5.0 atol = 0.001 skip = Sys.isapple()
 end
@@ -632,5 +642,5 @@ end
     basin_level = copy(BMI.get_value_ptr(model, "basin.level"))
     basin_level[2] += 1
     @test basin_level ≈ df.subgrid_level[(end - 1):end]
-    @test basin_level ≈ model.integrator.p.subgrid.level
+    @test basin_level ≈ model.integrator.p.p_non_diff.subgrid.level
 end
diff --git a/core/test/time_test.jl b/core/test/time_test.jl
index 913f45e8c..5a29cc9ef 100644
--- a/core/test/time_test.jl
+++ b/core/test/time_test.jl
@@ -31,8 +31,7 @@ end
     @test ispath(toml_path)
     config = Ribasim.Config(toml_path; solver_saveat = 0)
     model = Ribasim.run(toml_path)
-    (; p) = model.integrator
-    (; basin) = p
+    (; basin) = model.integrator.p.p_non_diff
     n_basin = length(basin.node_id)

     basin_table = DataFrame(Ribasim.basin_table(model))
@@ -64,7 +63,7 @@ end
         solver_algorithm = "Euler",
     )
     model = Ribasim.Model(config)
-    (; basin) = model.integrator.p
+    (; basin) = model.integrator.p.p_non_diff
     starting_precipitation =
         basin.vertical_flux.precipitation[1] * Ribasim.basin_areas(basin, 1)[end]
     BMI.update_until(model, saveat)
@@ -95,7 +94,7 @@ end
     @test ispath(toml_path)

     model = Ribasim.Model(toml_path)
-    (; level_boundary, flow_boundary, basin) = model.integrator.p
+    (; level_boundary, flow_boundary, basin) = model.integrator.p.p_non_diff

     function test_extrapolation(itp)
         @test itp.extrapolation_left == Periodic
@@ -107,7 +106,7 @@ end
     test_extrapolation(flow_boundary.flow_rate[1])
 end

-@testitem "transient_pump_weir" begin
+@testitem "transient_pump_outlet" begin
     using DataFrames: DataFrame

     toml_path =
@@ -119,6 +118,6 @@ end
     @test all(isapprox.(storage[1, 2:end], storage[1, end]; rtol = 1e-4))

     t_end = model.integrator.t
-    flow_rate_end = model.integrator.p.pump.flow_rate[1].u[end]
+    flow_rate_end = model.integrator.p.p_non_diff.pump.flow_rate[1].u[end]
     @test storage[2, end] ≈ storage[2, 1] + 0.5 * flow_rate_end * t_end
 end
diff --git a/core/test/utils_test.jl b/core/test/utils_test.jl
index c2230cad3..3e763eccd 100644
--- a/core/test/utils_test.jl
+++ b/core/test/utils_test.jl
@@ -30,11 +30,6 @@ end
         concentration_time = StructVector{Ribasim.BasinConcentrationV1}(undef, 0),
     )

-    (; current_level, current_area) = basin.current_properties
-
-    current_level[Float64[]] .= [2.0, 3.0]
-    current_area[Float64[]] .= [2.0, 3.0]
-
     @test Ribasim.basin_levels(basin, 2)[1] === 4.0
     @test Ribasim.basin_bottom(basin, NodeID(:Basin, 5, 1))[2] === 0.0
     @test Ribasim.basin_bottom(basin, NodeID(:Basin, 7, 2))[2] === 4.0
@@ -257,16 +252,16 @@ end
     toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml")

-    cfg = Ribasim.Config(toml_path)
-    db_path = Ribasim.database_path(cfg)
+    config = Ribasim.Config(toml_path)
+    db_path = Ribasim.database_path(config)
     db = SQLite.DB(db_path)
-    p = Ribasim.Parameters(db, cfg)
+    p = Ribasim.Parameters(db, config)
     close(db)
     t0 = 0.0
-    u0 = Ribasim.build_state_vector(p)
+    u0 = Ribasim.build_state_vector(p.p_non_diff)
     du0 = copy(u0)
-    jac_prototype = Ribasim.get_jac_prototype(du0, u0, p, t0)
+    jac_prototype, _, _ = Ribasim.get_diff_eval(du0, u0, p, config.solver)
     # rows, cols, _ = findnz(jac_prototype)
     #! format: off
@@ -279,15 +274,16 @@ end
     toml_path = normpath(@__DIR__, "../../generated_testmodels/pid_control/ribasim.toml")

-    cfg = Ribasim.Config(toml_path)
-    db_path = Ribasim.database_path(cfg)
+    config = Ribasim.Config(toml_path)
+    db_path = Ribasim.database_path(config)
     db = SQLite.DB(db_path)
-    p = Ribasim.Parameters(db, cfg)
+    p = Ribasim.Parameters(db, config)
+    (; p_non_diff) = p
     close(db)

-    u0 = Ribasim.build_state_vector(p)
+    u0 = Ribasim.build_state_vector(p_non_diff)
     du0 = copy(u0)
-    jac_prototype = Ribasim.get_jac_prototype(du0, u0, p, t0)
+    jac_prototype, _, _ = Ribasim.get_diff_eval(du0, u0, p, config.solver)
     #! format: off
     rows_expected = [1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2]
@@ -365,7 +361,7 @@ end
 end

 @testitem "Node types" begin
-    using Ribasim: nodetypes, NodeType, Parameters, AbstractParameterNode, snake_case
+    using Ribasim: nodetypes, NodeType, ParametersNonDiff, AbstractParameterNode, snake_case

     @test Set(nodetypes) == Set([
         :Basin,
@@ -390,7 +386,7 @@ end
             # It has a struct which is added to Parameters
             T = getproperty(Ribasim, nodetype)
             @test T <: AbstractParameterNode
-            @test hasfield(Parameters, snake_case(nodetype))
+            @test hasfield(ParametersNonDiff, snake_case(nodetype))
         end
     end
 end
@@ -400,9 +396,8 @@ end
     toml_path = normpath(@__DIR__, "../../generated_testmodels/basic/ribasim.toml")
     @test ispath(toml_path)
     model = Ribasim.Model(toml_path)
-    (; p) = model.integrator
-    n_basins = length(p.basin.node_id)
-    (; flow_to_storage, state_ranges) = p
+    (; basin, flow_to_storage, state_ranges) = model.integrator.p.p_non_diff
+    n_basins = length(basin.node_id)

     @test flow_to_storage[:, state_ranges.evaporation] == -I
     @test flow_to_storage[:, state_ranges.infiltration] == -I
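The Jacobian tests above change `get_jac_prototype(du0, u0, p, t0)` into `get_diff_eval(du0, u0, p, config.solver)`, which returns the prototype along with two other values that these tests ignore. Given the new `SparseConnectivityTracer` and `SparseMatrixColorings` dependencies, the prototype is presumably detected by operator-overloading tracing rather than assembled by hand. A hedged sketch of that technique on a toy in-place RHS; `f!` here is a stand-in, not Ribasim's `water_balance!`:

```julia
using ADTypes: jacobian_sparsity
using SparseConnectivityTracer: TracerSparsityDetector

# Toy in-place right-hand side standing in for the real model RHS.
function f!(du, u)
    du[1] = u[1]^2
    du[2] = u[1] * u[2]
    return nothing
end

u0 = ones(2)
du0 = similar(u0)
# Returns a sparse Bool matrix usable as a jac_prototype.
jac_prototype = jacobian_sparsity(f!, du0, u0, TracerSparsityDetector())
```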
rate(s)." Ribasim.Outlet(; - node_id = [NodeID(:Outlet, 1, 1)], - flow_rate, - ) + node_id = [NodeID(:Outlet, 1, 1)] + control_mapping = Dict{Tuple{NodeID, String}, ControlStateUpdate}() + @test !valid_flow_rates(node_id, flow_rate, control_mapping) end @test length(logger.logs) == 1 @test logger.logs[1].level == Error - @test logger.logs[1].message == "Outlet #1 flow rates must be non-negative, found -1.0." + @test logger.logs[1].message == "Negative flow rate(s) for Outlet #1 found." logger = TestLogger() with_logger(logger) do - flow_rate_cache = cache(1) - flow_rate_cache[Float64[]] .= -1 - @test_throws "Invalid Pump flow rate(s)." Ribasim.Pump(; - node_id = [NodeID(:Pump, 1, 1)], - flow_rate_cache, - control_mapping = Dict( - (NodeID(:Pump, 1, 1), "foo") => ControlStateUpdate(; - active = ParameterUpdate(:active, true), - itp_update = [ - ParameterUpdate( - :flow_rate, - LinearInterpolation([-1.0, -1.0], [0.0, 1.0]), - ), - ], - ), + node_id = [NodeID(:Pump, 1, 1)] + control_mapping = Dict( + (NodeID(:Pump, 1, 1), "foo") => ControlStateUpdate(; + active = ParameterUpdate(:active, true), + itp_update_linear = [ + ParameterUpdate( + :flow_rate, + LinearInterpolation([-1.0, -1.0], [0.0, 1.0]), + ), + ], ), ) + @test !valid_flow_rates(node_id, flow_rate, control_mapping) end # Only the invalid control state flow_rate yields an error @test length(logger.logs) == 1 @test logger.logs[1].level == Error @test logger.logs[1].message == - "Pump #1 flow rates must be non-negative, found -1.0 for control state 'foo'." + "Negative flow rate(s) for Pump #1, control state 'foo' found." end @testitem "Link type validation" begin @@ -380,9 +374,9 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - parameters = model.integrator.p + (; p_non_diff) = model.integrator.p - (; graph, tabulated_rating_curve, basin) = parameters + (; graph, tabulated_rating_curve, basin) = p_non_diff tabulated_rating_curve.interpolations[1].t[1] = invalid_level logger = TestLogger() @@ -411,9 +405,9 @@ end config = Ribasim.Config(toml_path) model = Ribasim.Model(config) - parameters = model.integrator.p + (; p_non_diff) = model.integrator.p - (; graph, outlet, basin) = parameters + (; graph, outlet, basin) = p_non_diff outlet.min_upstream_level[1] = LinearInterpolation(fill(invalid_level, 2), zeros(2)) logger = TestLogger() diff --git a/docs/concept/allocation.qmd b/docs/concept/allocation.qmd index 9d3305165..66533deaf 100644 --- a/docs/concept/allocation.qmd +++ b/docs/concept/allocation.qmd @@ -244,13 +244,14 @@ using SQLite toml_path = normpath(@__DIR__, "../../generated_testmodels/allocation_example/ribasim.toml") model = Ribasim.Model(toml_path) -(; u, p, t) = model.integrator +(; p, t) = model.integrator +(; allocation) = p.p_non_diff -allocation_model = p.allocation.allocation_models[1] +allocation_model = allocation.allocation_models[1] priority_idx = 1 -Ribasim.set_initial_values!(allocation_model, u, p, t) -Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, priority_idx) +Ribasim.set_initial_values!(allocation_model, p, t) +Ribasim.set_objective_demand_priority!(allocation_model, p, t, priority_idx) -println(p.allocation.allocation_models[1].problem) +println(allocation.allocation_models[1].problem) ``` diff --git a/docs/dev/callstacks.qmd b/docs/dev/callstacks.qmd index 2c9fac73e..32578cfc5 100644 --- a/docs/dev/callstacks.qmd +++ b/docs/dev/callstacks.qmd @@ -77,10 +77,12 @@ toml_path = normpath( config = Ribasim.Config(toml_path) db_path = Ribasim.database_path(config) db = 
diff --git a/docs/concept/allocation.qmd b/docs/concept/allocation.qmd
index 9d3305165..66533deaf 100644
--- a/docs/concept/allocation.qmd
+++ b/docs/concept/allocation.qmd
@@ -244,13 +244,14 @@ using SQLite

 toml_path = normpath(@__DIR__, "../../generated_testmodels/allocation_example/ribasim.toml")
 model = Ribasim.Model(toml_path)
-(; u, p, t) = model.integrator
+(; p, t) = model.integrator
+(; allocation) = p.p_non_diff

-allocation_model = p.allocation.allocation_models[1]
+allocation_model = allocation.allocation_models[1]
 priority_idx = 1

-Ribasim.set_initial_values!(allocation_model, u, p, t)
-Ribasim.set_objective_demand_priority!(allocation_model, u, p, t, priority_idx)
+Ribasim.set_initial_values!(allocation_model, p, t)
+Ribasim.set_objective_demand_priority!(allocation_model, p, t, priority_idx)

-println(p.allocation.allocation_models[1].problem)
+println(allocation.allocation_models[1].problem)
 ```
diff --git a/docs/dev/callstacks.qmd b/docs/dev/callstacks.qmd
index 2c9fac73e..32578cfc5 100644
--- a/docs/dev/callstacks.qmd
+++ b/docs/dev/callstacks.qmd
@@ -77,10 +77,12 @@ toml_path = normpath(

 config = Ribasim.Config(toml_path)
 db_path = Ribasim.database_path(config)
 db = SQLite.DB(db_path)
-p = Ribasim.Parameters(db, config)
-empty!(p.allocation.subnetwork_ids)
-empty!(p.allocation.main_network_connections)
-graph, verts = tracecall((Ribasim,), Ribasim.initialize_allocation!, (p, db, config))
+(; p_non_diff) = Ribasim.Parameters(db, config)
+(; allocation) = p_non_diff
+empty!(allocation.subnetwork_ids)
+empty!(allocation.main_network_connections)
+graph, verts =
+    tracecall((Ribasim,), Ribasim.initialize_allocation!, (p_non_diff, db, config))
 plot_graph(graph)
 ```
@@ -106,7 +108,7 @@ toml_path =
     normpath(@__DIR__, "../../generated_testmodels/pump_discrete_control/ribasim.toml")
 model = Ribasim.Model(toml_path)
 (; u, t) = model.integrator
-model.integrator.p.basin.storage0 .= [0.1, 100.0]
+model.integrator.p.p_non_diff.basin.storage0 .= [0.1, 100.0]
 graph, verts =
     tracecall((Ribasim,), Ribasim.apply_discrete_control!, (u, t, model.integrator))
 plot_graph(graph; prune_from = [:water_balance!], max_depth = 3)
diff --git a/docs/reference/node/discrete-control.qmd b/docs/reference/node/discrete-control.qmd
index e0f2fbed8..96dbc014a 100644
--- a/docs/reference/node/discrete-control.qmd
+++ b/docs/reference/node/discrete-control.qmd
@@ -14,7 +14,7 @@ using MarkdownTables

 node_names = Symbol[]
 controllable_parameters = String[]
-for node_type in fieldtypes(Ribasim.Parameters)
+for node_type in fieldtypes(Ribasim.ParametersNonDiff)
     if node_type <: Ribasim.AbstractParameterNode
         node_name = nameof(node_type)
         controllable_fields = Ribasim.controllablefields(node_name)
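Both documentation pages (this one and the validation page below) build their tables by reflecting over the parameters struct, so the rename only changes the type they iterate: `fieldtypes(Ribasim.ParametersNonDiff)` instead of `fieldtypes(Ribasim.Parameters)`. The reflection idiom itself, sketched on a stand-in struct:

```julia
# Stand-in struct; the docs pages apply the same idiom to Ribasim.ParametersNonDiff.
struct NodesSketch
    basin::Int
    pump::Float64
end

for (name, T) in zip(fieldnames(NodesSketch), fieldtypes(NodesSketch))
    println(name, " :: ", T)  # e.g. "basin :: Int64"
end
```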
"✓" : "x" end - df = DataFrame() df[!, :downstream] = node_names_snake_case for node_name in node_names_snake_case - df[!, node_name] = - [(to_symbol(node_name_ in Ribasim.neighbortypes(node_name))) for node_name_ in node_names_snake_case] + df[!, node_name] = [ + (to_symbol(node_name_ in Ribasim.neighbortypes(node_name))) for + node_name_ in node_names_snake_case + ] end markdown_table(df) @@ -70,11 +72,9 @@ for node_name in node_names_camel_case push!(control_in_max, unbounded(bounds_control.in_max)) push!(control_out_min, string(bounds_control.out_min)) push!(control_out_max, unbounded(bounds_control.out_max)) - end -df = DataFrame( - ; +df = DataFrame(; node_type = node_names_snake_case, flow_in_min, flow_in_max, diff --git a/python/ribasim/ribasim/model.py b/python/ribasim/ribasim/model.py index df28aa10e..1b59416b9 100644 --- a/python/ribasim/ribasim/model.py +++ b/python/ribasim/ribasim/model.py @@ -1,6 +1,7 @@ import datetime import logging import shutil +import warnings from collections.abc import Generator from os import PathLike from pathlib import Path @@ -15,6 +16,7 @@ from pydantic import ( DirectoryPath, Field, + FilePath, PrivateAttr, field_serializer, model_validator, @@ -57,11 +59,11 @@ from ribasim.utils import ( MissingOptionalModule, UsedIDs, + _add_cf_attributes, _concat, _link_lookup, _node_lookup, _node_lookup_numpy, - _time_in_ns, ) from ribasim.validation import control_link_neighbor_amount, flow_link_neighbor_amount @@ -461,6 +463,55 @@ def _reset_contextvar(self) -> "Model": context_file_loading.set({}) return self + @property + def toml_path(self) -> FilePath: + """ + Get the path to the TOML file if it exists. + + Raises + ------ + FileNotFoundError + If the model has not been written to disk. + + Returns + ------- + FilePath + The path to the TOML file. + """ + if self.filepath is None: + raise FileNotFoundError("Model must be written to disk.") + return FilePath(self.filepath) + + @property + def results_path(self) -> DirectoryPath: + """ + Get the path to the results directory if it exists. + + This checks for the presence of required result files in the directory. + + Raises + ------ + FileNotFoundError + If any of the required result files are missing. + + Returns + ------- + DirectoryPath + The path to the results directory. + """ + toml_path = self.toml_path + results_dir = DirectoryPath(toml_path.parent / self.results_dir) + # This only checks results that are always written. + # Some results like allocation_flow.arrow are optional. + filenames = ["basin_state.arrow", "basin.arrow", "flow.arrow"] + for filename in filenames: + if not (results_dir / filename).is_file(): + raise FileNotFoundError( + f"Cannot find {filename} in '{results_dir}', " + "perhaps the model needs to be run first." 
+                )
+        return results_dir
+
     def plot_control_listen(self, ax):
         """Plot the implicit listen links of the model."""
         df_listen_link = pd.DataFrame(
@@ -645,29 +696,11 @@ def to_xugrid(self, add_flow: bool = False, add_allocation: bool = False):

         return uds

-    def _checked_toml_path(self) -> Path:
-        toml_path = self.filepath
-        if toml_path is None:
-            raise FileNotFoundError("Model must be written to disk to add results.")
-        return toml_path
-
     def _add_flow(self, uds, node_lookup):
-        toml_path = self._checked_toml_path()
-
-        results_path = toml_path.parent / self.results_dir
-        basin_path = results_path / "basin.arrow"
-        flow_path = results_path / "flow.arrow"
-
-        if not basin_path.is_file() or not flow_path.is_file():
-            raise FileNotFoundError(
-                f"Cannot find results in '{results_path}', "
-                "perhaps the model needs to be run first."
-            )
-
+        basin_path = self.results_path / "basin.arrow"
+        flow_path = self.results_path / "flow.arrow"
         basin_df = pd.read_feather(basin_path, dtype_backend="pyarrow")
         flow_df = pd.read_feather(flow_path, dtype_backend="pyarrow")
-        _time_in_ns(basin_df)
-        _time_in_ns(flow_df)

         # add the xugrid dimension indices to the dataframes
         link_dim = uds.grid.edge_dimension
@@ -691,10 +724,7 @@ def _add_flow(self, uds, node_lookup):
         return uds

     def _add_allocation(self, uds):
-        toml_path = self._checked_toml_path()
-
-        results_path = toml_path.parent / self.results_dir
-        alloc_flow_path = results_path / "allocation_flow.arrow"
+        alloc_flow_path = self.results_path / "allocation_flow.arrow"

         if not alloc_flow_path.is_file():
             raise FileNotFoundError(
@@ -713,7 +743,6 @@ def _add_allocation(self, uds):
             ],
             dtype_backend="pyarrow",
         )
-        _time_in_ns(alloc_flow_df)

         # add the xugrid link dimension index to the dataframe
         link_dim = uds.grid.edge_dimension
@@ -738,3 +767,88 @@ def _add_allocation(self, uds):
             uds[varname] = da

         return uds
+
+    def to_fews(
+        self,
+        region_home: str | PathLike[str],
+        add_network: bool = True,
+        add_results: bool = True,
+    ) -> None:
+        """
+        Write the model network and results into files used by Delft-FEWS.
+
+        **Warning**: this method is experimental and is likely to change.
+
+        To run this method, the model needs to be written to disk, and have results.
+        The Node, Link and Basin / area tables are written to shapefiles in the REGION_HOME/Config directory.
+        The results are written to NetCDF files in the REGION_HOME/Modules directory.
+        The NetCDF files are NetCDF4 with CF conventions.
+
+        Parameters
+        ----------
+        region_home: str | PathLike[str]
+            Path to the Delft-FEWS REGION_HOME directory.
+        add_network: bool, optional
+            Write shapefiles representing the network, enabled by default.
+        add_results: bool, optional
+            Write the results to NetCDF files, enabled by default.
+ """ + region_home = DirectoryPath(region_home) + if add_network: + self._network_to_fews(region_home) + if add_results: + self._results_to_fews(region_home) + + def _network_to_fews(self, region_home: DirectoryPath) -> None: + """Write the Node and Link tables to shapefiles for use in Delft-FEWS.""" + df_link = self.link.df + df_node = self.node_table().df + assert df_link is not None + assert df_node is not None + + df_basin_area = self.basin.area.df + if df_basin_area is None: + # Fall back to the Basin points if the area polygons are not set + df_basin_area = df_node[df_node["node_type"] == "Basin"] + + network_dir = region_home / "Config/MapLayerFiles/{ModelId}" + network_dir.mkdir(parents=True, exist_ok=True) + link_path = network_dir / "{ModelId}Links.shp" + node_path = network_dir / "{ModelId}Nodes.shp" + basin_area_path = network_dir / "{ModelId}Areas.shp" + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Normalized/laundered field name", RuntimeWarning + ) + warnings.filterwarnings( + "ignore", + "Column names longer than 10 characters will be truncated when saved to ESRI Shapefile.", + UserWarning, + ) + df_link.to_file(link_path) + df_node.to_file(node_path) + df_basin_area.to_file(basin_area_path) + + def _results_to_fews(self, region_home: DirectoryPath) -> None: + """Convert the model results to NetCDF with CF-conventions for importing into Delft-FEWS.""" + # Delft-FEWS doesn't support our UGRID from `model.to_xugrid` yet, + # so we convert Arrow to regular CF-NetCDF4. + + basin_path = self.results_path / "basin.arrow" + flow_path = self.results_path / "flow.arrow" + + basin_df = pd.read_feather(basin_path) + flow_df = pd.read_feather(flow_path) + + ds_basin = _add_cf_attributes( + basin_df.set_index(["time", "node_id"]).to_xarray(), "node_id" + ) + ds_flow = _add_cf_attributes( + flow_df.set_index(["time", "link_id"]).to_xarray(), "link_id" + ) + + results_dir = region_home / "Modules/ribasim/{ModelId}/work/results" + results_dir.mkdir(parents=True, exist_ok=True) + ds_basin.to_netcdf(results_dir / "basin.nc") + ds_flow.to_netcdf(results_dir / "flow.nc") diff --git a/python/ribasim/ribasim/utils.py b/python/ribasim/ribasim/utils.py index 757b88157..ae611cdf8 100644 --- a/python/ribasim/ribasim/utils.py +++ b/python/ribasim/ribasim/utils.py @@ -66,12 +66,6 @@ def _link_lookup(uds) -> Series[Int32]: ) -def _time_in_ns(df) -> None: - """Convert the time column to datetime64[ns] dtype.""" - # datetime64[ms] gives trouble; https://github.com/pydata/xarray/issues/6318 - df["time"] = df["time"].astype("datetime64[ns]") - - def _concat(dfs, **kwargs): """Concatenate DataFrames with a warning filter.""" with catch_warnings(): @@ -86,6 +80,20 @@ def _concat(dfs, **kwargs): return pd.concat(dfs, **kwargs) +def _add_cf_attributes(ds, timeseries_id): + """Add CF attributes to an xarray.Dataset.""" + ds.attrs.update( + { + "Conventions": "CF-1.8", + "title": "Ribasim model results", + "references": "https://ribasim.org", + } + ) + ds["time"].attrs.update({"standard_name": "time", "axis": "T"}) + ds[timeseries_id].attrs.update({"cf_role": "timeseries_id"}) + return ds + + class UsedIDs(BaseModel): """A helper class to manage globally unique node IDs. 
diff --git a/python/ribasim/tests/test_model.py b/python/ribasim/tests/test_model.py
index e1e55ad61..24678cc30 100644
--- a/python/ribasim/tests/test_model.py
+++ b/python/ribasim/tests/test_model.py
@@ -75,6 +75,16 @@ def test_exclude_unset(basic):
     assert d["solver"]["saveat"] == 86400.0


+def test_toml_path(basic):
+    with pytest.raises(FileNotFoundError, match="Model must be written to disk"):
+        basic.toml_path
+
+
+def test_results_path(basic):
+    with pytest.raises(FileNotFoundError, match="Model must be written to disk"):
+        basic.results_path
+
+
 def test_invalid_node_id():
     with pytest.raises(
         ValueError,
@@ -191,7 +201,7 @@ def test_indexing(basic):
     "model",
     [basic_model(), outlet_model(), pid_control_equation_model(), trivial_model()],
 )
-def test_xugrid(model, tmp_path):
+def test_to_xugrid(model, tmp_path):
     uds = model.to_xugrid(add_flow=False)
     assert isinstance(uds, xugrid.UgridDataset)
     assert uds.grid.edge_dimension == "ribasim_nEdges"
@@ -206,14 +216,37 @@ def test_xugrid(model, tmp_path):
         model.to_xugrid(add_flow=True)

     model.write(tmp_path / "ribasim.toml")
-    with pytest.raises(FileNotFoundError, match="Cannot find results"):
+    with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"):
         model.to_xugrid(add_flow=True)

-    with pytest.raises(FileNotFoundError, match="or allocation is not used"):
+    with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"):
        model.to_xugrid(add_flow=False, add_allocation=True)

     with pytest.raises(ValueError, match="Cannot add both allocation and flow results"):
         model.to_xugrid(add_flow=True, add_allocation=True)


+@pytest.mark.parametrize(
+    "model",
+    [basic_model(), outlet_model(), pid_control_equation_model(), trivial_model()],
+)
+def test_to_fews(model, tmp_path):
+    region_home = tmp_path
+    network_dir = region_home / "Config/MapLayerFiles/{ModelId}"
+
+    with pytest.raises(FileNotFoundError, match="Model must be written to disk"):
+        model.to_fews(region_home)
+
+    model.write(tmp_path / "model/ribasim.toml")
+    model.to_fews(region_home, add_results=False)
+    assert (network_dir / "{ModelId}Links.dbf").is_file()
+    assert (network_dir / "{ModelId}Links.shp").is_file()
+    assert (network_dir / "{ModelId}Nodes.dbf").is_file()
+    assert (network_dir / "{ModelId}Nodes.shp").is_file()
+
+    # Cannot test results=True without results
+    with pytest.raises(FileNotFoundError, match="Cannot find basin_state.arrow"):
+        model.to_fews(region_home, add_results=True)
+
+
 def test_to_crs(bucket: Model):
     model = bucket