diff --git a/.gitignore b/.gitignore index eeb9984..5bac5bd 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ *.g.nem *.g.pex *.g.*.* +*.e.*.* diff --git a/Project.toml b/Project.toml index 5dfe7fb..19aa4a1 100644 --- a/Project.toml +++ b/Project.toml @@ -1,12 +1,13 @@ name = "FiniteElementContainers" uuid = "d08262e4-672f-4e7f-a976-f2cea5767631" -version = "0.10.1" authors = ["Craig M. Hamel and contributors"] +version = "0.10.1" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Atomix = "a9b6321e-bd34-4604-b9c9-b65b8de01458" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +Exodus = "f57ae99e-f805-4780-bdca-96e224be1e5a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" @@ -19,19 +20,22 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [weakdeps] AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" +AbaqusReader = "bc6b9049-e460-56d6-94b4-a597b2c0390d" BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -Exodus = "f57ae99e-f805-4780-bdca-96e224be1e5a" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" +PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" [extensions] -FiniteElementContainersAMDGPUExt = "AMDGPU" -FiniteElementContainersBlockArraysExt = "BlockArrays" -FiniteElementContainersCUDAExt = "CUDA" -FiniteElementContainersExodusExt = "Exodus" -FiniteElementContainersMPIExt = ["Exodus", "MPI"] +AbaqusReaderExt = "AbaqusReader" +AMDGPUExt = "AMDGPU" +BlockArraysExt = "BlockArrays" +CUDAExt = "CUDA" +MPIExt = "MPI" +PartitionedArraysExt = "PartitionedArrays" [compat] +AbaqusReader = "0.2.7" AMDGPU = "2" Adapt = "4" Aqua = "0.8" @@ -45,6 +49,7 @@ KernelAbstractions = "0.9" Krylov = "0.9" LinearAlgebra = "1" MPI = "0.20" +PartitionedArrays = "0.5" ReferenceFiniteElements = "0.13" SparseArrays = "1" StaticArrays = "1" @@ -58,9 +63,9 @@ AMDGPU = 
"21141c5a-9bdb-4563-92ae-f87d6854732e" Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -Exodus = "f57ae99e-f805-4780-bdca-96e224be1e5a" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" +PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["AMDGPU", "Aqua", "CUDA", "Exodus", "MPI", "Test"] +test = ["AMDGPU", "Aqua", "CUDA", "MPI", "PartitionedArrays", "Test"] diff --git a/ext/FiniteElementContainersAMDGPUExt.jl b/ext/AMDGPUExt.jl similarity index 97% rename from ext/FiniteElementContainersAMDGPUExt.jl rename to ext/AMDGPUExt.jl index 8494900..68e1ee2 100644 --- a/ext/FiniteElementContainersAMDGPUExt.jl +++ b/ext/AMDGPUExt.jl @@ -1,4 +1,4 @@ -module FiniteElementContainersAMDGPUExt +module AMDGPUExt using Adapt using AMDGPU diff --git a/ext/AbaqusReaderExt.jl b/ext/AbaqusReaderExt.jl new file mode 100644 index 0000000..9ccd831 --- /dev/null +++ b/ext/AbaqusReaderExt.jl @@ -0,0 +1,306 @@ +module AbaqusReaderExt + +using AbaqusReader +using FiniteElementContainers + +function _check_for_command(line) + if line[1] == '*' && line[2] != '*' + return true + else + return false + end +end + +function _check_for_comment(line) + try + if line[1:2] == "**" + return true + end + catch BoundsError + # do nothing + end + return false +end + +function _get_command_data(lines, command_line_ids, comment_line_ids, command_id) + if _check_for_comment(lines[command_id]) + return nothing + end + # substr = split(lines[command_id], '*')[2] + substr = lowercase(lines[command_id]) + + if contains(substr, "*element") && + !contains(substr, "*element output") && + !contains(substr, "*user element") + element_conns = Dict{Int, Vector{Int}}() + + if contains(substr, "elset") + elset_name = String(split(split(substr, "elset")[2], '=')[2]) + else + elset_name = nothing + end + + if contains(substr, "type") + el_type = 
String(split(split(substr, "type")[2], '=')[2]) + else + @assert false + end + + n = command_id + 1 + while n ∉ command_line_ids + if n in comment_line_ids + n = n + 1 + continue + end + parts = split(lines[n], ',') + el_id = parse(Int, parts[1]) + conn = parse.((Int,), parts[2:end]) + element_conns[el_id] = conn + n = n + 1 + end + return "element", Dict(elset_name => element_conns), el_type + elseif contains(substr, "*heading") + heading = String[] + n = command_id + 1 + while n ∉ command_line_ids + if n in comment_line_ids + n = n + 1 + continue + end + push!(heading, lines[n]) + n = n + 1 + end + return "heading", heading + elseif contains(substr, "*material") + mat_name = String(split(split(substr, ',')[2], '=')[2]) + props = Dict{String, Float64}() + n = command_id + 1 + if contains(lowercase(lines[n]), "*elastic") + n = n + 1 + props["elastic"] = parse(Float64, lines[n]) + end + return "material", Dict(mat_name => props) + elseif contains(substr, "*node") && !contains(substr, "*node output") + nodes = Dict{Int, Vector{Float64}}() + if contains(substr, "nset") + nset_name = split(split(substr, "nset")[2], '=')[2] + else + nset_name = nothing + end + + n = command_id + 1 + while n ∉ command_line_ids + if n in comment_line_ids + n = n + 1 + continue + end + + parts = split(lines[n], ',') + node_id = parse(Int, parts[1]) + coords = parse.((Float64,), parts[2:end]) + nodes[node_id] = coords + n = n + 1 + end + return "nodes", Dict(nset_name => nodes) + elseif contains(substr, "*nset") + nodes = Dict{String, Vector{Int}}() + n = command_id + 1 + if contains(substr, "generate") + nset_name = split(split(split(split(substr, "*nset")[2], "generate")[1], ',')[2], '=')[2] + parts = parse.((Int,), split(lines[n], ',')) + nodes[nset_name] = range(start=parts[1], step=parts[3], stop=parts[2]) + else + nset_name = split(split(split(substr, "*nset")[2], ',')[2], '=')[2] + temp_nodes = Vector{Int}(undef, 0) + while n ∉ command_line_ids + if n in comment_line_ids + n = n + 1 
+ continue + end + for node in split(lines[n], ',') + try + push!(temp_nodes, parse(Int, node)) + catch ArgumentError + # do nothing + if node != "" + @warn "Got a potentially bad line reading a nodeset" + @warn "$(lines[n])" + end + end + end + n = n + 1 + end + nodes[nset_name] = temp_nodes + end + return "nset", nodes + elseif contains(substr, "*parameter") + parameters = String[] + n = command_id + 1 + while n ∉ command_line_ids + if n in comment_line_ids + n = n + 1 + continue + end + push!(parameters, lines[n]) + n = n + 1 + end + return "parameter", parameters + else + @warn "Implement $substr" + substr, nothing + end +end + +function FiniteElementContainers.UnstructuredMesh( + ::FiniteElementContainers.AbaqusMesh, + file_name::String, + create_edges::Bool, create_faces::Bool +) + command_line_ids = Int[] + comment_line_ids = Int[] + comments = String[] + + sections = Dict{String, Any}() + open(file_name, "r") do f + lines = readlines(f) + + # do a first pass to find where commands stop + for (n, line) in enumerate(lines) + if _check_for_command(line) + push!(command_line_ids, n) + end + + # skip comments + if _check_for_comment(line) + push!(comment_line_ids, n) + end + end + + for command_id in command_line_ids + key, data = _get_command_data(lines, command_line_ids, comment_line_ids, command_id) + + if key === nothing || data === nothing + @warn "Got nothing key/data for line $(lines[command_id])" + continue + end + + if haskey(sections, key) + sections[key] = merge(sections[key], data) + else + sections[key] = data + end + end + + for comment_id in comment_line_ids + push!(comments, lines[comment_id]) + end + end + + sections + # comments + # WIP below + # AbaqusReader.register_element!("CPE4T", 4, "Quad4") + # AbaqusReader.register_element!("C3D8RT", 8, "Hex8") + # AbaqusReader.register_element!("C3D8T", 8, "Hex8") + + # # adding a few user element type names + # # we've encountered in the wild + # # NOTE this is probably dangerous in general + # 
AbaqusReader.register_element!("U1", 8, "Quad4") + # # AbaqusReader.register_element!("U3", 8, "Hex8") + + # abq_data = abaqus_read_mesh(file_name) + + # # nodal_coords = abq_data["nodes"] + # # nodal_coords = values(abq_data["nodes"]) + # # node_id_map = keys(abq_data["nodes"]) + # # perm = sortperm(keys(abq_data["nodes"]) |> collect) + # # node_id_map = collect(keys(abq_data["nodes"]))[perm] + # # nodal_coords = collect(values(abq_data["nodes"]))[perm] + + # # node stuff + # # perm = Int[] + # abq_to_fem = Dict{Int, Int}() + # for (n, k) in enumerate(keys(abq_data["nodes"])) + # # push!(perm, k) + # abq_to_fem[k] = n + # end + + # nodal_coords = Matrix{Float64}(undef, length(abq_data["nodes"][1]), length(abq_data["nodes"])) + # for (n, v) in enumerate(values(abq_data["nodes"])) + # nodal_coords[:, n] = v + # end + # node_id_map = 1:size(nodal_coords, 2) |> collect + + # # element stuff + # element_block_names = keys(abq_data["element_sets"]) + # # NOTE these are mapping el id to type name + # # we need to sort through this with the + # element_types = abq_data["element_types"] + # # abq_conns = values(abq_data["element_sets"]) + # blocks = abq_data["element_sets"] + + # # @show length(element_types) + # # @show length(blocks) + + # # collect element type by block + # for block in values(blocks) + # # el_types = Vector{Symbol}(undef, length(block)) + # el_types = Symbol[] + # for (k, v) in element_types + # # el_types[k] = v + # push!(el_types, v) + # end + # display(el_types) + # # @assert length(unique(el_types)) == 1 + # end + + # # element_conns = L2ElementField[] + + # # for conn in abq_conns + # # # block_el_ids = map(x -> ) + # # # n_elem = length(conn) + # # # fem_conns = Matrix{Int}(undef, ) + # # # @show el_type + # # # el_type = unique(el_type) + # # # @show el_type + # # # @assert length(el_type) == 1 + # # # el_type = el_type[1] + # # # @show el_type + + # # if el_type == :Hex8 + # # nnpe = 8 + # # elseif el_type == :Quad4 + # # nnpe = 4 + # # elseif 
el_type == :Tri3 + # # nnpe = 3 + # # elseif el_type == :Tri6 + # # nnpe = 6 + # # elseif el_type == :Tet4 + # # nnpe = 4 + # # elseif el_type == :Tet10 + # # nnpe = 10 + # # else + # # # @assert false "Got unnsupported el_type $el_type" + # # end + + # # ne = length(conn) ÷ nnpe + # # conn_rs = reshape(conn, nnpe, ne) + # # display(conn_rs ) + # # end + # # # nodesets + # # nodeset_nodes = Dict{Symbol, Vector{Int}}() + # # for (k, v) in abq_data["node_sets"] + # # nodeset_nodes[Symbol(k)] = v + # # end + + # # TODO need to set up sidesets + + # # TODO edge/face connectivities + # # abq_data + # # nodal_coords + # # abq_conns + # abq_data +end + +end # module diff --git a/ext/FiniteElementContainersBlockArraysExt.jl b/ext/BlockArraysExt.jl similarity index 83% rename from ext/FiniteElementContainersBlockArraysExt.jl rename to ext/BlockArraysExt.jl index 9da4fd3..3c72a79 100644 --- a/ext/FiniteElementContainersBlockArraysExt.jl +++ b/ext/BlockArraysExt.jl @@ -1,4 +1,4 @@ -module FiniteElementContainersBlockArraysExt +module BlockArraysExt using BlockArrays using FiniteElementContainers diff --git a/ext/FiniteElementContainersCUDAExt.jl b/ext/CUDAExt.jl similarity index 97% rename from ext/FiniteElementContainersCUDAExt.jl rename to ext/CUDAExt.jl index 8991467..3444e1c 100644 --- a/ext/FiniteElementContainersCUDAExt.jl +++ b/ext/CUDAExt.jl @@ -1,4 +1,4 @@ -module FiniteElementContainersCUDAExt +module CUDAExt using Adapt using CUDA diff --git a/ext/FiniteElementContainersMPIExt.jl b/ext/FiniteElementContainersMPIExt.jl deleted file mode 100644 index 3090b32..0000000 --- a/ext/FiniteElementContainersMPIExt.jl +++ /dev/null @@ -1,86 +0,0 @@ -module FiniteElementContainersMPIExt - -using Exodus -using FiniteElementContainers -using MPI - -# include("parallel/Parallel.jl") -# TODO eventually type this further so it only works on exodus - -function FiniteElementContainers.communication_graph(file_name::String) - comm = MPI.COMM_WORLD - num_procs = MPI.Comm_size(comm) |> 
Int32 - rank = MPI.Comm_rank(comm) + 1 - - file_name = file_name * ".$num_procs" * ".$(lpad(rank - 1, Exodus.exodus_pad(num_procs), '0'))" - exo = ExodusDatabase(file_name, "r") - - node_map = read_id_map(exo, NodeMap) - node_cmaps = Exodus.read_node_cmaps(rank, exo) - - # error checking - for node_cmap in node_cmaps - @assert length(unique(node_cmap.proc_ids)) == 1 "Node communication map has more than one processor present" - end - - comm_graph_edges = FiniteElementContainers.CommunicationGraphEdge[] - for node_cmap in node_cmaps - to_rank = unique(node_cmap.proc_ids)[1] - indices = convert.(Int64, node_cmap.node_ids) - edge = FiniteElementContainers.CommunicationGraphEdge(indices, to_rank) - push!(comm_graph_edges, edge) - # display(edge) - end - close(exo) - - return FiniteElementContainers.CommunicationGraph(comm_graph_edges) -end - -function FiniteElementContainers.decompose_mesh(file_name::String, n_ranks::Int) - if !MPI.Initialized() - MPI.Init() - end - - comm = MPI.COMM_WORLD - root = 0 - # num_procs = MPI.Comm_size(comm) - if MPI.Comm_rank(comm) == root - @info "Running decomp on $file_name with $n_ranks" - decomp(file_name, n_ranks) - end - MPI.Barrier(comm) -end - -function FiniteElementContainers.global_colorings(file_name::String, num_dofs::Int, num_procs::Int) - if !MPI.Initialized() - MPI.Init() - end - - comm = MPI.COMM_WORLD - root = 0 - # num_procs = MPI.Comm_size(comm) - if MPI.Comm_rank(comm) == root - @info "Setting up global colorings on root" - global_elems_to_colors, global_nodes_to_colors = Exodus.collect_global_element_and_node_numberings(file_name, num_procs) - global_dofs = reshape(1:num_dofs * length(global_nodes_to_colors), num_dofs, length(global_nodes_to_colors)) - global_dofs_to_colors = similar(global_dofs) - - for dof in 1:num_dofs - global_dofs_to_colors[dof, :] .= global_nodes_to_colors - end - global_dofs_to_colors = global_dofs_to_colors |> vec - else - exo = ExodusDatabase(file_name, "r") - num_elems = 
Exodus.initialization(exo).num_elements - num_nodes = Exodus.initialization(exo).num_nodes - n_dofs = num_dofs * num_nodes - global_elems_to_colors = Vector{Int}(undef, num_elems) - global_dofs_to_colors = Vector{Int}(undef, n_dofs) - close(exo) - end - - MPI.Bcast!(global_dofs_to_colors, root, comm) - return global_dofs_to_colors -end - -end # module diff --git a/ext/MPIExt.jl b/ext/MPIExt.jl new file mode 100644 index 0000000..42d5638 --- /dev/null +++ b/ext/MPIExt.jl @@ -0,0 +1,86 @@ +module MPIExt + +using Exodus +using FiniteElementContainers +using MPI + +# include("parallel/Parallel.jl") +# TODO eventually type this further so it only works on exodus + +function FiniteElementContainers.communication_graph(file_name::String) + comm = MPI.COMM_WORLD + num_procs = MPI.Comm_size(comm) |> Int32 + rank = MPI.Comm_rank(comm) + 1 + + file_name = file_name * ".$num_procs" * ".$(lpad(rank - 1, Exodus.exodus_pad(num_procs), '0'))" + exo = ExodusDatabase(file_name, "r") + + node_map = read_id_map(exo, NodeMap) + node_cmaps = Exodus.read_node_cmaps(rank, exo) + + # error checking + for node_cmap in node_cmaps + @assert length(unique(node_cmap.proc_ids)) == 1 "Node communication map has more than one processor present" + end + + comm_graph_edges = FiniteElementContainers.CommunicationGraphEdge[] + for node_cmap in node_cmaps + to_rank = unique(node_cmap.proc_ids)[1] + indices = convert.(Int64, node_cmap.node_ids) + edge = FiniteElementContainers.CommunicationGraphEdge(indices, to_rank) + push!(comm_graph_edges, edge) + # display(edge) + end + close(exo) + + return FiniteElementContainers.CommunicationGraph(comm_graph_edges) +end + +# function FiniteElementContainers.decompose_mesh(file_name::String, n_ranks::Int) +# if !MPI.Initialized() +# MPI.Init() +# end + +# comm = MPI.COMM_WORLD +# root = 0 +# # num_procs = MPI.Comm_size(comm) +# if MPI.Comm_rank(comm) == root +# @info "Running decomp on $file_name with $n_ranks" +# decomp(file_name, n_ranks) +# end +# 
MPI.Barrier(comm) +# end + +# function FiniteElementContainers.global_colorings(file_name::String, num_dofs::Int, num_procs::Int) +# if !MPI.Initialized() +# MPI.Init() +# end + +# comm = MPI.COMM_WORLD +# root = 0 +# # num_procs = MPI.Comm_size(comm) +# if MPI.Comm_rank(comm) == root +# @info "Setting up global colorings on root" +# global_elems_to_colors, global_nodes_to_colors = Exodus.collect_global_element_and_node_numberings(file_name, num_procs) +# global_dofs = reshape(1:num_dofs * length(global_nodes_to_colors), num_dofs, length(global_nodes_to_colors)) +# global_dofs_to_colors = similar(global_dofs) + +# for dof in 1:num_dofs +# global_dofs_to_colors[dof, :] .= global_nodes_to_colors +# end +# global_dofs_to_colors = global_dofs_to_colors |> vec +# else +# exo = ExodusDatabase(file_name, "r") +# num_elems = Exodus.initialization(exo).num_elements +# num_nodes = Exodus.initialization(exo).num_nodes +# n_dofs = num_dofs * num_nodes +# global_elems_to_colors = Vector{Int}(undef, num_elems) +# global_dofs_to_colors = Vector{Int}(undef, n_dofs) +# close(exo) +# end + +# MPI.Bcast!(global_dofs_to_colors, root, comm) +# return global_dofs_to_colors +# end + +end # module diff --git a/ext/PartitionedArraysExt.jl b/ext/PartitionedArraysExt.jl new file mode 100644 index 0000000..cc27b99 --- /dev/null +++ b/ext/PartitionedArraysExt.jl @@ -0,0 +1,465 @@ +module PartitionedArraysExt + +using Exodus +using FiniteElementContainers +using PartitionedArrays + +function FiniteElementContainers.decompose_mesh(file_name::String, n_ranks::Int, comm) + if comm == 1 + @info "Running decomp on $file_name with $n_ranks processors" + decomp(file_name, n_ranks) + end +end + +# TODO need to adjust for problems with more than one dof +function FiniteElementContainers.global_colorings(file_name, n_dofs, n_ranks) + global_elems_to_colors, global_nodes_to_colors = Exodus.collect_global_element_and_node_numberings(file_name, n_ranks) + global_dofs = reshape(1:n_dofs * 
length(global_nodes_to_colors), n_dofs, length(global_nodes_to_colors)) + global_dofs_to_colors = similar(global_dofs) + for dof in 1:n_dofs + global_dofs_to_colors[dof, :] .= global_nodes_to_colors + end + global_dofs_to_colors = global_dofs_to_colors |> vec + + return global_dofs_to_colors, convert.(Int, global_elems_to_colors) +end + +# function FiniteElementContainers.global_colorings(file_name::String, num_dofs::Int, num_procs::Int, comm) +# # if comm == 1 +# @info "Setting up global colorings on rank $comm" +# global_elems_to_colors, global_nodes_to_colors = Exodus.collect_global_element_and_node_numberings(file_name, num_procs) +# global_dofs = reshape(1:num_dofs * length(global_nodes_to_colors), num_dofs, length(global_nodes_to_colors)) +# global_dofs_to_colors = similar(global_dofs) + +# for dof in 1:num_dofs +# global_dofs_to_colors[dof, :] .= global_nodes_to_colors +# end +# global_dofs_to_colors = global_dofs_to_colors |> vec +# # else +# # exo = ExodusDatabase(file_name, "r") +# # num_elems = Exodus.initialization(exo).num_elements +# # num_nodes = Exodus.initialization(exo).num_nodes +# # n_dofs = num_dofs * num_nodes +# # global_elems_to_colors = Vector{Int}(undef, num_elems) +# # global_dofs_to_colors = Vector{Int}(undef, n_dofs) +# # close(exo) +# # end + +# # MPI.Bcast!(global_dofs_to_colors, root, comm) +# # return global_dofs_to_colors +# # gather(global_dofs_to_colors, destination=:all) +# end + +function FiniteElementContainers.UnstructuredMesh( + file_name, num_ranks, rank +) + file_name = file_name * ".$num_ranks" * ".$(lpad(rank - 1, Exodus.exodus_pad(num_ranks |> Int32), '0'))" + return UnstructuredMesh(file_name) +end + +struct ExodusPartition{A, B, C, D, E, F, G} + # these map globals to globals + exo_dof_to_own::A + exo_elem_to_own::A + exo_elem_to_exo_node::B + exo_node_to_exo_elem::C + # tracks total dofs/elems per part + ndpp::A + nepp::A + # different partitions we have to keep track of + dof_parts::D + elem_parts::E + # maps 
between PartitionedArrays and exodus + dof_exo_to_par::F + dof_par_to_exo::F + elem_exo_to_par::F + elem_par_to_exo::F + # for building sparse parrays + par_conns::G +end + +function Base.show(io::IO, part::ExodusPartition) + println(io, "ExodusPartition:") + println(io, " Number of partitions = $(maximum(values(part.exo_to_par)))") + # println(io, " Number of global dofs = $(length(part.exo_to_par))") +end + +function FiniteElementContainers.create_partition( + mesh_file, num_dofs, num_ranks, ranks; + add_element_borders = false, + add_ghost_nodes = false +) + exo_dof_to_own, exo_elem_to_own = FiniteElementContainers.global_colorings(mesh_file, num_dofs, num_ranks) + exo_elem_to_glob_node = FiniteElementContainers._global_elem_to_global_node(mesh_file) + exo_node_to_glob_elem = FiniteElementContainers._global_node_to_global_elem(mesh_file) + + ndpp, nepp = tuple_of_arrays(map(ranks) do rank + nd = count(x -> x == rank, exo_dof_to_own) + ne = count(x -> x == rank, exo_elem_to_own) + return nd, ne + end) + + dof_parts = variable_partition(ndpp, sum(ndpp)) + elem_parts = variable_partition(nepp, sum(nepp)) + + meshes = map(ranks) do rank + UnstructuredMesh(mesh_file, num_ranks, rank) + end + + dof_exo_to_par, dof_par_to_exo = FiniteElementContainers._dofs_exo_to_par_dicts( + meshes, num_dofs, + exo_dof_to_own, + ndpp, dof_parts, ranks + ) + elem_exo_to_par, elem_par_to_exo = FiniteElementContainers._elems_exo_to_par_dicts( + meshes, + exo_elem_to_own, + nepp, elem_parts, ranks + ) + + par_conns = map(meshes) do mesh + conns = mesh.element_conns + node_map = mesh.node_id_map + # update in place since we'll make new mesh objects later + for conn in values(conns) + for e in axes(conn, 2) + for n in axes(conn, 1) + conn[n, e] = dof_exo_to_par[node_map[conn[n, e]]] + end + end + end + conns + end + + # get inernal global exo nodes + + + exo_parts = ExodusPartition( + exo_dof_to_own, exo_elem_to_own, + exo_elem_to_glob_node, + exo_node_to_glob_elem, + ndpp, nepp, + 
dof_parts, elem_parts, + dof_exo_to_par, dof_par_to_exo, + elem_exo_to_par, elem_par_to_exo, + par_conns + ) + + if add_element_borders + exo_parts = _add_element_borders(exo_parts, meshes, ranks) + end + + if add_ghost_nodes + exo_parts = _add_ghost_nodes(exo_parts, meshes, ranks) + end + + return exo_parts +end + +function _add_element_borders(exo_parts, meshes, ranks) + owns, ghosts = tuple_of_arrays(map(meshes, ranks) do mesh, rank + conns = mesh.element_conns + node_map = mesh.node_id_map + elem_map = Exodus.read_id_map(mesh.mesh_obj.mesh_obj, ElementMap) + owns = Vector{Int}(undef, 0) + ghosts = Vector{Int}(undef, 0) + + for node in node_map + elems = exo_parts.exo_node_to_exo_elem[node] + for elem in elems + if exo_parts.exo_elem_to_own[elem] == rank + push!(owns, elem) + else + push!(ghosts, elem) + end + end + end + + owns = unique(sort(owns)) + ghosts = unique(sort(ghosts)) + owns = map(x -> exo_parts.elem_exo_to_par[x], owns) + ghosts = map(x -> exo_parts.elem_exo_to_par[x], ghosts) + + return owns, ghosts + end) + + elem_parts = map(exo_parts.elem_parts, ghosts) do part, ghost + owners = map(x -> exo_parts.exo_elem_to_own[exo_parts.elem_par_to_exo[x]], ghost) + # perm = sortperm(owners) + # union_ghost(part, ghost[perm], owners[perm]) + union_ghost(part, ghost, owners) + end + + return ExodusPartition( + exo_parts.exo_dof_to_own, exo_parts.exo_elem_to_own, + exo_parts.exo_elem_to_exo_node, + exo_parts.exo_node_to_exo_elem, + exo_parts.ndpp, exo_parts.nepp, + exo_parts.dof_parts, elem_parts, + exo_parts.dof_exo_to_par, exo_parts.dof_par_to_exo, + exo_parts.elem_exo_to_par, exo_parts.elem_par_to_exo, + exo_parts.par_conns + ) +end + +function _add_ghost_nodes(exo_parts, meshes, ranks) + ghost_nodes, owners = tuple_of_arrays(map(meshes, ranks) do mesh, rank + node_map = mesh.node_id_map + node_cmaps = Exodus.read_node_cmaps(rank, mesh.mesh_obj.mesh_obj) + ghost_nodes, owners = Int[], Int[] + for cmap in node_cmaps + for (node, owner) in zip(cmap.node_ids, 
cmap.proc_ids) + if exo_parts.exo_dof_to_own[node_map[node]] != rank + push!(ghost_nodes, node_map[node]) + push!(owners, owner) + end + end + end + idx = unique(z -> ghost_nodes[z], 1:length(ghost_nodes)) + ghost_nodes = ghost_nodes[idx] + owners = owners[idx] + + ghost_nodes = map(x -> exo_parts.dof_exo_to_par[x], ghost_nodes) + + return ghost_nodes, owners + end) + + dof_parts = map(exo_parts.dof_parts, ghost_nodes, owners) do part, ghost, owner + union_ghost(part, ghost, owner) + end + + return ExodusPartition( + exo_parts.exo_dof_to_own, exo_parts.exo_elem_to_own, + exo_parts.exo_elem_to_exo_node, + exo_parts.exo_node_to_exo_elem, + exo_parts.ndpp, exo_parts.nepp, + dof_parts, exo_parts.elem_parts, + exo_parts.dof_exo_to_par, exo_parts.dof_par_to_exo, + exo_parts.elem_exo_to_par, exo_parts.elem_par_to_exo, + exo_parts.par_conns + ) +end + +function FiniteElementContainers._dofs_exo_to_par_dicts( + meshes, num_dofs, + global_dofs_to_colors, + n_dofs_per_parts, parts, ranks +) + exo_to_pars, par_to_exos = tuple_of_arrays(map(meshes, parts, ranks) do mesh, part, rank + exo_to_par = Dict{Int, Int}() + par_to_exo = Dict{Int, Int}() + + par_nodes = 1:sum(n_dofs_per_parts) |> collect + # mesh = UnstructuredMesh(mesh_file, num_ranks, rank) + node_id_map = mesh.node_id_map + + num_nodes = length(global_dofs_to_colors) ÷ num_dofs + global_dofs = reshape(1:length(global_dofs_to_colors), num_dofs, num_nodes) + node_id_map = global_dofs[:, node_id_map] |> vec + + par_local_nodes = part.ranges[1] |> collect + exo_local_nodes = filter(x -> global_dofs_to_colors[x] == rank, node_id_map) + + @assert length(exo_local_nodes) == length(par_local_nodes) + + for (e, p) in zip(exo_local_nodes, par_local_nodes) + exo_to_par[e] = p + par_to_exo[p] = e + end + + return exo_to_par, par_to_exo + end) + + whole_exo_to_par = Dict{Int, Int}() + whole_par_to_exo = Dict{Int, Int}() + + for (exo_to_par, par_to_exo) in zip(exo_to_pars, par_to_exos) + for (k, v) in exo_to_par + if 
haskey(whole_exo_to_par, k) + @assert false + end + whole_exo_to_par[k] = v + end + + for (k, v) in par_to_exo + if haskey(whole_par_to_exo, k) + @assert false + end + whole_par_to_exo[k] = v + end + end + + # now check + for (k, v) in whole_exo_to_par + @assert whole_par_to_exo[v] == k + end + return whole_exo_to_par, whole_par_to_exo +end + +function FiniteElementContainers._elems_exo_to_par_dicts( + # mesh_file, num_ranks, + meshes, + global_elems_to_colors, + n_elems_per_parts, parts, ranks +) + exo_to_pars, par_to_exos = tuple_of_arrays(map(meshes, parts, ranks) do mesh, part, rank + exo_to_par = Dict{Int, Int}() + par_to_exo = Dict{Int, Int}() + + par_elems = 1:sum(n_elems_per_parts) |> collect + # mesh = UnstructuredMesh(mesh_file, num_ranks, rank) + # node_id_map = mesh.node_id_map + elem_id_map = Exodus.read_id_map(mesh.mesh_obj.mesh_obj, ElementMap) + par_local_elems = part.ranges[1] |> collect + exo_local_elems = filter(x -> global_elems_to_colors[x] == rank, elem_id_map) + + for (e, p) in zip(exo_local_elems, par_local_elems) + exo_to_par[e] = p + par_to_exo[p] = e + end + + return exo_to_par, par_to_exo + end) + + whole_exo_to_par = Dict{Int, Int}() + whole_par_to_exo = Dict{Int, Int}() + + for (exo_to_par, par_to_exo) in zip(exo_to_pars, par_to_exos) + for (k, v) in exo_to_par + if haskey(whole_exo_to_par, k) + @assert false + end + whole_exo_to_par[k] = v + end + + for (k, v) in par_to_exo + if haskey(whole_par_to_exo, k) + @assert false + end + whole_par_to_exo[k] = v + end + end + + # now check + for (k, v) in whole_exo_to_par + @assert whole_par_to_exo[v] == k + end + + return whole_exo_to_par, whole_par_to_exo +end + +function FiniteElementContainers._global_elem_to_global_node(mesh_file) + exo = ExodusDatabase(mesh_file, "r") + num_elem = Exodus.num_elements(Exodus.initialization(exo)) + elem_to_node = Dict{Int, Set{Int}}() + for elem in 1:num_elem + elem_to_node[elem] = Set{Int}() + end + + blocks = read_sets(exo, Block) + block_ids = map(x -> 
x.id, blocks) + block_maps = Exodus.read_block_id_map.((exo,), block_ids) + for (block, emap) in zip(blocks, block_maps) + conn = block.conn + for e in axes(conn, 2) + for n in axes(conn, 1) + global_e = emap[e] + elem_to_node[global_e] = union(elem_to_node[global_e], conn[n, e]) + end + end + end + close(exo) + + new_elem_to_node = Vector{Vector{Int}}(undef, num_elem) + for elem in 1:num_elem + new_elem_to_node[elem] = sort(collect(elem_to_node[elem])) + end + + return new_elem_to_node +end + +function FiniteElementContainers._global_node_to_global_elem(mesh_file) + exo = ExodusDatabase(mesh_file, "r") + num_node = Exodus.num_nodes(Exodus.initialization(exo)) + node_to_elem = Dict{Int, Set{Int}}() + for node in 1:num_node + node_to_elem[node] = Set{Int}() + end + + blocks = read_sets(exo, Block) + block_ids = map(x -> x.id, blocks) + block_maps = Exodus.read_block_id_map.((exo,), block_ids) + for (block, emap) in zip(blocks, block_maps) + conn = block.conn + for e in axes(conn, 2) + for n in axes(conn, 1) + global_e = emap[e] + node_to_elem[conn[n, e]] = union(node_to_elem[conn[n, e]], global_e) + end + end + end + + new_node_to_elem = Vector{Vector{Int}}(undef, num_node) + for node in 1:num_node + new_node_to_elem[node] = sort(collect(node_to_elem[node])) + end + + close(exo) + + new_node_to_elem +end + +function FiniteElementContainers._renumber_mesh(mesh, exo_to_par) + + for conn in mesh.element_conns + for e in axes(conn, 2) + for n in axes(conn, 1) + conn[n, e] = exo_to_par[conn[n, e]] + end + end + end + + for n in axes(mesh.node_id_map, 1) + mesh.node_id_map[n] = exo_to_par[mesh.node_id_map[n]] + end + + for nset in values(mesh.nodeset_nodes) + for n in axes(nset, 1) + nset[n] = exo_to_par[nset[n]] + end + end + + for sset in values(mesh.sideset_nodes) + for n in axes(sset, 1) + sset[n] = exo_to_par[sset[n]] + end + end + + for sset in values(mesh.sideset_side_nodes) + for s in axes(sset, 2) + for n in axes(sset, 1) + sset[n, s] = exo_to_par[sset[n, s]] + 
end + end + end + + new_mesh = UnstructuredMesh( + mesh.mesh_obj, + mesh.nodal_coords, + mesh.element_block_names, + mesh.element_types, + mesh.element_conns, # updated in place above + mesh.element_id_maps, + mesh.node_id_map, # updated in place above + mesh.nodeset_nodes, # updated in place above + mesh.sideset_elems, + mesh.sideset_nodes, # updated in place above + mesh.sideset_sides, + mesh.sideset_side_nodes, # updated in place above + mesh.edge_conns, # TODO need to renumber + mesh.face_conns, # TODO need to renumber + ) + return new_mesh +end + +end # module diff --git a/src/FiniteElementContainers.jl b/src/FiniteElementContainers.jl index 680fe90..df42b26 100644 --- a/src/FiniteElementContainers.jl +++ b/src/FiniteElementContainers.jl @@ -76,6 +76,7 @@ export evolve! # Meshes export FileMesh +export StructuredMesh export UnstructuredMesh export coordinates export element_block_id_map @@ -142,6 +143,7 @@ import KernelAbstractions as KA using Adapt using Atomix using DocStringExtensions +using Exodus using ForwardDiff using Krylov using LinearAlgebra @@ -157,17 +159,24 @@ function cuda end function rocm end function communication_graph end +function create_partition end function decompose_mesh end function global_colorings end +function _dofs_exo_to_par_dicts end +function _elems_exo_to_par_dicts end +function _exo_to_par_dicts end +function _global_elem_to_global_node end +function _global_node_to_global_elem end +function _renumber_mesh end # TODO need to further specialize for staticarrays, etc. 
cpu(x) = adapt(Array, x) # TODO clean this up, make it make sense in an ordered way # include("parallel/Parallel.jl") - +include("PostProcessors.jl") include("fields/Fields.jl") -include("Meshes.jl") +include("meshes/Meshes.jl") include("FunctionSpaces.jl") include("Functions.jl") include("DofManagers.jl") @@ -177,8 +186,6 @@ include("ics/InitialConditions.jl") include("formulations/Formulations.jl") include("physics/Physics.jl") include("assemblers/Assemblers.jl") -include("PostProcessors.jl") - # include("TimeSteppers.jl") include("Parameters.jl") diff --git a/src/Meshes.jl b/src/Meshes.jl deleted file mode 100644 index 7ece0e7..0000000 --- a/src/Meshes.jl +++ /dev/null @@ -1,417 +0,0 @@ -elem_type_map = Dict{String, Type{<:ReferenceFiniteElements.AbstractElementType}}( - "HEX" => Hex8, - "HEX8" => Hex8, - "QUAD" => Quad4, - "QUAD4" => Quad4, - "QUAD9" => Quad9, - "TRI" => Tri3, - "TRI3" => Tri3, - "TRI6" => Tri6, - "TET" => Tet4, - "TETRA4" => Tet4, - "TETRA10" => Tet10 -) - -""" -$(TYPEDEF) -""" -abstract type AbstractMesh end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function coordinates(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function copy_mesh end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function element_block_id_map(::AbstractMesh, id) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function element_block_ids(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function element_block_names(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function element_connectivity(::AbstractMesh, id) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be 
overriden for specific mesh file format -""" -function element_type(::AbstractMesh, id) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function node_cmaps(::AbstractMesh, rank) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function node_id_map(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function nodeset(::AbstractMesh, id) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function nodesets(::AbstractMesh, ids) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function nodeset_ids(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function nodeset_names(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function sideset(::AbstractMesh, id) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function sidesets(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function sideset_ids(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function sideset_names(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function num_dimensions(::AbstractMesh) - @assert false -end -""" -$(TYPEDSIGNATURES) -Dummy method to be overriden for specific mesh file format -""" -function num_nodes(::AbstractMesh) - @assert false "This method needs to overriden in extensions!" 
-end - -""" -$(TYPEDSIGNATURES) -Returns file name for an mesh type -""" -file_name(mesh::AbstractMesh) = mesh.file_name - -# TODO this one is making JET not happy -""" -$(TYPEDEF) -$(TYPEDFIELDS) -Mesh type that has a handle to an open mesh file object. -This type's methods are "overridden" in extensions. - -See FiniteElementContainersExodusExt for an example. -""" -struct FileMesh{MeshObj} <: AbstractMesh - file_name::String - mesh_obj::MeshObj -end - -# TODO change to a type that subtypes AbstractMesh -function create_structured_mesh_data(Nx, Ny, xExtent, yExtent) - xs = LinRange(xExtent[1], xExtent[2], Nx) - ys = LinRange(yExtent[1], yExtent[2], Ny) - Ex = Nx - 1 - Ey = Ny - 1 - - coords = Matrix{Float64}(undef, 2, Nx * Ny) - n = 1 - for ny in 1:Ny - for nx in 1:Nx - coords[1, n] = xs[nx] - coords[2, n] = ys[ny] - n = n + 1 - end - end - - conns = Matrix{Int64}(undef, 3, 2 * Ex * Ey) - n = 1 - for ex in 1:Ex - for ey in 1:Ey - conns[1, n] = (ex - 1) + Nx * (ey - 1) + 1 - conns[2, n] = ex + Nx * (ey - 1) + 1 - conns[3, n] = ex + Nx * ey + 1 - conns[1, n + 1] = (ex - 1) + Nx * (ey - 1) + 1 - conns[2, n + 1] = ex + Nx * ey + 1 - conns[3, n + 1] = (ex - 1) + Nx * ey + 1 - n = n + 2 - end - end - return coords, conns -end - -function _create_edges!(all_edges, block_edges, el_to_nodes, ref_fe) - local_edge_to_nodes = ref_fe.edge_nodes - - # loop over all elements connectivity - for e in axes(el_to_nodes, 2) - el_nodes = el_to_nodes[:, e] - el_edges = map(x -> el_nodes[x], local_edge_to_nodes) - # loop over edges - local_edges = () - for edge in el_edges - sorted_edge = sort(edge).data - if !haskey(all_edges, sorted_edge) - all_edges[sorted_edge] = length(all_edges) + 1 - local_edges = (local_edges..., length(all_edges) + 1) - else - local_edges = (local_edges..., all_edges[sorted_edge]) - end - end - push!(block_edges, local_edges) - end - return nothing -end - -# this one isn't quite working -# function _create_faces!(all_faces, block_faces, el_to_nodes, ref_fe) -# 
local_face_to_nodes = ref_fe.face_nodes -# # display(local_face_to_nodes) -# # loop over all elements connectivity -# for e in axes(el_to_nodes, 2) -# el_nodes = el_to_nodes[:, e] -# el_faces = map(x -> el_nodes[x], local_face_to_nodes) -# # display(el_faces) -# # loop over faces -# local_faces = () -# for face in el_faces -# sorted_face = sort(face).data -# if !haskey(all_faces, sorted_face) -# all_faces[sorted_face] = length(all_faces) -# local_faces = (local_faces..., length(all_faces)) -# else -# local_faces = (local_faces..., all_faces[sorted_face]) -# end -# end -# push!(block_faces, local_faces) -# end -# return nothing -# end - -# new stuff below -""" -$(TYPEDEF) -$(TYPEDSIGNATURES) -$(TYPEDFIELDS) -""" -struct UnstructuredMesh{ - MeshObj, - ND, - RT <: Number, - IT <: Integer, - EConns, - EdgeConns, - FaceConns -} <: AbstractMesh - mesh_obj::MeshObj - nodal_coords::H1Field{RT, Vector{RT}, ND} - element_block_names::Vector{Symbol} - element_types::Vector{Symbol} - element_conns::EConns - element_id_maps::Dict{Symbol, Vector{IT}} - node_id_map::Vector{IT} - nodeset_nodes::Dict{Symbol, Vector{IT}} - sideset_elems::Dict{Symbol, Vector{IT}} - sideset_nodes::Dict{Symbol, Vector{IT}} - sideset_sides::Dict{Symbol, Vector{IT}} - sideset_side_nodes::Dict{Symbol, Matrix{IT}} - # new additions - edge_conns::EdgeConns - face_conns::FaceConns -end - -""" -$(TYPEDSIGNATURES) -""" -function UnstructuredMesh(file_type, file_name::String, create_edges::Bool, create_faces::Bool) - file = FileMesh(file_type, file_name) - return UnstructuredMesh(file, create_edges, create_faces) -end - -function UnstructuredMesh(file::FileMesh{T}, create_edges::Bool, create_faces::Bool) where T - - # read nodal coordinates - if num_dimensions(file) == 2 - coord_syms = (:X, :Y) - elseif num_dimensions(file) == 3 - coord_syms = (:X, :Y, :Z) - # else - # @assert false "Bad number of dimensions $(num_dimensions(file))" - end - - nodal_coords = coordinates(file) - nodal_coords = 
H1Field(nodal_coords) - - # read element block types, conn, etc. - el_block_ids = element_block_ids(file) - el_block_names = element_block_names(file) - el_block_names = Symbol.(el_block_names) - el_types = Symbol.(element_type.((file,), el_block_ids)) - el_conns = element_connectivity.((file,), el_block_ids) - # el_conns = Dict(zip(el_block_names, el_conns)) - el_conns = map(Connectivity, el_conns) - el_conns = NamedTuple{tuple(el_block_names...)}(tuple(el_conns...)) - el_id_maps = element_block_id_map.((file,), el_block_ids) - el_id_maps = Dict(zip(el_block_names, el_id_maps)) - # el_id_maps = NamedTuple{tuple(el_block_names...)}(tuple(el_id_maps...)) - - # read node id map - n_id_map = convert.(Int64, node_id_map(file)) - - # read nodesets - nset_names = Symbol.(nodeset_names(file)) - nsets = nodesets(file, nodeset_ids(file)) - nset_nodes = Dict(zip(nset_names, nsets)) - - # read sidesets - sset_names = Symbol.(sideset_names(file)) - ssets = sidesets(file, sideset_ids(file)) - - sset_elems = Dict(zip(sset_names, map(x -> x[1], ssets))) - sset_nodes = Dict(zip(sset_names, map(x -> x[2], ssets))) - sset_sides = Dict(zip(sset_names, map(x -> x[3], ssets))) - sset_side_nodes = Dict(zip(sset_names, map(x -> x[4], ssets))) - - - # TODO also add edges/faces for sidesets, this may be tricky... - - # TODO - # write methods to create edge and face connectivity - # hack for now since we need a ref fe for this stuff - # - # TODO maybe move this into function space... 
- - if create_edges - edges = () - all_edges = Dict{Tuple{Vararg{Int}}, Int}() - - for n in 1:length(el_types) - ref_fe = ReferenceFE(elem_type_map[el_types[n]]{Lagrange, 1}()) - block_edges = Vector{SVector{length(ref_fe.edge_nodes), Int}}(undef, 0) - _create_edges!(all_edges, block_edges, values(el_conns)[n], ref_fe) - edges = (edges..., Connectivity{length(block_edges[1]), length(block_edges)}(reduce(vcat, block_edges))) - # TODO need to create an id map - end - - edges = NamedTuple{keys(el_conns)}(edges) - else - edges = nothing - end - - # TODO need to finish this up - if create_faces - @assert num_dimensions(file) > 2 "Faces require a 3D mesh" - @assert false "Need to fix this..." - # faces = () - # all_faces = Dict{Tuple{Vararg{Int}}, Int}() - - # for n in 1:length(el_types) - # ref_fe = ReferenceFE(elem_type_map[el_types[n]]{Lagrange, 1}()) - # block_faces = Vector{SVector{length(ref_fe.face_nodes), Int}}(undef, 0) - # _create_faces!(all_faces, block_faces, values(el_conns)[n], ref_fe) - # faces = (faces..., block_faces) - # # TODO need to create an id map - # end - # @show length(all_faces) - # faces = NamedTuple{keys(el_conns)}(faces) - else - faces = nothing - end - - return UnstructuredMesh( - file, - nodal_coords, - el_block_names, el_types, el_conns, el_id_maps, - n_id_map, - nset_nodes, - sset_elems, - sset_nodes, - sset_sides, sset_side_nodes, - edges, faces - ) -end - -num_dimensions(mesh::UnstructuredMesh) = num_dimensions(mesh.mesh_obj) - -# different mesh types -abstract type AbstractMeshType end -struct ExodusMesh <: AbstractMeshType -end - -function _mesh_file_type end - -# dispatch based on file extension -""" -$(TYPEDSIGNATURES) -""" -function UnstructuredMesh(file_name::String; create_edges=false, create_faces=false) - ext = splitext(file_name) - if occursin(".g", file_name) || occursin(".e", file_name) || occursin(".exo", file_name) - return UnstructuredMesh(ExodusMesh, file_name, create_edges, create_faces) - else - 
throw(ErrorException("Unsupported file type with extension $ext")) - end -end - -# TODO write a show method diff --git a/src/PostProcessors.jl b/src/PostProcessors.jl index dce071d..1580e22 100644 --- a/src/PostProcessors.jl +++ b/src/PostProcessors.jl @@ -5,7 +5,7 @@ struct PostProcessor{O} field_output_db::O end -function PostProcessor(mesh::AbstractMesh, output_file::String, vars...) +function PostProcessor(mesh, output_file::String, vars...) copy_mesh(mesh.mesh_obj.file_name, output_file) db_type = typeof(mesh.mesh_obj)#.name.name # field_output_db = FileMesh(output_file) @@ -16,7 +16,7 @@ function PostProcessor(mesh::AbstractMesh, output_file::String, vars...) end ext = splitext(output_file) - if ext[2] == ".g" || ext[2] == ".e" || ext[2] == ".exo" + if occursin(".g", output_file) || occursin(".e", output_file) || occursin(".exo", output_file) pp = PostProcessor(ExodusMesh, output_file, vars...) else throw(ErrorException("Unsupported file type with extension $ext")) diff --git a/src/assemblers/SparsityPatterns.jl b/src/assemblers/SparsityPatterns.jl index cf71a23..aceaa93 100644 --- a/src/assemblers/SparsityPatterns.jl +++ b/src/assemblers/SparsityPatterns.jl @@ -233,7 +233,7 @@ function SparseVectorPattern(dof::DofManager) for (n, conn) in enumerate(values(fspace.elem_conns)) # TODO do we need this operation? 
conn = reshape(ids[:, conn], ND * size(conn, 1), size(conn, 2)) - n_entries += size(conn, 1)^2 * size(conn, 2) + n_entries += size(conn, 1) * size(conn, 2) end # setup pre-allocated arrays based on number of entries diff --git a/src/bcs/DirichletBCs.jl b/src/bcs/DirichletBCs.jl index cfe4429..bb46ab3 100644 --- a/src/bcs/DirichletBCs.jl +++ b/src/bcs/DirichletBCs.jl @@ -221,7 +221,12 @@ end function DirichletBCs(mesh, dof, dirichlet_bcs) if length(dirichlet_bcs) == 0 - return NamedTuple(), NamedTuple() + # return NamedTuple(), NamedTuple() + bc_caches = DirichletBCContainer{Vector{Int}, Vector{Float64}}[] + # ti = typeof(identity) + # bc_funcs = DirichletBCFunction{ti, ti, ti}[] + bc_funcs = NamedTuple() + return DirichletBCs(bc_caches, bc_funcs) end syms = map(x -> Symbol("dirichlet_bc_$x"), 1:length(dirichlet_bcs)) diff --git a/ext/FiniteElementContainersExodusExt.jl b/src/meshes/Exodus.jl similarity index 67% rename from ext/FiniteElementContainersExodusExt.jl rename to src/meshes/Exodus.jl index 16d344b..63a842a 100644 --- a/ext/FiniteElementContainersExodusExt.jl +++ b/src/meshes/Exodus.jl @@ -1,30 +1,23 @@ -module FiniteElementContainersExodusExt - -using DocStringExtensions -using Exodus -using FiniteElementContainers -using Tensors - """ $(TYPEDEF) $(TYPEDFIELDS) """ -function FiniteElementContainers.FileMesh(::Type{<:FiniteElementContainers.ExodusMesh}, file_name::String) +function FileMesh(::ExodusMesh, file_name::String) exo = ExodusDatabase(file_name, "r") return FileMesh{typeof(exo)}(file_name, exo) end -function FiniteElementContainers._mesh_file_type(::Type{FiniteElementContainers.ExodusMesh}) +function _mesh_file_type(::Type{ExodusMesh}) return ExodusDatabase end -function FiniteElementContainers.num_dimensions( +function num_dimensions( mesh::FileMesh{ExodusDatabase{M, I, B, F}} )::Int32 where {M, I, B, F} return Exodus.num_dimensions(mesh.mesh_obj.init) end -function FiniteElementContainers.num_nodes( +function num_nodes( 
mesh::FileMesh{<:ExodusDatabase} )::Int32 return Exodus.num_nodes(mesh.mesh_obj.init) @@ -33,67 +26,67 @@ end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.element_block_id_map(mesh::FileMesh{<:ExodusDatabase}, id) +function element_block_id_map(mesh::FileMesh{<:ExodusDatabase}, id) return convert.(Int64, Exodus.read_block_id_map(mesh.mesh_obj, id)) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.element_block_ids(mesh::FileMesh{<:ExodusDatabase}) +function element_block_ids(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_ids(mesh.mesh_obj, Block) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.element_block_names(mesh::FileMesh{<:ExodusDatabase}) +function element_block_names(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_names(mesh.mesh_obj, Block) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.node_cmaps(mesh::FileMesh{<:ExodusDatabase}, rank) +function node_cmaps(mesh::FileMesh{<:ExodusDatabase}, rank) return Exodus.read_node_cmaps(rank, mesh.mesh_obj) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.nodeset_ids(mesh::FileMesh{<:ExodusDatabase}) +function nodeset_ids(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_ids(mesh.mesh_obj, NodeSet) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.nodeset_names(mesh::FileMesh{<:ExodusDatabase}) +function nodeset_names(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_names(mesh.mesh_obj, NodeSet) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.sideset_ids(mesh::FileMesh{<:ExodusDatabase}) +function sideset_ids(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_ids(mesh.mesh_obj, SideSet) end """ $(TYPEDSIGNATURES) """ -function FiniteElementContainers.sideset_names(mesh::FileMesh{<:ExodusDatabase}) +function sideset_names(mesh::FileMesh{<:ExodusDatabase}) return Exodus.read_names(mesh.mesh_obj, SideSet) end """ $(TYPEDSIGNATURES) """ -function 
FiniteElementContainers.coordinates(mesh::FileMesh{ExodusDatabase{M, I, B, F}})::Matrix{F} where {M, I, B, F} +function coordinates(mesh::FileMesh{ExodusDatabase{M, I, B, F}})::Matrix{F} where {M, I, B, F} coords = Exodus.read_coordinates(mesh.mesh_obj) return coords end -function FiniteElementContainers.element_connectivity( +function element_connectivity( mesh::FileMesh{<:ExodusDatabase}, id::Integer ) @@ -102,7 +95,7 @@ function FiniteElementContainers.element_connectivity( return convert.(Int64, block.conn) end -function FiniteElementContainers.element_connectivity( +function element_connectivity( mesh::FileMesh{<:ExodusDatabase}, name::String ) @@ -112,7 +105,7 @@ function FiniteElementContainers.element_connectivity( end -function FiniteElementContainers.element_type( +function element_type( mesh::FileMesh{<:ExodusDatabase}, id::Integer ) @@ -120,7 +113,7 @@ function FiniteElementContainers.element_type( return block.elem_type end -function FiniteElementContainers.element_type( +function element_type( mesh::FileMesh{<:ExodusDatabase}, name::String ) @@ -128,17 +121,17 @@ function FiniteElementContainers.element_type( return block.elem_type end -function FiniteElementContainers.copy_mesh(file_1::String, file_2::String) +function copy_mesh(file_1::String, file_2::String) Exodus.copy_mesh(file_1, file_2) end -function FiniteElementContainers.node_id_map( +function node_id_map( mesh::FileMesh{<:ExodusDatabase} ) return read_id_map(mesh.mesh_obj, NodeMap) end -function FiniteElementContainers.nodeset( +function nodeset( mesh::FileMesh{<:ExodusDatabase}, id::Integer ) @@ -146,14 +139,14 @@ function FiniteElementContainers.nodeset( return sort!(convert.(Int64, nset.nodes)) end -function FiniteElementContainers.nodesets( +function nodesets( mesh::FileMesh{<:ExodusDatabase}, ids ) - return FiniteElementContainers.nodeset.((mesh,), ids) + return nodeset.((mesh,), ids) end -function FiniteElementContainers.sideset( +function sideset( mesh::FileMesh{<:ExodusDatabase}, 
id::Integer ) @@ -180,16 +173,19 @@ function FiniteElementContainers.sideset( return elems[perm], nodes, sides[perm], side_nodes end -function FiniteElementContainers.sidesets( +function sidesets( mesh::FileMesh{<:ExodusDatabase}, ids ) - return FiniteElementContainers.sideset.((mesh,), ids) + return sideset.((mesh,), ids) end + +# TODO move below stuff to PostProcessor.jl since we'll always want to output +# to exodus. # PostProcessor implementations -function FiniteElementContainers.PostProcessor( - ::Type{<:FiniteElementContainers.ExodusMesh}, +function PostProcessor( + ::Type{<:ExodusMesh}, file_name::String, vars... ) @@ -206,7 +202,7 @@ function FiniteElementContainers.PostProcessor( append!(element_var_names, names(var)) # L2QuadratureField case below elseif isa(var.fspace.coords, NamedTuple) - max_q_points = mapreduce(FiniteElementContainers.num_quadrature_points, maximum, values(var.fspace.ref_fes)) + max_q_points = mapreduce(num_quadrature_points, maximum, values(var.fspace.ref_fes)) temp_names = Symbol[] for name in names(var) for q in 1:max_q_points @@ -243,14 +239,14 @@ function FiniteElementContainers.PostProcessor( return PostProcessor(exo) end -function FiniteElementContainers.write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:Number}) +function write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:Number}) for q in axes(field, 1) var_name = String(field_name) * "_$q" write_values(pp.field_output_db, ElementVariable, time_index, block_name, var_name, field[q, :]) end end -function FiniteElementContainers.write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:SymmetricTensor{2, 3, <:Number, 6}}) +function write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:SymmetricTensor{2, 3, <:Number, 6}}) exts = ("xx", "yy", "zz", "yz", "xz", "xy") for (n, ext) in enumerate(exts) for q in axes(field, 1) @@ -261,7 +257,7 @@ function 
FiniteElementContainers.write_field(pp::PostProcessor, time_index, bloc end end -function FiniteElementContainers.write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:Tensor{2, 3, <:Number, 9}}) +function write_field(pp::PostProcessor, time_index, block_name, field_name, field::Matrix{<:Tensor{2, 3, <:Number, 9}}) exts = ("xx", "yy", "zz", "yz", "xz", "xy", "zy", "zx", "yx") for (n, ext) in enumerate(exts) for q in axes(field, 1) @@ -272,7 +268,7 @@ function FiniteElementContainers.write_field(pp::PostProcessor, time_index, bloc end end -function FiniteElementContainers.write_field(pp::PostProcessor, time_index::Int, field_names, field::H1Field) +function write_field(pp::PostProcessor, time_index::Int, field_names, field::H1Field) @assert length(field_names) == num_fields(field) for n in axes(field, 1) name = String(field_names[n]) @@ -280,7 +276,7 @@ function FiniteElementContainers.write_field(pp::PostProcessor, time_index::Int, end end -function FiniteElementContainers.write_field(pp::PostProcessor, time_index::Int, field_names, field::NamedTuple) +function write_field(pp::PostProcessor, time_index::Int, field_names, field::NamedTuple) @assert length(field_names) == length(field) field_names = String.(field_names) for (block, val) in field @@ -290,8 +286,6 @@ function FiniteElementContainers.write_field(pp::PostProcessor, time_index::Int, end end -function FiniteElementContainers.write_times(pp::PostProcessor, time_index::Int, time_val::Float64) +function write_times(pp::PostProcessor, time_index::Int, time_val::Float64) Exodus.write_time(pp.field_output_db, time_index, time_val) end - -end # module diff --git a/src/meshes/Meshes.jl b/src/meshes/Meshes.jl new file mode 100644 index 0000000..7c9d2dc --- /dev/null +++ b/src/meshes/Meshes.jl @@ -0,0 +1,100 @@ +elem_type_map = Dict{String, Type{<:ReferenceFiniteElements.AbstractElementType}}( + "HEX" => Hex8, + "HEX8" => Hex8, + "QUAD" => Quad4, + "QUAD4" => Quad4, + "QUAD9" => Quad9, + 
"TRI" => Tri3, + "TRI3" => Tri3, + "TRI6" => Tri6, + "TET" => Tet4, + "TETRA4" => Tet4, + "TETRA10" => Tet10 +) + +# different mesh types +abstract type AbstractMeshType end +struct AbaqusMesh <: AbstractMeshType +end +struct ExodusMesh <: AbstractMeshType +end + +""" +$(TYPEDEF) +""" +abstract type AbstractMesh end + + + +""" +$(TYPEDSIGNATURES) +Returns file name for an mesh type +""" +file_name(mesh::AbstractMesh) = mesh.file_name + +# TODO this one is making JET not happy +""" +$(TYPEDEF) +$(TYPEDFIELDS) +Mesh type that has a handle to an open mesh file object. +This type's methods are "overridden" in extensions. + +See FiniteElementContainersExodusExt for an example. +""" +struct FileMesh{MeshObj} <: AbstractMesh + file_name::String + mesh_obj::MeshObj +end + + + +function _create_edges!(all_edges, block_edges, el_to_nodes, ref_fe) + local_edge_to_nodes = ref_fe.edge_nodes + + # loop over all elements connectivity + for e in axes(el_to_nodes, 2) + el_nodes = el_to_nodes[:, e] + el_edges = map(x -> el_nodes[x], local_edge_to_nodes) + # loop over edges + local_edges = () + for edge in el_edges + sorted_edge = sort(edge).data + if !haskey(all_edges, sorted_edge) + all_edges[sorted_edge] = length(all_edges) + 1 + local_edges = (local_edges..., length(all_edges) + 1) + else + local_edges = (local_edges..., all_edges[sorted_edge]) + end + end + push!(block_edges, local_edges) + end + return nothing +end + +# this one isn't quite working +# function _create_faces!(all_faces, block_faces, el_to_nodes, ref_fe) +# local_face_to_nodes = ref_fe.face_nodes +# # display(local_face_to_nodes) +# # loop over all elements connectivity +# for e in axes(el_to_nodes, 2) +# el_nodes = el_to_nodes[:, e] +# el_faces = map(x -> el_nodes[x], local_face_to_nodes) +# # display(el_faces) +# # loop over faces +# local_faces = () +# for face in el_faces +# sorted_face = sort(face).data +# if !haskey(all_faces, sorted_face) +# all_faces[sorted_face] = length(all_faces) +# local_faces = 
(local_faces..., length(all_faces)) +# else +# local_faces = (local_faces..., all_faces[sorted_face]) +# end +# end +# push!(block_faces, local_faces) +# end +# return nothing +# end + +include("StructuredMesh.jl") +include("UnstructuredMesh.jl") \ No newline at end of file diff --git a/src/meshes/StructuredMesh.jl b/src/meshes/StructuredMesh.jl new file mode 100644 index 0000000..e17451d --- /dev/null +++ b/src/meshes/StructuredMesh.jl @@ -0,0 +1,308 @@ +struct StructuredMesh{ + ND, + RT <: Number, + IT <: Integer, + EConns +} <: AbstractMesh + nodal_coords::H1Field{RT, Vector{RT}, ND} + element_block_names::Vector{Symbol} + element_types::Vector{Symbol} + element_conns::EConns + element_id_maps::Dict{Symbol, Vector{IT}} + node_id_map::Vector{IT} + nodeset_nodes::Dict{Symbol, Vector{IT}} + sideset_elems::Dict{Symbol, Vector{IT}} + sideset_nodes::Dict{Symbol, Vector{IT}} + sideset_sides::Dict{Symbol, Vector{IT}} + sideset_side_nodes::Dict{Symbol, Matrix{IT}} +end + +function StructuredMesh(element_type, mins, maxs, counts) + @assert length(mins) == length(maxs) + @assert length(maxs) == length(counts) + for (n, (m1, m2)) in enumerate(zip(mins, maxs)) + if m1 >= m2 + @error "Dimension $n has negative or zero length" + throw(BoundsError()) + end + end + element_type = uppercase(element_type) + if occursin(element_type, "HEX") + @assert false "Implement hex case" + elseif occursin(element_type, "QUAD") + element_type = :QUAD4 + num_dims = 2 + nodal_coords, element_conns, nodeset_nodes, + sideset_elems, sideset_nodes, sideset_sides, sideset_side_nodes = _quad4_structured_mesh_data( + counts[1], counts[2], (mins[1], maxs[1]), (mins[2], maxs[2]) + ) + elseif occursin(element_type, "TET") + @assert false "Implement tet case" + elseif occursin(element_type, "TRI") + element_type = :TRI3 + num_dims = 2 + nodal_coords, element_conns, nodeset_nodes, + sideset_elems, sideset_nodes, sideset_sides, sideset_side_nodes = _tri3_structured_mesh_data( + counts[1], counts[2], 
(mins[1], maxs[1]), (mins[2], maxs[2]) + ) + else + throw(ArgumentError("Unsupported element type $element_type")) + end + # coords, conns + nodal_coords = H1Field(nodal_coords) + element_block_names = Symbol[:block_1] + element_types = Symbol[element_type] + element_id_maps = Dict(:block_1 => 1:size(element_conns, 2) |> collect) + element_conns = Dict(:block_1 => L2ElementField(element_conns)) + node_id_map = 1:size(nodal_coords, 2) |> collect + + element_conns = NamedTuple(element_conns) + + return StructuredMesh( + nodal_coords, + element_block_names, element_types, + element_conns, element_id_maps, + node_id_map, + nodeset_nodes, + sideset_elems, sideset_nodes, + sideset_sides, sideset_side_nodes + ) +end + +function Base.show(io::IO, mesh::StructuredMesh) + println(io, "StructuredMesh:") + println(io, " Number of dimensions = $(size(mesh.nodal_coords, 1))") + println(io, " Number of nodes = $(size(mesh.nodal_coords, 2))") + println(io, " Number of elements = $(size(mesh.element_conns[:block_1], 2))") + println(io, " Element type = $(mesh.element_types[1])") + +end + +function _quad4_structured_mesh_data(N_x, N_y, x_extent, y_extent) + E_x = N_x - 1 + E_y = N_y - 1 + + coords = Matrix{Float64}(undef, 2, N_x * N_y) + _two_dimensional_coords!(coords, x_extent, y_extent, N_x, N_y) + + node(i, j) = i + (j - 1) * N_x + + conns = Matrix{Int64}(undef, 4, E_x * E_y) + n = 1 + for ex in 1:E_x + for ey in 1:E_y + conns[1, n] = node(ex, ey) + conns[2, n] = node(ex + 1, ey) + conns[3, n] = node(ex + 1, ey + 1) + conns[4, n] = node(ex, ey + 1) + n = n + 1 + end + end + nodeset_nodes = _two_dimensional_nsets(N_x, N_y) + return coords, conns, nodeset_nodes, _quad4_ssets(conns, N_x, N_y)... 
+end + +function _quad4_ssets(conns, N_x, N_y) + E_x = N_x - 1 + E_y = N_y - 1 + elem(i, j) = (i - 1) * E_y + j + + sideset_elems = Dict{Symbol, Vector{Int}}( + :bottom => Int[], + :right => Int[], + :top => Int[], + :left => Int[] + ) + sideset_side_nodes = Dict{Symbol, Matrix{Int}}( + :bottom => Matrix{Int}(undef, 2, E_x), + :right => Matrix{Int}(undef, 2, E_y), + :top => Matrix{Int}(undef, 2, E_x), + :left => Matrix{Int}(undef, 2, E_y) + ) + sideset_sides = Dict{Symbol, Vector{Int}}( + :bottom => Int[], + :right => Int[], + :top => Int[], + :left => Int[] + ) + + # bottom + j = 1 + for i in 1:E_x + e = elem(i, j) + n1, n2 = conns[1, e], conns[2, e] + push!(sideset_elems[:bottom], e) + push!(sideset_sides[:bottom], 1) + sideset_side_nodes[:bottom][:, i] .= [n1, n2] + end + + # right + i = E_x + for j in 1:E_y + e = elem(i, j) + n2, n3 = conns[2, e], conns[3, e] + push!(sideset_elems[:right], e) + push!(sideset_sides[:right], 2) + sideset_side_nodes[:right][:, j] .= [n2, n3] + end + + # top + j = E_y + for i in 1:E_x + e = elem(i, j) + n3, n4 = conns[3, e], conns[4, e] + push!(sideset_elems[:top], e) + push!(sideset_sides[:top], 3) + sideset_side_nodes[:top][:, i] .= [n3, n4] + end + + # left + i = 1 + for j in 1:E_y + e = elem(i, j) + n4, n1 = conns[4, e], conns[1, e] + push!(sideset_elems[:left], e) + push!(sideset_sides[:left], 4) + sideset_side_nodes[:left][:, j] .= [n4, n1] + end + + sideset_nodes = Dict{Symbol, Vector{Int}}() + for (k, v) in sideset_side_nodes + sideset_nodes[k] = unique(v) + end + + return sideset_elems, sideset_nodes, sideset_sides, sideset_side_nodes +end + +function _tri3_structured_mesh_data(N_x, N_y, x_extent, y_extent) + E_x = N_x - 1 + E_y = N_y - 1 + + coords = Matrix{Float64}(undef, 2, N_x * N_y) + _two_dimensional_coords!(coords, x_extent, y_extent, N_x, N_y) + + conns = Matrix{Int64}(undef, 3, 2 * E_x * E_y) + n = 1 + for ex in 1:E_x + for ey in 1:E_y + conns[1, n] = (ex - 1) + N_x * (ey - 1) + 1 + conns[2, n] = ex + N_x * (ey - 
1) + 1 + conns[3, n] = ex + N_x * ey + 1 + conns[1, n + 1] = (ex - 1) + N_x * (ey - 1) + 1 + conns[2, n + 1] = ex + N_x * ey + 1 + conns[3, n + 1] = (ex - 1) + N_x * ey + 1 + n = n + 2 + end + end + nodeset_nodes = _two_dimensional_nsets(N_x, N_y) + return coords, conns, nodeset_nodes, _tri3_ssets(conns, N_x, N_y)... +end + +# TODO finish me +function _tri3_ssets(conns, N_x, N_y) + E_x = N_x - 1 + E_y = N_y - 1 + quad(i, j) = (i - 1) * E_y + j + tri_a(q) = 2q - 1 + tri_b(q) = 2q + + sideset_elems = Dict{Symbol, Vector{Int}}( + :bottom => Int[], + :right => Int[], + :top => Int[], + :left => Int[] + ) + sideset_side_nodes = Dict{Symbol, Matrix{Int}}( + :bottom => Matrix{Int}(undef, 2, E_x), + :right => Matrix{Int}(undef, 2, E_y), + :top => Matrix{Int}(undef, 2, E_x), + :left => Matrix{Int}(undef, 2, E_y) + ) + sideset_sides = Dict{Symbol, Vector{Int}}( + :bottom => Int[], + :right => Int[], + :top => Int[], + :left => Int[] + ) + + # bottom + j = 1 + for i in 1:E_x + q = quad(i, j) + e = tri_a(q) + n1, n2 = conns[1, e], conns[2, e] + push!(sideset_elems[:bottom], e) + push!(sideset_sides[:bottom], 1) + sideset_side_nodes[:bottom][:, i] .= [n1, n2] + end + + # right + i = E_x + for j in 1:E_y + q = quad(i, j) + e = tri_a(q) + n2, n3 = conns[2, e], conns[3, e] + push!(sideset_elems[:right], e) + push!(sideset_sides[:right], 2) + sideset_side_nodes[:right][:, j] .= [n2, n3] + end + + # top + j = E_y + for i in 1:E_x + q = quad(i, j) + e = tri_b(q) + n3, n4 = conns[2, e], conns[3, e] + push!(sideset_elems[:top], e) + push!(sideset_sides[:top], 2) + sideset_side_nodes[:top][:, i] .= [n3, n4] + end + + # left + i = 1 + for j in 1:E_y + q = quad(i, j) + e = tri_b(q) + n4, n1 = conns[3, e], conns[1, e] + push!(sideset_elems[:left], e) + push!(sideset_sides[:left], 3) + sideset_side_nodes[:left][:, j] .= [n4, n1] + end + + sideset_nodes = Dict{Symbol, Vector{Int}}() + for (k, v) in sideset_side_nodes + sideset_nodes[k] = unique(v) + end + + return sideset_elems, 
sideset_nodes, sideset_sides, sideset_side_nodes +end + +function _two_dimensional_coords!(coords, x_extent, y_extent, N_x, N_y) + xs = LinRange(x_extent[1], x_extent[2], N_x) + ys = LinRange(y_extent[1], y_extent[2], N_y) + n = 1 + for ny in 1:N_x + for nx in 1:N_y + coords[1, n] = xs[nx] + coords[2, n] = ys[ny] + n = n + 1 + end + end + return nothing +end + +function _two_dimensional_nsets(N_x, N_y) + node(i, j) = i + (j - 1) * N_x + + bottom = [node(i, 1) for i in 1:N_x] + right = [node(N_x, j) for j in 1:N_y] + top = [node(i, N_y) for i in 1:N_x] + left = [node(1, j) for j in 1:N_y] + + return Dict{Symbol, Vector{Int}}( + :bottom => bottom, + :right => right, + :top => top, + :left => left + ) +end diff --git a/src/meshes/UnstructuredMesh.jl b/src/meshes/UnstructuredMesh.jl new file mode 100644 index 0000000..83ff23c --- /dev/null +++ b/src/meshes/UnstructuredMesh.jl @@ -0,0 +1,164 @@ +""" +$(TYPEDEF) +$(TYPEDSIGNATURES) +$(TYPEDFIELDS) +""" +struct UnstructuredMesh{ + MeshObj, + ND, + RT <: Number, + IT <: Integer, + EConns, + EdgeConns, + FaceConns +} <: AbstractMesh + mesh_obj::MeshObj + nodal_coords::H1Field{RT, Vector{RT}, ND} + element_block_names::Vector{Symbol} + element_types::Vector{Symbol} + element_conns::EConns + element_id_maps::Dict{Symbol, Vector{IT}} + node_id_map::Vector{IT} + nodeset_nodes::Dict{Symbol, Vector{IT}} + sideset_elems::Dict{Symbol, Vector{IT}} + sideset_nodes::Dict{Symbol, Vector{IT}} + sideset_sides::Dict{Symbol, Vector{IT}} + sideset_side_nodes::Dict{Symbol, Matrix{IT}} + # new additions + edge_conns::EdgeConns + face_conns::FaceConns +end + +""" +$(TYPEDSIGNATURES) +""" +function UnstructuredMesh(file_type::AbstractMeshType, file_name::String, create_edges::Bool, create_faces::Bool) + file = FileMesh(file_type, file_name) + return UnstructuredMesh(file, create_edges, create_faces) +end + +function UnstructuredMesh(file::FileMesh{T}, create_edges::Bool, create_faces::Bool) where T + + # read nodal coordinates + if 
num_dimensions(file) == 2 + coord_syms = (:X, :Y) + elseif num_dimensions(file) == 3 + coord_syms = (:X, :Y, :Z) + # else + # @assert false "Bad number of dimensions $(num_dimensions(file))" + end + + nodal_coords = coordinates(file) + nodal_coords = H1Field(nodal_coords) + + # read element block types, conn, etc. + el_block_ids = element_block_ids(file) + el_block_names = element_block_names(file) + el_block_names = Symbol.(el_block_names) + el_types = Symbol.(element_type.((file,), el_block_ids)) + el_conns = element_connectivity.((file,), el_block_ids) + # el_conns = Dict(zip(el_block_names, el_conns)) + el_conns = map(Connectivity, el_conns) + el_conns = NamedTuple{tuple(el_block_names...)}(tuple(el_conns...)) + el_id_maps = element_block_id_map.((file,), el_block_ids) + el_id_maps = Dict(zip(el_block_names, el_id_maps)) + # el_id_maps = NamedTuple{tuple(el_block_names...)}(tuple(el_id_maps...)) + + # read node id map + n_id_map = convert.(Int64, node_id_map(file)) + + # read nodesets + nset_names = Symbol.(nodeset_names(file)) + nsets = nodesets(file, nodeset_ids(file)) + nset_nodes = Dict(zip(nset_names, nsets)) + + # read sidesets + sset_names = Symbol.(sideset_names(file)) + ssets = sidesets(file, sideset_ids(file)) + + sset_elems = Dict(zip(sset_names, map(x -> x[1], ssets))) + sset_nodes = Dict(zip(sset_names, map(x -> x[2], ssets))) + sset_sides = Dict(zip(sset_names, map(x -> x[3], ssets))) + sset_side_nodes = Dict(zip(sset_names, map(x -> x[4], ssets))) + + + # TODO also add edges/faces for sidesets, this may be tricky... + + # TODO + # write methods to create edge and face connectivity + # hack for now since we need a ref fe for this stuff + # + # TODO maybe move this into function space... 
+ + if create_edges + edges = () + all_edges = Dict{Tuple{Vararg{Int}}, Int}() + + for n in 1:length(el_types) + ref_fe = ReferenceFE(elem_type_map[el_types[n]]{Lagrange, 1}()) + block_edges = Vector{SVector{length(ref_fe.edge_nodes), Int}}(undef, 0) + _create_edges!(all_edges, block_edges, values(el_conns)[n], ref_fe) + edges = (edges..., Connectivity{length(block_edges[1]), length(block_edges)}(reduce(vcat, block_edges))) + # TODO need to create an id map + end + + edges = NamedTuple{keys(el_conns)}(edges) + else + edges = nothing + end + + # TODO need to finish this up + if create_faces + @assert num_dimensions(file) > 2 "Faces require a 3D mesh" + @assert false "Need to fix this..." + # faces = () + # all_faces = Dict{Tuple{Vararg{Int}}, Int}() + + # for n in 1:length(el_types) + # ref_fe = ReferenceFE(elem_type_map[el_types[n]]{Lagrange, 1}()) + # block_faces = Vector{SVector{length(ref_fe.face_nodes), Int}}(undef, 0) + # _create_faces!(all_faces, block_faces, values(el_conns)[n], ref_fe) + # faces = (faces..., block_faces) + # # TODO need to create an id map + # end + # @show length(all_faces) + # faces = NamedTuple{keys(el_conns)}(faces) + else + faces = nothing + end + + return UnstructuredMesh( + file, + nodal_coords, + el_block_names, el_types, el_conns, el_id_maps, + n_id_map, + nset_nodes, + sset_elems, + sset_nodes, + sset_sides, sset_side_nodes, + edges, faces + ) +end + +num_dimensions(mesh::UnstructuredMesh) = num_dimensions(mesh.mesh_obj) + +function _mesh_file_type end + +include("Exodus.jl") + +# dispatch based on file extension +""" +$(TYPEDSIGNATURES) +""" +function UnstructuredMesh(file_name::String; create_edges=false, create_faces=false) + ext = splitext(file_name) + if occursin(".g", file_name) || occursin(".e", file_name) || occursin(".exo", file_name) + return UnstructuredMesh(ExodusMesh(), file_name, create_edges, create_faces) + elseif occursin(".i", file_name) || occursin(".inp", file_name) + return UnstructuredMesh(AbaqusMesh(), 
file_name, create_edges, create_faces) + else + throw(ErrorException("Unsupported file type with extension $ext")) + end +end + +# TODO write a show method diff --git a/test/TestMesh.jl b/test/TestMesh.jl index 110f23a..9689765 100644 --- a/test/TestMesh.jl +++ b/test/TestMesh.jl @@ -1,14 +1,54 @@ +using FiniteElementContainers +using Test function test_structured_mesh() - coords, conns = FiniteElementContainers.create_structured_mesh_data(3, 3, [0., 1.], [0., 1.]) - @test coords ≈ [ - 0.0 0.5 1.0 0.0 0.5 1.0 0.0 0.5 1.0; - 0.0 0.0 0.0 0.5 0.5 0.5 1.0 1.0 1.0 - ] - @test conns == [ - 1 1 4 4 2 2 5 5; - 2 5 5 8 3 6 6 9; - 5 4 8 7 6 5 9 8 - ] + # testing bad input + @test_throws ArgumentError StructuredMesh("bad element", (0., 0.), (1., 1.), (3, 3)) + @test_throws BoundsError StructuredMesh("tri3", (0., 0.), (0., 1.), (3, 3)) + + # Quad4 test + mesh = StructuredMesh("quad", (0., 0.), (1., 1.), (3, 3)) + coords = mesh.nodal_coords + @test coords ≈ H1Field([ + 0.0 0.5 1.0 0.0 0.5 1.0 0.0 0.5 1.0; + 0.0 0.0 0.0 0.5 0.5 0.5 1.0 1.0 1.0 + ]) + @test mesh.element_conns[:block_1] ≈ L2ElementField([ + 1 4 2 5; + 2 5 3 6; + 5 8 6 9; + 4 7 5 8 + ]) + @test all(coords[2, mesh.nodeset_nodes[:bottom]] .≈ 0.) + @test all(coords[1, mesh.nodeset_nodes[:right]] .≈ 1.) + @test all(coords[2, mesh.nodeset_nodes[:top]] .≈ 1.) + @test all(coords[1, mesh.nodeset_nodes[:left]] .≈ 0.) + + @test all(coords[2, mesh.sideset_nodes[:bottom]] .≈ 0.) + @test all(coords[1, mesh.sideset_nodes[:right]] .≈ 1.) + @test all(coords[2, mesh.sideset_nodes[:top]] .≈ 1.) + @test all(coords[1, mesh.sideset_nodes[:left]] .≈ 0.) + # tri3 test + mesh = StructuredMesh("tri", (0., 0.), (1., 1.), (3, 3)) + @test mesh.nodal_coords ≈ H1Field([ + 0.0 0.5 1.0 0.0 0.5 1.0 0.0 0.5 1.0; + 0.0 0.0 0.0 0.5 0.5 0.5 1.0 1.0 1.0 + ]) + @test mesh.element_conns[:block_1] ≈ L2ElementField([ + 1 1 4 4 2 2 5 5; + 2 5 5 8 3 6 6 9; + 5 4 8 7 6 5 9 8 + ]) + @test all(coords[2, mesh.nodeset_nodes[:bottom]] .≈ 0.) 
+ @test all(coords[1, mesh.nodeset_nodes[:right]] .≈ 1.) + @test all(coords[2, mesh.nodeset_nodes[:top]] .≈ 1.) + @test all(coords[1, mesh.nodeset_nodes[:left]] .≈ 0.) + + @test all(coords[2, mesh.sideset_nodes[:bottom]] .≈ 0.) + @test all(coords[1, mesh.sideset_nodes[:right]] .≈ 1.) + @test all(coords[2, mesh.sideset_nodes[:top]] .≈ 1.) + @test all(coords[1, mesh.sideset_nodes[:left]] .≈ 0.) + + @show mesh end # @testset ExtendedTestSet "Mesh" begin @@ -21,22 +61,22 @@ end # @test ExtendedTestSet "Mesh definition" begin function test_bad_mesh_methods() mesh = DummyMesh() - @test_throws AssertionError coordinates(mesh) - @test_throws AssertionError element_block_id_map(mesh, 1) - @test_throws AssertionError element_block_ids(mesh) - @test_throws AssertionError element_block_names(mesh) - @test_throws AssertionError element_connectivity(mesh, 1) - @test_throws AssertionError element_type(mesh, 1) - @test_throws AssertionError nodeset(mesh, 1) - @test_throws AssertionError nodesets(mesh, [1]) - @test_throws AssertionError nodeset_ids(mesh) - @test_throws AssertionError nodeset_names(mesh) - @test_throws AssertionError num_dimensions(mesh) - @test_throws AssertionError num_nodes(mesh) - @test_throws AssertionError sideset(mesh, 1) - @test_throws AssertionError sidesets(mesh) - @test_throws AssertionError sideset_ids(mesh) - @test_throws AssertionError sideset_names(mesh) + @test_throws MethodError coordinates(mesh) + @test_throws MethodError element_block_id_map(mesh, 1) + @test_throws MethodError element_block_ids(mesh) + @test_throws MethodError element_block_names(mesh) + @test_throws MethodError element_connectivity(mesh, 1) + @test_throws MethodError element_type(mesh, 1) + @test_throws MethodError nodeset(mesh, 1) + @test_throws MethodError nodesets(mesh, [1]) + @test_throws MethodError nodeset_ids(mesh) + @test_throws MethodError nodeset_names(mesh) + @test_throws MethodError num_dimensions(mesh) + @test_throws MethodError num_nodes(mesh) + @test_throws 
MethodError sideset(mesh, 1) + @test_throws MethodError sidesets(mesh) + @test_throws MethodError sideset_ids(mesh) + @test_throws MethodError sideset_names(mesh) end function test_bad_mesh_file_type() diff --git a/test/ext/TestPartitionedArraysExt.jl b/test/ext/TestPartitionedArraysExt.jl new file mode 100644 index 0000000..f5fc852 --- /dev/null +++ b/test/ext/TestPartitionedArraysExt.jl @@ -0,0 +1,168 @@ +using Exodus +using FiniteElementContainers +using PartitionedArrays +using Test + +function test_decomp_to_epu() + mesh_file = dirname(Base.source_dir()) * "/poisson/poisson.g" + num_ranks = 4 + FiniteElementContainers.decompose_mesh(mesh_file, num_ranks, 1) + + new_mesh_file = dirname(Base.source_dir()) * "/poisson/temp_mesh.g" + + for n in 1:num_ranks + @test isfile(mesh_file * ".$num_ranks.$(n - 1)") + end + @test isfile(mesh_file * ".nem") + @test isfile(mesh_file * ".pex") + + for n in 1:num_ranks + cp( + mesh_file * ".$num_ranks.$(n - 1)", + new_mesh_file * ".$num_ranks.$(n - 1)"; + force = true + ) + end + + Exodus.epu(new_mesh_file) + temp = split(new_mesh_file, "/")[end] + @test Exodus.exodiff(mesh_file, String(temp)) + + # cleanup + for n in 1:num_ranks + rm(mesh_file * ".$num_ranks.$(n - 1)"; force = true) + rm(new_mesh_file * ".$num_ranks.$(n - 1)"; force = true) + end + rm(mesh_file * ".nem"; force = true) + rm(mesh_file * ".pex"; force = true) + rm(temp; force = true) + rm(dirname(mesh_file) * "/decomp.log"; force = true) + rm(dirname(temp) * "/epu.log"; force = true) + rm(dirname(dirname(mesh_file)) * "/epu_err.log"; force = true) + rm(dirname(dirname(mesh_file)) * "/exodiff.log"; force = true) +end + +# function test_global_colors(num_dofs, num_ranks) +# mesh_file = dirname(Base.source_dir()) * "/poisson/poisson.g" +# FiniteElementContainers.decompose_mesh(mesh_file, num_ranks, 1) +# # global_colorings = FiniteElementContainers.global_colorings(mesh_file, num_dofs, num_ranks) + + + +# exo = ExodusDatabase(mesh_file, "r") +# n_nodes = 
Exodus.initialization(exo).num_nodes +# # make sure it's the right length +# @test length(global_colorings) == n_nodes * num_dofs +# # ensure each proc has a sensible number +# for n in axes(global_colorings, 1) +# @test global_colorings[n] >= 1 +# @test global_colorings[n] <= num_ranks +# end + +# # cleanup +# for n in 1:num_ranks +# rm(mesh_file * ".$num_ranks.$(n - 1)"; force = true) +# end +# rm(mesh_file * ".nem"; force = true) +# rm(mesh_file * ".pex"; force = true) +# rm(dirname(mesh_file) * "/decomp.log"; force = true) +# end + +# function test_variable_partition(distribute, num_dofs, num_ranks) +# # mesh_file = dirname(Base.source_dir()) * "/poisson/poisson.g" +# mesh_file = Base.source_dir() * "/square.g" +# FiniteElementContainers.decompose_mesh(mesh_file, num_ranks, num_dofs) +# global_colorings = FiniteElementContainers.global_colorings(mesh_file, num_dofs, num_ranks) + +# ranks = distribute(LinearIndices((num_ranks,))) +# meshes = map(ranks) do rank +# UnstructuredMesh(mesh_file, num_ranks, rank) +# end +# parts = FiniteElementContainers.create_partition(mesh_file, num_dofs, num_ranks, ranks) + +# # test that the partitions owned and ghost members are the right size +# map(meshes, parts.parts, ranks) do mesh, part, rank +# node_map = mesh.node_id_map +# num_dofs_in_rank = count(x -> x == rank, global_colorings) + +# n_local = length(part.ranges[1]) +# n_ghost = length(part.ghost.ghost_to_owner) +# @test n_local == num_dofs_in_rank +# @test n_local + n_ghost == length(node_map) +# end + +# # now test that the exo_to_par and par_to_exo maps are consistent +# exo_to_par, par_to_exo = parts.exo_to_par, parts.par_to_exo + +# for n in 1:length(global_colorings) +# @test n in values(exo_to_par) +# @test n in values(par_to_exo) +# end + +# par_node_maps = map(meshes, parts.parts) do mesh, part +# node_map = mesh.node_id_map +# # n_ghost = length(part.ghost.ghost_to_owner) +# # n_local = length(part.ranges[1]) +# # ranges = part.ranges[1] + +# par_node_map = 
map(x -> exo_to_par[x], node_map) +# exo_node_map = map(x -> par_to_exo[x], par_node_map) + +# # checking they're invertible maps +# for n in axes(node_map, 1) +# @test node_map[n] == exo_node_map[n] +# @test node_map[n] == node_map[par_to_exo[exo_to_par[n]]] +# end + +# # NOTE this below is dumb. +# # we shouldn't expect node id maps to be ordered in terms +# # of local/ghost since then exodus would have an implicit local/global +# # which it does not. +# # instead we should add tests that test things based on load balance info +# # @test all(part.ranges[1] |> collect .== part[1:n_local]) +# # @show "hur" +# # # @test all(part.ranges[1] |> collect .== par_node_map[1:n_local]) +# # # par_node_map +# # parts + +# # test connectivities are invertible +# exo_conns = mesh.element_conns +# par_conns = deepcopy(exo_conns) +# for (block, conn) in enumerate(values(par_conns)) +# for e in axes(conn, 2) +# for n in axes(conn, 1) +# conn[n, e] = exo_to_par[node_map[conn[n, e]]] +# end +# end + +# # TODO this isn't quite right yet +# # # now invert real quick +# # temp_conns = deepcopy(conn) +# # for e in axes(temp_conns, 2) +# # for n in axes(temp_conns, 1) +# # temp_conns[n, e] = par_to_exo[conn[n, e]] +# # end +# # end + +# # @test all(values(exo_conns)[block] .== values(temp_conns)) +# end +# end + +# # cleanup +# for n in 1:num_ranks +# rm(mesh_file * ".$num_ranks.$(n - 1)"; force = true) +# end +# rm(mesh_file * ".nem"; force = true) +# rm(mesh_file * ".pex"; force = true) +# rm(dirname(mesh_file) * "/decomp.log"; force = true) +# # parts +# par_node_maps +# end + +if !Sys.iswindows() + test_decomp_to_epu() +end +# test_global_colors(1, 4) +# test_global_colors(2, 4) +# test_global_colors(3, 4) +# test_variable_partition(identity, 1, 4) diff --git a/test/ext/script.jl b/test/ext/script.jl new file mode 100644 index 0000000..e239d6a --- /dev/null +++ b/test/ext/script.jl @@ -0,0 +1,91 @@ +using Exodus +using FiniteElementContainers +using PartitionedArrays + 
+include("../../test/poisson/TestPoissonCommon.jl") +# f(X, _) = 2. * π^2 * sin(π * X[1]) * sin(π * X[2]) +f(X, _) = 1. +bc_func(_, _) = 0. + +mesh_file = Base.source_dir() * "/square.g" +output_file = Base.source_dir() * "/output.e" +num_dofs = 1 +num_ranks = 4 +distribute = identity + +ranks = distribute(LinearIndices((num_ranks,))) + +# decompose mesh and global dofs to colors +# NOTE this is all happening on rank 0 currently +# put this one in seperate map as barrier +map(ranks) do rank + FiniteElementContainers.decompose_mesh(mesh_file, num_ranks, rank) +end + +# STEPS +# 1. Make global node to rank owners (plural) +# along with other stuff + +global_node_owners = FiniteElementContainers.global_colorings(mesh_file, 1, num_ranks) +global_nodes_to_colors = map(minimum, global_node_owners) +global_dof_owners = FiniteElementContainers.global_colorings(mesh_file, num_dofs, num_ranks) +global_dofs_to_colors = map(minimum, global_dof_owners) + +n_dofs_per_parts = map(ranks) do rank + n_dofs_per_parts = count(x -> x == rank, global_dofs_to_colors) + return n_dofs_per_parts +end +# 2. Make variable partition +parts = variable_partition(n_dofs_per_parts, sum(n_dofs_per_parts)) +# 3. Make exo_to_par and par_to_exo maps +# for now need to make one that is one dof for a node partition +# and for a dof partition, TODO +dof_exo_to_par, dof_par_to_exo = FiniteElementContainers._exo_to_par_dicts( + mesh_file, num_dofs, num_ranks, + global_dofs_to_colors, + n_dofs_per_parts, parts, ranks +) +node_exo_to_par, node_par_to_exo = FiniteElementContainers._exo_to_par_dicts( + mesh_file, 1, num_ranks, + global_nodes_to_colors, + n_dofs_per_parts, parts, ranks +) + +# 4. map meshes over to newer numbering maybe? +meshes = map(ranks) do rank + mesh = UnstructuredMesh(mesh_file, num_ranks, rank) + # FiniteElementContainers._renumber_mesh(mesh, node_exo_to_par) +end + +# 5. 
Need to update ghost nodes in variable_partition +parts = map(meshes, parts, ranks) do mesh, part, rank + node_map = mesh.node_id_map + cmaps = Exodus.read_node_cmaps(rank, mesh.mesh_obj.mesh_obj) + + ghost_nodes = Vector{Int}(undef, 0) + ghost_owners = Vector{Int}(undef, 0) + + for cmap in cmaps + exo_global_nodes = node_map[cmap.node_ids] + par_global_nodes = map(x -> dof_exo_to_par[x], exo_global_nodes) + for (exo_node, par_node, proc) in zip(exo_global_nodes, par_global_nodes, cmap.proc_ids) + if global_dofs_to_colors[exo_node] == proc + push!(ghost_nodes, par_node) + push!(ghost_owners, proc) + end + end + end + ghost_nodes + union_ghost(part, ghost_nodes, ghost_owners) +end + +v = pones(parts) + +# map(meshes) do mesh +# V = FunctionSpace(mesh, H1Field, Lagrange) +# physics = Poisson(f) +# props = create_properties(physics) +# u = ScalarFunction(V, :u) +# asm = SparseMatrixAssembler(u; use_condensed=true) + +# end \ No newline at end of file diff --git a/test/ext/square.g b/test/ext/square.g new file mode 100644 index 0000000..f41996e Binary files /dev/null and b/test/ext/square.g differ diff --git a/test/poisson/TestPoisson.jl b/test/poisson/TestPoisson.jl index 483df83..54d8c29 100644 --- a/test/poisson/TestPoisson.jl +++ b/test/poisson/TestPoisson.jl @@ -20,6 +20,17 @@ bc_func_neumann(_, _) = SVector{1, Float64}(1.) 
# read mesh and relevant quantities +function test_poisson(backend, cond, nlsolver, lsolver) + test_poisson_dirichlet(backend, cond, nlsolver, lsolver) + test_poisson_dirichlet_multi_block_quad4_quad4(backend, cond, nlsolver, lsolver) + test_poisson_dirichlet_multi_block_quad4_tri3(backend, cond, nlsolver, lsolver) + test_poisson_dirichlet_structured_mesh_quad4(backend, cond, nlsolver, lsolver) + test_poisson_dirichlet_structured_mesh_tri3(backend, cond, nlsolver, lsolver) + test_poisson_neumann(backend, cond, nlsolver, lsolver) + # test_poisson_neumann_structured_mesh_quad4(backend, cond, nlsolver, lsolver) + # test_poisson_neumann_structured_mesh_tri3(backend, cond, nlsolver, lsolver) +end + function test_poisson_dirichlet( dev, use_condensed, nsolver, lsolver @@ -70,6 +81,7 @@ function test_poisson_dirichlet( display(solver.timer) end + function test_poisson_neumann( dev, use_condensed, nsolver, lsolver @@ -222,3 +234,223 @@ function test_poisson_dirichlet_multi_block_quad4_tri3( rm(output_file; force=true) display(solver.timer) end + +function test_poisson_dirichlet_structured_mesh_quad4( + dev, use_condensed, + nsolver, lsolver +) + # mesh = UnstructuredMesh(mesh_file) + mesh = StructuredMesh("quad", (0., 0.), (1., 1.), (11, 11)) + V = FunctionSpace(mesh, H1Field, Lagrange) + physics = Poisson(f) + props = create_properties(physics) + u = ScalarFunction(V, :u) + asm = SparseMatrixAssembler(u; use_condensed=use_condensed) + + # setup and update bcs + dbcs = DirichletBC[ + DirichletBC(:u, :bottom, bc_func), + DirichletBC(:u, :right, bc_func), + DirichletBC(:u, :top, bc_func), + DirichletBC(:u, :left, bc_func), + ] + + # setup the parameters + p = create_parameters(mesh, asm, physics, props; dirichlet_bcs=dbcs) + + if dev != cpu + p = p |> dev + asm = asm |> dev + end + + # setup solver and integrator + solver = nsolver(lsolver(asm)) + integrator = QuasiStaticIntegrator(solver) + evolve!(integrator, p) + + if dev != cpu + p = p |> cpu + end + + U = p.h1_field + + 
# pp = PostProcessor(mesh, output_file, u) + # write_times(pp, 1, 0.0) + # write_field(pp, 1, ("u",), U) + # close(pp) + + # if !Sys.iswindows() + # @test exodiff(output_file, gold_file) + # end + # rm(output_file; force=true) + display(solver.timer) +end + +function test_poisson_dirichlet_structured_mesh_tri3( + dev, use_condensed, + nsolver, lsolver +) + # mesh = UnstructuredMesh(mesh_file) + mesh = StructuredMesh("tri", (0., 0.), (1., 1.), (11, 11)) + V = FunctionSpace(mesh, H1Field, Lagrange) + physics = Poisson(f) + props = create_properties(physics) + u = ScalarFunction(V, :u) + asm = SparseMatrixAssembler(u; use_condensed=use_condensed) + + # setup and update bcs + dbcs = DirichletBC[ + DirichletBC(:u, :bottom, bc_func), + DirichletBC(:u, :right, bc_func), + DirichletBC(:u, :top, bc_func), + DirichletBC(:u, :left, bc_func), + ] + + # setup the parameters + p = create_parameters(mesh, asm, physics, props; dirichlet_bcs=dbcs) + + if dev != cpu + p = p |> dev + asm = asm |> dev + end + + # setup solver and integrator + solver = nsolver(lsolver(asm)) + integrator = QuasiStaticIntegrator(solver) + evolve!(integrator, p) + + if dev != cpu + p = p |> cpu + end + + U = p.h1_field + + # pp = PostProcessor(mesh, output_file, u) + # write_times(pp, 1, 0.0) + # write_field(pp, 1, ("u",), U) + # close(pp) + + # if !Sys.iswindows() + # @test exodiff(output_file, gold_file) + # end + # rm(output_file; force=true) + display(solver.timer) +end + +function test_poisson_neumann_structured_mesh_quad4( + dev, use_condensed, + nsolver, lsolver +) + mesh = StructuredMesh("quad", (0., 0.), (1., 1.), (11, 11)) + V = FunctionSpace(mesh, H1Field, Lagrange) + physics = Poisson(f) + props = create_properties(physics) + u = ScalarFunction(V, :u) + asm = SparseMatrixAssembler(u; use_condensed=use_condensed) + + # setup and update bcs + dbcs = DirichletBC[ + DirichletBC(:u, :bottom, bc_func), + DirichletBC(:u, :right, bc_func) + ] + + nbcs = NeumannBC[ + NeumannBC(:u, :top, 
bc_func_neumann), + NeumannBC(:u, :left, bc_func_neumann) + ] + + # direct solver test + # setup the parameters + p = create_parameters( + mesh, asm, physics, props; + dirichlet_bcs=dbcs, + neumann_bcs=nbcs + ) + + if dev != cpu + p = p |> dev + asm = asm |> dev + end + + # setup solver and integrator + solver = nsolver(lsolver(asm)) + integrator = QuasiStaticIntegrator(solver) + evolve!(integrator, p) + + if dev != cpu + p = p |> cpu + end + + # TODO make a neumann gold file + # U = p.h1_field + + # pp = PostProcessor(mesh, output_file, u) + # write_times(pp, 1, 0.0) + # write_field(pp, 1, ("u",), U) + # close(pp) + + # if !Sys.iswindows() + # @test exodiff(output_file, gold_file) + # end + # rm(output_file; force=true) + display(solver.timer) +end + +function test_poisson_neumann_structured_mesh_tri3( + dev, use_condensed, + nsolver, lsolver +) + mesh = StructuredMesh("tri", (0., 0.), (1., 1.), (11, 11)) + V = FunctionSpace(mesh, H1Field, Lagrange) + physics = Poisson(f) + props = create_properties(physics) + u = ScalarFunction(V, :u) + asm = SparseMatrixAssembler(u; use_condensed=use_condensed) + + # setup and update bcs + dbcs = DirichletBC[ + DirichletBC(:u, :bottom, bc_func), + DirichletBC(:u, :right, bc_func) + ] + + nbcs = NeumannBC[ + NeumannBC(:u, :top, bc_func_neumann), + NeumannBC(:u, :left, bc_func_neumann) + ] + + # direct solver test + # setup the parameters + p = create_parameters( + mesh, asm, physics, props; + dirichlet_bcs=dbcs, + neumann_bcs=nbcs + ) + + if dev != cpu + p = p |> dev + asm = asm |> dev + end + + # setup solver and integrator + solver = nsolver(lsolver(asm)) + integrator = QuasiStaticIntegrator(solver) + evolve!(integrator, p) + + if dev != cpu + p = p |> cpu + end + + # TODO make a neumann gold file + # U = p.h1_field + + # pp = PostProcessor(mesh, output_file, u) + # write_times(pp, 1, 0.0) + # write_field(pp, 1, ("u",), U) + # close(pp) + + # if !Sys.iswindows() + # @test exodiff(output_file, gold_file) + # end + # 
rm(output_file; force=true) + display(solver.timer) +end diff --git a/test/runtests.jl b/test/runtests.jl index 243ec4b..cce2499 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,11 +2,11 @@ using Adapt using AMDGPU using Aqua using CUDA -using Exodus using FiniteElementContainers using ForwardDiff using LinearAlgebra using MPI +using PartitionedArrays using ReferenceFiniteElements using SparseArrays using StaticArrays @@ -43,28 +43,19 @@ function test_poisson() # cpu tests for cond in condensed for lsolver in lsolvers - test_poisson_dirichlet(cpu, cond, NewtonSolver, lsolver) - test_poisson_dirichlet_multi_block_quad4_quad4(cpu, cond, NewtonSolver, lsolver) - test_poisson_dirichlet_multi_block_quad4_tri3(cpu, cond, NewtonSolver, lsolver) - test_poisson_neumann(cpu, cond, NewtonSolver, lsolver) + test_poisson(cpu, cond, NewtonSolver, lsolver) end end if AMDGPU.functional() for cond in condensed - test_poisson_dirichlet(rocm, cond, NewtonSolver, cg_solver) - test_poisson_dirichlet_multi_block_quad4_quad4(rocm, cond, NewtonSolver, cg_solver) - test_poisson_dirichlet_multi_block_quad4_tri3(rocm, cond, NewtonSolver, cg_solver) - test_poisson_neumann(rocm, cond, NewtonSolver, cg_solver) + test_poisson(rocm, cond, NewtonSolver, cg_solver) end end if CUDA.functional() for cond in condensed - test_poisson_dirichlet(cuda, cond, NewtonSolver, cg_solver) - test_poisson_dirichlet_multi_block_quad4_quad4(cuda, cond, NewtonSolver, cg_solver) - test_poisson_dirichlet_multi_block_quad4_tri3(cuda, cond, NewtonSolver, cg_solver) - test_poisson_neumann(cuda, cond, NewtonSolver, cg_solver) + test_poisson(cuda, cond, NewtonSolver, cg_solver) end end end @@ -98,6 +89,10 @@ end @testset "Mechanics" test_mechanics() end +@testset "Extension tests" begin + include("ext/TestPartitionedArraysExt.jl") +end + @testset "Aqua" begin Aqua.test_all(FiniteElementContainers; ambiguities=false) end