diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index f7f8962..1c35451 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -18,7 +18,7 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - '1.12.0-beta3'
+          - '1.12'
           - '1.11'
           - '1.10'
         os:
diff --git a/Project.toml b/Project.toml
index 9c0ec5e..220bf4a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "Exodus"
 uuid = "f57ae99e-f805-4780-bdca-96e224be1e5a"
 authors = ["cmhamel "]
-version = "0.14.1"
+version = "0.14.2"
 
 [deps]
 DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
@@ -25,7 +25,6 @@ MPI = "0.20"
 Meshes = "0.53"
 PartitionedArrays = "0.5"
 Test = "1"
-TestSetExtensions = "2"
 Unitful = "1"
 julia = "1"
 
@@ -35,8 +34,7 @@ MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
 Meshes = "eacbb407-ea5a-433e-ab97-5258b1ca43fa"
 PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-TestSetExtensions = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"
 Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d"
 
 [targets]
-test = ["Aqua", "MPI", "Meshes", "PartitionedArrays", "Test", "TestSetExtensions", "Unitful"]
+test = ["Aqua", "MPI", "Meshes", "PartitionedArrays", "Test", "Unitful"]
diff --git a/ext/ExodusPartitionedArraysExt.jl b/ext/ExodusPartitionedArraysExt.jl
index 12292bc..3df8243 100644
--- a/ext/ExodusPartitionedArraysExt.jl
+++ b/ext/ExodusPartitionedArraysExt.jl
@@ -3,6 +3,15 @@ module ExodusPartitionedArraysExt
 using Exodus
 using PartitionedArrays
 
+# New sketch
+
+# 1. Read in node cmaps
+# 2. Get node to elem connectivity
+# 3. Also get elem and id maps
+# 4. Loop over node to elem conn and
+#    get all unique ids
+
+
 # Some helpers for IO
 function Exodus.ExodusDatabase(ranks, mesh_file::String)
   # first open nemesis file to get number of procs
@@ -16,8 +25,9 @@ function Exodus.ExodusDatabase(ranks, mesh_file::String)
     init_global = Exodus.InitializationGlobal(nem)
     n_nodes_global = Exodus.num_nodes(init_global)
+    n_elems_global = Exodus.num_elements(init_global)
 
-    return num_proc, n_nodes_global
+    return num_proc, n_nodes_global, n_elems_global
   end
 
   @info "Reading exodus files"
 
@@ -36,6 +46,36 @@ function Exodus.close(exos::V) where V <: AbstractArray{<:ExodusDatabase}
   end
 end
 
+function PartitionedArrays.partition_from_color(
+  ranks, exos,
+  global_elem_to_color,
+  global_node_to_color
+)
+  tuple_of_arrays(map(exos, ranks) do exo, rank
+    node_map = read_id_map(exo, NodeMap)
+    node_procs = global_node_to_color[node_map]
+
+    own_nodes = findall(x -> x == rank, node_procs)
+    ghost_nodes = findall(x -> x != rank, node_procs)
+
+    new_own_nodes = convert.(Int64, node_map[own_nodes])
+    own = OwnIndices(length(global_node_to_color), rank, new_own_nodes)
+
+    new_ghost_nodes = convert.(Int64, node_map[ghost_nodes])
+    ghost_procs = global_node_to_color[new_ghost_nodes]
+    ghost = GhostIndices(length(global_node_to_color), new_ghost_nodes, ghost_procs)
+
+    dof_own_and_ghost = OwnAndGhostIndices(own, ghost, global_node_to_color)
+
+    elem_map = convert.(Int64, read_id_map(exo, ElementMap))
+    own = OwnIndices(length(global_elem_to_color), rank, elem_map)
+    ghost = GhostIndices(length(global_elem_to_color), Int64[], Int64[])
+    elem_own_and_ghost = OwnAndGhostIndices(own, ghost, global_elem_to_color)
+
+    dof_own_and_ghost, elem_own_and_ghost
+  end)
+end
+
 # PArrays overrides
 # Bug in this currently
 # function PartitionedArrays.OwnAndGhostIndices(ranks, exos, inits, global_to_color)
@@ -44,6 +84,9 @@ end
 #     internal_node_ids, internal_proc_ids = Exodus.read_internal_nodes_and_procs(rank, exo)
 #     ghost_node_ids, ghost_proc_ids = Exodus.read_ghost_nodes_and_procs(rank, exo)
+#     ghost_node_ids = unique(ghost_node_ids)
+#     # ghost_proc_ids = 
+
 #     own_indices = OwnIndices(n_nodes_global, rank, internal_node_ids)
 #     ghost_indices = GhostIndices(n_nodes_global, ghost_node_ids, ghost_proc_ids)
 
@@ -52,24 +95,73 @@ end
-# dumb for now since each proc has to read each mesh part
-function PartitionedArrays.partition_from_color(ranks, file_name::String, global_to_color)
-  parts = partition_from_color(ranks, global_to_color)
-  exos, inits = ExodusDatabase(ranks, file_name)
+# function PartitionedArrays.OwnAndGhostIndices(exos, global_to_color)
+#   n_nodes_global = length(global_to_color)
+#   map(1:length(exos), exos) do rank, exo
+#     internal_node_ids, _ = Exodus.read_internal_nodes_and_procs(rank, exo)
+#     ghost_node_ids, ghost_proc_ids = Exodus.read_ghost_nodes_and_procs(rank, exo)
+#     own_indices = OwnIndices(n_nodes_global, rank, internal_node_ids)
+#     ghost_indices = GhostIndices(n_nodes_global, ghost_node_ids, ghost_proc_ids)
+#     return OwnAndGhostIndices(own_indices, ghost_indices, global_to_color)
+#   end
+# end
 
-  # below doesn't work
-  # parts = OwnAndGhostIndices(ranks, exos, inits, global_to_color)
+# # dumb for now since each proc has to read each mesh part
+# function PartitionedArrays.partition_from_color(ranks, file_name::String, global_to_color)
+#   parts = partition_from_color(ranks, global_to_color)
+#   exos, inits = ExodusDatabase(ranks, file_name)
+#   # return OwnAndGhostIndices(ranks, exos, inits, global_to_color)
+#   # below doesn't work
+#   # parts = OwnAndGhostIndices(ranks, exos, inits, global_to_color)
+
+#   # now update ghost nodes
+#   node_procs = map(ranks, exos) do rank, exo
+#     ghost_nodes, ghost_procs = Exodus.read_ghost_nodes_and_procs(rank, exo)
+#   end
+#   ghost_nodes, ghost_procs = tuple_of_arrays(node_procs)
 
-  # now update ghost nodes
-  node_procs = map(ranks, exos) do rank, exo
-    ghost_nodes, ghost_procs = Exodus.read_ghost_nodes_and_procs(rank, exo)
-  end
-  ghost_nodes, ghost_procs = tuple_of_arrays(node_procs)
+#   parts = map(parts, ghost_nodes, ghost_procs) do part, gids, owners
+#     replace_ghost(part, gids, owners)
+#   end
+#   return parts
+
+#   # # now update ghost elems
+#   # out = map(exos, ranks) do exo, rank
+#   #   ghost_elems, ghost_procs = Exodus.read_ghost_elements_and_procs(rank, exo)
+#   # end
+#   # ghost_elems, ghost_procs = tuple_of_arrays(out)
+
+#   # parts = map(ghost_elems, ghost_procs, parts) do ge, gp, part
+#   #   replace_ghost(part, ge, gp)
+#   # end
+#   # return parts
+# end
 
-  parts = map(parts, ghost_nodes, ghost_procs) do part, gids, owners
-    replace_ghost(part, gids, owners)
-  end
-  return parts
-end
+# function PartitionedArrays.partition_from_color(ranks, file_name::String)
+#   n_procs = length(ranks) |> Int32
+#   global_nodes = Exodus.collect_global_node_numberings(file_name, n_procs)
+
+#   # open exo files
+#   exos = map(ranks) do rank
+#     ExodusDatabase(file_name * ".$(n_procs).$(lpad(rank - 1, Exodus.exodus_pad(n_procs), '0'))", "r")
+#   end
+
+#   # create element partition
+#   # TODO use actual element numbering in partition
+#   num_elems = map(exos) do exo
+#     Exodus.initialization(exo).num_elements
+#   end
+
+#   element_parts = variable_partition(num_elems, sum(num_elems))
+
+#   # create node partition
+#   # TODO modify to have dofs as well
+#   num_nodes = map(ranks) do rank
+#     filter(x -> x == rank, global_nodes) |> length
+#   end
+
+#   dof_parts = variable_partition(num_nodes, sum(num_nodes))
+#   return element_parts, dof_parts
+# end
 
 end # module
diff --git a/src/Decomp.jl b/src/Decomp.jl
index 4bbbcbb..fd8d4fd 100644
--- a/src/Decomp.jl
+++ b/src/Decomp.jl
@@ -39,12 +39,17 @@ nem_spread_error(cmd::Cmd) = throw(NemSpreadException(cmd))
 """
 $(TYPEDSIGNATURES)
 """
-function nem_slice(file_name::String, n_procs::I) where I <: Integer
+function nem_slice(file_name::String, n_procs::I; use_nodal=false) where I <: Integer
   nem_file = file_name * ".nem"
   dir_name = dirname(file_name) * "/" # TODO this will be an issue for windows
 
+  if use_nodal
+    decomp_type = "-n"
+  else
+    decomp_type = "-e"
+  end
   stdout_file = joinpath(dir_name, "decomp.log")
   stderr_file = joinpath(dir_name, "decomp_err.log")
-  nem_slice_cmd = String["-e", "-S", "-l", "inertial", "-c", "-o",
+  nem_slice_cmd = String[decomp_type, "-S", "-l", "inertial", "-c", "-o",
                          "$(abspath(nem_file))", "-m", "mesh=$n_procs", "$file_name"]
 
@@ -96,7 +101,11 @@ end
 """
 $(TYPEDSIGNATURES)
 """
-function decomp(file_name::String, n_procs::I) where I <: Integer
+function decomp(
+  file_name::String,
+  n_procs::I;
+  use_nodal=false
+) where I <: Integer
   @assert !Sys.iswindows() "This method is not supported on Windows"
   @assert isfile(file_name) "File $file_name not found in decomp. Can't proceed."
 
@@ -113,7 +122,7 @@ function decomp(file_name::String, n_procs::I) where I <: Integer
   end
 
   # nem slice first
-  nem_slice(file_name_abs, n_procs)
+  nem_slice(file_name_abs, n_procs; use_nodal=use_nodal)
 
   # now nem spread
   nem_spread(file_name_abs, n_procs)
diff --git a/src/ExodusTypes.jl b/src/ExodusTypes.jl
index 64b42e9..d30c7c0 100644
--- a/src/ExodusTypes.jl
+++ b/src/ExodusTypes.jl
@@ -633,6 +633,21 @@ function ExodusDatabase{M, I, B, F}(
   )
 end
 
+
+"""
+$(TYPEDSIGNATURES)
+Do-block method that opens `file_name` in `mode`, passes the database to `f`,
+and guarantees the file is closed even if `f` throws.
+"""
+function ExodusDatabase(f::Function, file_name::String, mode::String)
+  exo = ExodusDatabase(file_name, mode)
+  try
+    return f(exo)
+  finally
+    close(exo)
+  end
+end
+
 function _juliac_safe_rpad(s::AbstractString, n::Integer)
   len = ncodeunits(s)
   padlen = max(0, n - len)
diff --git a/src/Helpers.jl b/src/Helpers.jl
index 87e8790..3c21ee9 100644
--- a/src/Helpers.jl
+++ b/src/Helpers.jl
@@ -22,6 +22,47 @@ function collect_element_connectivities(exo::ExodusDatabase{M, I, B, F}) where {
   return conns
 end
 
+# assumes decomp has been run first
+# this method figures out which proc should own which node
+# based on a rank ordering of the procs that share it (set by `func`):
+# with the default `func = maximum` the highest sharing rank owns the node;
+# with `func = minimum`, if ranks 1, 2, 3, 4 share a node, rank 1 owns it
+function collect_global_element_and_node_numberings(
+  file_name::String, n_procs;
+  func = maximum
+)
+  n_procs = n_procs |> Int32
+
+  exo = ExodusDatabase(file_name, "r")
+  n_elems_global = num_elements(initialization(exo))
+  n_nodes_global = num_nodes(initialization(exo))
+  close(exo)
+
+  global_elems = Vector{Int32}(undef, n_elems_global)
+  global_nodes = Vector{Vector{Int32}}(undef, n_nodes_global)
+  for n in 1:n_nodes_global
+    global_nodes[n] = Vector{Int32}(undef, 0)
+  end
+
+  for n in 1:n_procs
+    exo = ExodusDatabase(file_name * ".$(n_procs).$(lpad(n - 1, exodus_pad(n_procs), '0'))", "r")
+    elem_map = read_id_map(exo, ElementMap)
+    for elem in elem_map
+      global_elems[elem] = n
+    end
+
+    node_map = read_id_map(exo, NodeMap)
+    for node in node_map
+      push!(global_nodes[node], n)
+    end
+    close(exo)
+  end
+
+  new_global_nodes = map(func, global_nodes)
+
+  return global_elems, new_global_nodes
+end
+
 """
 $(TYPEDSIGNATURES)
 """
@@ -87,7 +128,6 @@ function collect_element_to_element_connectivities(exo::ExodusDatabase{M, I, B,
   return elem_to_elem
 end
 
-
 function exodus_pad(n_procs::Int32)
 
   if n_procs < 10
@@ -104,6 +144,43 @@ function exodus_pad(n_procs::Int32)
   return pad_size
 end
+function read_element_cmaps(rank, exo)
+  lb_params = Exodus.LoadBalanceParameters(exo, rank - 1)
+  cmap_params = Exodus.CommunicationMapParameters(exo, lb_params, rank - 1)
+  cmap_ids, cmap_elem_cts = cmap_params.elem_cmap_ids, cmap_params.elem_cmap_elem_cnts
+  elem_cmaps = map((x, y) -> Exodus.ElementCommunicationMap(exo, x, y, Int32(rank - 1)), cmap_ids, cmap_elem_cts)
+  return elem_cmaps
+end
+
+# function read_ghost_elements_and_procs(rank, exo)
+#   # need this to get the right ids
+#   id_map = read_id_map(exo, ElementMap)
+
+#   elem_cmaps = read_element_cmaps(rank, exo)
+
+#   ghost_elem_ids = mapreduce(x -> x.elem_ids, vcat, elem_cmaps)
+#   ghost_proc_ids = mapreduce(x -> x.proc_ids, vcat, elem_cmaps)
+
+#   # make sure the ghosts are in global
+#   ghost_elem_ids = id_map[ghost_elem_ids]
+
+#   # now sort and get unique ghost node ids only
+#   unique_ids = unique(i -> ghost_elem_ids[i], 1:length(ghost_elem_ids))
+
+#   ghost_elem_ids = ghost_elem_ids[unique_ids]
+#   ghost_proc_ids = ghost_proc_ids[unique_ids]
+
+#   # maybe this operation isn't necessary?
+#   sort_ids = sortperm(ghost_elem_ids)
+
+#   ghost_elem_ids = ghost_elem_ids[sort_ids]
+#   ghost_proc_ids = ghost_proc_ids[sort_ids]
+
+#   ghost_elem_ids = convert.(Int64, ghost_elem_ids)
+
+#   return ghost_elem_ids, ghost_proc_ids
+# end
+
 # for ghost nodes downstream
 """
 $(TYPEDSIGNATURES)
 """
@@ -120,7 +197,6 @@ end
 $(TYPEDSIGNATURES)
 """
 function read_ghost_nodes_and_procs(rank, exo)
-
   # need this to get the right ids
   id_map = read_id_map(exo, NodeMap)
 
@@ -131,19 +207,6 @@ function read_ghost_nodes_and_procs(rank, exo)
   # make sure the ghosts are in global
   ghost_node_ids = id_map[ghost_node_ids]
-
-  # now sort and get unique ghost node ids only
-  unique_ids = unique(i -> ghost_node_ids[i], 1:length(ghost_node_ids))
-
-  ghost_node_ids = ghost_node_ids[unique_ids]
-  ghost_proc_ids = ghost_proc_ids[unique_ids]
-
-  # maybe this operation isn't necessary?
-  sort_ids = sortperm(ghost_node_ids)
-
-  ghost_node_ids = ghost_node_ids[sort_ids]
-  ghost_proc_ids = ghost_proc_ids[sort_ids]
-
   ghost_node_ids = convert.(Int64, ghost_node_ids)
 
   return ghost_node_ids, ghost_proc_ids
 
@@ -164,50 +227,76 @@ function read_internal_nodes_and_procs(rank, exo)
   return internal_node_ids, fill(rank, length(internal_node_ids))
 end
 
-"""
-For collecting global_to_color
-$(TYPEDSIGNATURES)
-"""
-function collect_global_to_color(file_name::String, n_procs::Int, n_dofs::Int=1)
-  n_procs = n_procs |> Int32
-  global_to_color_dict = Dict{Int64, Int32}()
-
-  exo = ExodusDatabase(file_name, "r")
-  n_nodes_global = num_nodes(initialization(exo))
-  close(exo)
+# # TODO convert to use MPI maybe?
+# function collect_global_element_to_color(file_name::String, n_procs::Int)
+#   n_procs = n_procs |> Int32
+#   global_to_color_dict = Dict{Int64, Int32}()
+
+#   for n in 1:n_procs
+#     exo = ExodusDatabase(file_name * ".$(n_procs).$(lpad(n - 1, exodus_pad(n_procs), '0'))", "r")
+#     id_map = read_id_map(exo, ElementMap)
+#     for id in id_map
+#       global_to_color_dict[id] = n
+#     end
+#     close(exo)
+#   end
+
+#   global_to_color = zeros(Int64, length(global_to_color_dict))
+#   for (key, val) in global_to_color_dict
+#     global_to_color[key] = val
+#   end
+#   return global_to_color
+# end
+
+# """
+# For collecting global_to_color
+# $(TYPEDSIGNATURES)
+# """
+# function collect_global_node_to_color(file_name::String, n_procs::Int, n_dofs::Int=1)
+#   n_procs = n_procs |> Int32
+#   global_to_color_dict = Dict{Int64, Int32}()
+
+#   exo = ExodusDatabase(file_name, "r")
+#   n_nodes_global = num_nodes(initialization(exo))
+#   close(exo)
+
+#   n_dofs_global = n_nodes_global * n_dofs
+#   dofs = reshape(1:n_dofs_global, (n_dofs, n_nodes_global))
+
+#   for n in 1:n_procs
+#     exo = ExodusDatabase(file_name * ".$(n_procs).$(lpad(n - 1, exodus_pad(n_procs), '0'))", "r")
+#     internal_node_ids, _ = read_internal_nodes_and_procs(n, exo)
+#     ghost_node_ids, ghost_proc_ids = read_ghost_nodes_and_procs(n, exo)
+
+#     # modify if we have more than one dof
+#     # if n_dofs > 1
+#     #   internal_node_ids = convert.(Int32, dofs[:, internal_node_ids] |> vec)
+#     #   ghost_node_ids = convert.(Int32, dofs[:, ghost_node_ids] |> vec)
+#     #   new_ghost_proc_ids = ghost_proc_ids
+#     #   for n in 2:n_dofs
+#     #     new_ghost_proc_ids = hcat(new_ghost_proc_ids, ghost_proc_ids)
+#     #   end
+#     #   ghost_proc_ids = new_ghost_proc_ids' |> vec
+#     # end
+#     if n_dofs > 1
+#       @assert false "fix me"
+#     end
+#     display(internal_node_ids)
+#     for node in internal_node_ids
+#       global_to_color_dict[node] = n
+#     end
+
+#     for (node, proc) in zip(ghost_node_ids, ghost_proc_ids)
+#       global_to_color_dict[node] = proc
+#     end
+#     close(exo)
+#   end
+
+#   global_to_color = zeros(Int64, length(global_to_color_dict))
+#   for (key, val) in global_to_color_dict
+#     global_to_color[key] = val
+#   end
+#   return global_to_color
+# end
 
-  n_dofs_global = n_nodes_global * n_dofs
-  dofs = reshape(1:n_dofs_global, (n_dofs, n_nodes_global))
-
-  for n in 1:n_procs
-    exo = ExodusDatabase(file_name * ".$(n_procs).$(lpad(n - 1, exodus_pad(n_procs), '0'))", "r")
-    internal_node_ids, _ = read_internal_nodes_and_procs(n, exo)
-    ghost_node_ids, ghost_proc_ids = read_ghost_nodes_and_procs(n, exo)
-
-    # modify if we have more than one dof
-    if n_dofs > 1
-      internal_node_ids = convert.(Int32, dofs[:, internal_node_ids] |> vec)
-      ghost_node_ids = convert.(Int32, dofs[:, ghost_node_ids] |> vec)
-      new_ghost_proc_ids = ghost_proc_ids
-      for n in 2:n_dofs
-        new_ghost_proc_ids = hcat(new_ghost_proc_ids, ghost_proc_ids)
-      end
-      ghost_proc_ids = new_ghost_proc_ids' |> vec
-    end
-
-    for node in internal_node_ids
-      global_to_color_dict[node] = n
-    end
-
-    for (node, proc) in zip(ghost_node_ids, ghost_proc_ids)
-      global_to_color_dict[node] = proc
-    end
-    close(exo)
-  end
-
-  global_to_color = zeros(Int64, length(global_to_color_dict))
-  for (key, val) in global_to_color_dict
-    global_to_color[key] = val
-  end
-  return global_to_color
-end
diff --git a/test/TestHelpers.jl b/test/TestHelpers.jl
new file mode 100644
index 0000000..3715b33
--- /dev/null
+++ b/test/TestHelpers.jl
@@ -0,0 +1,49 @@
+@exodus_unit_test_set "Helpers" begin
+  if !Sys.iswindows()
+    @exodus_unit_test_set "Global numberings" begin
+      mesh_file = "mesh/square_meshes/mesh_test.g"
+      n_procs = 4
+      decomp(mesh_file, n_procs)
+      exo = ExodusDatabase(mesh_file, "r")
+      elem_nums, node_nums = Exodus.collect_global_element_and_node_numberings(
+        mesh_file, n_procs
+      )
+
+      n_elems = Exodus.initialization(exo).num_elements
+      n_nodes = Exodus.initialization(exo).num_nodes
+      close(exo)
+
+      # make sure it's the right length
+      @test length(elem_nums) == n_elems
+      @test length(node_nums) == n_nodes
+      # ensure each proc has a sensible number
+      for n in axes(elem_nums, 1)
+        @test elem_nums[n] >= 1
+        @test elem_nums[n] <= n_procs
+      end
+
+      for n in axes(node_nums, 1)
+        @test node_nums[n] >= 1
+        @test node_nums[n] <= n_procs
+      end
+
+      # check to see if the elem maps check out
+      for n in 1:n_procs
+        shard_file = mesh_file * ".$n_procs.$(n - 1)"
+        elem_map = ExodusDatabase(shard_file, "r") do exo
+          read_id_map(exo, ElementMap)
+        end
+
+        for e in elem_map
+          @test elem_nums[e] == n
+        end
+      end
+
+      # cleanup
+      for n in 1:n_procs
+        shard_file = mesh_file * ".$n_procs.$(n - 1)"
+        rm(shard_file; force = true)
+      end
+    end
+  end
+end
diff --git a/test/TestParallelExodus.jl b/test/TestParallelExodus.jl
index 2b281dd..eb7b294 100644
--- a/test/TestParallelExodus.jl
+++ b/test/TestParallelExodus.jl
@@ -1,54 +1,54 @@
-# Make these tests also include own and ghost node indices tests
-# also test comm maps, etc.
-if Sys.iswindows()
-  println("Skipping ExodusPartitionedArraysExt test on Windows")
-else
-  @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
-    decomp("mesh/cube_meshes/mesh_test.g", 8)
-    ranks = LinearIndices((8,))
-    exos, inits = ExodusDatabase(ranks, "mesh/cube_meshes/mesh_test.g")
-    close(exos)
-  end
+# # Make these tests also include own and ghost node indices tests
+# # also test comm maps, etc.
+# if Sys.iswindows()
+#   println("Skipping ExodusPartitionedArraysExt test on Windows")
+# else
+#   @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
+#     decomp("mesh/cube_meshes/mesh_test.g", 8)
+#     ranks = LinearIndices((8,))
+#     exos, inits = ExodusDatabase(ranks, "mesh/cube_meshes/mesh_test.g")
+#     close(exos)
+#   end
 
-  @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
-    mesh_file = "mesh/cube_meshes/mesh_test.g"
-    decomp(mesh_file, 8)
-    global_to_color = Exodus.collect_global_to_color(mesh_file, 8)
-    ranks = LinearIndices((8,))
-    parts = partition_from_color(ranks, mesh_file, global_to_color)
-    temp = pones(parts)
-    consistent!(temp)
-    assemble!(temp)
-  end
+#   @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
+#     mesh_file = "mesh/cube_meshes/mesh_test.g"
+#     decomp(mesh_file, 8)
+#     global_to_color = Exodus.collect_global_to_color(mesh_file, 8)
+#     ranks = LinearIndices((8,))
+#     parts = partition_from_color(ranks, mesh_file, global_to_color)
+#     temp = pones(parts)
+#     consistent!(temp)
+#     assemble!(temp)
+#   end
 
-  @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
-    mesh_file = "mesh/cube_meshes/mesh_test.g"
-    decomp(mesh_file, 8)
-    global_to_color = Exodus.collect_global_to_color(mesh_file, 8, 2)
-    ranks = LinearIndices((8,))
-    parts = partition_from_color(ranks, mesh_file, global_to_color)
-    temp = pones(parts)
-    consistent!(temp)
-    assemble!(temp)
-  end
+#   @exodus_unit_test_set "ExodusPartitionedArraysExt" begin
+#     mesh_file = "mesh/cube_meshes/mesh_test.g"
+#     decomp(mesh_file, 8)
+#     global_to_color = Exodus.collect_global_to_color(mesh_file, 8, 2)
+#     ranks = LinearIndices((8,))
+#     parts = partition_from_color(ranks, mesh_file, global_to_color)
+#     temp = pones(parts)
+#     consistent!(temp)
+#     assemble!(temp)
+#   end
 
-  # @exodus_unit_test_set "ExodusPartitionedArraysExt - with mpi" begin
-  #   decomp("mesh/cube_meshes/mesh_test.g", 8)
-  #   mpiexec(cmd -> run(`$cmd -n 8 julia --project=@. mpi/TestMPI.jl`))
-  # end
+#   # @exodus_unit_test_set "ExodusPartitionedArraysExt - with mpi" begin
+#   #   decomp("mesh/cube_meshes/mesh_test.g", 8)
+#   #   mpiexec(cmd -> run(`$cmd -n 8 julia --project=@. mpi/TestMPI.jl`))
+#   # end
 
-  @exodus_unit_test_set "ParallelExodus.jl" begin
-    mesh_file = "mesh/cube_meshes/mesh_test.g"
-    decomp(mesh_file, 8)
-    exo = ExodusDatabase(mesh_file * ".8.0", "r")
-    lb_params = Exodus.LoadBalanceParameters(exo, 0)
-    @show lb_params
-    cmap_params = Exodus.CommunicationMapParameters(exo, lb_params, 0)
-    @show cmap_params
+#   @exodus_unit_test_set "ParallelExodus.jl" begin
+#     mesh_file = "mesh/cube_meshes/mesh_test.g"
+#     decomp(mesh_file, 8)
+#     exo = ExodusDatabase(mesh_file * ".8.0", "r")
+#     lb_params = Exodus.LoadBalanceParameters(exo, 0)
+#     @show lb_params
+#     cmap_params = Exodus.CommunicationMapParameters(exo, lb_params, 0)
+#     @show cmap_params
 
-    proc_elem_maps = Exodus.ProcessorElementMaps(exo, 0)
-    # elem_comm_map = Exodus.ElementCommunicationMap(
-    #   exo, 0, Exodus.num_elements(exo.init), 0
-    # )
-  end
-end
+#     proc_elem_maps = Exodus.ProcessorElementMaps(exo, 0)
+#     # elem_comm_map = Exodus.ElementCommunicationMap(
+#     #   exo, 0, Exodus.num_elements(exo.init), 0
+#     # )
+#   end
+# end
diff --git a/test/TestMeshesExt.jl b/test/ext/TestMeshesExt.jl
similarity index 100%
rename from test/TestMeshesExt.jl
rename to test/ext/TestMeshesExt.jl
diff --git a/test/TestUnitfulExt.jl b/test/ext/TestUnitfulExt.jl
similarity index 99%
rename from test/TestUnitfulExt.jl
rename to test/ext/TestUnitfulExt.jl
index e0fb7da..52bfe08 100644
--- a/test/TestUnitfulExt.jl
+++ b/test/ext/TestUnitfulExt.jl
@@ -402,4 +402,4 @@
   @test_throws Exodus.SetNameException read_values(exo_new, SideSetVariable, 1, "fake", "stress_xx", u"MPa")
   @test_throws Exodus.VariableIDException read_values(exo_new, SideSetVariable, 1, 1, 6, u"MPa")
   @test_throws Exodus.VariableNameException read_values(exo_new, SideSetVariable, 1, 1, "fake_variable", u"MPa")
-end
\ No newline at end of file
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index 7db3a03..e2436c5 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -6,14 +6,13 @@ using Meshes
 using MPI
 using PartitionedArrays
 using Test
-using TestSetExtensions
 using Unitful
 
 # macro for testing
 macro exodus_unit_test_set(test_name::String, ex)
   return quote
     local test_set_name = rpad($test_name, 64)
-    @testset ExtendedTestSet "$test_set_name" begin
+    @testset "$test_set_name" begin
       local val = $ex
       val
     end
@@ -337,10 +336,20 @@ if Sys.iswindows()
   end
 end
 
-@includetests ARGS
+# @includetests ARGS
+include("TestHelpers.jl")
+include("TestIO.jl")
+include("TestParallelExodus.jl")
+include("TestRead.jl")
+include("TestReadWrite.jl")
+include("TestWrite.jl")
+
+include("ext/TestMeshesExt.jl")
+include("ext/TestUnitfulExt.jl")
+
 
 # Aqua testing
-@testset ExtendedTestSet "Aqua.jl" begin
+@testset "Aqua.jl" begin
   Aqua.test_all(Exodus)
 end
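
A minimal usage sketch of the pieces this patch adds: the do-block ExodusDatabase method, the use_nodal keyword on decomp, and collect_global_element_and_node_numberings. The mesh path and processor count below are placeholders for illustration only, and decomp still requires a non-Windows system with the nem_slice/nem_spread binaries available.

using Exodus

mesh_file = "mesh/square_meshes/mesh_test.g"  # placeholder path, not a file guaranteed to exist
n_procs = 4

# nodal (rather than the default elemental) nem_slice decomposition
decomp(mesh_file, n_procs; use_nodal = true)

# the do-block method closes the file even if the body throws
n_nodes = ExodusDatabase(mesh_file, "r") do exo
  Exodus.initialization(exo).num_nodes
end

# owning processor for every global element and node (default func = maximum)
elem_owner, node_owner = Exodus.collect_global_element_and_node_numberings(mesh_file, n_procs)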