diff --git a/CMakeLists.txt b/CMakeLists.txt
index 51768e79d..aa3637e30 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -595,6 +595,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_fms/test-lib.sh.in ${CMAKE_CURRE
 # build any helper modules that are used by unit tests
 list(APPEND TEST_MODS_SRC
+ test_fms/common/test_fms.F90
 test_fms/diag_manager/testing_utils.F90
 test_fms/fms/test_fms.F90
 test_fms/fms2_io/argparse.F90
@@ -613,7 +614,9 @@ add_library(testLibs "${TEST_MODS_SRC}")
 target_link_libraries(testLibs FMS::fms)
 target_compile_definitions(testLibs PRIVATE TEST_MOS_KIND_=r8_kind)
 target_include_directories(testLibs PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include
+ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/common/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include
+ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2
 PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
@@ -704,7 +707,6 @@ list(APPEND TEST_SRC_SINGLE_TARGET
 test_fms/mpp/test_mpp.F90
 test_fms/mpp/test_mpp_gatscat.F90
 test_fms/mpp/test_mpp_get_ascii_lines.F90
- test_fms/mpp/test_mpp_global_field.F90
 test_fms/mpp/test_mpp_global_field_ug.F90
 test_fms/mpp/test_mpp_global_sum_ad.F90
 test_fms/mpp/test_mpp_init_logfile.F90
@@ -745,7 +747,7 @@ list(APPEND TEST_SRC_SINGLE_TARGET
 test_fms/time_manager/test_time_manager.F90
 )
 # list any test sources that are used for two executables (r4 and r8)
-list(APPEND TEST_SRC_MULTI_TARGET
+list(APPEND TEST_SRC_TWO_TARGETS
 test_fms/astronomy/test_astronomy.F90
 test_fms/axis_utils/test_axis_utils.F90
 test_fms/column_diagnostics/test_column_diagnostics.F90
@@ -771,6 +773,10 @@ list(APPEND TEST_SRC_MULTI_TARGET
 test_fms/tracer_manager/test_tracer_manager.F90
 test_fms/tridiagonal/test_tridiagonal.F90
 )
+# list any test sources that are used for four executables (r4, r8, i4, and i8)
+list(APPEND TEST_SRC_FOUR_TARGETS
+ test_fms/mpp/test_mpp_global_field.F90
+ )
 # add all the test executables and set up linking + compile flags
 foreach (testFile ${TEST_SRC_SINGLE_TARGET})
@@ -795,12 +801,13 @@ foreach (testFile ${TEST_SRC_SINGLE_TARGET})
 ${moduleDir})
 target_include_directories(${TName} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include
+ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include
 PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2
 PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
 endforeach ()
-foreach (testFile ${TEST_SRC_MULTI_TARGET})
+foreach (testFile ${TEST_SRC_TWO_TARGETS})
 get_filename_component (TName ${testFile} NAME_WE)
 add_executable(${TName}_r8 ${testFile})
@@ -854,6 +861,97 @@ foreach (testFile ${TEST_SRC_MULTI_TARGET})
 target_compile_definitions(${TName}_r4 PRIVATE WRITE_FILES_MOD_=write_files_r4)
 endforeach ()
+foreach (testFile ${TEST_SRC_FOUR_TARGETS})
+ get_filename_component (TName ${testFile} NAME_WE)
+
+ add_executable(${TName}_r8 ${testFile})
+ target_compile_definitions(${TName}_r8 PRIVATE "${r8_defs}")
+ set_target_properties(${TName}_r8 PROPERTIES COMPILE_FLAGS ${r8_flags})
+ target_link_libraries(${TName}_r8 PUBLIC FMS::fms
+ PRIVATE testLibs
+ )
+
+ add_executable(${TName}_r4 ${testFile})
+ target_compile_definitions(${TName}_r4 PRIVATE "${r4_defs}")
+ # seems counterintuitive but r4 tests use r8 default
+ # they specify kind values explicitly with the preprocessor where needed (FMS_TEST_KIND_)
+
set_target_properties(${TName}_r4 PROPERTIES COMPILE_FLAGS ${r8_flags}) + target_link_libraries(${TName}_r4 PUBLIC FMS::fms + PRIVATE testLibs + ) + + add_executable(${TName}_i8 ${testFile}) + target_compile_definitions(${TName}_i8 PRIVATE "${r8_defs}") + set_target_properties(${TName}_i8 PROPERTIES COMPILE_FLAGS ${r8_flags}) + target_link_libraries(${TName}_i8 PUBLIC FMS::fms + PRIVATE testLibs + ) + + add_executable(${TName}_i4 ${testFile}) + target_compile_definitions(${TName}_i4 PRIVATE "${r8_defs}") + set_target_properties(${TName}_i4 PROPERTIES COMPILE_FLAGS ${r8_flags}) + target_link_libraries(${TName}_i4 PUBLIC FMS::fms + PRIVATE testLibs + ) + + if(WITH_YAML) + target_link_libraries(${TName}_r4 PRIVATE yaml ${LIBYAML_LIBRARIES}) + target_link_libraries(${TName}_r8 PRIVATE yaml ${LIBYAML_LIBRARIES}) + target_link_libraries(${TName}_i4 PRIVATE yaml ${LIBYAML_LIBRARIES}) + target_link_libraries(${TName}_i8 PRIVATE yaml ${LIBYAML_LIBRARIES}) + endif() + + if(OPENMP) + target_link_libraries(${TName}_r4 PRIVATE OpenMP::OpenMP_Fortran) + target_link_libraries(${TName}_r8 PRIVATE OpenMP::OpenMP_Fortran) + target_link_libraries(${TName}_i4 PRIVATE OpenMP::OpenMP_Fortran) + target_link_libraries(${TName}_i8 PRIVATE OpenMP::OpenMP_Fortran) + endif() + + set_target_properties(${TName}_r4 PROPERTIES Fortran_MODULE_DIRECTORY + ${moduleDir}) + set_target_properties(${TName}_r8 PROPERTIES Fortran_MODULE_DIRECTORY + ${moduleDir}) + set_target_properties(${TName}_i4 PROPERTIES Fortran_MODULE_DIRECTORY + ${moduleDir}) + set_target_properties(${TName}_i8 PROPERTIES Fortran_MODULE_DIRECTORY + ${moduleDir}) + target_include_directories(${TName}_r4 PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2 + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_include_directories(${TName}_r8 PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2 + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_include_directories(${TName}_i4 PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2 + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + target_include_directories(${TName}_i8 PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/fms/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mpp/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/data_override/include + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/mosaic2 + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + + target_compile_definitions(${TName}_r8 PRIVATE FMS_TEST_TYPE_=real) + target_compile_definitions(${TName}_r4 PRIVATE FMS_TEST_TYPE_=real) + target_compile_definitions(${TName}_i8 PRIVATE FMS_TEST_TYPE_=integer) + target_compile_definitions(${TName}_i4 PRIVATE FMS_TEST_TYPE_=integer) + + target_compile_definitions(${TName}_r8 PRIVATE FMS_TEST_KIND_=r8_kind) + target_compile_definitions(${TName}_r4 PRIVATE FMS_TEST_KIND_=r4_kind) + 
target_compile_definitions(${TName}_i8 PRIVATE FMS_TEST_KIND_=i8_kind) + target_compile_definitions(${TName}_i4 PRIVATE FMS_TEST_KIND_=i4_kind) +endforeach () + # gather all test scripts file(GLOB_RECURSE TEST_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/test_fms/*/*.sh) # leave out scripts that aren't tests diff --git a/configure.ac b/configure.ac index 42eaae67e..b41250259 100644 --- a/configure.ac +++ b/configure.ac @@ -528,6 +528,7 @@ AC_CONFIG_FILES([ test_fms/test-lib.sh test_fms/intel_coverage.sh test_fms/Makefile + test_fms/common/Makefile test_fms/astronomy/Makefile test_fms/diag_manager/Makefile test_fms/data_override/Makefile diff --git a/test_fms/Makefile.am b/test_fms/Makefile.am index 42725cbf8..9fccb3bdb 100644 --- a/test_fms/Makefile.am +++ b/test_fms/Makefile.am @@ -24,7 +24,7 @@ ACLOCAL_AMFLAGS = -I m4 # Make targets will be run in each subdirectory. Order is significant. -SUBDIRS = astronomy coupler diag_manager data_override exchange monin_obukhov drifters \ +SUBDIRS = common astronomy coupler diag_manager data_override exchange monin_obukhov drifters \ mosaic2 interpolator fms mpp time_interp time_manager horiz_interp topography \ field_manager axis_utils affinity fms2_io parser string_utils sat_vapor_pres tracer_manager \ random_numbers diag_integral column_diagnostics tridiagonal block_control diff --git a/test_fms/common/Makefile.am b/test_fms/common/Makefile.am new file mode 100644 index 000000000..2b079f339 --- /dev/null +++ b/test_fms/common/Makefile.am @@ -0,0 +1,26 @@ +#*********************************************************************** +#* Apache License 2.0 +#* +#* This file is part of the GFDL Flexible Modeling System (FMS). +#* +#* Licensed under the Apache License, Version 2.0 (the "License"); +#* you may not use this file except in compliance with the License. +#* You may obtain a copy of the License at +#* +#* http://www.apache.org/licenses/LICENSE-2.0 +#* +#* FMS is distributed in the hope that it will be useful, but WITHOUT +#* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied; +#* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +#* PARTICULAR PURPOSE. See the License for the specific language +#* governing permissions and limitations under the License. +#*********************************************************************** + +# Find the fms and mpp mod files. +AM_CPPFLAGS = -I$(MODDIR) -I$(top_srcdir)/test_fms/common/include + +noinst_LIBRARIES = libtest_fms.a +libtest_fms_a_SOURCES = test_fms.F90 include/test_fms.inc include/test_fms_real.inc + +# Clean up +CLEANFILES = *.o *.mod *.a diff --git a/test_fms/common/include/test_fms.inc b/test_fms/common/include/test_fms.inc new file mode 100644 index 000000000..4c6252a10 --- /dev/null +++ b/test_fms/common/include/test_fms.inc @@ -0,0 +1,69 @@ +!*********************************************************************** +!* Apache License 2.0 +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* Licensed under the Apache License, Version 2.0 (the "License"); +!* you may not use this file except in compliance with the License. +!* You may obtain a copy of the License at +!* +!* http://www.apache.org/licenses/LICENSE-2.0 +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied; +!* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +!* PARTICULAR PURPOSE. 
See the License for the specific language +!* governing permissions and limitations under the License. +!*********************************************************************** + + subroutine ARR_INIT_2D_ (arr) + FMS_TEST_TYPE_ (FMS_TEST_KIND_), intent(out) :: arr(:,:) + real(r8_kind) :: unif(size(arr,1), size(arr,2)) + type(randomNumberStream), save :: random_stream + logical, save :: initialized = .false. + + if (.not.initialized) then + random_stream = initializeRandomNumberStream(0) + initialized = .true. + endif + + call getRandomNumbers(random_stream, unif) + + arr = TYPECAST_ (1e9_r8_kind * (unif - 0.5_r8_kind), FMS_TEST_KIND_) + end subroutine ARR_INIT_2D_ + + subroutine ARR_INIT_3D_ (arr) + FMS_TEST_TYPE_ (FMS_TEST_KIND_), intent(out) :: arr(:,:,:) + integer :: k + + do k = 1, size(arr, 3) + call arr_init(arr(:, :, k)) + enddo + end subroutine ARR_INIT_3D_ + + subroutine ARR_COMPARE_2D_ (arr0, arr1, msg) + FMS_TEST_TYPE_ (FMS_TEST_KIND_), intent(in), dimension(:,:) :: arr0, arr1 + character(*), intent(in) :: msg + + if (any(arr0.ne.arr1)) then + call mpp_error(FATAL, "[2D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_2D_ + + subroutine ARR_COMPARE_3D_ (arr0, arr1, msg) + FMS_TEST_TYPE_ (FMS_TEST_KIND_), intent(in), dimension(:,:,:) :: arr0, arr1 + character(*), intent(in) :: msg + + if (any(arr0.ne.arr1)) then + call mpp_error(FATAL, "[3D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_3D_ + + subroutine ARR_COMPARE_4D_ (arr0, arr1, msg) + FMS_TEST_TYPE_ (FMS_TEST_KIND_), intent(in), dimension(:,:,:,:) :: arr0, arr1 + character(*), intent(in) :: msg + + if (any(arr0.ne.arr1)) then + call mpp_error(FATAL, "[4D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_4D_ diff --git a/test_fms/common/include/test_fms_real.inc b/test_fms/common/include/test_fms_real.inc new file mode 100644 index 000000000..e0eb5629d --- /dev/null +++ b/test_fms/common/include/test_fms_real.inc @@ -0,0 +1,80 @@ +!*********************************************************************** +!* Apache License 2.0 +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* Licensed under the Apache License, Version 2.0 (the "License"); +!* you may not use this file except in compliance with the License. +!* You may obtain a copy of the License at +!* +!* http://www.apache.org/licenses/LICENSE-2.0 +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied; +!* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +!* PARTICULAR PURPOSE. See the License for the specific language +!* governing permissions and limitations under the License. 
+!*********************************************************************** + + subroutine ARR_COMPARE_TOL_2D_ (arr0, arr1, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:) :: arr0, arr1 + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr1 - arr0).gt.tol)) then + call mpp_error(FATAL, "[2D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_2D_ + + subroutine ARR_COMPARE_TOL_3D_ (arr0, arr1, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:,:) :: arr0, arr1 + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr1 - arr0).gt.tol)) then + call mpp_error(FATAL, "[3D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_3D_ + + subroutine ARR_COMPARE_TOL_4D_ (arr0, arr1, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:,:,:) :: arr0, arr1 + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr1 - arr0).gt.tol)) then + call mpp_error(FATAL, "[4D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_4D_ + + subroutine ARR_COMPARE_TOL_2D_SCALAR_ (arr, ans, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:) :: arr + real(FMS_TEST_KIND_), intent(in) :: ans + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr - ans).gt.tol)) then + call mpp_error(FATAL, "[2D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_2D_SCALAR_ + + subroutine ARR_COMPARE_TOL_3D_SCALAR_ (arr, ans, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:,:) :: arr + real(FMS_TEST_KIND_), intent(in) :: ans + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr - ans).gt.tol)) then + call mpp_error(FATAL, "[3D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_3D_SCALAR_ + + subroutine ARR_COMPARE_TOL_4D_SCALAR_ (arr, ans, tol, msg) + real(FMS_TEST_KIND_), intent(in), dimension(:,:,:,:) :: arr + real(FMS_TEST_KIND_), intent(in) :: ans + real(FMS_TEST_KIND_), intent(in) :: tol + character(*), intent(in) :: msg + + if (any(abs(arr - ans).gt.tol)) then + call mpp_error(FATAL, "[4D] Unexpected result: " // msg) + endif + end subroutine ARR_COMPARE_TOL_4D_SCALAR_ diff --git a/test_fms/common/test_fms.F90 b/test_fms/common/test_fms.F90 new file mode 100644 index 000000000..13e8c0eca --- /dev/null +++ b/test_fms/common/test_fms.F90 @@ -0,0 +1,270 @@ +!*********************************************************************** +!* Apache License 2.0 +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* Licensed under the Apache License, Version 2.0 (the "License"); +!* you may not use this file except in compliance with the License. +!* You may obtain a copy of the License at +!* +!* http://www.apache.org/licenses/LICENSE-2.0 +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied; +!* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +!* PARTICULAR PURPOSE. See the License for the specific language +!* governing permissions and limitations under the License. +!*********************************************************************** + +! This module allows arrays to be permuted, and provides a data type for the +! purpose of storing permuted array bounds. It provides procedures for +! initializing a 2D or 3D array with random data, and for comparing a 2D or +! 
3D array with reference answers. + +module fms_test_mod + use random_numbers_mod, only: randomNumberStream, initializeRandomNumberStream, getRandomNumbers + use mpp_mod, only: mpp_error, FATAL + use platform_mod + + implicit none + + interface arr_init + module procedure :: arr_init_2d_r4, arr_init_2d_r8, arr_init_2d_i4, arr_init_2d_i8 + module procedure :: arr_init_3d_r4, arr_init_3d_r8, arr_init_3d_i4, arr_init_3d_i8 + end interface arr_init + + interface arr_compare + module procedure :: arr_compare_2d_r4, arr_compare_2d_r8, arr_compare_2d_i4, arr_compare_2d_i8 + module procedure :: arr_compare_3d_r4, arr_compare_3d_r8, arr_compare_3d_i4, arr_compare_3d_i8 + module procedure :: arr_compare_4d_r4, arr_compare_4d_r8, arr_compare_4d_i4, arr_compare_4d_i8 + end interface arr_compare + + interface arr_compare_tol + module procedure :: arr_compare_tol_2d_r4, arr_compare_tol_2d_r8 + module procedure :: arr_compare_tol_3d_r4, arr_compare_tol_3d_r8 + module procedure :: arr_compare_tol_4d_r4, arr_compare_tol_4d_r8 + + module procedure :: arr_compare_tol_2d_scalar_r4, arr_compare_tol_2d_scalar_r8 + module procedure :: arr_compare_tol_3d_scalar_r4, arr_compare_tol_3d_scalar_r8 + module procedure :: arr_compare_tol_4d_scalar_r4, arr_compare_tol_4d_scalar_r8 + end interface arr_compare_tol + + ! TODO: `permutable_indices` should really be implemented as a parameterized derived type, but because gfortran 13 + ! doesn't support parameterized derived types with type-bound procedures, the following workaround is needed. This + ! should be changed to a parameterized derived type once gfortran 13 support is no longer needed. + + type permutable_indices_2d + integer :: lb(2), ub(2) + + contains + + procedure :: permute => permutable_indices_permute_2d + procedure :: n => permutable_indices_n_2d + end type permutable_indices_2d + + type permutable_indices_3d + integer :: lb(3), ub(3) + + contains + + procedure :: permute => permutable_indices_permute_3d + procedure :: n => permutable_indices_n_3d + end type permutable_indices_3d + + type permutable_indices_4d + integer :: lb(4), ub(4) + + contains + + procedure :: permute => permutable_indices_permute_4d + procedure :: n => permutable_indices_n_4d + end type permutable_indices_4d + + contains + +#define FMS_TEST_TYPE_ real +#define TYPECAST_ real + +#define FMS_TEST_KIND_ r4_kind + +#define ARR_INIT_2D_ arr_init_2d_r4 +#define ARR_INIT_3D_ arr_init_3d_r4 +#define ARR_COMPARE_2D_ arr_compare_2d_r4 +#define ARR_COMPARE_3D_ arr_compare_3d_r4 +#define ARR_COMPARE_4D_ arr_compare_4d_r4 +#include "test_fms.inc" +#undef ARR_INIT_2D_ +#undef ARR_INIT_3D_ +#undef ARR_COMPARE_2D_ +#undef ARR_COMPARE_3D_ +#undef ARR_COMPARE_4D_ + +#define ARR_COMPARE_TOL_2D_ arr_compare_tol_2d_r4 +#define ARR_COMPARE_TOL_3D_ arr_compare_tol_3d_r4 +#define ARR_COMPARE_TOL_4D_ arr_compare_tol_4d_r4 +#define ARR_COMPARE_TOL_2D_SCALAR_ arr_compare_tol_2d_scalar_r4 +#define ARR_COMPARE_TOL_3D_SCALAR_ arr_compare_tol_3d_scalar_r4 +#define ARR_COMPARE_TOL_4D_SCALAR_ arr_compare_tol_4d_scalar_r4 +#include "test_fms_real.inc" +#undef ARR_COMPARE_TOL_2D_ +#undef ARR_COMPARE_TOL_3D_ +#undef ARR_COMPARE_TOL_4D_ +#undef ARR_COMPARE_TOL_2D_SCALAR_ +#undef ARR_COMPARE_TOL_3D_SCALAR_ +#undef ARR_COMPARE_TOL_4D_SCALAR_ + +#undef FMS_TEST_KIND_ +#define FMS_TEST_KIND_ r8_kind + +#define ARR_INIT_2D_ arr_init_2d_r8 +#define ARR_INIT_3D_ arr_init_3d_r8 +#define ARR_COMPARE_2D_ arr_compare_2d_r8 +#define ARR_COMPARE_3D_ arr_compare_3d_r8 +#define ARR_COMPARE_4D_ arr_compare_4d_r8 +#include "test_fms.inc" 
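! Editorial sketch of the template mechanism above (an illustrative expansion,
! not compiler output): with FMS_TEST_TYPE_=real, TYPECAST_=real, and
! FMS_TEST_KIND_=r8_kind in effect, the ARR_INIT_2D_ body in test_fms.inc
! comes out roughly as
!   subroutine arr_init_2d_r8 (arr)
!     real (r8_kind), intent(out) :: arr(:,:)
!     ...
!     arr = real (1e9_r8_kind * (unif - 0.5_r8_kind), r8_kind)
!   end subroutine arr_init_2d_r8
! Each #define/#include/#undef cycle stamps out one specific procedure, and
! the arr_init, arr_compare, and arr_compare_tol generics resolve among them
! by type, kind, and rank.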
+#undef ARR_INIT_2D_ +#undef ARR_INIT_3D_ +#undef ARR_COMPARE_2D_ +#undef ARR_COMPARE_3D_ +#undef ARR_COMPARE_4D_ +#undef ARR_COMPARE_TOL_2D_SCALAR_ +#undef ARR_COMPARE_TOL_3D_SCALAR_ +#undef ARR_COMPARE_TOL_4D_SCALAR_ + +#define ARR_COMPARE_TOL_2D_ arr_compare_tol_2d_r8 +#define ARR_COMPARE_TOL_3D_ arr_compare_tol_3d_r8 +#define ARR_COMPARE_TOL_4D_ arr_compare_tol_4d_r8 +#define ARR_COMPARE_TOL_2D_SCALAR_ arr_compare_tol_2d_scalar_r8 +#define ARR_COMPARE_TOL_3D_SCALAR_ arr_compare_tol_3d_scalar_r8 +#define ARR_COMPARE_TOL_4D_SCALAR_ arr_compare_tol_4d_scalar_r8 +#include "test_fms_real.inc" +#undef ARR_COMPARE_TOL_2D_ +#undef ARR_COMPARE_TOL_3D_ +#undef ARR_COMPARE_TOL_4D_ +#undef ARR_COMPARE_TOL_2D_SCALAR_ +#undef ARR_COMPARE_TOL_3D_SCALAR_ +#undef ARR_COMPARE_TOL_4D_SCALAR_ + +#undef FMS_TEST_KIND_ + +#undef FMS_TEST_TYPE_ +#undef TYPECAST_ + +#define FMS_TEST_TYPE_ integer +#define TYPECAST_ int + +#define FMS_TEST_KIND_ i4_kind +#define ARR_INIT_2D_ arr_init_2d_i4 +#define ARR_INIT_3D_ arr_init_3d_i4 +#define ARR_COMPARE_2D_ arr_compare_2d_i4 +#define ARR_COMPARE_3D_ arr_compare_3d_i4 +#define ARR_COMPARE_4D_ arr_compare_4d_i4 +#include "test_fms.inc" +#undef FMS_TEST_KIND_ +#undef ARR_INIT_2D_ +#undef ARR_INIT_3D_ +#undef ARR_COMPARE_2D_ +#undef ARR_COMPARE_3D_ +#undef ARR_COMPARE_4D_ + +#define FMS_TEST_KIND_ i8_kind +#define ARR_INIT_2D_ arr_init_2d_i8 +#define ARR_INIT_3D_ arr_init_3d_i8 +#define ARR_COMPARE_2D_ arr_compare_2d_i8 +#define ARR_COMPARE_3D_ arr_compare_3d_i8 +#define ARR_COMPARE_4D_ arr_compare_4d_i8 +#include "test_fms.inc" +#undef FMS_TEST_KIND_ +#undef ARR_INIT_2D_ +#undef ARR_INIT_3D_ +#undef ARR_COMPARE_2D_ +#undef ARR_COMPARE_3D_ +#undef ARR_COMPARE_4D_ + +#undef FMS_TEST_TYPE_ +#undef TYPECAST_ + + subroutine permutable_indices_permute_2d(self, p) + class(permutable_indices_2d), intent(inout) :: self + integer, intent(in) :: p + + call permute_arr(self%lb, p) + call permute_arr(self%ub, p) + end subroutine permutable_indices_permute_2d + + subroutine permutable_indices_permute_3d(self, p) + class(permutable_indices_3d), intent(inout) :: self + integer, intent(in) :: p + + call permute_arr(self%lb, p) + call permute_arr(self%ub, p) + end subroutine permutable_indices_permute_3d + + subroutine permutable_indices_permute_4d(self, p) + class(permutable_indices_4d), intent(inout) :: self + integer, intent(in) :: p + + call permute_arr(self%lb, p) + call permute_arr(self%ub, p) + end subroutine permutable_indices_permute_4d + + function permutable_indices_n_2d(self, i) result(n) + class(permutable_indices_2d), intent(inout) :: self + integer, intent(in) :: i + integer :: n + + n = self%ub(i) - self%lb(i) + 1 + end function permutable_indices_n_2d + + function permutable_indices_n_3d(self, i) result(n) + class(permutable_indices_3d), intent(inout) :: self + integer, intent(in) :: i + integer :: n + + n = self%ub(i) - self%lb(i) + 1 + end function permutable_indices_n_3d + + function permutable_indices_n_4d(self, i) result(n) + class(permutable_indices_4d), intent(inout) :: self + integer, intent(in) :: i + integer :: n + + n = self%ub(i) - self%lb(i) + 1 + end function permutable_indices_n_4d + + pure recursive function factorial(n) result(res) + integer, intent(in) :: n + integer :: res + + if (n.eq.0) then + res = 1 + else + res = n * factorial(n-1) + endif + end function factorial + + subroutine permute_arr(arr, p) + integer, intent(inout) :: arr(:) !< List to be permuted + integer, intent(in) :: p !< Which permutation to produce: may range from 1 to size(arr)! 
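! Worked example of the decoding below (a sketch; a, b, c stand for arbitrary
! array values): p-1 is read off in the factorial number system, with
! f = (n-i)! at each step, and the chosen element is swap-removed from
! `choices`. For n = 2: p = 1 -> (a, b), p = 2 -> (b, a). For n = 3:
!   p = 1 -> (a, c, b)   p = 2 -> (a, b, c)   p = 3 -> (b, a, c)
!   p = 4 -> (b, c, a)   p = 5 -> (c, a, b)   p = 6 -> (c, b, a)
! Every p in 1..n! yields a distinct ordering, though not in lexicographic
! sequence, because the hole left by each pick is filled with the last live
! choice rather than by shifting.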
+ integer :: choices(size(arr))
+ integer :: n, k, i, f, indx
+
+ n = size(arr)
+ if (p.lt.1 .or. p.gt.factorial(n)) then
+ print *, "Error: p parameter is out of bounds"
+ stop 1
+ endif
+
+ choices = arr
+ k = p - 1
+
+ do i=1,n
+ f = factorial(n - i)
+ indx = k / f + 1
+ k = mod(k, f)
+
+ arr(i) = choices(indx)
+ choices(indx) = choices(n + 1 - i)
+ enddo
+ end subroutine permute_arr
+end module fms_test_mod
diff --git a/test_fms/data_override/Makefile.am b/test_fms/data_override/Makefile.am
index c43fb1789..6f7fe0fee 100644
--- a/test_fms/data_override/Makefile.am
+++ b/test_fms/data_override/Makefile.am
@@ -23,10 +23,10 @@
 # uramirez, Ed Hartnett
 # Find the needed mod and .inc files.
-AM_CPPFLAGS = -I$(MODDIR) -I$(top_srcdir)/include -I$(top_srcdir)/test_fms/data_override/include
+AM_CPPFLAGS = -I$(MODDIR) -I$(top_srcdir)/include -I$(top_srcdir)/test_fms/data_override/include -I$(top_builddir)/test_fms/common
 # Link to the FMS library.
-LDADD = $(top_builddir)/libFMS/libFMS.la
+LDADD = $(top_builddir)/libFMS/libFMS.la $(top_builddir)/test_fms/common/libtest_fms.a
 # Build this test program.
 check_PROGRAMS = \
diff --git a/test_fms/data_override/include/test_data_override_ongrid.inc b/test_fms/data_override/include/test_data_override_ongrid.inc
index 513ce07ea..1813ea909 100644
--- a/test_fms/data_override/include/test_data_override_ongrid.inc
+++ b/test_fms/data_override/include/test_data_override_ongrid.inc
@@ -16,30 +16,33 @@
 !* governing permissions and limitations under the License.
 !***********************************************************************
-subroutine COMPARE_DATA_ (Domain_in, actual_result, expected_result)
+subroutine COMPARE_DATA_ (Domain_in, actual_result, expected_result, p)
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
 type(domain2d), intent(in) :: Domain_in !< Domain with mask table
- real(lkind), intent(in) :: expected_result !< Expected result from data_override
 real(lkind), dimension(:,:), intent(in) :: actual_result !< Result from data_override
+ real(lkind), intent(in) :: expected_result !< Expected result from data_override
+ integer, intent(in) :: p !< Dimension order permutation index
 integer :: xsizec, ysizec !< Size of the compute domain
 integer :: xsized, ysized !< Size of the data domain
- integer :: nx, ny !< Size of acual_result
- integer :: nhx, nhy !< Size of the halos
+ integer :: n(2) !< Size of actual_result
+ integer :: nh(2) !< Size of the halos
 integer :: i, j !< Helper indices
 !< Data is only expected to be overridden for the compute domain -not at the halos.
 call mpp_get_compute_domain(Domain_in, xsize=xsizec, ysize=ysizec)
 call mpp_get_data_domain(Domain_in, xsize=xsized, ysize=ysized)
- !< Note that actual_result has indices at (1:nx,1:ny) not (is:ie,js:je)
- nhx= (xsized-xsizec)/2
- nhy = (ysized-ysizec)/2
- nx = size(actual_result, 1)
- ny = size(actual_result, 2)
+ !< Note that actual_result has indices at (1:n(1),1:n(2)) not (is:ie,js:je)
+ n(1) = size(actual_result, 1)
+ n(2) = size(actual_result, 2)
+
+ nh(1) = (xsized-xsizec)/2
+ nh(2) = (ysized-ysizec)/2
+ call permute_arr(nh, p)
- do i = 1, nx
- do j = 1, ny
- if (i <= nhx .or. i > (nx-nhx) .or. j <= nhy .or. j > (ny-nhy)) then
+ do i = 1, n(1)
+ do j = 1, n(2)
+ if (i <= nh(1) .or. i > (n(1)-nh(1)) .or. j <= nh(2) .or. j > (n(2)-nh(2))) then
 !< This is the result at the halos it should be 999.
 if (actual_result(i,j) .ne. 999._lkind) then
+ print *, "for i=", i, " and j=", j, " result=", actual_result(i,j)
@@ -58,13 +61,19 @@ end subroutine COMPARE_DATA_
 !> @brief Tests ongrid data overrides.
 !! In the first case there is no time interpolation
 !! In the second case there is time interpolation
-subroutine ONGRID_TEST_
+subroutine ONGRID_TEST_ (p)
+ integer, intent(in) :: p
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
- real(lkind) :: expected_result !< Expected result from data_override
- type(time_type) :: Time !< Time
- real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
+ real(lkind) :: expected_result !< Expected result from data_override
+ type(time_type) :: Time !< Time
+ real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
+ type(permutable_indices_2d) :: ind !< Dimension boundaries
- allocate(runoff(is:ie,js:je))
+ ind%lb = [is, js]
+ ind%ub = [ie, je]
+ call ind%permute(p)
+
+ allocate(runoff(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
 runoff = 999._lkind
 !< Run it when time=3
@@ -73,7 +82,7 @@ subroutine ONGRID_TEST_
 !< Because you are getting the data when time=3, and this is an "ongrid" case, the expected result is just
 !! equal to the data at time=3, which is 3.
 expected_result = 3._lkind
- call COMPARE_DATA_ (Domain, runoff, expected_result)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 !< Run it when time=4
 runoff = 999._lkind
@@ -82,24 +91,28 @@ subroutine ONGRID_TEST_
 !< You are getting the data when time=4, the data at time=3 is 3. and at time=5 is 4., so the expected result
 !! is the average of the 2 (because this is an "ongrid" case and there is no horizontal interpolation).
 expected_result = (3._lkind + 4._lkind) / 2._lkind
- call COMPARE_DATA_ (Domain, runoff, expected_result)
-
- deallocate(runoff)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 end subroutine ONGRID_TEST_
!> @brief Tests bilinear data_override with an increasing and a decreasing grid case
!! and compares the output between the cases to ensure it is correct
-subroutine BILINEAR_TEST_
+subroutine BILINEAR_TEST_ (p)
+ integer, intent(in) :: p
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
- type(time_type) :: Time !< Time
- real(lkind), allocatable, dimension(:,:) :: runoff_decreasing !< Data to be written
- real(lkind), allocatable, dimension(:,:) :: runoff_increasing !< Data to be written
+ type(time_type) :: Time !< Time
+ real(lkind), allocatable, dimension(:,:) :: runoff_decreasing !< Data to be written
+ real(lkind), allocatable, dimension(:,:) :: runoff_increasing !< Data to be written
+ type(permutable_indices_2d) :: ind !< Dimension boundaries
 integer :: i, j, k
 logical :: success
- allocate(runoff_decreasing(is:ie,js:je))
- allocate(runoff_increasing(is:ie,js:je))
+ ind%lb = [is, js]
+ ind%ub = [ie, je]
+ call ind%permute(p)
+
+ allocate(runoff_decreasing(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
+ allocate(runoff_increasing(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
 runoff_decreasing = 999_lkind
 runoff_increasing = 999_lkind
@@ -118,21 +131,26 @@ subroutine BILINEAR_TEST_
 endif
 enddo
 enddo
- deallocate(runoff_decreasing, runoff_increasing)
 end subroutine BILINEAR_TEST_
-subroutine WEIGHT_FILE_TEST_
+subroutine WEIGHT_FILE_TEST_ (p)
+ integer, intent(in) :: p
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
- type(time_type) :: Time !< Time
- real(lkind), allocatable, dimension(:,:) :: runoff !< Data from normal override
- real(lkind), allocatable, dimension(:,:) :: runoff_weight !< Data from weight file override
- real(lkind) :: threshold !< Threshold for the difference in answers
+ type(time_type) :: Time !< Time
+ real(lkind), allocatable, dimension(:,:) :: runoff !< Data from normal override
+ real(lkind), allocatable, dimension(:,:) :: runoff_weight !< Data from weight file override
+ real(lkind) :: threshold !< Threshold for the difference in answers
+ type(permutable_indices_2d) :: ind !< Dimension boundaries
 integer :: i, j, k
 logical :: success
- allocate(runoff(is:ie,js:je))
- allocate(runoff_weight(is:ie,js:je))
+ ind%lb = [is, js]
+ ind%ub = [ie, je]
+ call ind%permute(p)
+
+ allocate(runoff(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
+ allocate(runoff_weight(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
 runoff = 999_lkind
 runoff_weight = 999_lkind
@@ -156,7 +174,6 @@ subroutine WEIGHT_FILE_TEST_
 endif
 enddo
 enddo
- deallocate(runoff, runoff_weight)
 end subroutine WEIGHT_FILE_TEST_
 subroutine SCALAR_TEST_
@@ -184,16 +201,22 @@ subroutine SCALAR_TEST_
 if (co2 .ne. expected_result) call mpp_error(FATAL, "co2 was not overridden to the correct value!")
 end subroutine SCALAR_TEST_
-subroutine ENSEMBLE_TEST_
+subroutine ENSEMBLE_TEST_ (p)
+ integer, intent(in) :: p
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
- real(lkind) :: expected_result !< Expected result from data_override
- type(time_type) :: Time !< Time
- real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
- integer :: scale_fac !< Scale factor to use when determining
- !! the expected answer
+ real(lkind) :: expected_result !< Expected result from data_override
+ type(time_type) :: Time !< Time
+ real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
+ type(permutable_indices_2d) :: ind !< Dimension boundaries
+ integer :: scale_fac !< Scale factor to use when determining
+ !! the expected answer
 logical :: sucessful !< .True. if the data_override was successful
- allocate(runoff(is:ie,js:je))
+ ind%lb = [is, js]
+ ind%ub = [ie, je]
+ call ind%permute(p)
+
+ allocate(runoff(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
 scale_fac = ensemble_id
 if (test_case .eq. ensemble_same_yaml) scale_fac = 1
@@ -206,7 +229,7 @@
 !< Because you are getting the data when time=3, and this is an "ongrid" case, the expected result is just
 !! equal to the data at time=3, which is 3+scale_fac.
 expected_result = 3._lkind + real(scale_fac,kind=lkind)
- call COMPARE_DATA_ (Domain, runoff, expected_result)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 !< Run it when time=4
 runoff = 999._lkind
@@ -217,19 +240,23 @@
 !! so the expected result is the average of the 2 (because this is an "ongrid" case and there
 !! is no horizontal interpolation).
 expected_result = (3._lkind + real(scale_fac,kind=lkind) + 4._lkind + real(scale_fac,kind=lkind)) / 2._lkind
- call COMPARE_DATA_ (Domain, runoff, expected_result)
-
- deallocate(runoff)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 end subroutine ENSEMBLE_TEST_
-subroutine MULTI_FILE_TESTS_
+subroutine MULTI_FILE_TESTS_ (p)
+ integer, intent(in) :: p
 integer, parameter :: lkind = TEST_FMS_KIND_ !< Real precision of the test
- real(lkind) :: expected_result !< Expected result from data_override
- type(time_type) :: Time !< Time
- real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
+ real(lkind) :: expected_result !< Expected result from data_override
+ type(time_type) :: Time !< Time
+ real(lkind), allocatable, dimension(:,:) :: runoff !< Data to be written
+ type(permutable_indices_2d) :: ind !< Dimension boundaries
 logical :: sucessful !< .True. if the data_override was successful
- allocate(runoff(is:ie,js:je))
+ ind%lb = [is, js]
+ ind%ub = [ie, je]
+ call ind%permute(p)
+
+ allocate(runoff(ind%lb(1):ind%ub(1), ind%lb(2):ind%ub(2)))
 !< Run it when time=3, this is going to use the previous file and the current file
 Time = set_date(1,1,4,0,0,0)
@@ -237,7 +264,7 @@
 call data_override('OCN','runoff',runoff, Time, override=sucessful)
 if (.not. sucessful) call mpp_error(FATAL, "The data was not overridden correctly")
 expected_result = 3._lkind
- call COMPARE_DATA_ (Domain, runoff, expected_result)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 !< Run it when time=4, this is going to use the current file
 Time = set_date(1,1,5,0,0,0)
@@ -245,7 +272,7 @@
 call data_override('OCN','runoff',runoff, Time, override=sucessful)
 if (.not. sucessful) call mpp_error(FATAL, "The data was not overridden correctly")
 expected_result = 4._lkind
- call COMPARE_DATA_ (Domain, runoff, expected_result)
+ call COMPARE_DATA_ (Domain, runoff, expected_result, p)
 !< Run it when time=5, this is going to use the current file and the next file
 Time = set_date(1,1,6,0,0,0)
@@ -253,6 +280,5 @@
 call data_override('OCN','runoff',runoff, Time, override=sucessful)
 if (.not.
sucessful) call mpp_error(FATAL, "The data was not overridden correctly") expected_result = 5._lkind - call COMPARE_DATA_ (Domain, runoff, expected_result) - + call COMPARE_DATA_ (Domain, runoff, expected_result, p) end subroutine MULTI_FILE_TESTS_ diff --git a/test_fms/data_override/test_data_override2_mono.sh b/test_fms/data_override/test_data_override2_mono.sh index 4e12f171e..ac870edf9 100755 --- a/test_fms/data_override/test_data_override2_mono.sh +++ b/test_fms/data_override/test_data_override2_mono.sh @@ -24,6 +24,9 @@ # Set common test settings. . ../test-lib.sh +# TODO: Enable this test once generalized indices work is complete +SKIP_TESTS="test_data_override2_mono.2" + output_dir [ ! -d "INPUT" ] && mkdir -p "INPUT" @@ -84,6 +87,9 @@ _EOF #Repeat the test with yaml if needed if [ -z $parser_skip ]; then + # TODO: Enable this test once generalized indices work is complete + SKIP_TESTS="$SKIP_TESTS test_data_override2_mono.4" + rm -rf INPUT/* sed 's/write_only = .False./write_only = .True./g' input_base.nml > input.nml test_expect_success "Creating input files" ' diff --git a/test_fms/data_override/test_data_override2_ongrid.sh b/test_fms/data_override/test_data_override2_ongrid.sh index 7c6fc35cc..7e397ba23 100755 --- a/test_fms/data_override/test_data_override2_ongrid.sh +++ b/test_fms/data_override/test_data_override2_ongrid.sh @@ -24,6 +24,12 @@ # Set common test settings. . ../test-lib.sh +# TODO: Enable these tests once generalized indices work is complete +SKIP_TESTS="test_data_override2_ongrid.2 \ + test_data_override2_ongrid.3 \ + test_data_override2_ongrid.4 \ + test_data_override2_ongrid.5" + output_dir rm -rf data_table data_table.yaml input.nml input_base.nml diff --git a/test_fms/data_override/test_data_override_ensembles.sh b/test_fms/data_override/test_data_override_ensembles.sh index bdde31294..d60d07cc0 100755 --- a/test_fms/data_override/test_data_override_ensembles.sh +++ b/test_fms/data_override/test_data_override_ensembles.sh @@ -66,6 +66,11 @@ _EOF #The test only runs with yaml if [ -z $parser_skip ]; then + # TODO: Enable these tests once generalized indices work is complete + SKIP_TESTS="test_data_override_ensembles.2 \ + test_data_override_ensembles.3 \ + test_data_override_ensembles.4" + rm -rf INPUT/. sed 's/write_only = .False./write_only = .True./g' input_base.nml > input.nml diff --git a/test_fms/data_override/test_data_override_multi_file.sh b/test_fms/data_override/test_data_override_multi_file.sh index abca2f915..dffa50bc0 100755 --- a/test_fms/data_override/test_data_override_multi_file.sh +++ b/test_fms/data_override/test_data_override_multi_file.sh @@ -24,6 +24,9 @@ # Set common test settings. . 
../test-lib.sh +# TODO: Enable this test once generalized indices work is complete +SKIP_TESTS="test_data_override_multi_file.2" + output_dir rm -rf data_table data_table.yaml input.nml input_base.nml diff --git a/test_fms/data_override/test_data_override_ongrid.F90 b/test_fms/data_override/test_data_override_ongrid.F90 index fa550f234..e24962582 100644 --- a/test_fms/data_override/test_data_override_ongrid.F90 +++ b/test_fms/data_override/test_data_override_ongrid.F90 @@ -35,6 +35,7 @@ program test_data_override_ongrid nf90_double, nf90_unlimited use ensemble_manager_mod, only: get_ensemble_size, ensemble_manager_init use fms_mod, only: string, fms_init, fms_end +use fms_test_mod, only: permutable_indices_2d, factorial, permute_arr implicit none @@ -137,24 +138,24 @@ program test_data_override_ongrid select case (test_case) case (ongrid) - call ongrid_test_r4 - call ongrid_test_r8 + call run_tests(ongrid_test_r4, 2) + call run_tests(ongrid_test_r8, 2) case (bilinear) - call bilinear_test_r4 - call bilinear_test_r8 + call run_tests(bilinear_test_r4, 2) + call run_tests(bilinear_test_r8, 2) case (scalar) call scalar_test_r4 call scalar_test_r8 case (weight_file) - call weight_file_test_r4 - call weight_file_test_r8 + call run_tests(weight_file_test_r4, 2) + call run_tests(weight_file_test_r8, 2) case (ensemble_case, ensemble_same_yaml) - call ensemble_test_r4 - call ensemble_test_r8 + call run_tests(ensemble_test_r4, 2) + call run_tests(ensemble_test_r8, 2) call mpp_set_current_pelist(pelist) case (multi_file) - call multi_file_r4 - call multi_file_r8 + call run_tests(multi_file_r4, 2) + call run_tests(multi_file_r8, 2) end select endif @@ -162,6 +163,22 @@ program test_data_override_ongrid contains +subroutine run_tests(test, ndims) + interface + subroutine test_permuted_indices(f) + integer, intent(in) :: f + end subroutine test_permuted_indices + end interface + + procedure(test_permuted_indices) :: test + integer, intent(in) :: ndims + integer :: p + + do p=1,factorial(ndims) + call test(p) + enddo +end subroutine run_tests + subroutine create_grid_spec_file type(FmsNetcdfFile_t) :: fileobj diff --git a/test_fms/data_override/test_data_override_weights.sh b/test_fms/data_override/test_data_override_weights.sh index 6cba8c0cc..987c62fe9 100755 --- a/test_fms/data_override/test_data_override_weights.sh +++ b/test_fms/data_override/test_data_override_weights.sh @@ -24,6 +24,9 @@ # Set common test settings. . ../test-lib.sh +# TODO: Enable this test once generalized indices work is complete +SKIP_TESTS="test_data_override_weights.2" + output_dir [ ! -d "INPUT" ] && mkdir -p "INPUT" diff --git a/test_fms/interpolator/Makefile.am b/test_fms/interpolator/Makefile.am index da7da4a7e..48e76d95d 100644 --- a/test_fms/interpolator/Makefile.am +++ b/test_fms/interpolator/Makefile.am @@ -23,10 +23,10 @@ # uramirez, Ed Hartnett # Find the fms_mod.mod file. -AM_CPPFLAGS = -I$(MODDIR) -I$(top_srcdir)/include +AM_CPPFLAGS = -I$(MODDIR) -I$(top_srcdir)/include -I$(top_builddir)/test_fms/common # Link to the FMS library. -LDADD = $(top_builddir)/libFMS/libFMS.la +LDADD = $(top_builddir)/libFMS/libFMS.la $(top_builddir)/test_fms/common/libtest_fms.a # Build this test program. 
check_PROGRAMS = test_interpolator test_interpolator2_r4 test_interpolator2_r8
@@ -36,8 +36,8 @@ test_interpolator_SOURCES = test_interpolator.F90
 test_interpolator2_r4_SOURCES = test_interpolator2.F90 test_interpolator_write_climatology.inc
 test_interpolator2_r8_SOURCES = test_interpolator2.F90 test_interpolator_write_climatology.inc
-test_interpolator2_r4_CPPFLAGS=-DTEST_FMS_KIND_=4 -I$(MODDIR)
-test_interpolator2_r8_CPPFLAGS=-DTEST_FMS_KIND_=8 -I$(MODDIR)
+test_interpolator2_r4_CPPFLAGS=-DTEST_FMS_KIND_=4 $(AM_CPPFLAGS)
+test_interpolator2_r8_CPPFLAGS=-DTEST_FMS_KIND_=8 $(AM_CPPFLAGS)
 # Run the test program.
 TESTS = test_interpolator2.sh
diff --git a/test_fms/interpolator/test_interpolator2.F90 b/test_fms/interpolator/test_interpolator2.F90
index 872d93867..8170a6fa6 100644
--- a/test_fms/interpolator/test_interpolator2.F90
+++ b/test_fms/interpolator/test_interpolator2.F90
@@ -39,7 +39,8 @@ program test_interpolator2
 use fms_mod, only: fms_init
 use constants_mod, only: PI
 use platform_mod, only: r4_kind, r8_kind
-
+ use fms_test_mod, only: permutable_indices_2d, permutable_indices_3d, permutable_indices_4d, factorial, &
+ arr_compare_tol
 use interpolator_mod
 implicit none
@@ -47,12 +48,12 @@ program test_interpolator2
 character(100), parameter :: ncfile='immadeup.o3.climatology.nc' !< fake climatology file
 integer, parameter :: lkind=TEST_FMS_KIND_
 !> the interpolation methods are not perfect. Will not get perfectly agreeing answers
- real(r8_kind), parameter :: tol=0.1_lkind
+ real(TEST_FMS_KIND_), parameter :: tol=1e-1
 integer :: calendar_type
 !> climatology related variables and arrays (made up data)
- integer :: nlonlat !< number of latitude and longitudinal center coordinates in file
- integer :: nlonlatb !< number of latitude and longitudinal boundary coordinates in file
+ integer :: nlonlat !< number of latitude and longitudinal center coordinates in file
+ integer :: nlonlatb !< number of latitude and longitudinal boundary coordinates in file
 integer :: ntime !< number of time slices
 integer :: npfull !< number of p levels
 integer :: nphalf !< number of half p levels
@@ -67,10 +68,10 @@
 !> model related variables and arrays
 integer :: nlonlat_mod, nlonlatb_mod !< number of latitude and longitude coordinates in the model
- real(TEST_FMS_KIND_), allocatable :: lat_mod(:,:) !< model coordinates
- real(TEST_FMS_KIND_), allocatable :: lon_mod(:,:) !< model coordinates
- real(TEST_FMS_KIND_), allocatable :: latb_mod(:,:) !< model coordinates
- real(TEST_FMS_KIND_), allocatable :: lonb_mod(:,:) !< model coordinates
+ real(TEST_FMS_KIND_), allocatable :: lat_mod(:,:) !< model coordinates
+ real(TEST_FMS_KIND_), allocatable :: lon_mod(:,:) !< model coordinates
+ real(TEST_FMS_KIND_), allocatable :: latb_mod(:,:) !< model coordinates
+ real(TEST_FMS_KIND_), allocatable :: lonb_mod(:,:) !< model coordinates
 !> array holding model times
 type(time_type), allocatable :: model_time_julian(:), model_time_noleap(:)
@@ -142,74 +143,121 @@ subroutine test_interpolator_init(clim_type)
 call interpolator_init(clim_type,trim(ncfile), lonb_mod, latb_mod, data_out_of_bounds=data_out_of_bounds)
 end subroutine test_interpolator_init
+
 !===============================================!
- subroutine test_interpolator(clim_type, model_time)
- !> call the variants of interpolator (4D-2d) that interpolates data at a given time-point
- !! The tests here do not test the "no_axis" interpolator routines
- !! This subroutine also tests obtain_interpolator_time_slices for the 2D case.
+ subroutine test_interpolator_2d(clim_type, phalf_in, test_time, answer) + type(interpolate_type), intent(inout) :: clim_type + real(TEST_FMS_KIND_), dimension(:,:,:), intent(in) :: phalf_in + type(time_type), intent(in) :: test_time + real(TEST_FMS_KIND_), intent(in) :: answer - implicit none + real(TEST_FMS_KIND_), allocatable, dimension(:,:) :: interp_data + type(permutable_indices_2d) :: dims + integer :: p + + do p=1,factorial(2) + dims%lb = [1, 1] + dims%ub = [nlonlat, nlonlat] + + call dims%permute(p) + + allocate(interp_data(dims%ub(1), dims%ub(2))) + interp_data = 0. + + !> test interpolator_2D_r4/8 + call interpolator(clim_type, test_time, interp_data, 'ozone') + call arr_compare_tol(interp_data, answer, tol, 'test interpolator_2D') + + interp_data = 0. + + !> Test obtain_interpolator_time_slices + call obtain_interpolator_time_slices(clim_type,test_time) + call interpolator(clim_type, test_time, interp_data, 'ozone') + call unset_interpolator_time_flag(clim_type) + call arr_compare_tol(interp_data, answer, tol, 'test interpolator_2D') + + deallocate(interp_data) + enddo + end subroutine test_interpolator_2d + + subroutine test_interpolator_3d(clim_type, phalf_in, test_time, answer) + type(interpolate_type), intent(inout) :: clim_type + real(TEST_FMS_KIND_), dimension(:,:,:), intent(in) :: phalf_in + type(time_type), intent(in) :: test_time + real(TEST_FMS_KIND_), intent(in) :: answer + + real(TEST_FMS_KIND_), allocatable, dimension(:,:,:) :: interp_data + type(permutable_indices_3d) :: dims + integer :: p + + do p=1,factorial(3) + dims%lb = [1, 1, 1] + dims%ub = [nlonlat, nlonlat, npfull] + + call dims%permute(p) + + allocate(interp_data(dims%ub(1), dims%ub(2), dims%ub(3))) + interp_data = 0. + + !> test interpolator_3_r4/8 + call interpolator(clim_type, test_time, phalf_in, interp_data, 'ozone') + call arr_compare_tol(interp_data, answer, tol, 'test interpolator_3D') + + deallocate(interp_data) + enddo + end subroutine test_interpolator_3d + + subroutine test_interpolator_4d(clim_type, phalf_in, test_time, answer) + type(interpolate_type), intent(inout) :: clim_type + real(TEST_FMS_KIND_), dimension(:,:,:), intent(in) :: phalf_in + type(time_type), intent(in) :: test_time + real(TEST_FMS_KIND_), intent(in) :: answer + + real(TEST_FMS_KIND_), allocatable, dimension(:,:,:,:) :: interp_data + type(permutable_indices_4d) :: dims + integer :: p + + do p=1,factorial(4) + dims%lb = [1, 1, 1, 1] + dims%ub = [nlonlat, nlonlat, npfull, 1] + + call dims%permute(p) + + allocate(interp_data(dims%ub(1), dims%ub(2), dims%ub(3), dims%ub(4))) + interp_data = 0. + + !> test interpolator_4D_r4/8 + call interpolator(clim_type, test_time, phalf_in, interp_data, 'ozone') + call arr_compare_tol(interp_data, answer, tol, 'test interpolator_4D') + deallocate(interp_data) + enddo + end subroutine test_interpolator_4d + + !> call the variants of interpolator (4D-2d) that interpolates data at a given time-point + !! The tests here do not test the "no_axis" interpolator routines + !! This subroutine also tests obtain_interpolator_time_slices for the 2D case. + subroutine test_interpolator(clim_type, model_time) type(interpolate_type), intent(inout) :: clim_type type(time_type), dimension(ntime), intent(in) :: model_time type(time_type) :: test_time - real(TEST_FMS_KIND_), dimension(nlonlat_mod,nlonlat_mod,npfull,1) :: interp_data ! 
test interpolator_4D_r4/8 - call interpolator(clim_type, test_time, phalf_in, interp_data, 'ozone') - do i=1, npfull - do j=1, nlonlat_mod - do k=1, nlonlat_mod - call check_answers(interp_data(k,j,i,1), answer, tol, 'test interpolator_4D') - end do - end do - end do - - !> test interpolator_3_r4/8 - call interpolator(clim_type, test_time, phalf_in, interp_data(:,:,:,1), 'ozone') - do i=1, npfull - do j=1, nlonlat_mod - do k=1, nlonlat_mod - call check_answers(interp_data(k,j,i,1), answer, tol, 'test interpolator_3D') - end do - end do - end do - - !> test interpolator_2D_r4/8 - call interpolator(clim_type, test_time, interp_data(:,:,1,1), 'ozone') - do j=1, nlonlat_mod - do k=1, nlonlat_mod - call check_answers(interp_data(k,j,1,1), answer, tol, 'test interpolator_2D') - end do - end do - - !> Test obtain_interpolator_time_slices - call obtain_interpolator_time_slices(clim_type,test_time) - call interpolator(clim_type, test_time, interp_data(:,:,1,1), 'ozone') - call unset_interpolator_time_flag(clim_type) - do j=1, nlonlat_mod - do k=1, nlonlat_mod - call check_answers(interp_data(k,j,1,1), answer, tol, 'test interpolator_2D') - end do - end do - + call test_interpolator_2d(clim_type, phalf_in, test_time, answer) + call test_interpolator_3d(clim_type, phalf_in, test_time, answer) + call test_interpolator_4d(clim_type, phalf_in, test_time, answer) end do - end subroutine test_interpolator !===============================================! subroutine test_interpolator_end(clim_type) @@ -242,31 +290,15 @@ subroutine test_interpolator_no_time_axis(clim_type) !> test interpolator_4D_no_time_axis_r4/8 call interpolator(clim_type, phalf_in, interp_data, 'ozone') - do i=1, npfull - do j=1, nlonlat - do k=1, nlonlat - call check_answers(interp_data(k,j,i,1), ozone(k,j,i,1), tol, 'test interpolator_4D_no_time_axis') - end do - end do - end do + call arr_compare_tol(interp_data(:,:,:,:), ozone(:,:,:,:), tol, 'test interpolator_4D_no_time_axis') !> test interpolator_3D_no_time_axis_r4/8 call interpolator(clim_type, phalf_in, interp_data(:,:,:,1), 'ozone') - do i=1, npfull - do j=1, nlonlat - do k=1, nlonlat - call check_answers(interp_data(k,j,i,1), ozone(k,j,i,1), tol, 'test interpolator_3D_no_time_axis') - end do - end do - end do + call arr_compare_tol(interp_data(:,:,:,1), ozone(:,:,:,1), tol, 'test interpolator_3D_no_time_axis') !> test interpolator_2D_no_time_axis_r4/8 call interpolator(clim_type, interp_data(:,:,1,1), 'ozone') - do j=1, nlonlat - do k=1, nlonlat - call check_answers(interp_data(k,j,1,1), ozone(k,j,1,1), tol, 'test interpolator_2D_no_time_axis') - end do - end do + call arr_compare_tol(interp_data(:,:,1,1), ozone(:,:,1,1), tol, 'test interpolator_2D_no_time_axis') end subroutine test_interpolator_no_time_axis !===============================================! @@ -339,21 +371,6 @@ subroutine run_test_set end subroutine run_test_set !===============================================! - subroutine check_answers(results, answers, tol, whoami) - - implicit none - real(TEST_FMS_KIND_), intent(in) :: results, answers - real(r8_kind), intent(in) :: tol - character(*) :: whoami - - if (real(abs(results-answers),r8_kind).gt.tol) then - !if (results.ne.answers) then - write(*,*) ' EXPECTED ', answers, ' but computed ', results - call mpp_error(FATAL, trim(whoami)) - end if - - end subroutine check_answers - !===============================================! 
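! Editorial note on the consolidation above (a sketch only; a, b, ans, and
! tol are placeholders): the generic arr_compare_tol from fms_test_mod
! resolves on kind, rank, and array-versus-scalar reference, so each removed
! check_answers triple loop collapses to a single call, e.g.
!   real(TEST_FMS_KIND_) :: a(4,4,2), b(4,4,2), ans, tol
!   call arr_compare_tol(a, b, tol, 'array against array, rank 3')
!   call arr_compare_tol(a(:,:,1), ans, tol, 'array against scalar, rank 2')
! Each specific routine raises mpp_error(FATAL, msg) if any element differs
! from the reference by more than tol.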
subroutine write_header
diff --git a/test_fms/interpolator/test_interpolator2.sh b/test_fms/interpolator/test_interpolator2.sh
index 7248b92c2..46dafdb3f 100755
--- a/test_fms/interpolator/test_interpolator2.sh
+++ b/test_fms/interpolator/test_interpolator2.sh
@@ -27,6 +27,16 @@
 # Set common test settings.
 . ../test-lib.sh
+# TODO: Enable these tests once generalized indices work is complete
+SKIP_TESTS="test_interpolator2.2 \
+ test_interpolator2.3 \
+ test_interpolator2.4 \
+ test_interpolator2.5 \
+ test_interpolator2.6 \
+ test_interpolator2.7 \
+ test_interpolator2.8 \
+ test_interpolator2.9"
+
 # Tests to skip if input files not present
 if test ! -z "$test_input_path" ; then
 rm -rf INPUT && mkdir INPUT
diff --git a/test_fms/mpp/Makefile.am b/test_fms/mpp/Makefile.am
index 89717a645..4181616fe 100644
--- a/test_fms/mpp/Makefile.am
+++ b/test_fms/mpp/Makefile.am
@@ -20,10 +20,10 @@
 # @uramirez, Ed Hartnett, @underwoo
 # Find the needed mod and inc files.
-AM_CPPFLAGS = -I${top_srcdir}/include -I$(MODDIR)
+AM_CPPFLAGS = -I${top_srcdir}/include -I$(MODDIR) -I$(top_srcdir)/test_fms/mpp/include -I$(top_builddir)/test_fms/common
 # Link to the FMS library.
-LDADD = ${top_builddir}/libFMS/libFMS.la
+LDADD = ${top_builddir}/libFMS/libFMS.la $(top_builddir)/test_fms/common/libtest_fms.a
 # Build these test programs.
 check_PROGRAMS = test_mpp \
@@ -61,7 +61,10 @@ check_PROGRAMS = test_mpp \
 test_mpp_update_domains_ad \
 test_mpp_transmit \
 test_mpp_alltoall \
- test_mpp_global_field \
+ test_mpp_global_field_r4 \
+ test_mpp_global_field_r8 \
+ test_mpp_global_field_i4 \
+ test_mpp_global_field_i8 \
 test_mpp_global_field_ug \
 test_mpp_global_sum_ad \
 test_mpp_init_logfile \
@@ -75,7 +78,8 @@ test_mpp_SOURCES = test_mpp.F90
 test_mpp_domains_SOURCES = \
 compare_data_checksums.F90 \
 test_domains_utility_mod.F90 \
- test_mpp_domains.F90
+ test_mpp_domains.F90 \
+ include/group_update.inc
 test_mpp_nesting_SOURCES = \
 compare_data_checksums.F90 \
 test_domains_utility_mod.F90 \
@@ -120,10 +124,10 @@ test_global_arrays_SOURCES = test_global_arrays.F90
 test_redistribute_int_SOURCES = test_redistribute_int.F90
 test_mpp_transmit_SOURCES = test_mpp_transmit.F90
 test_mpp_alltoall_SOURCES = test_mpp_alltoall.F90
-test_mpp_global_field_SOURCES = \
- compare_data_checksums.F90 \
- compare_data_checksums_int.F90 \
- test_mpp_global_field.F90
+test_mpp_global_field_r4_SOURCES = test_mpp_global_field.F90
+test_mpp_global_field_r8_SOURCES = test_mpp_global_field.F90
+test_mpp_global_field_i4_SOURCES = test_mpp_global_field.F90
+test_mpp_global_field_i8_SOURCES = test_mpp_global_field.F90
 test_mpp_global_field_ug_SOURCES = \
 compare_data_checksums.F90 \
 compare_data_checksums_int.F90 \
 test_mpp_global_field_ug.F90
@@ -136,6 +140,11 @@ test_super_grid_SOURCES = test_super_grid.F90
 test_mpp_chksum_SOURCES = test_mpp_chksum.F90
 test_stdlog_SOURCES = test_stdlog.F90
+test_mpp_global_field_r4_CPPFLAGS = $(AM_CPPFLAGS) -DFMS_TEST_TYPE_=real -DFMS_TEST_KIND_=r4_kind
+test_mpp_global_field_r8_CPPFLAGS = $(AM_CPPFLAGS) -DFMS_TEST_TYPE_=real -DFMS_TEST_KIND_=r8_kind
+test_mpp_global_field_i4_CPPFLAGS = $(AM_CPPFLAGS) -DFMS_TEST_TYPE_=integer -DFMS_TEST_KIND_=i4_kind
+test_mpp_global_field_i8_CPPFLAGS = $(AM_CPPFLAGS) -DFMS_TEST_TYPE_=integer -DFMS_TEST_KIND_=i8_kind
+
 # ifort gets an internal error during compilation for this test, issue #1071
 # we'll just remove the openmp flag if present since it doesn't use openmp at all
 test_mpp_alltoall.$(OBJEXT): FCFLAGS:= $(filter-out -fopenmp,$(FCFLAGS))
@@ -239,7 +248,6 @@ test_mpp_update_domains_int.$(OBJEXT):
compare_data_checksums_int.mod fill_halo. test_mpp_update_domains.$(OBJEXT): test_mpp_update_domains_real.mod test_mpp_update_domains_int.mod test_update_domains_performance.$(OBJEXT): compare_data_checksums_int.mod compare_data_checksums.mod -test_mpp_global_field.$(OBJEXT): compare_data_checksums_int.mod compare_data_checksums.mod test_mpp_global_field_ug.$(OBJEXT): compare_data_checksums_int.mod compare_data_checksums.mod test_mpp_domains.$(OBJEXT): compare_data_checksums.mod test_domains_utility_mod.mod test_mpp_nesting.$(OBJEXT): compare_data_checksums.mod test_domains_utility_mod.mod diff --git a/test_fms/mpp/include/group_update.inc b/test_fms/mpp/include/group_update.inc new file mode 100644 index 000000000..575a0f028 --- /dev/null +++ b/test_fms/mpp/include/group_update.inc @@ -0,0 +1,647 @@ +#define INDICES_MEM_ mem%lb(1):mem%ub(1), \ + mem%lb(2):mem%ub(2), \ + mem%lb(3):mem%ub(3) + +#define INDICES_MEM_ISHIFT_ mem_ishift%lb(1):mem_ishift%ub(1), \ + mem_ishift%lb(2):mem_ishift%ub(2), \ + mem_ishift%lb(3):mem_ishift%ub(3) + +#define INDICES_MEM_JSHIFT_ mem_jshift%lb(1):mem_jshift%ub(1), \ + mem_jshift%lb(2):mem_jshift%ub(2), \ + mem_jshift%lb(3):mem_jshift%ub(3) + +#define INDICES_MEM_IJSHIFT_ mem_ijshift%lb(1):mem_ijshift%ub(1), \ + mem_ijshift%lb(2):mem_ijshift%ub(2), \ + mem_ijshift%lb(3):mem_ijshift%ub(3) + +#define INDICES_COMPUTE_ compute%lb(1):compute%ub(1), \ + compute%lb(2):compute%ub(2), \ + compute%lb(3):compute%ub(3) + +#define INDICES_COMPUTE_ISHIFT_ compute_ishift%lb(1):compute_ishift%ub(1), \ + compute_ishift%lb(2):compute_ishift%ub(2), \ + compute_ishift%lb(3):compute_ishift%ub(3) + +#define INDICES_COMPUTE_JSHIFT_ compute_jshift%lb(1):compute_jshift%ub(1), \ + compute_jshift%lb(2):compute_jshift%ub(2), \ + compute_jshift%lb(3):compute_jshift%ub(3) + +#define INDICES_COMPUTE_IJSHIFT_ compute_ijshift%lb(1):compute_ijshift%ub(1), \ + compute_ijshift%lb(2):compute_ijshift%ub(2), \ + compute_ijshift%lb(3):compute_ijshift%ub(3) + +#define INDICES_COMPUTE_HALO_ compute_halo%lb(1):compute_halo%ub(1), \ + compute_halo%lb(2):compute_halo%ub(2), \ + compute_halo%lb(3):compute_halo%ub(3) + +#define INDICES_DATA_ data%lb(1):data%ub(1), \ + data%lb(2):data%ub(2), \ + data%lb(3):data%ub(3) + +#define INDICES_DATA_ISHIFT_ data_ishift%lb(1):data_ishift%ub(1), \ + data_ishift%lb(2):data_ishift%ub(2), \ + data_ishift%lb(3):data_ishift%ub(3) + +#define INDICES_DATA_JSHIFT_ data_jshift%lb(1):data_jshift%ub(1), \ + data_jshift%lb(2):data_jshift%ub(2), \ + data_jshift%lb(3):data_jshift%ub(3) + +#define INDICES_DATA_IJSHIFT_ data_ijshift%lb(1):data_ijshift%ub(1), \ + data_ijshift%lb(2):data_ijshift%ub(2), \ + data_ijshift%lb(3):data_ijshift%ub(3) + + subroutine TEST_GROUP_UPDATE_ (type, p) + character(len=*), intent(in) :: type !< Name of the test to run + integer, intent(in) :: p !< Permutation of array indices (ranges from 1 to 3!) 
+
+    real(FMS_TEST_KIND_), parameter :: zero = 0
+    type(domain2D) :: domain
+    integer :: num_contact, ntiles, npes_per_tile
+    integer :: i, j, k, l, n, shift
+    integer :: isc, iec, jsc, jec, isd, ied, jsd, jed
+    integer :: ism, iem, jsm, jem
+
+    integer, allocatable, dimension(:) :: pe_start, pe_end, tile1, tile2
+    integer, allocatable, dimension(:) :: istart1, iend1, jstart1, jend1
+    integer, allocatable, dimension(:) :: istart2, iend2, jstart2, jend2
+    integer, allocatable, dimension(:,:) :: layout2D, global_indices
+    real(FMS_TEST_KIND_), allocatable, dimension(:,:,:,:) :: x1, y1, x2, y2
+    real(FMS_TEST_KIND_), allocatable, dimension(:,:,:,:) :: a1, a2
+    real(FMS_TEST_KIND_), allocatable, dimension(:,:,:) :: base
+    integer :: id1, id2, id3
+    logical :: folded_north
+    logical :: cubic_grid
+    character(len=3) :: text
+    integer :: nx_save, ny_save
+    type(mpp_group_update_type) :: group_update
+    type(mpp_group_update_type), allocatable :: update_list(:)
+    type(permutable_indices_3d) :: mem, mem_ishift, mem_jshift, mem_ijshift, &
+      compute, compute_ishift, compute_jshift, compute_ijshift, compute_halo, &
+      data, data_ishift, data_jshift, data_ijshift
+
+    folded_north = .false.
+    cubic_grid = .false.
+
+    nx_save = nx
+    ny_save = ny
+    !--- check the type
+    select case(type)
+    case ( 'Folded-north' )
+      ntiles = 1
+      shift = 0
+      num_contact = 2
+      folded_north = .true.
+      npes_per_tile = npes
+      if(layout_tripolar(1)*layout_tripolar(2) == npes ) then
+        layout = layout_tripolar
+      else
+        call mpp_define_layout( (/1,nx,1,ny/), npes_per_tile, layout )
+      endif
+    case ( 'Cubic-Grid' )
+      if( nx_cubic == 0 ) then
+        call mpp_error(NOTE,'test_group_update: for Cubic-Grid mosaic, nx_cubic is zero. '//&
+          'No test is done for Cubic-Grid mosaic.' )
+        return
+      endif
+      if( nx_cubic .NE. ny_cubic ) then
+        call mpp_error(NOTE,'test_group_update: for Cubic-Grid mosaic, nx_cubic does not equal ny_cubic. '//&
+          'No test is done for Cubic-Grid mosaic.' )
+        return
+      endif
+      shift = 1
+      nx = nx_cubic
+      ny = ny_cubic
+      ntiles = 6
+      num_contact = 12
+      cubic_grid = .true.
+      if( mod(npes, ntiles) == 0 ) then
+        npes_per_tile = npes/ntiles
+        write(outunit,*)'NOTE from test_group_update ==> For Mosaic "', trim(type), &
+          '", each tile will be distributed over ', npes_per_tile, ' processors.'
+      else
+        call mpp_error(NOTE,'test_group_update: npes should be a multiple of ntiles. No test is done for '//trim(type))
+        return
+      endif
+      if(layout_cubic(1)*layout_cubic(2) == npes_per_tile) then
+        layout = layout_cubic
+      else
+        call mpp_define_layout( (/1,nx,1,ny/), npes_per_tile, layout )
+      endif
+    case default
+      call mpp_error(FATAL, 'test_group_update: no such test: '//type)
+    end select
+
+    allocate(layout2D(2,ntiles), global_indices(4,ntiles), pe_start(ntiles), pe_end(ntiles) )
+    do n = 1, ntiles
+      pe_start(n) = (n-1)*npes_per_tile
+      pe_end(n) = n*npes_per_tile-1
+    end do
+
+    do n = 1, ntiles
+      global_indices(:,n) = (/1,nx,1,ny/)
+      layout2D(:,n) = layout
+    end do
+
+    allocate(tile1(num_contact), tile2(num_contact) )
+    allocate(istart1(num_contact), iend1(num_contact), jstart1(num_contact), jend1(num_contact) )
+    allocate(istart2(num_contact), iend2(num_contact), jstart2(num_contact), jend2(num_contact) )
+
+    !--- define domain
+    if(folded_north) then
+      !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST) --- cyclic
+      tile1(1) = 1; tile2(1) = 1
+      istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
+      istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
+      !--- Contact line 2, between tile 1 (NORTH) and tile 1 (NORTH) --- folded-north-edge
+      tile1(2) = 1; tile2(2) = 1
+      istart1(2) = 1; iend1(2) = nx/2; jstart1(2) = ny; jend1(2) = ny
+      istart2(2) = nx; iend2(2) = nx/2+1; jstart2(2) = ny; jend2(2) = ny
+      call mpp_define_mosaic(global_indices, layout2D, domain, ntiles, num_contact, tile1, tile2, &
+                             istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
+                             pe_start, pe_end, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, &
+                             name = type, symmetry = .false. )
+    else if( cubic_grid ) then
+      call define_cubic_mosaic(type, domain, (/nx,nx,nx,nx,nx,nx/), (/ny,ny,ny,ny,ny,ny/), &
+                               global_indices, layout2D, pe_start, pe_end )
+    endif
+
+    !--- setup data
+    call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
+    call mpp_get_data_domain ( domain, isd, ied, jsd, jed )
+    call mpp_get_memory_domain ( domain, ism, iem, jsm, jem )
+
+    mem%lb = [ism, jsm, 1]
+    mem%ub = [iem, jem, nz]
+    call mem%permute(p)
+
+    mem_ishift%lb = [ism, jsm, 1]
+    mem_ishift%ub = [iem+shift, jem, nz]
+    call mem_ishift%permute(p)
+
+    mem_jshift%lb = [ism, jsm, 1]
+    mem_jshift%ub = [iem, jem+shift, nz]
+    call mem_jshift%permute(p)
+
+    mem_ijshift%lb = [ism, jsm, 1]
+    mem_ijshift%ub = [iem+shift, jem+shift, nz]
+    call mem_ijshift%permute(p)
+
+    compute%lb = [isc, jsc, 1]
+    compute%ub = [iec, jec, nz]
+    call compute%permute(p)
+
+    compute_ishift%lb = [isc, jsc, 1]
+    compute_ishift%ub = [iec+shift, jec, nz]
+    call compute_ishift%permute(p)
+
+    compute_jshift%lb = [isc, jsc, 1]
+    compute_jshift%ub = [iec, jec+shift, nz]
+    call compute_jshift%permute(p)
+
+    compute_ijshift%lb = [isc, jsc, 1]
+    compute_ijshift%ub = [iec+shift, jec+shift, nz]
+    call compute_ijshift%permute(p)
+
+    compute_halo%lb = [isc-1, jsc-1, 1]
+    compute_halo%ub = [iec+1, jec+1, nz]
+    call compute_halo%permute(p)
+
+    data%lb = [isd, jsd, 1]
+    data%ub = [ied, jed, nz]
+    call data%permute(p)
+
+    data_ishift%lb = [isd, jsd, 1]
+    data_ishift%ub = [ied+shift, jed, nz]
+    call data_ishift%permute(p)
+
+    data_jshift%lb = [isd, jsd, 1]
+    data_jshift%ub = [ied, jed+shift, nz]
+    call data_jshift%permute(p)
+
+    data_ijshift%lb = [isd, jsd, 1]
+    data_ijshift%ub = [ied+shift, jed+shift, nz]
+    call data_ijshift%permute(p)
+
+    if(num_fields<1) then
+      call mpp_error(FATAL, "test_mpp_domains: num_fields must be a positive integer")
+    endif
+
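+    ! The index helpers used in this test come from the shared fms_test_mod.
+    ! A minimal sketch of the assumed shape of the 3-d helper (the real module
+    ! may differ):
+    !
+    !   type permutable_indices_3d
+    !     integer :: lb(3), ub(3)   ! lower/upper bounds for the three dims
+    !   contains
+    !     procedure :: permute      ! reorder lb/ub by the p-th of the 3! = 6
+    !   end type                    ! permutations of (1,2,3)
+    !
+    ! arr_init (called below) is assumed to fill its argument with unique,
+    ! index-dependent values, playing the role of the old explicit loop
+    ! base(i,j,k) = k + i*1e-3 + j*1e-6.
+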
+ allocate(update_list(num_fields)) + + id1 = mpp_clock_id( type//' group 2D', flags=MPP_CLOCK_SYNC ) + id2 = mpp_clock_id( type//' non-group 2D', flags=MPP_CLOCK_SYNC ) + id3 = mpp_clock_id( type//' non-block group 2D', flags=MPP_CLOCK_SYNC ) + + allocate(a1(INDICES_MEM_, num_fields)) + allocate(x1(INDICES_MEM_ISHIFT_, num_fields)) + allocate(y1(INDICES_MEM_JSHIFT_, num_fields)) + allocate(a2(INDICES_MEM_, num_fields)) + allocate(x2(INDICES_MEM_ISHIFT_, num_fields)) + allocate(y2(INDICES_MEM_JSHIFT_, num_fields)) + allocate(base(INDICES_COMPUTE_IJSHIFT_)) + + a1 = zero + x1 = zero + y1 = zero + + call arr_init(base) + + !--- Test for partial direction update + do l =1, num_fields + call mpp_create_group_update(group_update, a1(:,:,:,l), domain, flags=WUPDATE+SUPDATE) + end do + + do l = 1, num_fields + a1(INDICES_COMPUTE_HALO_,l) = 999 + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + enddo + + a2 = a1 + call mpp_do_group_update(group_update, domain, zero) + + do l = 1, num_fields + call mpp_update_domains( a2(:,:,:,l), domain, flags=WUPDATE+SUPDATE, complete=l==num_fields ) + enddo + + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l),type//' CENTER South West '//text) + enddo + + call mpp_clear_group_update(group_update) + + !--- Test for DGRID update + if(type == 'Cubic-Grid' ) then + x1 = zero + y1 = zero + + do l =1, num_fields + call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=DGRID_NE) + end do + + do l = 1, num_fields + y1(INDICES_COMPUTE_ISHIFT_,l) = base(INDICES_COMPUTE_ISHIFT_) + l*1e3 + 1e6 + x1(INDICES_COMPUTE_JSHIFT_,l) = base(INDICES_COMPUTE_JSHIFT_) + l*1e3 + 2*1e6 + enddo + x2 = x1 + y2 = y1 + call mpp_start_group_update(group_update, domain, zero) + call mpp_complete_group_update(group_update, domain, zero) + + do l = 1, num_fields + call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=DGRID_NE, complete=l==num_fields ) + enddo + + !--- compare checksum + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(x1(INDICES_DATA_ISHIFT_,l),x2(INDICES_DATA_ISHIFT_,l),type//' DGRID X'//text) + call compare_checksums(y1(INDICES_DATA_JSHIFT_,l),y2(INDICES_DATA_JSHIFT_,l),type//' DGRID Y'//text) + enddo + + call mpp_clear_group_update(group_update) + endif + + !--- Test for CGRID + a1 = zero + x1 = zero + y1 = zero + + do l =1, num_fields + call mpp_create_group_update(group_update, a1(:,:,:,l), domain) + call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=CGRID_NE) + end do + + do n = 1, num_iter + a1 = zero + x1 = zero + y1 = zero + + do l = 1, num_fields + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + x1(INDICES_COMPUTE_ISHIFT_,l) = base(INDICES_COMPUTE_ISHIFT_) + l*1e3 + 1e6 + y1(INDICES_COMPUTE_JSHIFT_,l) = base(INDICES_COMPUTE_JSHIFT_) + l*1e3 + 2*1e6 + enddo + a2 = a1; x2 = x1; y2 = y1 + call mpp_clock_begin(id1) + call mpp_do_group_update(group_update, domain, zero) + call mpp_clock_end (id1) + + call mpp_clock_begin(id2) + do l = 1, num_fields + call mpp_update_domains( a2(:,:,:,l), domain, complete=l==num_fields ) + enddo + do l = 1, num_fields + call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=CGRID_NE, complete=l==num_fields ) + enddo + call mpp_clock_end(id2) + + !--- compare checksum + if( n == num_iter ) then + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l),type//' CENTER '//text) + call 
compare_checksums(x1(INDICES_DATA_ISHIFT_,l),x2(INDICES_DATA_ISHIFT_,l),type//' CGRID X'//text) + call compare_checksums(y1(INDICES_DATA_JSHIFT_,l),y2(INDICES_DATA_JSHIFT_,l),type//' CGRID Y'//text) + enddo + endif + + a1 = zero + x1 = zero + y1 = zero + + do l = 1, num_fields + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + x1(INDICES_COMPUTE_ISHIFT_,l) = base(INDICES_COMPUTE_ISHIFT_) + l*1e3 + 1e6 + y1(INDICES_COMPUTE_JSHIFT_,l) = base(INDICES_COMPUTE_JSHIFT_) + l*1e3 + 2*1e6 + enddo + call mpp_clock_begin(id3) + call mpp_start_group_update(group_update, domain, zero) + call mpp_complete_group_update(group_update, domain, zero) + call mpp_clock_end (id3) + !--- compare checksum + if( n == num_iter ) then + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l), type//' nonblock CENTER '//text) + call compare_checksums(x1(INDICES_DATA_ISHIFT_,l),x2(INDICES_DATA_ISHIFT_,l), type//' nonblock CGRID X'//text) + call compare_checksums(y1(INDICES_DATA_JSHIFT_,l),y2(INDICES_DATA_JSHIFT_,l), type//' nonblock CGRID Y'//text) + enddo + endif + enddo + + call mpp_clear_group_update(group_update) + + !--- The following is to test overlapping start and complete + if( num_fields > 1 ) then + do l =1, num_fields + call mpp_create_group_update(update_list(l), a1(:,:,:,l), domain) + call mpp_create_group_update(update_list(l), x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=CGRID_NE) + end do + + do n = 1, num_iter + + a1 = zero + x1 = zero + y1 = zero + + do l = 1, num_fields + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + x1(INDICES_COMPUTE_ISHIFT_,l) = base(INDICES_COMPUTE_ISHIFT_) + l*1e3 + 1e6 + y1(INDICES_COMPUTE_JSHIFT_,l) = base(INDICES_COMPUTE_JSHIFT_) + l*1e3 + 2*1e6 + enddo + do l = 1, num_fields-1 + call mpp_start_group_update(update_list(l), domain, zero) + enddo + + call mpp_complete_group_update(update_list(1), domain, zero) + call mpp_start_group_update(update_list(num_fields), domain, zero) + do l = 2, num_fields + call mpp_complete_group_update(update_list(l), domain, zero) + enddo + !--- compare checksum + if( n == num_iter ) then + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l), & + type//' multiple nonblock CENTER '//text) + call compare_checksums(x1(INDICES_DATA_ISHIFT_,l),x2(INDICES_DATA_ISHIFT_,l), & + type//' multiple nonblock CGRID X'//text) + call compare_checksums(y1(INDICES_DATA_JSHIFT_,l),y2(INDICES_DATA_JSHIFT_,l), & + type//' multiple nonblock CGRID Y'//text) + enddo + endif + enddo + endif + + do l =1, num_fields + call mpp_clear_group_update(update_list(l)) + enddo + deallocate(update_list) + + !--- test scalar 4-D variable + call mpp_create_group_update(group_update, a1(:,:,:,:), domain) + + a1 = zero + x1 = zero + y1 = zero + + do l = 1, num_fields + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + enddo + + a2 = a1 + x2 = x1 + y2 = y1 + + call mpp_clock_begin(id1) + call mpp_do_group_update(group_update, domain, zero) + call mpp_clock_end (id1) + + call mpp_clock_begin(id2) + call mpp_update_domains( a2(:,:,:,:), domain ) + call mpp_clock_end(id2) + + !--- compare checksum + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l),type//' 4D CENTER '//text) + enddo + + a1 = 0 + do l = 1, num_fields + a1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + enddo + call mpp_clock_begin(id3) + call mpp_start_group_update(group_update, domain, zero) + call 
mpp_complete_group_update(group_update, domain, zero)
+    call mpp_clock_end (id3)
+
+    !--- compare checksum
+    do l = 1, num_fields
+      write(text, '(i3.3)') l
+      call compare_checksums(a1(INDICES_DATA_,l),a2(INDICES_DATA_,l), type//' nonblock 4D CENTER '//text)
+    enddo
+
+
+
+    !--- test for BGRID.
+    deallocate(a1, x1, y1)
+    deallocate(a2, x2, y2)
+    call mpp_clear_group_update(group_update)
+
+    allocate(a1(INDICES_MEM_IJSHIFT_, num_fields))
+    allocate(x1(INDICES_MEM_IJSHIFT_, num_fields))
+    allocate(y1(INDICES_MEM_IJSHIFT_, num_fields))
+    allocate(a2(INDICES_MEM_IJSHIFT_, num_fields))
+    allocate(x2(INDICES_MEM_IJSHIFT_, num_fields))
+    allocate(y2(INDICES_MEM_IJSHIFT_, num_fields))
+
+    do l = 1, num_fields
+      call mpp_create_group_update(group_update, a1(:,:,:,l), domain, position=CORNER)
+      call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=BGRID_NE)
+    end do
+
+    do n = 1, num_iter
+      a1 = 0; x1 = 0; y1 = 0
+      do l = 1, num_fields
+        a1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3
+        x1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3 + 1e6
+        y1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3 + 2*1e6
+      enddo
+
+      a2 = a1
+      x2 = x1
+      y2 = y1
+
+      call mpp_clock_begin(id1)
+      call mpp_do_group_update(group_update, domain, zero)
+      call mpp_clock_end (id1)
+
+      call mpp_clock_begin(id2)
+      do l = 1, num_fields
+        call mpp_update_domains( a2(:,:,:,l), domain, position=CORNER, complete=l==num_fields )
+      enddo
+      do l = 1, num_fields
+        call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=BGRID_NE, complete=l==num_fields )
+      enddo
+      call mpp_clock_end(id2)
+
+      !--- compare checksum
+      if( n == num_iter ) then
+        do l = 1, num_fields
+          write(text, '(i3.3)') l
+          call compare_checksums(a1(INDICES_DATA_IJSHIFT_,l),a2(INDICES_DATA_IJSHIFT_,l),type // ' CORNER ' // text)
+          call compare_checksums(x1(INDICES_DATA_IJSHIFT_,l),x2(INDICES_DATA_IJSHIFT_,l),type // ' BGRID X' // text)
+          call compare_checksums(y1(INDICES_DATA_IJSHIFT_,l),y2(INDICES_DATA_IJSHIFT_,l),type // ' BGRID Y' // text)
+        enddo
+      endif
+
+      a1 = zero
+      x1 = zero
+      y1 = zero
+
+      do l = 1, num_fields
+        a1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3
+        x1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3 + 1e6
+        y1(INDICES_COMPUTE_IJSHIFT_,l) = base(INDICES_COMPUTE_IJSHIFT_) + l*1e3 + 2*1e6
+      enddo
+      call mpp_clock_begin(id3)
+      call mpp_start_group_update(group_update, domain, zero)
+      call mpp_complete_group_update(group_update, domain, zero)
+      call mpp_clock_end (id3)
+      !--- compare checksum
+      if( n == num_iter ) then
+        do l = 1, num_fields
+          write(text, '(i3.3)') l
+          call compare_checksums(a1(INDICES_DATA_IJSHIFT_,l),a2(INDICES_DATA_IJSHIFT_,l), &
+            type//' nonblock CORNER '//text)
+          call compare_checksums(x1(INDICES_DATA_IJSHIFT_,l),x2(INDICES_DATA_IJSHIFT_,l), &
+            type//' nonblock BGRID X'//text)
+          call compare_checksums(y1(INDICES_DATA_IJSHIFT_,l),y2(INDICES_DATA_IJSHIFT_,l), &
+            type//' nonblock BGRID Y'//text)
+        enddo
+      endif
+    enddo
+
+    call mpp_clear_group_update(group_update)
+
+    !-----------------------------------------------------------------------------
+    !
test for AGRID vector and scalar pair + !----------------------------------------------------------------------------- + deallocate(x1, y1) + deallocate(x2, y2) + + allocate(x1(INDICES_MEM_, num_fields)) + allocate(y1(INDICES_MEM_, num_fields)) + allocate(x2(INDICES_MEM_, num_fields)) + allocate(y2(INDICES_MEM_, num_fields)) + + x1 = zero + y1 = zero + + do l = 1, num_fields + x1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + 1e6 + y1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + 2*1e6 + enddo + + x2 = x1 + y2 = y1 + + do l =1, num_fields + call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=AGRID) + end do + + do l = 1, num_fields + call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=AGRID, complete=l==num_fields ) + enddo + + call mpp_start_group_update(group_update, domain, zero) + call mpp_complete_group_update(group_update, domain, zero) + + !--- compare checksum + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(x1(INDICES_DATA_,l),x2(INDICES_DATA_,l),type//' AGRID X'//text) + call compare_checksums(y1(INDICES_DATA_,l),y2(INDICES_DATA_,l),type//' AGRID Y'//text) + enddo + + call mpp_clear_group_update(group_update) + + x1 = zero + y1 = zero + + do l = 1, num_fields + x1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + 1e6 + y1(INDICES_COMPUTE_,l) = base(INDICES_COMPUTE_) + l*1e3 + 2*1e6 + enddo + + x2 = x1 + y2 = y1 + + do l =1, num_fields + call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=AGRID, flags=SCALAR_PAIR) + end do + + do l = 1, num_fields + call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=AGRID, flags=SCALAR_PAIR, & + & complete=l==num_fields) + enddo + + call mpp_start_group_update(group_update, domain, zero) + call mpp_complete_group_update(group_update, domain, zero) + + !--- compare checksum + do l = 1, num_fields + write(text, '(i3.3)') l + call compare_checksums(x1(INDICES_DATA_,l),x2(INDICES_DATA_,l),type//' AGRID SCALAR_PAIR X'//text) + call compare_checksums(y1(INDICES_DATA_,l),y2(INDICES_DATA_,l),type//' AGRID SCALAR_PAIR Y'//text) + enddo + + call mpp_clear_group_update(group_update) + + deallocate(pe_start, pe_end, tile1, tile2) + deallocate(istart1, iend1, jstart1, jend1) + deallocate(istart2, iend2, jstart2, jend2) + deallocate(layout2D, global_indices) + + deallocate(a1, x1, y1) + deallocate(a2, x2, y2) + deallocate(base) + call mpp_deallocate_domain(domain) + +end subroutine TEST_GROUP_UPDATE_ + +#undef INDICES_MEM_ +#undef INDICES_MEM_ISHIFT_ +#undef INDICES_MEM_JSHIFT_ +#undef INDICES_MEM_IJSHIFT_ +#undef INDICES_COMPUTE_ +#undef INDICES_COMPUTE_ISHIFT_ +#undef INDICES_COMPUTE_JSHIFT_ +#undef INDICES_COMPUTE_IJSHIFT_ +#undef INDICES_COMPUTE_HALO_ +#undef INDICES_DATA_ +#undef INDICES_DATA_ISHIFT_ +#undef INDICES_DATA_JSHIFT_ +#undef INDICES_DATA_IJSHIFT_ diff --git a/test_fms/mpp/test_mpp_domains.F90 b/test_fms/mpp/test_mpp_domains.F90 index 2d7ad6acf..b9ba0f3b0 100644 --- a/test_fms/mpp/test_mpp_domains.F90 +++ b/test_fms/mpp/test_mpp_domains.F90 @@ -58,6 +58,7 @@ program test_mpp_domains use compare_data_checksums use test_domains_utility_mod use platform_mod + use fms_test_mod, only: permutable_indices_3d, factorial, arr_init, arr_compare implicit none @@ -113,7 +114,7 @@ program test_mpp_domains test_group, test_global_sum, test_subset, test_nonsym_edge, & test_halosize_performance, test_adjoint, wide_halo, & test_unstruct - integer :: i, j, k, n + integer :: i, j, k, n, p integer :: layout(2) integer 
:: id integer :: outunit, errunit, io_status @@ -224,10 +225,15 @@ program test_mpp_domains if (mpp_pe() == mpp_root_pe()) print *, '--------------------> Calling test_unstruct <-------------------' endif if( test_group) then - if (mpp_pe() == mpp_root_pe()) print *, '--------------------> Calling test_group <-------------------' - call test_group_update( 'Folded-north' ) - call test_group_update( 'Cubic-Grid' ) - if (mpp_pe() == mpp_root_pe()) print *, '--------------------> Calling test_group <-------------------' + if (mpp_pe() == mpp_root_pe()) print *, '--------------------> Testing group_update <-------------------' + do p=1,factorial(3) + call test_group_update_r4('Folded-north', p) + call test_group_update_r4('Cubic-Grid', p) + + call test_group_update_r8('Folded-north', p) + call test_group_update_r8('Cubic-Grid', p) + enddo + if (mpp_pe() == mpp_root_pe()) print *, '--------------------> Finished testing group_update <-------------------' endif if( test_interface ) then @@ -2468,512 +2474,17 @@ subroutine test_mpp_global_sum( type ) end subroutine test_mpp_global_sum - !############################################################### - subroutine test_group_update( type ) - character(len=*), intent(in) :: type - - type(domain2D) :: domain - integer :: num_contact, ntiles, npes_per_tile - integer :: i, j, k, l, n, shift - integer :: isc, iec, jsc, jec, isd, ied, jsd, jed - integer :: ism, iem, jsm, jem - - integer, allocatable, dimension(:) :: pe_start, pe_end, tile1, tile2 - integer, allocatable, dimension(:) :: istart1, iend1, jstart1, jend1 - integer, allocatable, dimension(:) :: istart2, iend2, jstart2, jend2 - integer, allocatable, dimension(:,:) :: layout2D, global_indices - real, allocatable, dimension(:,:,:,:) :: x1, y1, x2, y2 - real, allocatable, dimension(:,:,:,:) :: a1, a2 - real, allocatable, dimension(:,:,:) :: base - integer :: id1, id2, id3 - logical :: folded_north - logical :: cubic_grid - character(len=3) :: text - integer :: nx_save, ny_save - type(mpp_group_update_type) :: group_update - type(mpp_group_update_type), allocatable :: update_list(:) - - folded_north = .false. - cubic_grid = .false. - - nx_save = nx - ny_save = ny - !--- check the type - select case(type) - case ( 'Folded-north' ) - ntiles = 1 - shift = 0 - num_contact = 2 - folded_north = .true. - npes_per_tile = npes - if(layout_tripolar(1)*layout_tripolar(2) == npes ) then - layout = layout_tripolar - else - call mpp_define_layout( (/1,nx,1,ny/), npes_per_tile, layout ) - endif - case ( 'Cubic-Grid' ) - if( nx_cubic == 0 ) then - call mpp_error(NOTE,'test_group_update: for Cubic_grid mosaic, nx_cubic is zero, '//& - 'No test is done for Cubic-Grid mosaic. ' ) - return - endif - if( nx_cubic .NE. ny_cubic ) then - call mpp_error(NOTE,'test_group_update: for Cubic_grid mosaic, nx_cubic does not equal ny_cubic, '//& - 'No test is done for Cubic-Grid mosaic. ' ) - return - endif - shift = 1 - nx = nx_cubic - ny = ny_cubic - ntiles = 6 - num_contact = 12 - cubic_grid = .true. - if( mod(npes, ntiles) == 0 ) then - npes_per_tile = npes/ntiles - write(outunit,*)'NOTE from update_domains_performance ==> For Mosaic "', trim(type), & - '", each tile will be distributed over ', npes_per_tile, ' processors.' 
- else - call mpp_error(NOTE,'test_group_update: npes should be multiple of ntiles No test is done for '//trim(type)) - return - endif - if(layout_cubic(1)*layout_cubic(2) == npes_per_tile) then - layout = layout_cubic - else - call mpp_define_layout( (/1,nx,1,ny/), npes_per_tile, layout ) - endif - case default - call mpp_error(FATAL, 'test_group_update: no such test: '//type) - end select - - allocate(layout2D(2,ntiles), global_indices(4,ntiles), pe_start(ntiles), pe_end(ntiles) ) - do n = 1, ntiles - pe_start(n) = (n-1)*npes_per_tile - pe_end(n) = n*npes_per_tile-1 - end do - - do n = 1, ntiles - global_indices(:,n) = (/1,nx,1,ny/) - layout2D(:,n) = layout - end do - - allocate(tile1(num_contact), tile2(num_contact) ) - allocate(istart1(num_contact), iend1(num_contact), jstart1(num_contact), jend1(num_contact) ) - allocate(istart2(num_contact), iend2(num_contact), jstart2(num_contact), jend2(num_contact) ) - - !--- define domain - if(folded_north) then - !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST) --- cyclic - tile1(1) = 1; tile2(1) = 1 - istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny - istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny - !--- Contact line 2, between tile 1 (NORTH) and tile 1 (NORTH) --- folded-north-edge - tile1(2) = 1; tile2(2) = 1 - istart1(2) = 1; iend1(2) = nx/2; jstart1(2) = ny; jend1(2) = ny - istart2(2) = nx; iend2(2) = nx/2+1; jstart2(2) = ny; jend2(2) = ny - call mpp_define_mosaic(global_indices, layout2D, domain, ntiles, num_contact, tile1, tile2, & - istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, & - pe_start, pe_end, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, & - name = type, symmetry = .false. ) - else if( cubic_grid ) then - call define_cubic_mosaic(type, domain, (/nx,nx,nx,nx,nx,nx/), (/ny,ny,ny,ny,ny,ny/), & - global_indices, layout2D, pe_start, pe_end ) - endif - - !--- setup data - call mpp_get_compute_domain( domain, isc, iec, jsc, jec ) - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - call mpp_get_memory_domain ( domain, ism, iem, jsm, jem ) - - if(num_fields<1) then - call mpp_error(FATAL, "test_mpp_domains: num_fields must be a positive integer") - endif - - allocate(update_list(num_fields)) - - id1 = mpp_clock_id( type//' group 2D', flags=MPP_CLOCK_SYNC ) - id2 = mpp_clock_id( type//' non-group 2D', flags=MPP_CLOCK_SYNC ) - id3 = mpp_clock_id( type//' non-block group 2D', flags=MPP_CLOCK_SYNC ) - - allocate( a1(ism:iem, jsm:jem, nz, num_fields) ) - allocate( x1(ism:iem+shift,jsm:jem, nz, num_fields) ) - allocate( y1(ism:iem, jsm:jem+shift, nz, num_fields) ) - allocate( a2(ism:iem, jsm:jem, nz, num_fields) ) - allocate( x2(ism:iem+shift,jsm:jem, nz, num_fields) ) - allocate( y2(ism:iem, jsm:jem+shift, nz, num_fields) ) - allocate( base(isc:iec+shift,jsc:jec+shift,nz) ) - a1 = 0; x1 = 0; y1 = 0 - - base = 0 - do k = 1,nz - do j = jsc, jec+shift - do i = isc, iec+shift - base(i,j,k) = k + i*1e-3 + j*1e-6 - end do - end do - end do - - !--- Test for partial direction update - do l =1, num_fields - call mpp_create_group_update(group_update, a1(:,:,:,l), domain, flags=WUPDATE+SUPDATE) - end do - - do l = 1, num_fields - a1(isc:iec,jsc:jec,:,l) = base(isc:iec,jsc:jec,:) + l*1e3 - do k=1,nz - do i=isc-1,iec+1 - a1(i,jsc-1,k,l) = 999; - a1(i,jec+1,k,l) = 999; - enddo - do j=jsc,jec - a1(isc-1,j,k,l) = 999 - a1(iec+1,j,k,l) = 999 - enddo - enddo - enddo - - a2 = a1 - call mpp_do_group_update(group_update, domain, a1(isc,jsc,1,1)) - - do l = 1, num_fields - call 
mpp_update_domains( a2(:,:,:,l), domain, flags=WUPDATE+SUPDATE, complete=l==num_fields ) - enddo - - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied,jsd:jed,:,l),a2(isd:ied,jsd:jed,:,l),type//' CENTER South West '//text) - enddo - - call mpp_clear_group_update(group_update) - - !--- Test for DGRID update - if(type == 'Cubic-Grid' ) then - x1 = 0; y1 = 0 - do l =1, num_fields - call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=DGRID_NE) - end do - - do l = 1, num_fields - y1(isc:iec+shift,jsc:jec, :,l) = base(isc:iec+shift,jsc:jec, :) + l*1e3 + 1e6 - x1(isc:iec, jsc:jec+shift,:,l) = base(isc:iec, jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - x2 = x1; y2 = y1 - call mpp_start_group_update(group_update, domain, x1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, x1(isc,jsc,1,1)) - - do l = 1, num_fields - call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=DGRID_NE, complete=l==num_fields ) - enddo - - !--- compare checksum - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(x1(isd:ied+shift,jsd:jed, :,l),x2(isd:ied+shift,jsd:jed, :,l),type//' DGRID X'//text) - call compare_checksums(y1(isd:ied, jsd:jed+shift,:,l),y2(isd:ied, jsd:jed+shift,:,l),type//' DGRID Y'//text) - enddo - - call mpp_clear_group_update(group_update) - endif - !--- Test for CGRID - a1 = 0; x1 = 0; y1 = 0 - do l =1, num_fields - call mpp_create_group_update(group_update, a1(:,:,:,l), domain) - call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=CGRID_NE) - end do - - do n = 1, num_iter - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec, jsc:jec, :,l) = base(isc:iec, jsc:jec, :) + l*1e3 - x1(isc:iec+shift,jsc:jec, :,l) = base(isc:iec+shift,jsc:jec, :) + l*1e3 + 1e6 - y1(isc:iec, jsc:jec+shift,:,l) = base(isc:iec, jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - a2 = a1; x2 = x1; y2 = y1 - call mpp_clock_begin(id1) - call mpp_do_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id1) - - call mpp_clock_begin(id2) - do l = 1, num_fields - call mpp_update_domains( a2(:,:,:,l), domain, complete=l==num_fields ) - enddo - do l = 1, num_fields - call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=CGRID_NE, complete=l==num_fields ) - enddo - call mpp_clock_end(id2) - - !--- compare checksum - if( n == num_iter ) then - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied, jsd:jed, :,l),a2(isd:ied, jsd:jed, :,l),type//' CENTER '//text) - call compare_checksums(x1(isd:ied+shift,jsd:jed, :,l),x2(isd:ied+shift,jsd:jed, :,l),type//' CGRID X'//text) - call compare_checksums(y1(isd:ied, jsd:jed+shift,:,l),y2(isd:ied, jsd:jed+shift,:,l),type//' CGRID Y'//text) - enddo - endif - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec, jsc:jec, :,l) = base(isc:iec, jsc:jec, :) + l*1e3 - x1(isc:iec+shift,jsc:jec, :,l) = base(isc:iec+shift,jsc:jec, :) + l*1e3 + 1e6 - y1(isc:iec, jsc:jec+shift,:,l) = base(isc:iec, jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - call mpp_clock_begin(id3) - call mpp_start_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id3) - !--- compare checksum - if( n == num_iter ) then - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied, jsd:jed, :,l),a2(isd:ied, jsd:jed, :,l), & - type//' nonblock CENTER '//text) - call compare_checksums(x1(isd:ied+shift,jsd:jed, 
:,l),x2(isd:ied+shift,jsd:jed, :,l), & - type//' nonblock CGRID X'//text) - call compare_checksums(y1(isd:ied, jsd:jed+shift,:,l),y2(isd:ied, jsd:jed+shift,:,l), & - type//' nonblock CGRID Y'//text) - enddo - endif - enddo - - call mpp_clear_group_update(group_update) - - !--- The following is to test overlapping start and complete - if( num_fields > 1 ) then - do l =1, num_fields - call mpp_create_group_update(update_list(l), a1(:,:,:,l), domain) - call mpp_create_group_update(update_list(l), x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=CGRID_NE) - end do - - do n = 1, num_iter - - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec, jsc:jec, :,l) = base(isc:iec, jsc:jec, :) + l*1e3 - x1(isc:iec+shift,jsc:jec, :,l) = base(isc:iec+shift,jsc:jec, :) + l*1e3 + 1e6 - y1(isc:iec, jsc:jec+shift,:,l) = base(isc:iec, jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - do l = 1, num_fields-1 - call mpp_start_group_update(update_list(l), domain, a1(isc,jsc,1,1)) - enddo - - call mpp_complete_group_update(update_list(1), domain, a1(isc,jsc,1,1)) - call mpp_start_group_update(update_list(num_fields), domain, a1(isc,jsc,1,1)) - do l = 2, num_fields - call mpp_complete_group_update(update_list(l), domain, a1(isc,jsc,1,1)) - enddo - !--- compare checksum - if( n == num_iter ) then - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied, jsd:jed, :,l),a2(isd:ied, jsd:jed, :,l), & - type//' multiple nonblock CENTER '//text) - call compare_checksums(x1(isd:ied+shift,jsd:jed, :,l),x2(isd:ied+shift,jsd:jed, :,l), & - type//' multiple nonblock CGRID X'//text) - call compare_checksums(y1(isd:ied, jsd:jed+shift,:,l),y2(isd:ied, jsd:jed+shift,:,l), & - type//' multiple nonblock CGRID Y'//text) - enddo - endif - enddo - endif - - do l =1, num_fields - call mpp_clear_group_update(update_list(l)) - enddo - deallocate(update_list) - - !--- test scalar 4-D variable - call mpp_create_group_update(group_update, a1(:,:,:,:), domain) - - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec, jsc:jec, :,l) = base(isc:iec, jsc:jec, :) + l*1e3 - enddo - a2 = a1; x2 = x1; y2 = y1 - call mpp_clock_begin(id1) - call mpp_do_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id1) - - call mpp_clock_begin(id2) - call mpp_update_domains( a2(:,:,:,:), domain ) - call mpp_clock_end(id2) - - !--- compare checksum - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied, jsd:jed, :,l),a2(isd:ied, jsd:jed, :,l),type//' 4D CENTER '//text) - enddo - - a1 = 0 - do l = 1, num_fields - a1(isc:iec, jsc:jec, :,l) = base(isc:iec, jsc:jec, :) + l*1e3 - enddo - call mpp_clock_begin(id3) - call mpp_start_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id3) - - !--- compare checksum - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied, jsd:jed, :,l),a2(isd:ied, jsd:jed, :,l), & - type//' nonblock 4D CENTER '//text) - enddo - - - - !--- test for BGRID. 
- deallocate(a1, x1, y1) - deallocate(a2, x2, y2) - call mpp_clear_group_update(group_update) - - allocate( a1(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - allocate( x1(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - allocate( y1(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - allocate( a2(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - allocate( x2(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - allocate( y2(ism:iem+shift,jsm:jem+shift, nz, num_fields) ) - - do l =1, num_fields - call mpp_create_group_update(group_update, a1(:,:,:,l), domain, position=CORNER) - call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=BGRID_NE) - end do - - do n = 1, num_iter - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 - x1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 + 1e6 - y1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - a2 = a1; x2 = x1; y2 = y1 - call mpp_clock_begin(id1) - call mpp_do_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id1) - - call mpp_clock_begin(id2) - do l = 1, num_fields - call mpp_update_domains( a2(:,:,:,l), domain, position=CORNER, complete=l==num_fields ) - enddo - do l = 1, num_fields - call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=BGRID_NE, complete=l==num_fields ) - enddo - call mpp_clock_end(id2) - - !--- compare checksum - if( n == num_iter ) then - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied+shift,jsd:jed+shift,:,l),a2(isd:ied+shift,jsd:jed+shift,:,l),type//& - & ' CORNER '// text) - call compare_checksums(x1(isd:ied+shift,jsd:jed+shift,:,l),x2(isd:ied+shift,jsd:jed+shift,:,l),type//& - & ' BGRID X'// text) - call compare_checksums(y1(isd:ied+shift,jsd:jed+shift,:,l),y2(isd:ied+shift,jsd:jed+shift,:,l),type//& - & ' BGRID Y'// text) - enddo - endif - - a1 = 0; x1 = 0; y1 = 0 - do l = 1, num_fields - a1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 - x1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 + 1e6 - y1(isc:iec+shift,jsc:jec+shift,:,l) = base(isc:iec+shift,jsc:jec+shift,:) + l*1e3 + 2*1e6 - enddo - call mpp_clock_begin(id3) - call mpp_start_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_clock_end (id3) - !--- compare checksum - if( n == num_iter ) then - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(a1(isd:ied+shift,jsd:jed+shift,:,l),a2(isd:ied+shift,jsd:jed+shift,:,l), & - type//' nonblockCORNER '//text) - call compare_checksums(x1(isd:ied+shift,jsd:jed+shift,:,l),x2(isd:ied+shift,jsd:jed+shift,:,l), & - type//' nonblock BGRID X'//text) - call compare_checksums(y1(isd:ied+shift,jsd:jed+shift,:,l),y2(isd:ied+shift,jsd:jed+shift,:,l), & - type//' nonblock BGRID Y'//text) - enddo - endif - enddo - - call mpp_clear_group_update(group_update) - - !----------------------------------------------------------------------------- - ! 
test for AGRID vector and scalar pair - !----------------------------------------------------------------------------- - deallocate(x1, y1) - deallocate(x2, y2) - - allocate( x1(ism:iem,jsm:jem, nz, num_fields) ) - allocate( y1(ism:iem,jsm:jem, nz, num_fields) ) - allocate( x2(ism:iem,jsm:jem, nz, num_fields) ) - allocate( y2(ism:iem,jsm:jem, nz, num_fields) ) - - x1 = 0; y1 = 0 - do l = 1, num_fields - x1(isc:iec,jsc:jec,:,l) = base(isc:iec,jsc:jec,:) + l*1e3 + 1e6 - y1(isc:iec,jsc:jec,:,l) = base(isc:iec,jsc:jec,:) + l*1e3 + 2*1e6 - enddo - x2 = x1; y2 = y1 - - do l =1, num_fields - call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=AGRID) - end do - - do l = 1, num_fields - call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=AGRID, complete=l==num_fields ) - enddo - - call mpp_start_group_update(group_update, domain, a1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, a1(isc,jsc,1,1)) - - !--- compare checksum - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(x1(isd:ied,jsd:jed,:,l),x2(isd:ied,jsd:jed,:,l),type//' AGRID X'//text) - call compare_checksums(y1(isd:ied,jsd:jed,:,l),y2(isd:ied,jsd:jed,:,l),type//' AGRID Y'//text) - enddo - - call mpp_clear_group_update(group_update) - - x1 = 0; y1 = 0 - do l = 1, num_fields - x1(isc:iec,jsc:jec,:,l) = base(isc:iec,jsc:jec,:) + l*1e3 + 1e6 - y1(isc:iec,jsc:jec,:,l) = base(isc:iec,jsc:jec,:) + l*1e3 + 2*1e6 - enddo - x2 = x1; y2 = y1 - - do l =1, num_fields - call mpp_create_group_update(group_update, x1(:,:,:,l), y1(:,:,:,l), domain, gridtype=AGRID, flags=SCALAR_PAIR) - end do - - do l = 1, num_fields - call mpp_update_domains( x2(:,:,:,l), y2(:,:,:,l), domain, gridtype=AGRID, flags=SCALAR_PAIR, & - & complete=l==num_fields) - enddo - - call mpp_start_group_update(group_update, domain, x1(isc,jsc,1,1)) - call mpp_complete_group_update(group_update, domain, x1(isc,jsc,1,1)) - - !--- compare checksum - do l = 1, num_fields - write(text, '(i3.3)') l - call compare_checksums(x1(isd:ied,jsd:jed,:,l),x2(isd:ied,jsd:jed,:,l),type//' AGRID SCALAR_PAIR X'//text) - call compare_checksums(y1(isd:ied,jsd:jed,:,l),y2(isd:ied,jsd:jed,:,l),type//' AGRID SCALAR_PAIR Y'//text) - enddo - - call mpp_clear_group_update(group_update) - - deallocate(pe_start, pe_end, tile1, tile2) - deallocate(istart1, iend1, jstart1, jend1) - deallocate(istart2, iend2, jstart2, jend2) - deallocate(layout2D, global_indices) - - deallocate(a1, x1, y1) - deallocate(a2, x2, y2) - deallocate(base) - call mpp_deallocate_domain(domain) - -end subroutine test_group_update +#define FMS_TEST_KIND_ r4_kind +#define TEST_GROUP_UPDATE_ test_group_update_r4 +#include "group_update.inc" +#undef FMS_TEST_KIND_ +#undef TEST_GROUP_UPDATE_ + +#define FMS_TEST_KIND_ r8_kind +#define TEST_GROUP_UPDATE_ test_group_update_r8 +#include "group_update.inc" +#undef FMS_TEST_KIND_ +#undef TEST_GROUP_UPDATE_ !############################################################### diff --git a/test_fms/mpp/test_mpp_domains2.sh b/test_fms/mpp/test_mpp_domains2.sh index fb4de0bf5..056ffb493 100755 --- a/test_fms/mpp/test_mpp_domains2.sh +++ b/test_fms/mpp/test_mpp_domains2.sh @@ -28,6 +28,9 @@ # Set common test settings. . 
../test-lib.sh +# TODO: Enable this test once generalized indices work is complete +SKIP_TESTS="test_mpp_domains2.12" + # TODO edge update, fails on non-blocking with gnu #SKIP_TESTS="$SKIP_TESTS $(basename $0 .sh).6" diff --git a/test_fms/mpp/test_mpp_global_field.F90 b/test_fms/mpp/test_mpp_global_field.F90 index d3362f5f6..514f750c0 100644 --- a/test_fms/mpp/test_mpp_global_field.F90 +++ b/test_fms/mpp/test_mpp_global_field.F90 @@ -16,29 +16,41 @@ !* governing permissions and limitations under the License. !*********************************************************************** program test_mpp_global_field - use platform_mod - use compare_data_checksums - use compare_data_checksums_int - use mpp_mod, only : mpp_init, mpp_error, FATAL, mpp_init_test_requests_allocated - use mpp_mod, only : mpp_declare_pelist, mpp_pe, mpp_npes, mpp_root_pe - !use mpp_mod, only : mpp_clock_begin, mpp_clock_end, mpp_clock_id, MPP_CLOCK_SYNC, MPP_CLOCK_DETAILED - use mpp_domains_mod, only : domain2D - use mpp_domains_mod, only : CENTER, EAST, NORTH, CORNER, XUPDATE, YUPDATE - use mpp_domains_mod, only : mpp_domains_init, mpp_domains_exit - use mpp_domains_mod, only : mpp_define_layout, mpp_define_domains - use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size - use mpp_domains_mod, only : mpp_global_field + use mpp_mod, only: mpp_init, mpp_error, FATAL, mpp_init_test_requests_allocated + use mpp_mod, only: mpp_declare_pelist, mpp_pe, mpp_npes, mpp_root_pe + use mpp_domains_mod, only: domain2D + use mpp_domains_mod, only: CENTER, EAST, NORTH, CORNER, XUPDATE, YUPDATE + use mpp_domains_mod, only: mpp_domains_init, mpp_domains_exit + use mpp_domains_mod, only: mpp_define_layout, mpp_define_domains + use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size + use mpp_domains_mod, only: mpp_global_field + use fms_test_mod, only: permutable_indices_2d, permutable_indices_3d, factorial, arr_init, arr_compare implicit none + type test_params_t + logical :: symmetry + integer :: position, shift(2) + character(15) :: name + end type test_params_t + + type(test_params_t), parameter :: test_params(*) = [ & + test_params_t(symmetry=.false., position=CENTER, shift=[0,0], name="no symmetry"), & + test_params_t(symmetry=.true., position=CENTER, shift=[0,0], name="center symmetry"), & + test_params_t(symmetry=.true., position=CORNER, shift=[1,1], name="corner symmetry"), & + test_params_t(symmetry=.true., position=EAST, shift=[1,0], name="east symmetry"), & + test_params_t(symmetry=.true., position=NORTH, shift=[0,1], name="north symmetry")] + integer, parameter :: nx=20, ny=20, nz=40 integer, parameter :: whalo=2, ehalo=2, shalo=2, nhalo=2 integer, parameter :: stackmax=4000000 + FMS_TEST_TYPE_ (FMS_TEST_KIND_), parameter :: zero = 0 + integer :: pe, npes, ierr integer :: layout(2) - + integer :: i, p !> call mpp_init call mpp_init(test_level=mpp_init_test_requests_allocated) @@ -51,450 +63,92 @@ program test_mpp_global_field call mpp_domains_init() call mpp_domains_set_stack_size(stackmax) - !> call test_global_field_r4_2d - call test_global_field_r4_2d( 'Non-symmetry' ) - call test_global_field_r4_2d( 'Symmetry center' ) - call test_global_field_r4_2d( 'Symmetry corner' ) - call test_global_field_r4_2d( 'Symmetry east' ) - call test_global_field_r4_2d( 'Symmetry north' ) - !> call test_global_field_r8_2d - call test_global_field_r8_2d( 'Non-symmetry' ) - call test_global_field_r8_2d( 'Symmetry center' ) - call test_global_field_r8_2d( 
'Symmetry corner' ) - call test_global_field_r8_2d( 'Symmetry east' ) - call test_global_field_r8_2d( 'Symmetry north' ) - !> call test_global_field_i4_2d - call test_global_field_i4_2d( 'Non-symmetry' ) - call test_global_field_i4_2d( 'Symmetry center' ) - call test_global_field_i4_2d( 'Symmetry corner' ) - call test_global_field_i4_2d( 'Symmetry east' ) - call test_global_field_i4_2d( 'Symmetry north' ) - !> call test_global_field_i8_2d - call test_global_field_i8_2d( 'Non-symmetry' ) - call test_global_field_i8_2d( 'Symmetry center' ) - call test_global_field_i8_2d( 'Symmetry corner' ) - call test_global_field_i8_2d( 'Symmetry east' ) - call test_global_field_i8_2d( 'Symmetry north' ) - !> call test_global_field_r4_3d tests - call test_global_field_r4_3d( 'Non-symmetry' ) - call test_global_field_r4_3d( 'Symmetry center' ) - call test_global_field_r4_3d( 'Symmetry corner' ) - call test_global_field_r4_3d( 'Symmetry east' ) - call test_global_field_r4_3d( 'Symmetry north' ) - !> call test_global_field_r8_3d tests - call test_global_field_r8_3d( 'Non-symmetry' ) - call test_global_field_r8_3d( 'Symmetry center' ) - call test_global_field_r8_3d( 'Symmetry corner' ) - call test_global_field_r8_3d( 'Symmetry east' ) - call test_global_field_r8_3d( 'Symmetry north' ) - !> call test_global_field_i4_3d tests - call test_global_field_i4_3d( 'Non-symmetry' ) - call test_global_field_i4_3d( 'Symmetry center' ) - call test_global_field_i4_3d( 'Symmetry corner' ) - call test_global_field_i4_3d( 'Symmetry east' ) - call test_global_field_i4_3d( 'Symmetry north' ) - !> call test_global_field_i8_3d tests - call test_global_field_i8_3d( 'Non-symmetry' ) - call test_global_field_i8_3d( 'Symmetry center' ) - call test_global_field_i8_3d( 'Symmetry corner' ) - call test_global_field_i8_3d( 'Symmetry east' ) - call test_global_field_i8_3d( 'Symmetry north' ) + do i=1, size(test_params) + ! 2D tests + do p=1,factorial(2) + call run_tests_2d(test_params(i), p) + enddo + + ! 3D tests + do p=1,factorial(3) + call run_tests_3d(test_params(i), p) + enddo + enddo !> exit call mpp_domains_exit() call MPI_finalize(ierr) contains - !> - !> test_global_field_r4_2d - !> - subroutine test_global_field_r4_2d( type ) - - implicit none - character(len=*), intent(in) :: type - - real(kind=r4_kind), parameter :: zero = 0.0 + subroutine run_tests_2d(test_params, p) + type(test_params_t), intent(in) :: test_params + integer, intent(in) :: p !< Permutation of array indices (ranges from 1 to rank!) type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id + integer :: i, j + type(permutable_indices_2d) :: compute, data, global, global_with_halo, global_x, global_y integer, allocatable :: pelist(:) - real(kind=r4_kind), allocatable :: global1(:,:), x(:,:), gcheck(:,:) - + FMS_TEST_TYPE_ (FMS_TEST_KIND_), allocatable :: global0(:,:), local(:,:), global1(:,:) !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. 
) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select + call mpp_define_layout([1,nx,1,ny], npes, layout) + call mpp_define_domains([1,nx,1,ny], layout, domain, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, & + name=trim(test_params%name), symmetry=test_params%symmetry) !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position=CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select - - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift - - !> assign global - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo) ) - global1 = zero - do j=1, nj - do i=1, ni - global1(i,j) = real( i*1e-3+j*1e-6, kind=r4_kind ) - end do - enddo - - allocate( gcheck(ni,nj) ) - - !> allocate for global domain - allocate( x(isd:ied,jsd:jed) ) - x(:,:) = global1(isd:ied,jsd:jed) + call mpp_get_compute_domain(domain, compute%lb(1), compute%ub(1), compute%lb(2), compute%ub(2)) + compute%ub = compute%ub + test_params%shift - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r4 data domain' ) - - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. But for y-update, it is ok. - !! because the pelist in y-update is not continous. 
- allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on r4 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on r4 data domain' ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r4 data domain' ) - - !> test the data on compute domain - - deallocate(x) - allocate( x(is:ie,js:je) ) - x(is:ie,js:je) = global1(is:ie,js:je) - - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r4 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on r4 compute domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on r4 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_r4_2d - !> - !> test_global_field_r8_2d - !> - subroutine test_global_field_r8_2d( type ) - - implicit none - - character(len=*), intent(in) :: type - - real(kind=r8_kind), parameter :: zero = 0.0 - - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - real(kind=r8_kind), allocatable :: global1(:,:), x(:,:), gcheck(:,:) - - - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. 
) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select - - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) + call mpp_get_data_domain(domain, data%lb(1), data%ub(1), data%lb(2), data%ub(2)) + data%ub = data%ub + test_params%shift - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position=CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select + global%lb = [1, 1] + global%ub = [nx, ny] + test_params%shift - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift + global_with_halo%lb = global%lb - [whalo, shalo] + global_with_halo%ub = global%ub + [ehalo, nhalo] - !> assign global - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo) ) - global1 = zero - do j=1, nj - do i=1, ni - global1(i,j) = real( i*1e-3+j*1e-6, kind=r8_kind ) - end do - enddo + global_x%lb = [global%lb(1), compute%lb(2)] + global_x%ub = [global%ub(1), compute%ub(2)] - allocate( gcheck(ni,nj) ) + global_y%lb = [compute%lb(1), global%lb(2)] + global_y%ub = [compute%ub(1), global%ub(2)] - !> allocate for global domain - allocate( x(isd:ied,jsd:jed) ) - x(:,:) = global1(isd:ied,jsd:jed) - - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r8 data domain' ) - - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. But for y-update, it is ok. - !! because the pelist in y-update is not continous. 
- allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on r8 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on r8 data domain' ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r8 data domain' ) - - !> test the data on compute domain - - deallocate(x) - allocate( x(is:ie,js:je) ) - x(is:ie,js:je) = global1(is:ie,js:je) - - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on r8 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on r8 compute domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on r8 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_r8_2d - !> - !> test_global_field_i4_2d - !> - subroutine test_global_field_i4_2d( type ) - - implicit none - - character(len=*), intent(in) :: type - - integer(kind=i4_kind), parameter :: zero = 0 - - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - integer(kind=i4_kind), allocatable :: global1(:,:), x(:,:), gcheck(:,:) - - - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. 
) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select - - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position=CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select - - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift + call compute%permute(p) + call data%permute(p) + call global%permute(p) + call global_with_halo%permute(p) + call global_x%permute(p) + call global_y%permute(p) !> assign global - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo) ) - global1 = zero - do j=1, nj - do i=1, ni - global1(i,j) = int( i*1e3+j*1e6, kind=i4_kind ) - end do - enddo + allocate(global0(global_with_halo%lb(1):global_with_halo%ub(1), global_with_halo%lb(2):global_with_halo%ub(2))) + global0 = zero + call arr_init(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2))) - allocate( gcheck(ni,nj) ) + allocate(global1(global%lb(1):global%ub(1), global%lb(2):global%ub(2))) !> allocate for global domain - allocate( x(isd:ied,jsd:jed) ) - x(:,:) = global1(isd:ied,jsd:jed) + allocate(local(data%lb(1):data%ub(1), data%lb(2):data%ub(2))) + local(:,:) = global0(data%lb(1):data%ub(1), data%lb(2):data%ub(2)) !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i4 data domain' ) - + global1 = zero + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2)), global1, & + 'mpp_global_field on data domain with ' // trim(test_params%name)) !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) + !! be called. For some pe count, pelist1 will be set (only on pe of pelist1) !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist !! on all pe is needed for those partial pelist. But for y-update, it is ok. !! because the pelist in y-update is not continous. 
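The compute/data/global bounds used above are carried in permutable_indices_* helpers whose definitions belong to the new shared test utilities and are not part of this hunk. A guessed minimal sketch of the 2d variant, only to pin down what permute(p) does to the bounds:

    module permutable_indices_sketch
      implicit none
      ! Guessed shape of the helper; the real type ships with the new
      ! common test module, which this diff does not show.
      type :: permutable_indices_2d
        integer :: lb(2) = 1, ub(2) = 1
      contains
        procedure :: permute
      end type permutable_indices_2d
    contains
      ! p = 1 keeps the index order; p = 2, the only other 2d
      ! permutation, swaps the i and j bounds.
      subroutine permute(self, p)
        class(permutable_indices_2d), intent(inout) :: self
        integer, intent(in) :: p
        if (p == 2) then
          self%lb = self%lb([2, 1])
          self%ub = self%ub([2, 1])
        end if
      end subroutine permute
    end module permutable_indices_sketch

With p = 1 the checks run in the usual (i,j) ordering; p = 2 repeats them with the bounds exchanged, which appears to be how the test covers index order without duplicating its body.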
- allocate( pelist(0:layout(1)-1) ) + allocate(pelist(0:layout(1)-1)) do j = 0, layout(2)-1 do i = 0, layout(1)-1 pelist(i) = j*layout(1) + i @@ -504,600 +158,124 @@ subroutine test_global_field_i4_2d( type ) deallocate(pelist) !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on i4 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on i4 data domain' ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i4 data domain' ) - - !> test the data on compute domain - - deallocate(x) - allocate( x(is:ie,js:je) ) - x(is:ie,js:je) = global1(is:ie,js:je) - - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i4 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on i4 compute domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, flags=XUPDATE, position=test_params%position) + call arr_compare(global0(global_x%lb(1):global_x%ub(1),global_x%lb(2):global_x%ub(2)), & + global1(global_x%lb(1):global_x%ub(1),global_x%lb(2):global_x%ub(2)), & + 'mpp_global_field xupdate only on data domain with ' // trim(test_params%name)) !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on i4 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_i4_2d - !> - !> test_global_field_i8_2d - !> - subroutine test_global_field_i8_2d( type ) - - implicit none - - character(len=*), intent(in) :: type - - integer(kind=i8_kind), parameter :: zero = 0 - - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - integer(kind=i8_kind), allocatable :: global1(:,:), x(:,:), gcheck(:,:) - - - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, 
ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. ) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select - - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position=CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select - - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift - - !> assign global - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo) ) global1 = zero - do j=1, nj - do i=1, ni - global1(i,j) = int( i*1e3+j*1e6, kind=i8_kind ) - end do - enddo - - allocate( gcheck(ni,nj) ) - - !> allocate for global domain - allocate( x(isd:ied,jsd:jed) ) - x(:,:) = global1(isd:ied,jsd:jed) - - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i8 data domain' ) + call mpp_global_field(domain, local, global1, flags=YUPDATE, position=test_params%position) + call arr_compare(global0(global_y%lb(1):global_y%ub(1),global_y%lb(2):global_y%ub(2)), & + global1(global_y%lb(1):global_y%ub(1),global_y%lb(2):global_y%ub(2)), & + 'mpp_global_field yupdate only on data domain with ' // trim(test_params%name)) - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. But for y-update, it is ok. - !! because the pelist in y-update is not continous. 
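arr_init and arr_compare replace the hand-rolled fill loops and the per-kind compare_checksums/compare_checksums_int calls; both are generic helpers from the shared test module and are not shown in this hunk. A sketch of what the initializer plausibly does for one of the four combinations, reusing the fill pattern of the deleted loop above:

    ! Hypothetical stand-in for one instance of arr_init; the real
    ! helper is generic over all four type/kind combinations.
    subroutine arr_init_i8(arr)
      implicit none
      integer, parameter :: i8_kind = selected_int_kind(18)
      integer(kind=i8_kind), intent(out) :: arr(:,:)
      integer :: i, j
      do j = 1, size(arr, 2)
        do i = 1, size(arr, 1)
          arr(i, j) = int(i*1e3 + j*1e6, kind=i8_kind)  ! pattern from the old loop
        end do
      end do
    end subroutine arr_init_i8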
- allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on i8 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on i8 data domain' ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i8 data domain' ) + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2)), global1, & + 'mpp_global_field on data domain with ' // trim(test_params%name)) !> test the data on compute domain - deallocate(x) - allocate( x(is:ie,js:je) ) - x(is:ie,js:je) = global1(is:ie,js:je) - - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj), gcheck, type//' mpp_global_field on i8 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je), gcheck(1:ni,js:je), type// & - & ' mpp_global_field xupdate only on i8 compute domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj), gcheck(is:ie,1:nj), type// & - & ' mpp_global_field yupdate only on i8 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_i8_2d - !> - !> test_global_field_r4_3d - !> - subroutine test_global_field_r4_3d( type ) - - implicit none - - character(len=*), intent(in) :: type - - real(kind=r4_kind) :: zero = 0.0 - - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - real(kind=r4_kind), allocatable :: global1(:,:,:), x(:,:,:), gcheck(:,:,:) - - - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( 
(/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. ) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select - - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position = CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select + deallocate(local) + allocate(local(compute%lb(1):compute%ub(1), compute%lb(2):compute%ub(2))) + local(:,:) = global0(compute%lb(1):compute%ub(1), compute%lb(2):compute%ub(2)) - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift - - !> assign global1 - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo,nz) ) global1 = zero - do k=1, nz - do j=1, nj - do i=1, ni - global1(i,j,k) = real( k+i*1e-3+j*1e-6, kind=r4_kind ) - end do - end do - enddo - - allocate( gcheck(ni,nj,nz) ) - - !> for data domain - allocate( x(isd:ied,jsd:jed, nz) ) - x(:,:,:) = global1(isd:ied,jsd:jed,:) - - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on r4 data domain' ) - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. But for y-update, it is ok. - !! because the pelist in y-update is not continous. 
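Every deleted routine repeated the same five-way select-case to choose the domain symmetry, the extra shift point, and the grid position; in the refactor that information becomes data, one test_params_t per configuration. The component names and constructor order below are guesses, but the name-to-shift/position mapping is read straight off the deleted branches:

    type(test_params_t) :: cases(5)

    cases(1) = test_params_t('Non-symmetry',    .false., [0, 0], CENTER)
    cases(2) = test_params_t('Symmetry center', .true.,  [0, 0], CENTER)
    cases(3) = test_params_t('Symmetry corner', .true.,  [1, 1], CORNER)
    cases(4) = test_params_t('Symmetry east',   .true.,  [1, 0], EAST)
    cases(5) = test_params_t('Symmetry north',  .true.,  [0, 1], NORTH)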
- allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2)), global1, & + 'mpp_global_field on compute domain with ' // trim(test_params%name)) !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:),type// & - & ' mpp_global_field xupdate only on r4 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:),type// & - & ' mpp_global_field yupdate only on r4 data domain' ) - - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck,type//' mpp_global_field on r4 data domain' ) - - !> test the data on compute domain - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on r4 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:), & - type//' mpp_global_field xupdate only on r4 compute domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, flags=XUPDATE, position=test_params%position) + call arr_compare(global0(global_x%lb(1):global_x%ub(1),global_x%lb(2):global_x%ub(2)), & + global1(global_x%lb(1):global_x%ub(1),global_x%lb(2):global_x%ub(2)), & + 'mpp_global_field xupdate only on compute domain with ' // trim(test_params%name)) !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:), & - type//' mpp_global_field yupdate only on r4 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_r4_3d - !> - !> test_global_field_r8_3d - !> - subroutine test_global_field_r8_3d( type ) - - implicit none - - character(len=*), intent(in) :: type + global1 = zero + call mpp_global_field(domain, local, global1, flags=YUPDATE, position=test_params%position) + call arr_compare(global0(global_y%lb(1):global_y%ub(1),global_y%lb(2):global_y%ub(2)), & + global1(global_y%lb(1):global_y%ub(1),global_y%lb(2):global_y%ub(2)), & + 'mpp_global_field yupdate only on compute domain with ' // trim(test_params%name)) + end subroutine run_tests_2d - 
real(kind=r8_kind) :: zero = 0.0 + subroutine run_tests_3d(test_params, p) + type(test_params_t), intent(in) :: test_params + integer, intent(in) :: p !< Permutation of array indices (ranges from 1 to rank!) type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id + integer :: i, j + type(permutable_indices_3d) :: compute, data, global, global_with_halo, global_x, global_y integer, allocatable :: pelist(:) - real(kind=r8_kind), allocatable :: global1(:,:,:), x(:,:,:), gcheck(:,:,:) - + FMS_TEST_TYPE_ (FMS_TEST_KIND_), allocatable :: global0(:,:,:), local(:,:,:), global1(:,:,:) !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. ) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select + call mpp_define_layout([1,nx,1,ny], npes, layout) + call mpp_define_domains([1,nx,1,ny], layout, domain, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo, & + name=trim(test_params%name), symmetry=test_params%symmetry) !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position = CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select - - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift - - !> assign global1 - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo,nz) ) - global1 = zero - do k=1, nz - do j=1, nj - do i=1, ni - global1(i,j,k) = real( k+i*1e-3+j*1e-6, kind=r8_kind ) - end do - end do - enddo - - allocate( gcheck(ni,nj,nz) ) - - !> for data domain - allocate( x(isd:ied,jsd:jed, nz) ) - x(:,:,:) = global1(isd:ied,jsd:jed,:) - - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on r8 data domain' ) - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. But for y-update, it is ok. - !! because the pelist in y-update is not continous. 
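run_tests_3d, like its 2d twin, is written once against the FMS_TEST_TYPE_ and FMS_TEST_KIND_ macros; each of the four executables the updated shell script runs is this same source compiled with a different pair of definitions. A self-contained sketch of the mechanism (the #ifndef defaults exist only so the sketch compiles standalone; the real values arrive on the compile line):

    ! macro_demo.F90 -- build e.g. with:
    !   gfortran -cpp -DFMS_TEST_TYPE_=integer -DFMS_TEST_KIND_=i4_kind macro_demo.F90
    #ifndef FMS_TEST_TYPE_
    #define FMS_TEST_TYPE_ real
    #endif
    #ifndef FMS_TEST_KIND_
    #define FMS_TEST_KIND_ r8_kind
    #endif
    program macro_demo
      implicit none
      integer, parameter :: r4_kind = selected_real_kind(6)
      integer, parameter :: r8_kind = selected_real_kind(15)
      integer, parameter :: i4_kind = selected_int_kind(9)
      integer, parameter :: i8_kind = selected_int_kind(18)
      FMS_TEST_TYPE_ (FMS_TEST_KIND_) :: x   ! expands to e.g. real (r8_kind) :: x
      x = 0
      print *, 'declared with kind', kind(x)
    end program macro_demo

Built with -DFMS_TEST_TYPE_=integer -DFMS_TEST_KIND_=i4_kind, the declaration expands to integer (i4_kind) :: x, which is how one test body serves both real and integer fields.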
- allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:),type// & - & ' mpp_global_field xupdate only on r8 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:),type// & - & ' mpp_global_field yupdate only on r8 data domain' ) - - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck,type//' mpp_global_field on r8 data domain' ) - - !> test the data on compute domain - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on r8 compute domain' ) + call mpp_get_compute_domain(domain, compute%lb(1), compute%ub(1), compute%lb(2), compute%ub(2)) + compute%ub(1:2) = compute%ub(1:2) + test_params%shift + compute%lb(3) = 1 + compute%ub(3) = nz - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:), & - type//' mpp_global_field xupdate only on r8 compute domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:), & - type//' mpp_global_field yupdate only on r8 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_r8_3d - !> - !> test_global_field_i4_3d - !> - subroutine test_global_field_i4_3d( type ) - - implicit none - - character(len=*), intent(in) :: type - - integer(kind=i4_kind) :: zero = 0 + !> get data domain + call mpp_get_data_domain(domain, data%lb(1), data%ub(1), data%lb(2), data%ub(2)) + data%ub(1:2) = data%ub(1:2) + test_params%shift + data%lb(3) = 1 + data%ub(3) = nz - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - integer(kind=i4_kind), allocatable :: global1(:,:,:), x(:,:,:), gcheck(:,:,:) + global%lb = [1, 1, 1] + global%ub = [nx, ny, nz] + global%ub(1:2) = global%ub(1:2) + test_params%shift + global_with_halo%lb = global%lb - [whalo, shalo, 0] + global_with_halo%ub = global%ub + [ehalo, nhalo, 0] - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) 
- case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. ) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select + global_x%lb = [global%lb(1), compute%lb(2), global%lb(3)] + global_x%ub = [global%ub(1), compute%ub(2), global%ub(3)] - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) + global_y%lb = [compute%lb(1), global%lb(2), global%lb(3)] + global_y%ub = [compute%ub(1), global%ub(2), global%ub(3)] - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position = CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select + call compute%permute(p) + call data%permute(p) + call global%permute(p) + call global_with_halo%permute(p) + call global_x%permute(p) + call global_y%permute(p) - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift + !> assign global0 + allocate(global0(global_with_halo%lb(1):global_with_halo%ub(1), global_with_halo%lb(2):global_with_halo%ub(2), & + global_with_halo%lb(3):global_with_halo%ub(3))) - !> assign global1 - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo,nz) ) - global1 = zero - do k=1, nz - do j=1, nj - do i=1, ni - global1(i,j,k) = int( k+i*1e3+j*1e6, kind=i4_kind ) - end do - end do - enddo + global0 = zero + call arr_init(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2), global%lb(3):global%ub(3))) - allocate( gcheck(ni,nj,nz) ) + allocate(global1(global%lb(1):global%ub(1), global%lb(2):global%ub(2), global%lb(3):global%ub(3))) !> for data domain - allocate( x(isd:ied,jsd:jed, nz) ) - x(:,:,:) = global1(isd:ied,jsd:jed,:) + allocate(local(data%lb(1):data%ub(1), data%lb(2):data%ub(2), data%lb(3):data%ub(3))) + local(:,:,:) = global0(data%lb(1):data%ub(1), data%lb(2):data%ub(2), data%lb(3):data%ub(3)) !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on i4 data domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2), global%lb(3):global%ub(3)), & + global1, 'mpp_global_field on data domain with ' // trim(test_params%name)) !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) + !! be called. For some pe count, pelist1 will be set (only on pe of pelist1) !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), !! 
deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist !! on all pe is needed for those partial pelist. But for y-update, it is ok. !! because the pelist in y-update is not continous. - allocate( pelist(0:layout(1)-1) ) + allocate(pelist(0:layout(1)-1)) do j = 0, layout(2)-1 do i = 0, layout(1)-1 pelist(i) = j*layout(1) + i @@ -1107,207 +285,50 @@ subroutine test_global_field_i4_3d( type ) deallocate(pelist) !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:),type// & - & ' mpp_global_field xupdate only on i4 data domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, flags=XUPDATE, position=test_params%position) + call arr_compare(global0(global_x%lb(1):global_x%ub(1), global_x%lb(2):global_x%ub(2), & + global_x%lb(3):global_x%ub(3)), global1(global_x%lb(1):global_x%ub(1), & + global_x%lb(2):global_x%ub(2), global_x%lb(3):global_x%ub(3)), & + 'mpp_global_field xupdate only on data domain with ' // trim(test_params%name)) !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:),type// & - & ' mpp_global_field yupdate only on i4 data domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, flags=YUPDATE, position=test_params%position) + call arr_compare(global0(global_y%lb(1):global_y%ub(1), global_y%lb(2):global_y%ub(2), & + global_y%lb(3):global_y%ub(3)), global1(global_y%lb(1):global_y%ub(1), & + global_y%lb(2):global_y%ub(2), global_y%lb(3):global_y%ub(3)), & + 'mpp_global_field yupdate only on data domain with ' // trim(test_params%name)) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck,type//' mpp_global_field on i4 data domain' ) + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2), global%lb(3):global%ub(3)), & + global1, 'mpp_global_field on data domain with ' // trim(test_params%name)) !> test the data on compute domain - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on i4 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:), & - type//' mpp_global_field xupdate only on i4 compute domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), 
gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:), & - type//' mpp_global_field yupdate only on i4 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_i4_3d - !> - !> test_global_field_i8_3d - !> - subroutine test_global_field_i8_3d( type ) - - implicit none - - character(len=*), intent(in) :: type - - integer(kind=i8_kind) :: zero = 0 - - type(domain2D) :: domain - integer :: position, ishift, jshift, ni, nj, i, j, k - integer :: is, ie, js, je, isd, ied, jsd, jed - !integer :: id - integer, allocatable :: pelist(:) - integer(kind=i8_kind), allocatable :: global1(:,:,:), x(:,:,:), gcheck(:,:,:) - - - !> set up domain - call mpp_define_layout( (/1,nx,1,ny/), npes, layout ) - select case(type) - case( 'Non-symmetry' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type ) - case( 'Symmetry center', 'Symmetry corner', 'Symmetry east', 'Symmetry north' ) - call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, & - shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. ) - case default - call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' ) - end select - !> get compute domain - call mpp_get_compute_domain( domain, is, ie, js, je ) - !> get data domain - call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) + deallocate(local) + allocate(local(compute%lb(1):compute%ub(1), compute%lb(2):compute%ub(2), compute%lb(3):compute%ub(3))) + local(:,:,:) = global0(compute%lb(1):compute%ub(1), compute%lb(2):compute%ub(2), compute%lb(3):compute%ub(3)) - !> determine if an extra point is needed - ishift = 0 ; jshift = 0 ; position = CENTER - select case(type) - case ('Symmetry corner') - ishift = 1 ; jshift = 1 ; position=CORNER - case ('Symmetry east') - ishift = 1 ; jshift = 0 ; position=EAST - case ('Symmetry north') - ishift = 0 ; jshift = 1 ; position=NORTH - end select - - ie = ie+ishift ; je = je+jshift - ied = ied+ishift ; jed = jed+jshift - ni = nx+ishift ; nj = ny+jshift - - !> assign global1 - allocate( global1(1-whalo:ni+ehalo,1-shalo:nj+nhalo,nz) ) global1 = zero - do k=1, nz - do j=1, nj - do i=1, ni - global1(i,j,k) = int( k+i*1e3+j*1e6, kind=i8_kind ) - end do - end do - enddo - - allocate( gcheck(ni,nj,nz) ) - - !> for data domain - allocate( x(isd:ied,jsd:jed, nz) ) - x(:,:,:) = global1(isd:ied,jsd:jed,:) - - !> test the data on data domain - gcheck = zero - !id = mpp_clock_id( type//' global field on data domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on i8 data domain' ) - - !> Since in the disjoint redistribute mpp test, pelist1 = (npes/2+1 .. npes-1) - !! will be declared. But for the x-direction global field, mpp_sync_self will - !! be called. For some pe count, pelist1 will be set ( only on pe of pelist1 ) - !! in the mpp_sync_self call, later when calling mpp_declare_pelist(pelist1), - !! deadlock will happen. For example npes = 6 and layout = (2,3), pelist = (4,5) - !! will be set in mpp_sync_self. To solve the problem, some explicit mpp_declare_pelist - !! on all pe is needed for those partial pelist. 
But for y-update, it is ok. - !! because the pelist in y-update is not continous. - allocate( pelist(0:layout(1)-1) ) - do j = 0, layout(2)-1 - do i = 0, layout(1)-1 - pelist(i) = j*layout(1) + i - end do - call mpp_declare_pelist(pelist) - end do - deallocate(pelist) + call mpp_global_field(domain, local, global1, position=test_params%position) + call arr_compare(global0(global%lb(1):global%ub(1), global%lb(2):global%ub(2), global%lb(3):global%ub(3)), & + global1, 'mpp_global_field on compute domain with ' // trim(test_params%name)) !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:),type// & - & ' mpp_global_field xupdate only on i8 data domain' ) - - !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:),type// & - & ' mpp_global_field yupdate only on i8 data domain' ) - - !call mpp_clock_begin(id) - call mpp_global_field( domain, x, gcheck, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck,type//' mpp_global_field on i8 data domain' ) - - !> test the data on compute domain - gcheck = zero - !id = mpp_clock_id( type//' global field on compute domain', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED ) - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, position=position ) - !call mpp_clock_end(id) - !>compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,1:nj,:), gcheck, type//' mpp_global_field on i8 compute domain' ) - - !> xupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=XUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(1:ni,js:je,:), gcheck(1:ni,js:je,:), & - type//' mpp_global_field xupdate only on i8 compute domain' ) + global1 = zero + call mpp_global_field(domain, local, global1, flags=XUPDATE, position=test_params%position) + call arr_compare(global0(global_x%lb(1):global_x%ub(1), global_x%lb(2):global_x%ub(2), & + global_x%lb(3):global_x%ub(3)), global1(global_x%lb(1):global_x%ub(1), & + global_x%lb(2):global_x%ub(2), global_x%lb(3):global_x%ub(3)), & + 'mpp_global_field xupdate only on compute domain with ' // trim(test_params%name)) !> yupdate - gcheck = zero - !call mpp_clock_begin(id) - call mpp_global_field( domain, x(is:ie,js:je,:), gcheck, flags=YUPDATE, position=position ) - !call mpp_clock_end(id) - !> compare checksums between global and x arrays - call compare_checksums_int( global1(is:ie,1:nj,:), gcheck(is:ie,1:nj,:), & - type//' mpp_global_field yupdate only on i8 compute domain' ) - - deallocate(global1, gcheck, x) - - end subroutine test_global_field_i8_3d - + global1 = zero + call mpp_global_field(domain, local, global1, flags=YUPDATE, position=test_params%position) + call arr_compare(global0(global_y%lb(1):global_y%ub(1), global_y%lb(2):global_y%ub(2), & + global_y%lb(3):global_y%ub(3)), global1(global_y%lb(1):global_y%ub(1), & + global_y%lb(2):global_y%ub(2), 
global_y%lb(3):global_y%ub(3)), & + 'mpp_global_field yupdate only on compute domain with ' // trim(test_params%name)) + end subroutine run_tests_3d end program test_mpp_global_field diff --git a/test_fms/mpp/test_mpp_global_field.sh b/test_fms/mpp/test_mpp_global_field.sh index bf32fd12a..4ebfa49d4 100755 --- a/test_fms/mpp/test_mpp_global_field.sh +++ b/test_fms/mpp/test_mpp_global_field.sh @@ -27,8 +27,19 @@ # Set common test settings. . ../test-lib.sh +# TODO: Enable these tests once generalized indices work is complete +SKIP_TESTS="test_mpp_global_field.1 \ + test_mpp_global_field.2 \ + test_mpp_global_field.3 \ + test_mpp_global_field.4" + touch input.nml -test_expect_success "mpp global field functions with mixed precision" ' - mpirun -n 4 ./test_mpp_global_field -' + +for datatype in r4 r8 i4 i8 +do + test_expect_success "mpp global field functions ($datatype)" " + mpirun -n 4 ./test_mpp_global_field_$datatype + " +done + test_done
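The excerpt ends before the main program, so the wiring of parameter sets to permutations is not visible here; the only firm clue is the doc-comment on p ("ranges from 1 to rank!"). A hypothetical sketch of the driver shape, reusing the cases array guessed earlier:

    ! Hypothetical driver; the real one sits above this excerpt.
    integer :: n, p

    do n = 1, size(cases)
      do p = 1, 2        ! 2! index orderings for the 2d checks
        call run_tests_2d(cases(n), p)
      end do
      do p = 1, 6        ! 3! index orderings for the 3d checks
        call run_tests_3d(cases(n), p)
      end do
    end do

On the script side, the single mixed-precision invocation becomes one test per datatype executable, with all four parked in SKIP_TESTS until the generalized-indices work is finished.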