diff --git a/CMakeLists.txt b/CMakeLists.txt
index 86895816e..39bf51cd7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -48,32 +48,17 @@ esma_add_library (${this} SRCS ${srcs} DEPENDENCIES MAPL GFTL_SHARED::gftl-shared esmf)
-if (FV_PRECISION STREQUAL R4)
- target_link_libraries (${this} PUBLIC FMS::fms_r4)
- target_compile_definitions (${this} PRIVATE SINGLE_FV OVERLOAD_R4)
-elseif (FV_PRECISION STREQUAL R4R8) # FV is R4 but FMS is R8
- # fvdycore needs r4 .mod interfaces
- get_target_property(inc_r4 FMS::fms_r4 INTERFACE_INCLUDE_DIRECTORIES)
- target_include_directories(${this} PRIVATE $<BUILD_INTERFACE:${inc_r4}>)
-
- # But fvdycore should not *compile* with FMS::fms_r8 includes
- target_link_libraries(${this} PUBLIC $<LINK_ONLY:FMS::fms_r8>)
+target_link_libraries (${this} PUBLIC FMS::fms)
- target_compile_definitions (${this} PRIVATE SINGLE_FV OVERLOAD_R4)
-
- add_dependencies(${this} FMS::fms_r4 FMS::fms_r8)
+if (FV_PRECISION STREQUAL R4)
+ target_compile_definitions (${this} PRIVATE -DSINGLE_FV -DOVERLOAD_R4)
elseif (FV_PRECISION STREQUAL R8)
- target_link_libraries (${this} PUBLIC FMS::fms_r8)
string(REPLACE " " ";" tmp ${FREAL8})
foreach(flag ${tmp})
target_compile_options (${this} PRIVATE $<$<COMPILE_LANGUAGE:Fortran>:${flag}>)
endforeach()
endif ()
-#if (CMAKE_Fortran_COMPILER_ID MATCHES Intel AND CMAKE_BUILD_TYPE MATCHES Release)
-#set (CMAKE_Fortran_FLAGS_RELEASE "${GEOS_Fortran_FLAGS_AGGRESSIVE}")
-#endif ()
-
if (CRAY_POINTER)
set_target_properties (${this} PROPERTIES COMPILE_FLAGS ${CRAY_POINTER})
endif()
@@ -90,6 +75,17 @@ target_compile_definitions (${this} PRIVATE MOIST_CAPPA USE_COND)
ecbuild_info("This sets the INTERNAL_FILE_NML preprocessor variable")
target_compile_definitions (${this} PRIVATE INTERNAL_FILE_NML)
+include(check_fms1_io_support)
+check_fms1_io_support(FMS1_IO_SUPPORTED)
+if(FMS1_IO_SUPPORTED)
+ message(STATUS "Using deprecated FMS1 I/O for ${this}")
+ target_compile_definitions (${this} PRIVATE FMS1_IO)
+else()
+ message(STATUS "Using FMS2 I/O for ${this}")
+ ecbuild_info("This sets the NO_GFDL_SHARED preprocessor variable")
+ target_compile_definitions (${this} PRIVATE NO_GFDL_SHARED)
+endif()
+
esma_add_subdirectories( model/mapz-driver model/tp-core-driver)
diff --git a/model/boundary.F90 b/model/boundary.F90
index 4c5179a02..a2da2d4e2 100644
--- a/model/boundary.F90
+++ b/model/boundary.F90
@@ -1,21 +1,21 @@
!***********************************************************************
-!* GNU Lesser General Public License
+!* GNU Lesser General Public License
!*
!* This file is part of the FV3 dynamical core.
!*
-!* The FV3 dynamical core is free software: you can redistribute it
+!* The FV3 dynamical core is free software: you can redistribute it
!* and/or modify it under the terms of the
!* GNU Lesser General Public License as published by the
-!* Free Software Foundation, either version 3 of the License, or
+!* Free Software Foundation, either version 3 of the License, or
!* (at your option) any later version.
!*
-!* The FV3 dynamical core is distributed in the hope that it will be
-!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
!* See the GNU General Public License for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
-!* License along with the FV3 dynamical core.
+!* License along with the FV3 dynamical core.
!* If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
@@ -53,14 +53,18 @@ module boundary_mod
!
! mpp_domains_mod
! mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain,
-! CENTER, CORNER, NORTH, EAST,nest_domain_type, WEST, SOUTH,
+! CENTER, CORNER, NORTH, EAST,nest_domain_type, WEST, SOUTH,
! mpp_get_C2F_index, mpp_update_nest_fine,mpp_global_field, mpp_get_pelist
! mpp_get_F2C_index, mpp_update_nest_coarse
!
!
use fv_mp_mod, only: ng, isc,jsc,iec,jec, isd,jsd,ied,jed, is,js,ie,je, is_master
+#if defined (SINGLE_FV)
+ use constantsr4_mod, only: grav
+#else
use constants_mod, only: grav
+#endif
use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain
use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST
@@ -82,10 +86,10 @@ module boundary_mod
public fill_nested_grid, nested_grid_BC_apply_intT
public nested_grid_BC_send, nested_grid_BC_recv, nested_grid_BC_save_proc
-!>@brief The interface 'nested_grid_BC' includes subroutines 'nested_grid_BC_2d' and 'nested_grid_BC_3d'
+!>@brief The interface 'nested_grid_BC' includes subroutines 'nested_grid_BC_2d' and 'nested_grid_BC_3d'
!! that fetch coarse-grid data, interpolate it to nested-grid boundary cells,
!! apply the interpolated data directly to the boundary halo cells without saving the datatype.
- interface nested_grid_BC
+ interface nested_grid_BC
module procedure nested_grid_BC_2d
module procedure nested_grid_BC_mpp
module procedure nested_grid_BC_mpp_send
@@ -93,7 +97,7 @@ module boundary_mod
module procedure nested_grid_BC_3d
end interface
-!>@brief The interface 'fill_nested_grid' includes subroutines 'fill_nested_grid_2d' and 'fill_nested_grid_3d'
+!>@brief The interface 'fill_nested_grid' includes subroutines 'fill_nested_grid_2d' and 'fill_nested_grid_3d'
!! that fill nested-grid data with interpolated data from the coarse grid.
!>@details This is one method to create a new nested grid, and may be useful when cold-starting.
interface fill_nested_grid
@@ -102,7 +106,7 @@ module boundary_mod
end interface
!>@brief The interface 'update_coarse_grid_mpp' contains subroutines that
-!! fetch data from the nested grid and
+!! fetch data from the nested grid and
!! interpolate it to the coarse grid using the method described by
!! \cite harris2013two.
interface update_coarse_grid
@@ -153,7 +157,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in)
else
debug = .false.
end if
-
+
if (is == 1) then
if (pd) then
@@ -181,7 +185,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in)
end do
end if
-
+
end if
if (js == 1) then
@@ -211,7 +215,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in)
end do
end if
-
+
end if
if (ie == npx - 1) then
@@ -220,7 +224,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in)
do j=jstart,jend+jstag
do i=ie+1+istag,ied+istag
-
+
if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. &
q(ie+istag,j) < q(ie+istag-1,j)) then
q(i,j) = q(i-1,j)
@@ -291,7 +295,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in)
else
q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) )
end if
-
+
if (real(j) <= 1. - q(i,1)/(q(i,2) - q(i,1) + 1.e-12) .and.
q(i,2) > q(i,1)) then q(i,j) = q(i,j) + 0.5*q(i,j+1) @@ -306,10 +310,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=jsd,0 do i=isd,0 - + q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) ) + & 0.5*( real(2-j)*q(i,1) - real(1-j)*q(i,2) ) - + end do end do @@ -338,7 +342,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else q(i,j) = q(i,j) + 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) end if - + end do end do @@ -346,10 +350,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=isd,0 - + q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) ) + & 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) - + end do end do @@ -363,8 +367,8 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=ie+1+istag,ied+istag - - + + if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. & q(ie+istag-1,j) > q(ie+istag,j)) then q(i,j) = 0.5*q(i-1,j) @@ -378,7 +382,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else q(i,j) = q(i,j) + 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) end if - + end do end do @@ -386,10 +390,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=ie+1+istag,ied+istag - + q(i,j) = 0.5*( real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j) ) + & 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) - + end do end do @@ -403,22 +407,22 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=0,jsd,-1 do i=ie+1+istag,ied+istag - - + + if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. & q(ie+istag-1,j) > q(ie+istag,j)) then q(i,j) = 0.5*q(i-1,j) else q(i,j) = 0.5*(real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j)) end if - + if (real(j) <= 1. - q(i,1)/(q(i,2) - q(i,1) + 1.e-12) .and. 
& q(i,2) > q(i,1)) then q(i,j) = q(i,j) + 0.5*q(i,j+1) else q(i,j) = q(i,j) + 0.5*(real(2-j)*q(i,1) - real(1-j)*q(i,2)) end if - + end do end do @@ -427,10 +431,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=jsd,0 do i=ie+1+istag,ied+istag - + q(i,j) = 0.5*( real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j) ) + & 0.5*( real(2-j)*q(i,1) - real(1-j)*q(i,2) ) - + end do end do @@ -446,7 +450,7 @@ subroutine fill_nested_grid_2D(var_nest, var_coarse, ind, wt, istag, jstag, & type(fv_grid_bounds_type), intent(IN) :: bd real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag), intent(INOUT) :: var_nest - real, dimension(isg:ieg+istag,jsg:jeg+jstag), intent(IN) :: var_coarse + real, dimension(isg:ieg+istag,jsg:jeg+jstag), intent(IN) :: var_coarse integer, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,2), intent(IN) :: ind real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,4), intent(IN) :: wt integer, intent(IN) :: istag, jstag, isg, ieg, jsg, jeg @@ -499,13 +503,13 @@ subroutine fill_nested_grid_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do end subroutine fill_nested_grid_2D - + subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & isg, ieg, jsg, jeg, npz, bd, istart_in, iend_in, jstart_in, jend_in) @@ -566,7 +570,7 @@ subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -574,7 +578,7 @@ subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & end do end subroutine fill_nested_grid_3D - + subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, jstag, & npx, npy, npz, bd, isg, ieg, jsg, jeg, nstep_in, nsplit_in, proc_in) @@ -686,7 +690,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*wbuffer(ic, jc, k) + & wt(i,j,2)*wbuffer(ic, jc+1,k) + & wt(i,j,3)*wbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*wbuffer(ic+1,jc, k) + wt(i,j,4)*wbuffer(ic+1,jc, k) end do end do @@ -718,7 +722,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*sbuffer(ic, jc, k) + & wt(i,j,2)*sbuffer(ic, jc+1,k) + & wt(i,j,3)*sbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*sbuffer(ic+1,jc, k) + wt(i,j,4)*sbuffer(ic+1,jc, k) end do end do @@ -738,7 +742,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*ebuffer(ic, jc, k) + & wt(i,j,2)*ebuffer(ic, jc+1,k) + & wt(i,j,3)*ebuffer(ic+1,jc+1,k) + & - wt(i,j,4)*ebuffer(ic+1,jc, k) + wt(i,j,4)*ebuffer(ic+1,jc, k) end do end do @@ -770,7 +774,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*nbuffer(ic, jc, k) + & wt(i,j,2)*nbuffer(ic, jc+1,k) + & wt(i,j,3)*nbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*nbuffer(ic+1,jc, k) + wt(i,j,4)*nbuffer(ic+1,jc, k) end do end do @@ -937,7 +941,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*wbuffer(ic, jc) + & wt(i,j,2)*wbuffer(ic, jc+1) + & wt(i,j,3)*wbuffer(ic+1,jc+1) + & - wt(i,j,4)*wbuffer(ic+1,jc) + wt(i,j,4)*wbuffer(ic+1,jc) end do end do @@ -967,7 +971,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, 
var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*sbuffer(ic, jc) + & wt(i,j,2)*sbuffer(ic, jc+1) + & wt(i,j,3)*sbuffer(ic+1,jc+1) + & - wt(i,j,4)*sbuffer(ic+1,jc) + wt(i,j,4)*sbuffer(ic+1,jc) end do end do @@ -985,7 +989,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*ebuffer(ic, jc) + & wt(i,j,2)*ebuffer(ic, jc+1) + & wt(i,j,3)*ebuffer(ic+1,jc+1) + & - wt(i,j,4)*ebuffer(ic+1,jc) + wt(i,j,4)*ebuffer(ic+1,jc) end do end do @@ -1015,7 +1019,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*nbuffer(ic, jc) + & wt(i,j,2)*nbuffer(ic, jc+1) + & wt(i,j,3)*nbuffer(ic+1,jc+1) + & - wt(i,j,4)*nbuffer(ic+1,jc) + wt(i,j,4)*nbuffer(ic+1,jc) end do end do @@ -1073,7 +1077,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1103,7 +1107,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1121,7 +1125,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1152,7 +1156,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1209,7 +1213,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1241,7 +1245,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1261,7 +1265,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1293,7 +1297,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1344,7 +1348,7 @@ subroutine nested_grid_BC_recv(nest_domain, istag, jstag, npz, & integer, intent(IN) :: istag, jstag, npz type(fv_nest_BC_type_3d), intent(INOUT), target :: nest_BC_buffers - + real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var_coarse_dummy integer :: position @@ -1386,7 +1390,7 @@ subroutine nested_grid_BC_recv(nest_domain, istag, jstag, npz, & nest_BC_buffers%west_t1(i,j,k) = 0. 
enddo enddo - enddo + enddo else allocate(nest_BC_buffers%west_t1(1,1,1)) nest_BC_buffers%west_t1(1,1,1) = 0. @@ -1442,7 +1446,7 @@ subroutine nested_grid_BC_recv(nest_domain, istag, jstag, npz, & end subroutine nested_grid_BC_recv -!>@brief The subroutine 'nested_grid_BC_save_proc' saves data received by 'nested_grid_BC_recv' +!>@brief The subroutine 'nested_grid_BC_save_proc' saves data received by 'nested_grid_BC_recv' !! into the datatype 'fv_nest_BC_type'. subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & npx, npy, npz, bd, nest_BC, nest_BC_buffers, pd_in) @@ -1457,7 +1461,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & !!NOTE: if declaring an ALLOCATABLE array with intent(OUT), the resulting dummy array !! will NOT be allocated! This goes for allocatable members of derived types as well. type(fv_nest_BC_type_3d), intent(INOUT), target :: nest_BC, nest_BC_buffers - + real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var_coarse_dummy real, dimension(:,:,:), pointer :: var_east, var_west, var_south, var_north @@ -1515,7 +1519,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_west(ic, jc,k) + & wt(i,j,2)*buf_west(ic, jc+1,k) + & wt(i,j,3)*buf_west(ic+1,jc+1,k) + & - wt(i,j,4)*buf_west(ic+1,jc,k) + wt(i,j,4)*buf_west(ic+1,jc,k) end do end do @@ -1530,7 +1534,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & var_west(i,j,k) = max(var_west(i,j,k), 0.5*nest_BC%west_t0(i,j,k)) end do end do - end do + end do endif end if @@ -1562,7 +1566,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_south(ic, jc,k) + & wt(i,j,2)*buf_south(ic, jc+1,k) + & wt(i,j,3)*buf_south(ic+1,jc+1,k) + & - wt(i,j,4)*buf_south(ic+1,jc,k) + wt(i,j,4)*buf_south(ic+1,jc,k) end do end do @@ -1578,7 +1582,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do endif end if @@ -1599,7 +1603,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_east(ic, jc,k) + & wt(i,j,2)*buf_east(ic, jc+1,k) + & wt(i,j,3)*buf_east(ic+1,jc+1,k) + & - wt(i,j,4)*buf_east(ic+1,jc,k) + wt(i,j,4)*buf_east(ic+1,jc,k) end do end do @@ -1615,7 +1619,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do endif end if @@ -1647,7 +1651,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_north(ic, jc,k) + & wt(i,j,2)*buf_north(ic, jc+1,k) + & wt(i,j,3)*buf_north(ic+1,jc+1,k) + & - wt(i,j,4)*buf_north(ic+1,jc,k) + wt(i,j,4)*buf_north(ic+1,jc,k) end do end do @@ -1663,7 +1667,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do endif end if @@ -1671,11 +1675,11 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end subroutine nested_grid_BC_save_proc - ! A NOTE ON BCTYPE: currently only an interpolation BC is implemented, + ! A NOTE ON BCTYPE: currently only an interpolation BC is implemented, ! bctype >= 2 currently correspond ! to a flux BC on the tracers ONLY, which is implemented in fv_tracer. -!>@brief The subroutine 'nested_grid_BC_apply_intT' performs linear interpolation or +!>@brief The subroutine 'nested_grid_BC_apply_intT' performs linear interpolation or !! extrapolation in time for saved BC data, then applies the interlpolated !! data to nested-grid boundary cells. 
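[For reference: the 'split' and 'step' arguments of 'nested_grid_BC_apply_intT' drive a simple linear weighting in time between the saved t0 and t1 boundary states; for step/split > 1 the same formula extrapolates, which is what "extrapolation in time" in the comment above means. A minimal, self-contained sketch of that convex combination follows — names and values are illustrative only, not from this patch; the real routine applies the weighting per (i,j,k) halo cell.]

```fortran
program bc_time_interp_demo
  implicit none
  ! Hypothetical scalar stand-ins for one boundary halo cell.
  real :: var_t0, var_t1, var_bc
  real :: split, step, alpha

  var_t0 = 280.0   ! saved BC value at the start of the large timestep (t0)
  var_t1 = 284.0   ! saved BC value at the end of the large timestep (t1)
  split  = 6.0     ! small steps per large step, e.g. real(n_split*k_split)
  step   = 2.5     ! current small-step position (the +0.5 offsets seen in
                   ! the dyn_core calls make this fractional)

  alpha  = step/split                           ! fraction of the large step elapsed
  var_bc = (1.0 - alpha)*var_t0 + alpha*var_t1  ! linear in time; extrapolates if alpha > 1
  print '(a, f8.3)', 'interpolated BC value: ', var_bc   ! prints 281.667
end program bc_time_interp_demo
```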
subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & @@ -1687,7 +1691,7 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & integer, intent(IN) :: istag, jstag, npx, npy, npz real, intent(IN) :: split, step integer, intent(IN) :: bctype - + type(fv_nest_BC_type_3D), intent(IN), target :: BC real, pointer, dimension(:,:,:) :: var_t0, var_t1 @@ -1884,7 +1888,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, if (istag == 0 .and. jstag == 0) then select case (nestupdate) case (1,2,6,7,8) - + !$NO-MP parallel do default(none) shared(npz,js_n,je_n,is_n,ie_n,var_nest_send,var_nest,area) do k=1,npz do j=js_n,je_n @@ -1899,7 +1903,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end select else if (istag == 0 .and. jstag > 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) !$NO-MP parallel do default(none) shared(npz,js_n,je_n,is_n,ie_n,var_nest_send,var_nest,dx) @@ -1909,7 +1913,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, var_nest_send(i,j,k) = var_nest(i,j,k)*dx(i,j) - + end do end do end do @@ -1921,7 +1925,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end select else if (istag > 0 .and. jstag == 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average @@ -1943,7 +1947,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end select else - + call mpp_error(FATAL, "Cannot have both nonzero istag and jstag.") endif @@ -1959,7 +1963,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then if (istag == 0 .and. jstag == 0) then - select case (nestupdate) + select case (nestupdate) case (1,2,6,7,8) ! 1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update !$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) & @@ -1982,7 +1986,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, do ini=in,in+r-1 val = val + nest_dat(ini,jnj,k) end do - end do + end do !var_coarse(i,j,k) = val/r**2. @@ -2005,7 +2009,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, else if (istag == 0 .and. jstag > 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) !$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) & @@ -2043,7 +2047,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, else if (istag > 0 .and. 
jstag == 0) then
- select case (nestupdate)
+ select case (nestupdate)
case (1,6,7,8)
!averaging update; in-line average for face-averaged values instead of areal average
!$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) &
@@ -2084,9 +2088,9 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update,
endif
deallocate(nest_dat)
-
+
end subroutine update_coarse_grid_mpp
-
+
end module boundary_mod
diff --git a/model/dyn_core.F90 b/model/dyn_core.F90
index 8be46319b..f4aa132ba 100644
--- a/model/dyn_core.F90
+++ b/model/dyn_core.F90
@@ -1,23 +1,23 @@
!***********************************************************************
-!* GNU Lesser General Public License
+!* GNU Lesser General Public License
!*
!* This file is part of the FV3 dynamical core.
!*
-!* The FV3 dynamical core is free software: you can redistribute it
+!* The FV3 dynamical core is free software: you can redistribute it
!* and/or modify it under the terms of the
!* GNU Lesser General Public License as published by the
-!* Free Software Foundation, either version 3 of the License, or
+!* Free Software Foundation, either version 3 of the License, or
!* (at your option) any later version.
-!* The FV3 dynamical core is distributed in the hope that it will be
-!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
!* See the GNU General Public License for more details.
!* You should have received a copy of the GNU Lesser General Public
-!* License along with the FV3 dynamical core.
+!* License along with the FV3 dynamical core.
!* If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!
-!>@brief The module 'dyn_core' peforms the Lagrangian acoustic dynamics
+!>@brief The module 'dyn_core' peforms the Lagrangian acoustic dynamics
!! described by \cite lin2004vertically.
!>@details The forward timestep is handled by routines in 'sw_core.F90'.
!! The backwards-in-time PGF is evaluated in one_grad_p or split_p_grad (hydrostatic) and nh_p_grad (nonhydrostatic)
@@ -53,7 +53,7 @@ module dyn_core_mod
!
!
! fv_arrays_mod
-! fv_grid_type, fv_flags_type, fv_nest_type,
+! fv_grid_type, fv_flags_type, fv_nest_type,
! fv_diag_type,fv_grid_bounds_type, R_GRID
!
!
@@ -62,7 +62,7 @@ module dyn_core_mod
!
!
! fv_mp_mod
-! is_master, start_group_halo_update,
+! is_master, start_group_halo_update,
! complete_group_halo_update,group_halo_update_type
!
!
@@ -103,8 +103,13 @@ module dyn_core_mod
!
!
- use constants_mod, only: rdgas, radius, cp_air, pi
- use mpp_mod, only: mpp_pe
+#if defined (SINGLE_FV)
+ use constantsr4_mod, &
+#else
+ use constants_mod, &
+#endif
+ only: rdgas, radius, cp_air, pi
+ use mpp_mod, only: mpp_pe
use mpp_domains_mod, only: CGRID_NE, DGRID_NE, AGRID, mpp_get_boundary, mpp_update_domains, &
domain2d
use mpp_parameter_mod, only: CORNER
@@ -159,9 +164,9 @@ module dyn_core_mod
!-----------------------------------------------------------------------
!
dyn_core :: FV Lagrangian dynamics driver !----------------------------------------------------------------------- - + subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, cp, akap, cappa, grav, hydrostatic, & - u, v, w, delz, pt, q, delp, pe, pk, phis, varflt, ws, omga, ptop, pfull, ua, va, & + u, v, w, delz, pt, q, delp, pe, pk, phis, varflt, ws, omga, ptop, pfull, ua, va, & dudt_rf, dvdt_rf, dwdt_rf, uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, dpx, & ks, gridstruct, flagstruct, neststruct, idiag, bd, domain, & init_step, i_pack, end_step, diss_est,time_total) @@ -188,12 +193,12 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c real, intent(inout) :: cappa(bd%isd:,bd%jsd:,1:) !< moist kappa real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) !< temperature (K) real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) !< pressure thickness (pascal) - real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) ! + real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) ! real, intent(in), optional:: time_total !< total time (seconds) since start real, intent(inout) :: diss_est(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) !< skeb dissipation estimate !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -500,7 +505,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c else last_step = .false. endif - + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(8), domain) call timing_on('COMM_NH') @@ -564,7 +569,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c enddo enddo - else + else !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz) do k=1, npz+1 do j=jsd,jed @@ -648,14 +653,14 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c ! domain and of each processor element. We must either ! apply an interpolated BC, or extrapolate into the ! boundary halo - ! NOTE: + ! NOTE: !The update_domains calls for uc and vc need to go BEFORE the BCs to ensure cross-restart !bitwise-consistent solutions when doing the spatial extrapolation; should not make a !difference for interpolated BCs from the coarse grid. call nested_grid_BC_apply_intT(vc, & - 0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*k_split), & + 0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*k_split), & neststruct%vc_BC, bctype=neststruct%nestbctype ) call nested_grid_BC_apply_intT(uc, & 1, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*k_split), & @@ -724,7 +729,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c damp_t = damp_vt(k) ! External diffusion only in RI Z-Filter levels if ( npz==1 .or. k<=flagstruct%n_zfilter ) then - d_ext(k) = flagstruct%d_ext + d_ext(k) = flagstruct%d_ext else d_ext(k) = 0.0 endif @@ -743,14 +748,14 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c nord_w=0; damp_w = d2_divg if ( flagstruct%do_vort_damp ) then ! damping on delp and vorticity: - nord_v(k)=0; + nord_v(k)=0; damp_vt(k) = 0.5*d2_divg endif elseif ( k<=MAX(2,flagstruct%n_sponge-1) .and. 
flagstruct%d2_bg_k2>0.01 ) then nord_k=0; d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k2) nord_w=0; damp_w = d2_divg if ( flagstruct%do_vort_damp ) then - nord_v(k)=0; + nord_v(k)=0; damp_vt(k) = 0.5*d2_divg endif elseif ( k<=MAX(3,flagstruct%n_sponge) .and. flagstruct%d2_bg_k2>0.05 ) then @@ -857,7 +862,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c enddo enddo do i=is,iep1 - if (wk(i,j) /= 0.0) then + if (wk(i,j) /= 0.0) then divg2(i,j) = divg2(i,j)/wk(i,j) else divg2(i,j) = 0.0 @@ -886,7 +891,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c #ifdef USE_COND call nested_grid_BC_apply_intT(q_con, & 0, 0, npx, npy, npz, bd, split_timestep_BC+1, real(n_split*k_split), & - neststruct%q_con_BC, bctype=neststruct%nestbctype ) + neststruct%q_con_BC, bctype=neststruct%nestbctype ) #endif #endif end if @@ -945,7 +950,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c call nested_grid_BC_apply_intT(delz, & 0, 0, npx, npy, npz, bd, split_timestep_BC+1., real(n_split*k_split), & neststruct%delz_BC, bctype=neststruct%nestbctype ) - + !Compute gz/pkc/pk3; note that now pkc should be nonhydro pert'n pressure call nest_halo_nh(ptop, grav, akap, cp, delp, delz, pt, phis, & #ifdef USE_COND @@ -1097,7 +1102,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, k_split, n_split, zvir, c v(ie+1,j,k) = ebuffer(j-js+1,k) enddo enddo - endif + endif #ifndef ROT3 if ( it/=n_split) & @@ -1468,7 +1473,7 @@ subroutine adv_pe(ua, va, pem, om, gridstruct, bd, npx, npy, npz, ng) do j=js,je do i=is,ie do n=1,3 - v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j) + v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j) enddo enddo enddo @@ -1592,7 +1597,7 @@ subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, n real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< perturbation pressure real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< p**kappa real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< g * h -real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz) type(fv_grid_type), intent(INOUT), target :: gridstruct ! Local: @@ -1611,7 +1616,7 @@ subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, n ied = bd%ied jsd = bd%jsd jed = bd%jed - + if ( use_logp ) then top_value = peln1 else @@ -1690,9 +1695,9 @@ subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, n real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< perturbation pressure real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< p**kappa real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) !< g * h -! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! real, intent(inout) :: dv(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) -real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz) type(fv_grid_type), intent(INOUT), target :: gridstruct ! 
Local: @@ -1711,7 +1716,7 @@ subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, n ied = bd%ied jsd = bd%jsd jed = bd%jed - + if ( use_logp ) then top_value = peln1 else @@ -1796,7 +1801,7 @@ end subroutine split_p_grad subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, & - ptop, hydrostatic, a2b_ord, d_ext) + ptop, hydrostatic, a2b_ord, d_ext) integer, intent(IN) :: ng, npx, npy, npz, a2b_ord real, intent(IN) :: dt, ptop, d_ext @@ -1806,7 +1811,7 @@ subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, np real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed ,npz) -real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) type(fv_grid_type), intent(INOUT), target :: gridstruct ! Local: @@ -1939,7 +1944,7 @@ subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, real, intent(in):: divg2(bd%is:bd%ie+1,bd%js:bd%je+1) real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) -real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) type(fv_grid_type), intent(INOUT), target :: gridstruct @@ -1992,7 +1997,7 @@ subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, enddo !$OMP parallel do default(none) shared(npz,is,ie,js,je,pk,u,beta,gz,divg2,alpha, & -!$OMP gridstruct,v,dt,du,dv) & +!$OMP gridstruct,v,dt,du,dv) & !$OMP private(wk) do k=1,npz @@ -2097,8 +2102,8 @@ subroutine mix_dp(hydrostatic, w, delp, pt, km, ak, bk, CG, fv_debug, bd) ip = ip + 1 endif enddo - if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip - ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip + if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip + ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip 1000 continue end subroutine mix_dp @@ -2176,7 +2181,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, #endif if( j>(js-2) .and. j<(je+2) ) then - do i=max(ifirst,is-1), min(ilast,ie+1) + do i=max(ifirst,is-1), min(ilast,ie+1) pe(i,1,j) = ptop enddo endif @@ -2186,11 +2191,11 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, do i=ifirst, ilast p1d(i) = p1d(i) + delp(i,j,k-1) logp(i) = log(p1d(i)) - pk(i,j,k) = exp( akap*logp(i) ) + pk(i,j,k) = exp( akap*logp(i) ) enddo if( j>(js-2) .and. j<(je+2) ) then - do i=max(ifirst,is-1), min(ilast,ie+1) + do i=max(ifirst,is-1), min(ilast,ie+1) pe(i,k,j) = p1d(i) enddo if( j>=js .and. j<=je) then @@ -2260,7 +2265,7 @@ subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd) ! rarea => gridstruct%rarea ! del6_u => gridstruct%del6_u ! del6_v => gridstruct%del6_v - + ! sw_corner => gridstruct%sw_corner ! nw_corner => gridstruct%nw_corner ! 
se_corner => gridstruct%se_corner @@ -2354,9 +2359,9 @@ subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var) end subroutine init_ijk_mem -#ifdef DO_TOFD +#ifdef DO_TOFD subroutine Beljaars(dt, npx, npy, npz, ng, varflt, thv, delz, u, v, grav, gridstruct, flagstruct, bd, domain) - + real, intent(in ):: dt, grav integer, intent(in):: npx, npy, npz, ng type(fv_grid_type), intent(INOUT), target :: gridstruct diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index d4939881d..a15d43fc2 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -24,7 +24,11 @@ module fv_arrays_mod #include use mpp_domains_mod, only: domain2d +#if defined (FMS1_IO) use fms_io_mod, only: restart_file_type +#else + use fms2_io_mod, only: FmsNetcdfFile_t, FmsNetcdfDomainFile_t +#endif use time_manager_mod, only: time_type use horiz_interp_type_mod, only: horiz_interp_type use mpp_domains_mod, only: nest_domain_type @@ -886,7 +890,7 @@ module fv_arrays_mod !< 1: GMAO linear !< 2: GMAO quadratic !< 3: GMAO cubic - + logical :: gmao_top_bc = .false. !< Optional upper BC in remapping of T or TE from GMAO (true or false) logical :: gmao_bot_bc = .false. !< Optional lower BC in remapping of T or TE from GMAO (true or false) @@ -1151,7 +1155,11 @@ module fv_arrays_mod !These are for tracer flux BCs logical :: do_flux_BCs, do_2way_flux_BCs !is_master ! ! - - use constants_mod, only: rvgas, rdgas, grav, hlv, hlf, cp_air + +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: rvgas, rdgas, grav, hlv, hlf, cp_air use fv_mp_mod, only: is_master use fv_arrays_mod, only: r_grid - + implicit none - + private - + public fv_sat_adj, qs_init - + real :: qi_lim = 1. !< cloud ice limiter to prevent large ice build up by deposition real :: qs_mlt = 1.0e-6 !< max cloud water due to snow melt real :: qi_gen = 9.82679e-5 !< max cloud ice generation at -40 C @@ -83,7 +88,7 @@ module fv_cmp_mod real :: tau_l2r = 900. !< cloud water to rain auto - conversion real :: tau_l2v = 300. !< cloud water to water vapor (evaporation) real :: tau_i2v = 300. !< cloud ice to water vapor (sublimation) - real :: tau_frz = 600. !< timescale for liquid-ice freezing + real :: tau_frz = 600. !< timescale for liquid-ice freezing real :: tau_imlt = 600. !< cloud ice melting real :: tau_smlt = 600. !< snow melting real :: tau_i2s = 600. !< cloud ice to snow auto - conversion @@ -96,48 +101,48 @@ module fv_cmp_mod real :: dw_land = 0.05 !< base value for subgrid deviation / variability over land real :: dw_ocean = 0.10 !< base value for ocean - + ! real, parameter :: cp_air = cp_air ! 1004.6, heat capacity of dry air at constant pressure, come from constants_mod real, parameter :: cp_vap = 4.0 * rvgas !< 1846.0, heat capacity of water vapor at constant pressure real, parameter :: cv_air = cp_air - rdgas !< 717.55, heat capacity of dry air at constant volume real, parameter :: cv_vap = 3.0 * rvgas !< 1384.5, heat capacity of water vapor at constant volume real, parameter :: pi = 3.1415926535897931 !< gfs: ratio of circle circumference to diameter - + ! real, parameter :: c_ice = 2106.0 ! ifs: heat capacity of ice at 0 deg c ! real, parameter :: c_liq = 4218.0 ! ifs: heat capacity of liquid at 0 deg c real, parameter :: c_ice = 1972.0 !< gfdl: heat capacity of ice at - 15 deg c real, parameter :: c_liq = 4185.5 !< gfdl: heat capacity of liquid at 15 deg c - + real, parameter :: qpmin = 1.e-8 ! 
min value for suspended rain/snow/liquid/ice condensate real, parameter :: qvmin = 1.e-20 !< min value for water vapor (treated as zero) real, parameter :: qcmin = 1.e-12 !< min value for cloud condensates - + real, parameter :: dc_vap = cp_vap - c_liq !< - 2339.5, isobaric heating / cooling real, parameter :: dc_ice = c_liq - c_ice !< 2213.5, isobaric heating / colling - + real, parameter :: tice = 273.16 !< freezing temperature real, parameter :: t_wfr = tice - 40. !< homogeneous freezing temperature real, parameter :: t_evap = 273.16 + 10.0 - + real, parameter :: lv0 = hlv - dc_vap * tice !< 3.13905782e6, evaporation latent heat coefficient at 0 deg k real, parameter :: li00 = hlf - dc_ice * tice !< - 2.7105966e5, fusion latent heat coefficient at 0 deg k - + ! real (kind = r_grid), parameter :: e00 = 610.71 ! gfdl: saturation vapor pressure at 0 deg c real (kind = r_grid), parameter :: e00 = 611.21 !< ifs: saturation vapor pressure at 0 deg c - + real (kind = r_grid), parameter :: d2ice = dc_vap + dc_ice !< - 126, isobaric heating / cooling real (kind = r_grid), parameter :: li2 = lv0 + li00 !< 2.86799816e6, sublimation latent heat coefficient at 0 deg k - + real, parameter :: lat2 = (hlv + hlf) ** 2 !< used in bigg mechanism - + real :: d0_vap !< the same as dc_vap, except that cp_vap can be cp_vap or cv_vap real :: lv00 !< the same as lv0, except that cp_vap can be cp_vap or cv_vap - + real, allocatable :: table (:), table2 (:), tablew (:), des2 (:), desw (:) - + logical :: mp_initialized = .false. - + contains @@ -147,31 +152,31 @@ module fv_cmp_mod subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & te0, qv, ql, qi, qr, qs, qg, hs, dpln, delz, pt, dp, cappa, & area, dtdt, out_dt, last_step, qa) - + implicit none - + integer, intent (in) :: is, ie, js, je, ng - + logical, intent (in) :: hydrostatic, consv_te, out_dt, last_step - + real, intent (in) :: zvir, mdt ! remapping time step - + real, intent (in), dimension (is - ng:ie + ng, js - ng:je + ng) :: dp, delz, hs real, intent (in), dimension (is:ie, js:je) :: dpln - + real, intent (inout), dimension (is - ng:ie + ng, js - ng:je + ng) :: pt, qv, ql, qi, qr, qs, qg real, intent (inout), dimension (is - ng:, js - ng:) :: cappa real, intent (inout), dimension (is:ie, js:je) :: dtdt - + real, intent (out), dimension (is - ng:ie + ng, js - ng:je + ng) :: qa, te0 - + real (kind = r_grid), intent (in), dimension (is - ng:ie + ng, js - ng:je + ng) :: area - + real, dimension (is:ie) :: wqsat, dq2dt, qpz, cvm, t0, pt1, qstar real, dimension (is:ie) :: icp2, lcp2, tcp2, tcp3 real, dimension (is:ie) :: den, q_liq, q_sol, q_cond, evap, src, sink, hvar real, dimension (is:ie) :: mc_air, lhl, lhi - + real :: qsw, rh real :: tc, qsi, dqsdt, dq, dq0, pidep, qi_crt, tmp, dtmp real :: tin, rqi, q_plus, q_minus, dqh @@ -181,31 +186,31 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & real :: a1, newqi, newql integer :: i, j - + sdt = 0.5 * mdt ! half remapping time step dt_bigg = mdt ! bigg mechinism time step - + tice0 = tice - 0.01 ! 273.15, standard freezing temperature - + ! ----------------------------------------------------------------------- ! define conversion scalar / factor ! ----------------------------------------------------------------------- - + fac_i2s = sat_adj0 * (1. - exp (- mdt / tau_i2s)) fac_r2g = sat_adj0 * (1. - exp (- mdt / tau_r2g)) fac_l2r = sat_adj0 * (1. - exp (- mdt / tau_l2r)) - + fac_l2v = sat_adj0 * (1. - exp (- sdt / tau_l2v)) fac_i2v = sat_adj0 * (1. 
- exp (- sdt / tau_i2v)) fac_imlt = sat_adj0 * (1. - exp (- sdt / tau_imlt)) fac_smlt = sat_adj0 * (1. - exp (- mdt / tau_smlt)) - + fac_frz = sat_adj0 * (1. - exp (- mdt / tau_frz)) ! ----------------------------------------------------------------------- ! define heat capacity of dry air and water vapor based on hydrostatical property ! ----------------------------------------------------------------------- - + if (hydrostatic) then c_air = cp_air c_vap = cp_vap @@ -217,7 +222,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & lv00 = hlv - d0_vap * tice ! dc_vap = cp_vap - c_liq ! - 2339.5 ! d0_vap = cv_vap - c_liq ! - 2801.0 - + do j = js, je ! start j loop ! Compute subgrid variance (hvar) @@ -227,7 +232,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & ! "scale - aware" subgrid variability: 100 - km as the base hvar (i) = min (0.3, max (0.01, dw * sqrt (sqrt (area (i, j)) / 100.e3))) enddo - + do i = is, ie q_liq (i) = ql (i, j) + qr (i, j) q_sol (i) = qi (i, j) + qs (i, j) + qg (i, j) @@ -240,11 +245,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & t0 (i) = pt1 (i) ! true temperature qpz (i) = qpz (i) + qv (i, j) ! total_wat conserved in this routine enddo - + ! ----------------------------------------------------------------------- ! define air density based on hydrostatical property ! ----------------------------------------------------------------------- - + if (hydrostatic) then do i = is, ie den (i) = dp (i, j) / (dpln (i, j) * rdgas * pt (i, j)) @@ -254,22 +259,22 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & den (i) = - dp (i, j) / (grav * delz (i, j)) ! moist_air density enddo endif - + ! ----------------------------------------------------------------------- ! define heat capacity and latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie mc_air (i) = (1. - qpz (i)) * c_air ! constant cvm (i) = mc_air (i) + qv (i, j) * c_vap + q_liq (i) * c_liq + q_sol (i) * c_ice lhi (i) = li00 + dc_ice * pt1 (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! fix energy conservation ! ----------------------------------------------------------------------- - + if (consv_te) then if (hydrostatic) then do i = is, ie @@ -285,22 +290,22 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & enddo endif endif - + ! ----------------------------------------------------------------------- ! fix negative cloud ice with snow ! ----------------------------------------------------------------------- - + do i = is, ie if (qi (i, j) < 0.) then qs (i, j) = qs (i, j) + qi (i, j) qi (i, j) = 0. endif enddo - + ! ----------------------------------------------------------------------- ! melting of cloud ice to cloud water and rain ! ----------------------------------------------------------------------- - + do i = is, ie if (qi (i, j) > qcmin .and. pt1 (i) > tice) then sink (i) = min (qi (i, j), fac_imlt * (pt1 (i) - tice) / icp2 (i)) @@ -312,20 +317,20 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt1 (i) = pt1 (i) - sink (i) * lhi (i) / cvm (i) endif enddo - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! 
----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! fix negative snow with graupel or graupel with available snow ! ----------------------------------------------------------------------- - + do i = is, ie if (qs (i, j) < 0.) then qg (i, j) = qg (i, j) + qs (i, j) @@ -336,13 +341,13 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & qs (i, j) = qs (i, j) - tmp endif enddo - + ! after this point cloud ice & snow are positive definite - + ! ----------------------------------------------------------------------- ! fix negative cloud water with rain or rain with available cloud water ! ----------------------------------------------------------------------- - + do i = is, ie if (ql (i, j) < 0.) then tmp = min (- ql (i, j), max (0., qr (i, j))) @@ -354,11 +359,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & qr (i, j) = qr (i, j) + tmp endif enddo - + ! ----------------------------------------------------------------------- ! enforce complete freezing of cloud water to cloud ice below - 48 c ! ----------------------------------------------------------------------- - + do i = is, ie dtmp = tice - 48. - pt1 (i) if (ql (i, j) > 0. .and. dtmp > 0.) then @@ -371,11 +376,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt1 (i) = pt1 (i) + sink (i) * lhi (i) / cvm (i) endif enddo - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhl (i) = lv00 + d0_vap * pt1 (i) lhi (i) = li00 + dc_ice * pt1 (i) @@ -383,11 +388,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & icp2 (i) = lhi (i) / cvm (i) tcp3 (i) = lcp2 (i) + icp2 (i) * min (1., dim (tice, pt1 (i)) / 48.) enddo - + ! ----------------------------------------------------------------------- ! condensation / evaporation between water vapor and cloud water ! ----------------------------------------------------------------------- - if (do_evap) then + if (do_evap) then call wqs2_vect (is, ie, pt1, den, wqsat, dq2dt) do i = is, ie dq0 = wqsat(i) - qv(i,j) @@ -414,7 +419,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & ! ----------------------------------------------------------------------- ! homogeneous freezing of cloud water to cloud ice ! ----------------------------------------------------------------------- - + do i = is, ie dtmp = (t_wfr - dt_fr) - pt1 (i) ! [ - 40, - 48] if (ql (i, j) > 0. .and. dtmp > 0.) then @@ -428,20 +433,20 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt1 (i) = pt1 (i) + sink (i) * lhi (i) / cvm (i) endif enddo - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! bigg mechanism (heterogeneous freezing of cloud water to cloud ice) ! 
----------------------------------------------------------------------- - if (do_bigg) then + if (do_bigg) then do i = is, ie dtmp = tice0 - pt1 (i) if (ql (i, j) > qcmin .and. dtmp > 0.) then @@ -461,16 +466,16 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! freezing of rain to graupel ! ----------------------------------------------------------------------- - + do i = is, ie dtmp = tice0 - pt1 (i) if (qr (i, j) > qpmin .and. dtmp > 0.) then @@ -484,20 +489,20 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt1 (i) = pt1 (i) + sink (i) * lhi (i) / cvm (i) endif enddo - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! melting of snow to rain or cloud water ! ----------------------------------------------------------------------- - + do i = is, ie dtmp = pt1 (i) - tice0 if (qs (i, j) > qpmin .and. dtmp > 0.) then @@ -514,11 +519,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt1 (i) = pt1 (i) - sink (i) * lhi (i) / cvm (i) endif enddo - + ! ----------------------------------------------------------------------- ! autoconversion from cloud water to rain ! ----------------------------------------------------------------------- - + do i = is, ie if (ql (i, j) > ql0_max) then sink (i) = fac_l2r * (ql (i, j) - ql0_max) @@ -526,11 +531,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & ql (i, j) = ql (i, j) - sink (i) endif enddo - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) lhl (i) = lv00 + d0_vap * pt1 (i) @@ -538,7 +543,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & icp2 (i) = lhi (i) / cvm (i) tcp2 (i) = lcp2 (i) + icp2 (i) enddo - + ! ----------------------------------------------------------------------- ! sublimation / deposition between water vapor and cloud ice ! ----------------------------------------------------------------------- @@ -560,14 +565,14 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & tmp = tice - pt1 (i) ! WRF qi_crt = 4.92e-11 * exp (1.33 * log (1.e3 * exp (0.15 * tmp))) ! GFDL qi_crt = qi_gen * min (qi_lim, 0.1 * tmp) / den (i) - ! GEOS impose CALIPSO ice polynomial from 0 C to -40 C on qi_crt + ! 
GEOS impose CALIPSO ice polynomial from 0 C to -40 C on qi_crt qi_crt = calipso_ice_polynomial(pt1(i)) * qi_gen / den (i) src (i) = min (sink (i), max (qi_crt - qi (i, j), pidep), tmp / tcp2 (i)) else - if (do_subl) then + if (do_subl) then pidep = pidep * min (1., dim (pt1 (i), t_sub) * 0.2) src (i) = max (pidep, sink (i), - qi (i, j)) - else + else src (i) = 0.0 endif endif @@ -578,11 +583,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & cvm (i) = mc_air (i) + qv (i, j) * c_vap + q_liq (i) * c_liq + q_sol (i) * c_ice pt1 (i) = pt1 (i) + src (i) * (lhl (i) + lhi (i)) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! virtual temp updated ! ----------------------------------------------------------------------- - + do i = is, ie #ifdef USE_COND q_cond (i) = q_liq (i) + q_sol (i) @@ -594,11 +599,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & pt (i, j) = pt1 (i) * (1. + zvir * qv (i, j)) #endif enddo - + ! ----------------------------------------------------------------------- ! fix negative graupel with available cloud ice ! ----------------------------------------------------------------------- - + do i = is, ie if (qg (i, j) < 0.) then tmp = min (- qg (i, j), max (0., qi (i, j))) @@ -606,11 +611,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & qi (i, j) = qi (i, j) - tmp endif enddo - + ! ----------------------------------------------------------------------- ! autoconversion from cloud ice to snow ! ----------------------------------------------------------------------- - + do i = is, ie qim = qi0_crt * (1.0 - min(qi_lim,exp(0.05 * (pt1(i)-tice)))) / den (i) if (qi (i, j) > qim) then @@ -619,17 +624,17 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & qs (i, j) = qs (i, j) + sink (i) endif enddo - + if (out_dt) then do i = is, ie dtdt (i, j) = dtdt (i, j) + pt1 (i) - t0 (i) enddo endif - + ! ----------------------------------------------------------------------- ! fix energy conservation ! ----------------------------------------------------------------------- - + if (consv_te) then do i = is, ie if (hydrostatic) then @@ -643,11 +648,11 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & endif enddo endif - + ! ----------------------------------------------------------------------- ! update latend heat coefficient ! ----------------------------------------------------------------------- - + do i = is, ie lhi (i) = li00 + dc_ice * pt1 (i) lhl (i) = lv00 + d0_vap * pt1 (i) @@ -655,18 +660,18 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & lcp2 (i) = lhl (i) / cvm (i) icp2 (i) = lhi (i) / cvm (i) enddo - + ! ----------------------------------------------------------------------- ! compute cloud fraction ! ----------------------------------------------------------------------- - + if (do_qa .and. last_step) then ! FV3 will do the cloud PDF ! ----------------------------------------------------------------------- ! combine water species ! ----------------------------------------------------------------------- - + if (preciprad) then do i = is, ie q_sol (i) = qi (i, j) + qs (i, j) + qg (i, j) @@ -681,21 +686,21 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & do i = is, ie q_cond (i) = q_sol (i) + q_liq (i) enddo - + ! ----------------------------------------------------------------------- ! 
use the "liquid - frozen water temperature" (tin) to compute saturated specific humidity ! ----------------------------------------------------------------------- - + do i = is, ie - + tin = pt1 (i) - (lcp2 (i) * q_cond (i) + icp2 (i) * q_sol (i)) ! minimum temperature ! tin = pt1 (i) - ((lv00 + d0_vap * pt1 (i)) * q_cond (i) + & ! (li00 + dc_ice * pt1 (i)) * q_sol (i)) / (mc_air (i) + qpz (i) * c_vap) - + ! ----------------------------------------------------------------------- ! determine saturated specific humidity ! ----------------------------------------------------------------------- - + if (tin <= t_wfr) then ! ice phase: qstar (i) = iqs1 (tin, den (i)) @@ -709,15 +714,15 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & rqi = calipso_ice_polynomial(tin) qstar (i) = rqi * qsi + (1. - rqi) * qsw endif - + ! ----------------------------------------------------------------------- ! partial cloudiness by pdf: ! assuming subgrid linear distribution in horizontal; this is effectively a smoother for the ! binary cloud scheme; qa = 0.5 if qstar (i) == qpz ! ----------------------------------------------------------------------- - + rh = qpz (i) / qstar (i) - + ! ----------------------------------------------------------------------- ! icloud_f = 0: bug - fixed ! icloud_f = 1: old fvgfs gfdl) mp implementation @@ -731,7 +736,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & if (icloud_f == 3) then ! triangular if(q_plus.le.qstar(i)) then - qa (i, j) = qcmin ! little/no cloud cover + qa (i, j) = qcmin ! little/no cloud cover elseif ( (qpz(i).le.qstar(i)).and.(qstar(i).lt.q_plus) ) then ! partial cloud cover qa (i, j) = max(qcmin,min(1., qa (i, j) + (q_plus-qstar(i))*(q_plus-qstar(i)) / ( (q_plus-q_minus)*(q_plus-qpz(i)) ))) elseif ( (q_minus.le.qstar(i)).and.(qstar(i).lt.qpz(i)) ) then ! partial cloud cover @@ -743,7 +748,7 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & if (icloud_f == 2) then ! top-hat if(q_plus.le.qstar(i)) then - qa (i, j) = qcmin ! little/no cloud cover + qa (i, j) = qcmin ! little/no cloud cover elseif (qstar(i) < q_plus .and. q_cond (i) > qcmin) then qa (i, j) = max(qcmin, min(1., (q_plus - qstar(i)) / (dq + dq) )) ! partial cloud cover elseif (qstar(i) .le. q_minus) then @@ -773,86 +778,86 @@ subroutine fv_sat_adj (mdt, zvir, is, ie, js, je, ng, hydrostatic, consv_te, & else qa (i, j) = 0. endif - + enddo - + endif - + enddo ! end j loop - + end subroutine fv_sat_adj ! ======================================================================= -!>@brief the function 'wqs1' computes the +!>@brief the function 'wqs1' computes the !! saturated specific humidity for table ii ! ======================================================================= real function wqs1 (ta, den) - + implicit none - + ! pure water phase; universal dry / moist formular using air density ! input "den" can be either dry or moist air density - + real, intent (in) :: ta, den - + real :: es, ap1, tmin - + integer :: it - + tmin = tice - 160. ap1 = 10. * dim (ta, tmin) + 1. ap1 = min (2621., ap1) it = ap1 es = tablew (it) + (ap1 - it) * desw (it) wqs1 = es / (rvgas * ta * den) - + end function wqs1 ! ======================================================================= -!>@brief the function 'wqs1' computes the saturated specific humidity +!>@brief the function 'wqs1' computes the saturated specific humidity !! for table iii ! 
! =======================================================================
-!>@brief the function 'wqs1' computes the saturated specific humidity
+!>@brief the function 'iqs1' computes the saturated specific humidity
!! for table iii
! =======================================================================
real function iqs1 (ta, den)
-
+
    implicit none
-
+
    ! water - ice phase; universal dry / moist formula using air density
    ! input "den" can be either dry or moist air density
-
+
    real, intent (in) :: ta, den
-
+
    real :: es, ap1, tmin
-
+
    integer :: it
-
+
    tmin = tice - 160.
    ap1 = 10. * dim (ta, tmin) + 1.
    ap1 = min (2621., ap1)
    it = ap1
    es = table2 (it) + (ap1 - it) * des2 (it)
    iqs1 = es / (rvgas * ta * den)
-
+
end function iqs1

! =======================================================================
-!>@brief The function 'wqs2'computes the gradient of saturated specific
+!>@brief The function 'wqs2' computes the gradient of saturated specific
!! humidity for table ii
! =======================================================================
real function wqs2 (ta, den, dqdt)
-
+
    implicit none
-
+
    ! pure water phase; universal dry / moist formula using air density
    ! input "den" can be either dry or moist air density
-
+
    real, intent (in) :: ta, den
-
+
    real, intent (out) :: dqdt
-
+
    real :: es, ap1, tmin
-
+
    integer :: it
-
+
    tmin = tice - 160.
    ap1 = 10. * dim (ta, tmin) + 1.
    ap1 = min (2621., ap1)
@@ -862,33 +867,33 @@ real function wqs2 (ta, den, dqdt)
    it = ap1 - 0.5
    ! finite diff, del_t = 0.1:
    dqdt = 10. * (desw (it) + (ap1 - it) * (desw (it + 1) - desw (it))) / (rvgas * ta * den)
-
+
end function wqs2

! =======================================================================
-!>@brief The function wqs2_vect computes the gradient of saturated
+!>@brief The function wqs2_vect computes the gradient of saturated
!! specific humidity for table ii.
!! It is the same as "wqs2", but written as vector function.
! =======================================================================
subroutine wqs2_vect (is, ie, ta, den, wqsat, dqdt)
-
+
    implicit none
-
+
    ! pure water phase; universal dry / moist formula using air density
    ! input "den" can be either dry or moist air density
-
+
    integer, intent (in) :: is, ie
-
+
    real, intent (in), dimension (is:ie) :: ta, den
-
+
    real, intent (out), dimension (is:ie) :: wqsat, dqdt
-
+
    real :: es, ap1, tmin
-
+
    integer :: i, it
-
+
    tmin = tice - 160.
-
+
    do i = is, ie
        ap1 = 10. * dim (ta (i), tmin) + 1.
        ap1 = min (2621., ap1)
@@ -899,28 +904,28 @@ subroutine wqs2_vect (is, ie, ta, den, wqsat, dqdt)
        ! finite diff, del_t = 0.1:
        dqdt (i) = 10. * (desw (it) + (ap1 - it) * (desw (it + 1) - desw (it))) / (rvgas * ta (i) * den (i))
    enddo
-
+
end subroutine wqs2_vect

! =======================================================================
-!>@brief The function 'iqs2' computes the gradient of saturated specific
+!>@brief The function 'iqs2' computes the gradient of saturated specific
!! humidity for table iii.
! =======================================================================
real function iqs2 (ta, den, dqdt)
-
+
    implicit none
-
+
    ! water - ice phase; universal dry / moist formula using air density
    ! input "den" can be either dry or moist air density
-
+
    real, intent (in) :: ta, den
-
+
    real, intent (out) :: dqdt
-
+
    real :: es, ap1, tmin
-
+
    integer :: it
-
+
    tmin = tice - 160.
    ap1 = 10. * dim (ta, tmin) + 1.
    ap1 = min (2621., ap1)
@@ -930,7 +935,7 @@ real function iqs2 (ta, den, dqdt)
    it = ap1 - 0.5
    ! finite diff, del_t = 0.1:
    dqdt = 10. * (des2 (it) + (ap1 - it) * (des2 (it + 1) - des2 (it))) / (rvgas * ta * den)
-
+
end function iqs2

! =======================================================================
@@ -939,40 +944,40 @@ end function iqs2
!
======================================================================= !>@brief The subroutine 'qs_init' initializes lookup tables for the saturation mixing ratio. subroutine qs_init (kmp) - + implicit none - + integer, intent (in) :: kmp - + integer, parameter :: length = 2621 - + integer :: i - + if (mp_initialized) return - + if (is_master ()) write (*, *) 'top layer for gfdl_mp = ', kmp - + ! generate es table (dt = 0.1 deg c) - + allocate (table (length)) allocate (table2 (length)) allocate (tablew (length)) allocate (des2 (length)) allocate (desw (length)) - + call qs_table (length) call qs_table2 (length) call qs_tablew (length) - + do i = 1, length - 1 des2 (i) = max (0., table2 (i + 1) - table2 (i)) desw (i) = max (0., tablew (i + 1) - tablew (i)) enddo des2 (length) = des2 (length - 1) desw (length) = desw (length - 1) - + mp_initialized = .true. - + end subroutine qs_init ! ======================================================================= @@ -981,25 +986,25 @@ end subroutine qs_init ! ======================================================================= subroutine qs_table (n) - + implicit none - + integer, intent (in) :: n - + real (kind = r_grid) :: delt = 0.1 real (kind = r_grid) :: tmin, tem, esh20 real (kind = r_grid) :: wice, wh2o, fac0, fac1, fac2 real (kind = r_grid) :: esupc (400) - + integer :: i real :: tk - + tmin = tice - 160. - + ! ----------------------------------------------------------------------- ! compute es over ice between - 160 deg c and 0 deg c. ! ----------------------------------------------------------------------- - + do i = 1, 1600 tem = tmin + delt * real (i - 1) fac0 = (tem - tice) / (tem * tice) @@ -1007,11 +1012,11 @@ subroutine qs_table (n) fac2 = (d2ice * log (tem / tice) + fac1) / rvgas table (i) = e00 * exp (fac2) enddo - + ! ----------------------------------------------------------------------- ! compute es over water between - 40 deg c and 102 deg c. ! ----------------------------------------------------------------------- - + do i = 1, 1421 tem = 233.16 + delt * real (i - 1) fac0 = (tem - tice) / (tem * tice) @@ -1024,11 +1029,11 @@ subroutine qs_table (n) table (i + 1200) = esh20 endif enddo - + ! ----------------------------------------------------------------------- ! derive blended es over ice and supercooled water between - 40 deg c and 0 deg c ! ----------------------------------------------------------------------- - + do i = 1, 400 tem = 233.16 + delt * real (i - 1) ! wice = 0.05 * (tice - tem) @@ -1038,7 +1043,7 @@ subroutine qs_table (n) wh2o = 1.0 - wice table (i + 1200) = wice * table (i + 1200) + wh2o * esupc (i) enddo - + end subroutine qs_table ! ======================================================================= @@ -1047,22 +1052,22 @@ end subroutine qs_table ! ======================================================================= subroutine qs_tablew (n) - + implicit none - + integer, intent (in) :: n - + real (kind = r_grid) :: delt = 0.1 real (kind = r_grid) :: tmin, tem, fac0, fac1, fac2 - + integer :: i - + tmin = tice - 160. - + ! ----------------------------------------------------------------------- ! compute es over water ! ----------------------------------------------------------------------- - + do i = 1, n tem = tmin + delt * real (i - 1) fac0 = (tem - tice) / (tem * tice) @@ -1070,7 +1075,7 @@ subroutine qs_tablew (n) fac2 = (dc_vap * log (tem / tice) + fac1) / rvgas tablew (i) = e00 * exp (fac2) enddo - + end subroutine qs_tablew ! 
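qs_tablew above evaluates the analytically integrated Clausius-Clapeyron expression at each table point; qs_init then builds the forward-difference tables that the lookups interpolate with. A sketch of both steps, assuming representative values for e00, dc_vap, and lv0 (the real constants live in the microphysics module and are not shown in this patch):

   program es_table_demo
      implicit none
      integer, parameter :: n = 2621
      ! illustrative constants, not the module's
      real(8), parameter :: tice = 273.16d0, rvgas = 461.5d0, e00 = 611.21d0
      real(8), parameter :: dc_vap = -2339.5d0          ! cp_vapor - c_liq, assumed
      real(8), parameter :: lv0 = 2.501d6 - dc_vap*tice
      real(8) :: tablew(n), desw(n), tmin, tem, fac0, fac1, fac2
      integer :: i
      tmin = tice - 160.d0
      do i = 1, n                     ! same construction as qs_tablew, 0.1 K steps
         tem  = tmin + 0.1d0*real(i - 1, 8)
         fac0 = (tem - tice) / (tem * tice)
         fac1 = fac0 * lv0
         fac2 = (dc_vap * log(tem / tice) + fac1) / rvgas
         tablew(i) = e00 * exp(fac2)
      end do
      do i = 1, n - 1                 ! forward differences feed the linear lookup
         desw(i) = max(0.d0, tablew(i+1) - tablew(i))
      end do
      desw(n) = desw(n-1)             ! pad the last slope, as qs_init does
      write(*,*) 'es(0 C) =', tablew(1601), 'Pa'   ! index 1601 is exactly tice
   end program es_table_demo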
======================================================================= @@ -1079,18 +1084,18 @@ end subroutine qs_tablew ! ======================================================================= subroutine qs_table2 (n) - + implicit none - + integer, intent (in) :: n - + real (kind = r_grid) :: delt = 0.1 real (kind = r_grid) :: tmin, tem0, tem1, fac0, fac1, fac2 - + integer :: i, i0, i1 - + tmin = tice - 160. - + do i = 1, n tem0 = tmin + delt * real (i - 1) fac0 = (tem0 - tice) / (tem0 * tice) @@ -1109,18 +1114,18 @@ subroutine qs_table2 (n) endif table2 (i) = e00 * exp (fac2) enddo - + ! ----------------------------------------------------------------------- ! smoother around 0 deg c ! ----------------------------------------------------------------------- - + i0 = 1600 i1 = 1601 tem0 = 0.25 * (table2 (i0 - 1) + 2. * table (i0) + table2 (i0 + 1)) tem1 = 0.25 * (table2 (i1 - 1) + 2. * table (i1) + table2 (i1 + 1)) table2 (i0) = tem0 table2 (i1) = tem1 - + end subroutine qs_table2 real function new_ice_condensate(tk, qlk, qik) @@ -1148,10 +1153,10 @@ real function new_liq_condensate(tk, qlk, qik) end function new_liq_condensate real function calipso_ice_polynomial(tk) - ! Citation: Hu, Y., S. Rodier, K. Xu, W. Sun, J. Huang, B. Lin, P. Zhai, and D. Josset (2010), - ! Occurrence, liquid water content, and fraction of supercooled water clouds from - ! combined CALIOP/IIR/MODIS measurements, J. Geophys. Res., 115, D00H34, - ! doi:10.1029/2009JD012384. + ! Citation: Hu, Y., S. Rodier, K. Xu, W. Sun, J. Huang, B. Lin, P. Zhai, and D. Josset (2010), + ! Occurrence, liquid water content, and fraction of supercooled water clouds from + ! combined CALIOP/IIR/MODIS measurements, J. Geophys. Res., 115, D00H34, + ! doi:10.1029/2009JD012384. real, intent(in) :: tk ! temperature in K real :: tc, ptc diff --git a/model/fv_control.F90 b/model/fv_control.F90 index 5fcdfff97..b6dd3bf3f 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -86,14 +86,14 @@ module fv_control_mod ! ! ! mpp_mod -! mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, mpp_declare_pelist, +! mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, mpp_declare_pelist, ! 
mpp_root_pe, mpp_recv, mpp_sync_self, mpp_broadcast, read_input_nml, -! FATAL, mpp_error, mpp_pe, stdlog, mpp_npes, mpp_get_current_pelist, +! FATAL, mpp_error, mpp_pe, stdlog, mpp_npes, mpp_get_current_pelist, ! input_nml_file, get_unit, WARNING, read_ascii_file ! ! ! mpp_domains_mod -! mpp_get_data_domain, mpp_get_compute_domain, domain2D, mpp_define_nest_domains, +! mpp_get_data_domain, mpp_get_compute_domain, domain2D, mpp_define_nest_domains, ! nest_domain_type, mpp_get_global_domain, mpp_get_C2F_index, mpp_get_F2C_index, ! mpp_broadcast_domain, CENTER, CORNER, NORTH, EAST, WEST, SOUTH ! @@ -107,17 +107,28 @@ module fv_control_mod ! ! ! tracer_manager_mod -! tm_get_number_tracers => get_number_tracers,tm_get_tracer_index => get_tracer_index, -! tm_get_tracer_indices => get_tracer_indices, tm_set_tracer_profile => set_tracer_profile, +! tm_get_number_tracers => get_number_tracers,tm_get_tracer_index => get_tracer_index, +! tm_get_tracer_indices => get_tracer_indices, tm_set_tracer_profile => set_tracer_profile, ! tm_get_tracer_names => get_tracer_names,tm_check_if_prognostic=> check_if_prognostic, ! tm_register_tracers => register_tracers ! ! - use constants_mod, only: pi=>pi_8, kappa, radius, grav, rdgas +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: pi=>pi_8, kappa, radius, grav, rdgas use field_manager_mod, only: MODEL_ATMOS - use fms_mod, only: write_version_number, open_namelist_file, & - check_nml_error, close_file, file_exist + use fms_mod, only: write_version_number, & + check_nml_error +#if defined (FMS1_IO) + use fms_mod, only: open_namelist_file, & + close_file, file_exists => file_exist +#else + use fms2_io_mod, only: file_exists, close_file +#endif use mpp_mod, only: FATAL, mpp_error, mpp_pe, stdlog, & mpp_npes, mpp_get_current_pelist, & input_nml_file, get_unit, WARNING, & @@ -165,34 +176,34 @@ module fv_control_mod character(len=80) , pointer :: grid_name character(len=120), pointer :: grid_file integer, pointer :: grid_type - integer , pointer :: hord_mt - integer , pointer :: kord_mt - integer , pointer :: kord_wz - integer , pointer :: hord_vt - integer , pointer :: hord_tm - integer , pointer :: hord_dp - integer , pointer :: kord_tm - integer , pointer :: hord_tr - integer , pointer :: kord_tr - real , pointer :: scale_z - real , pointer :: w_max - real , pointer :: z_min + integer , pointer :: hord_mt + integer , pointer :: kord_mt + integer , pointer :: kord_wz + integer , pointer :: hord_vt + integer , pointer :: hord_tm + integer , pointer :: hord_dp + integer , pointer :: kord_tm + integer , pointer :: hord_tr + integer , pointer :: kord_tr + real , pointer :: scale_z + real , pointer :: w_max + real , pointer :: z_min real , pointer :: lim_fac integer , pointer :: nord integer , pointer :: nord_tr - real , pointer :: dddmp - real , pointer :: d2_bg + real , pointer :: dddmp + real , pointer :: d2_bg real , pointer :: d4_bg_top real , pointer :: d4_bg_bot - real , pointer :: vtdm4 - real , pointer :: trdm2 - real , pointer :: d2_bg_k1 - real , pointer :: d2_bg_k2 - real , pointer :: d2_divg_max_k1 - real , pointer :: d2_divg_max_k2 - real , pointer :: damp_k_k1 - real , pointer :: damp_k_k2 + real , pointer :: vtdm4 + real , pointer :: trdm2 + real , pointer :: d2_bg_k1 + real , pointer :: d2_bg_k2 + real , pointer :: d2_divg_max_k1 + real , pointer :: d2_divg_max_k2 + real , pointer :: damp_k_k1 + real , pointer :: damp_k_k2 integer , pointer :: n_zs_filter integer , pointer :: nord_zs_filter logical , pointer 
:: full_zs_filter @@ -202,119 +213,119 @@ module fv_control_mod logical , pointer :: consv_am logical , pointer :: do_sat_adj logical , pointer :: do_f3d - logical , pointer :: no_dycore - logical , pointer :: convert_ke - logical , pointer :: do_vort_damp - logical , pointer :: use_old_omega + logical , pointer :: no_dycore + logical , pointer :: convert_ke + logical , pointer :: do_vort_damp + logical , pointer :: use_old_omega ! PG off centering: - real , pointer :: beta + real , pointer :: beta integer , pointer :: n_zfilter - integer , pointer :: n_sponge - real , pointer :: d_ext - integer , pointer :: nwat - logical , pointer :: warm_start - logical , pointer :: inline_q - real , pointer :: shift_fac - logical , pointer :: do_schmidt - real(kind=R_GRID) , pointer :: stretch_fac - real(kind=R_GRID) , pointer :: target_lat - real(kind=R_GRID) , pointer :: target_lon - - logical , pointer :: reset_eta + integer , pointer :: n_sponge + real , pointer :: d_ext + integer , pointer :: nwat + logical , pointer :: warm_start + logical , pointer :: inline_q + real , pointer :: shift_fac + logical , pointer :: do_schmidt + real(kind=R_GRID) , pointer :: stretch_fac + real(kind=R_GRID) , pointer :: target_lat + real(kind=R_GRID) , pointer :: target_lon + + logical , pointer :: reset_eta real , pointer :: p_fac real , pointer :: a_imp real , pointer :: dz_min - integer , pointer :: n_split - ! Default - integer , pointer :: m_split - integer , pointer :: k_split + integer , pointer :: n_split + ! Default + integer , pointer :: m_split + integer , pointer :: k_split logical , pointer :: use_logp - integer , pointer :: q_split - integer , pointer :: print_freq + integer , pointer :: q_split + integer , pointer :: print_freq logical , pointer :: write_3d_diags - integer , pointer :: npx - integer , pointer :: npy + integer , pointer :: npx + integer , pointer :: npy integer , pointer :: npz - integer , pointer :: npz_rst - - integer , pointer :: ncnst - integer , pointer :: pnats - integer , pointer :: dnats - integer , pointer :: ntiles - integer , pointer :: nf_omega - integer , pointer :: fv_sg_adj - - integer , pointer :: na_init + integer , pointer :: npz_rst + + integer , pointer :: ncnst + integer , pointer :: pnats + integer , pointer :: dnats + integer , pointer :: ntiles + integer , pointer :: nf_omega + integer , pointer :: fv_sg_adj + + integer , pointer :: na_init logical , pointer :: nudge_dz - real , pointer :: p_ref - real , pointer :: dry_mass - integer , pointer :: nt_prog - integer , pointer :: nt_phys - real , pointer :: tau_h2o + real , pointer :: p_ref + real , pointer :: dry_mass + integer , pointer :: nt_prog + integer , pointer :: nt_phys + real , pointer :: tau_h2o real , pointer :: delt_max - real , pointer :: d_con + real , pointer :: d_con real , pointer :: ke_bg - real , pointer :: consv_te - real , pointer :: tau + real , pointer :: consv_te + real , pointer :: tau real , pointer :: rf_cutoff - logical , pointer :: filter_phys - logical , pointer :: dwind_2d - logical , pointer :: breed_vortex_inline + logical , pointer :: filter_phys + logical , pointer :: dwind_2d + logical , pointer :: breed_vortex_inline logical , pointer :: range_warn - integer , pointer :: exact_sum - logical , pointer :: fill - logical , pointer :: fill_dp - logical , pointer :: fill_wz + integer , pointer :: exact_sum + logical , pointer :: fill + logical , pointer :: fill_dp + logical , pointer :: fill_wz logical , pointer :: check_negative - logical , pointer :: non_ortho - logical , pointer 
:: adiabatic - logical , pointer :: moist_phys - logical , pointer :: do_Held_Suarez + logical , pointer :: non_ortho + logical , pointer :: adiabatic + logical , pointer :: moist_phys + logical , pointer :: do_Held_Suarez logical , pointer :: do_reed_physics logical , pointer :: reed_cond_only - logical , pointer :: adjust_dry_mass - logical , pointer :: fv_debug - logical , pointer :: srf_init - logical , pointer :: mountain - integer , pointer :: remap_option + logical , pointer :: adjust_dry_mass + logical , pointer :: fv_debug + logical , pointer :: srf_init + logical , pointer :: mountain + integer , pointer :: remap_option integer , pointer :: gmao_remap logical , pointer :: gmao_top_bc logical , pointer :: gmao_bot_bc - logical , pointer :: z_tracer + logical , pointer :: z_tracer - logical , pointer :: old_divg_damp - logical , pointer :: fv_land - logical , pointer :: nudge + logical , pointer :: old_divg_damp + logical , pointer :: fv_land + logical , pointer :: nudge logical , pointer :: nudge_ic - logical , pointer :: ncep_ic - logical , pointer :: nggps_ic - logical , pointer :: ecmwf_ic + logical , pointer :: ncep_ic + logical , pointer :: nggps_ic + logical , pointer :: ecmwf_ic logical , pointer :: gfs_phil logical , pointer :: agrid_vel_rst - logical , pointer :: use_new_ncep - logical , pointer :: use_ncep_phy - logical , pointer :: fv_diag_ic - logical , pointer :: external_ic + logical , pointer :: use_new_ncep + logical , pointer :: use_ncep_phy + logical , pointer :: fv_diag_ic + logical , pointer :: external_ic logical , pointer :: external_eta logical , pointer :: read_increment character(len=128) , pointer :: res_latlon_dynamics - character(len=128) , pointer :: res_latlon_tracers - logical , pointer :: hydrostatic + character(len=128) , pointer :: res_latlon_tracers + logical , pointer :: hydrostatic logical , pointer :: phys_hydrostatic logical , pointer :: use_hydro_pressure logical , pointer :: do_uni_zfull !miz logical , pointer :: adj_mass_vmr ! f1p - logical , pointer :: hybrid_z - logical , pointer :: Make_NH - logical , pointer :: make_hybrid_z + logical , pointer :: hybrid_z + logical , pointer :: Make_NH + logical , pointer :: make_hybrid_z logical , pointer :: nudge_qv real, pointer :: add_noise - integer , pointer :: a2b_ord - integer , pointer :: c2l_ord + integer , pointer :: a2b_ord + integer , pointer :: c2l_ord integer, pointer :: ndims @@ -361,7 +372,7 @@ module fv_control_mod !------------------------------------------------------------------------------- !>@brief The subroutine 'fv_init' initializes FV3. !>@details It allocates memory, sets up MPI and processor lists, -!! sets up the grid, and controls FV3 namelist parameters. +!! sets up the grid, and controls FV3 namelist parameters. subroutine fv_init1(Atm, dt_atmos, grids_on_this_pe, p_split) type(fv_atmos_type), allocatable, intent(inout), target :: Atm(:) @@ -387,7 +398,7 @@ subroutine fv_init1(Atm, dt_atmos, grids_on_this_pe, p_split) call setup_pointers(Atm(1)) end subroutine fv_init1 - + subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) type(fv_atmos_type), allocatable, intent(inout), target :: Atm(:) @@ -412,7 +423,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) call timing_init call timing_on('TOTAL') - ! Setup the run from namelist + ! Setup the run from namelist ntilesMe = size(Atm(:)) !Full number of Atm arrays; one less than number of grids, if multiple grids call run_setup(Atm,dt_atmos, grids_on_this_pe, p_split) ! 
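The long pointer block earlier in this file's diff exists so that run_setup can read one namelist straight into Atm%flagstruct: setup_pointers associates each module-level namelist variable with a component of the derived type before the read. A minimal sketch of that pattern under hypothetical names (flags_t, demo_nml), using an internal-file read in the style of input_nml_file:

   module ptr_pattern_demo
      implicit none
      type :: flags_t                       ! stand-in for fv_flags_type
         integer :: n_split = 0
         real    :: d2_bg   = 0.
      end type flags_t
      integer, pointer :: n_split
      real,    pointer :: d2_bg
      namelist /demo_nml/ n_split, d2_bg    ! the namelist members are pointers
   contains
      subroutine setup_pointers_demo(flags)
         type(flags_t), intent(inout), target :: flags
         n_split => flags%n_split           ! namelist reads now fill the struct
         d2_bg   => flags%d2_bg
      end subroutine setup_pointers_demo
   end module ptr_pattern_demo

   program demo
      use ptr_pattern_demo
      implicit none
      type(flags_t), target :: flags
      integer :: ios
      character(len=64) :: nml_text(3)
      nml_text = [character(len=64) :: '&demo_nml', ' n_split = 6, d2_bg = 0.015', '/']
      call setup_pointers_demo(flags)
      read(nml_text, nml=demo_nml, iostat=ios)   ! internal-file namelist read
      print *, flags%n_split, flags%d2_bg
   end program demo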
initializes domain_decomp @@ -422,7 +433,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) !In a single-grid run this will still be needed to correctly set the domain call switch_current_Atm(Atm(n)) call setup_pointers(Atm(n)) - + target_lon = target_lon * pi/180. target_lat = target_lat * pi/180. @@ -433,7 +444,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) !not sure if this works with multiple grids call tm_register_tracers (MODEL_ATMOS, ncnst, nt_prog, pnats, num_family) if(is_master()) then - write(*,*) 'ncnst=', ncnst,' num_prog=',nt_prog,' pnats=',pnats,' dnats=',dnats,' num_family=',num_family + write(*,*) 'ncnst=', ncnst,' num_prog=',nt_prog,' pnats=',pnats,' dnats=',dnats,' num_family=',num_family print*, '' endif @@ -513,7 +524,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) call mpp_get_global_domain( Atm(n)%parent_grid%domain, & isg, ieg, jsg, jeg) - !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the + !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the ! nested PEs instead of sending it around. if (gid == Atm(n)%parent_grid%pelist(1)) then call mpp_send(Atm(n)%parent_grid%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile), & @@ -585,7 +596,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) endif endif end do - + ! Initialize restart functions call fv_restart_init() @@ -602,7 +613,7 @@ subroutine fv_init2(Atm, dt_atmos, grids_on_this_pe, p_split) end subroutine fv_init2 !------------------------------------------------------------------------------- -!>@brief The subroutine 'fv_end' terminates FV3, deallocates memory, +!>@brief The subroutine 'fv_end' terminates FV3, deallocates memory, !! saves restart files, and stops I/O. subroutine fv_end(Atm, grids_on_this_pe, restarts) @@ -645,7 +656,7 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) real :: dim0 = 180. !< base dimension real :: dt0 = 1800. !< base time step - real :: ns0 = 5. !< base nsplit for base dimension + real :: ns0 = 5. !< base nsplit for base dimension !< For cubed sphere 5 is better real :: offset = 0.49 !< base to help round up n_split real :: dimx, dl, dp, dxmin, dymin, d_fac @@ -695,18 +706,9 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) bubble_do = .false. test_case = 11 ! (USGS terrain) -#ifdef INTERNAL_FILE_NML ! Read Main namelist read (input_nml_file,fv_grid_nml,iostat=ios) ierr = check_nml_error(ios,'fv_grid_nml') -#else - f_unit=open_namelist_file() - rewind (f_unit) -! Read Main namelist - read (f_unit,fv_grid_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_grid_nml') - call close_file(f_unit) -#endif unit = stdlog() write(unit, nml=fv_grid_nml) @@ -720,13 +722,12 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) call fv_diag_init_gn(Atm(n)) endif -#ifdef INTERNAL_FILE_NML ! Set input_file_nml for correct parent/nest initialization if (n > 1) then write(nested_grid_filename,'(A4, I2.2)') 'nest', n call read_input_nml(nested_grid_filename) endif - ! Read FVCORE namelist + ! Read FVCORE namelist read (input_nml_file,fv_core_nml,iostat=ios) ierr = check_nml_error(ios,'fv_core_nml') ! 
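With the INTERNAL_FILE_NML fallback gone, nested grids re-point the internal namelist file by building a 'nestNN' name and handing it to read_input_nml, and init_nesting later checks that the matching input_*.nml actually exists. A sketch of just the naming and existence check, in standard Fortran (the FMS call itself is not reproduced here):

   program nest_nml_name_demo
      implicit none
      character(len=16) :: nested_grid_filename
      logical :: found
      integer :: n
      do n = 2, 3
         ! same edit descriptor the patch uses: 'nest' plus a zero-padded number
         write(nested_grid_filename, '(A4, I2.2)') 'nest', n
         inquire(file='input_'//trim(nested_grid_filename)//'.nml', exist=found)
         write(*,'(3a,l2)') 'input_', trim(nested_grid_filename), '.nml exists: ', found
      end do
   end program nest_nml_name_demo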
Read Test_Case namelist @@ -737,26 +738,6 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) if (n > 1) then call read_input_nml endif -#else - if (size(Atm) == 1) then - f_unit = open_namelist_file() - else if (n == 1) then - f_unit = open_namelist_file('input.nml') - else - write(nested_grid_filename,'(A10, I2.2, A4)') 'input_nest', n, '.nml' - f_unit = open_namelist_file(nested_grid_filename) - endif - - ! Read FVCORE namelist - read (f_unit,fv_core_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_core_nml') - - ! Read Test_Case namelist - rewind (f_unit) - read (f_unit,test_case_nml,iostat=ios) - ierr = check_nml_error(ios,'test_case_nml') - call close_file(f_unit) -#endif write(unit, nml=fv_core_nml) write(unit, nml=test_case_nml) @@ -799,7 +780,7 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) else dimx = max ( npx, 2*(npy-1) ) endif - + if (grid_type < 4) then n0split = nint ( ns0*abs(dt_atmos)*dimx/(dt0*dim0) + offset ) elseif (grid_type == 4 .or. grid_type == 7) then @@ -877,8 +858,8 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) else Atm(n)%neststruct%ioffset = -999 - Atm(n)%neststruct%joffset = -999 - Atm(n)%neststruct%parent_tile = -1 + Atm(n)%neststruct%joffset = -999 + Atm(n)%neststruct%parent_tile = -1 Atm(n)%neststruct%refinement = -1 end if @@ -959,7 +940,7 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) enddo do n=1,size(Atm) - + call switch_current_Atm(Atm(n),.false.) call setup_pointers(Atm(n)) !! CLEANUP: WARNING not sure what changes to domain_decomp may cause @@ -978,7 +959,7 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) if (nested) then if (mod(npx-1 , refinement) /= 0 .or. mod(npy-1, refinement) /= 0) & call mpp_error(FATAL, 'npx or npy not an even refinement of its coarse grid.') - + !Pelist needs to be set to ALL (which should have been done !in broadcast_domains) to get this to work !call mpp_define_nest_domains(Atm(n)%neststruct%nest_domain, Atm(n)%domain, Atm(parent_grid_num)%domain, & @@ -1034,7 +1015,7 @@ subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) end subroutine run_setup subroutine init_nesting(Atm, grids_on_this_pe, p_split) - + type(fv_atmos_type), intent(inout), allocatable :: Atm(:) logical, allocatable, intent(INOUT) :: grids_on_this_pe(:) integer, intent(INOUT) :: p_split @@ -1052,22 +1033,14 @@ subroutine init_nesting(Atm, grids_on_this_pe, p_split) nest_pes = 0 ntiles = -999 -#ifdef INTERNAL_FILE_NML read (input_nml_file,nest_nml,iostat=ios) ierr = check_nml_error(ios,'nest_nml') -#else - f_unit=open_namelist_file() - rewind (f_unit) - read (f_unit,nest_nml,iostat=ios) - ierr = check_nml_error(ios,'nest_nml') - call close_file(f_unit) -#endif if (ntiles /= -999) ngrids = ntiles if (ngrids > 10) call mpp_error(FATAL, "More than 10 nested grids not supported") allocate(Atm(ngrids)) - + if (.not. allocated(grids_on_this_pe)) allocate(grids_on_this_pe(ngrids)) grids_on_this_pe = .false. !initialization @@ -1109,7 +1082,7 @@ subroutine init_nesting(Atm, grids_on_this_pe, p_split) if (n > 1) then call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) !Make sure nested-grid input file exists - if (.not. file_exist('input_'//trim(pe_list_name)//'.nml')) then + if (.not. 
file_exists('input_'//trim(pe_list_name)//'.nml')) then call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") endif endif @@ -1124,7 +1097,7 @@ subroutine init_nesting(Atm, grids_on_this_pe, p_split) call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) !Make sure nested-grid input file exists if (n > 1) then - if (.not. file_exist('input_'//trim(pe_list_name)//'.nml')) then + if (.not. file_exists('input_'//trim(pe_list_name)//'.nml')) then call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") endif endif @@ -1308,7 +1281,7 @@ subroutine setup_pointers(Atm) a2b_ord => Atm%flagstruct%a2b_ord c2l_ord => Atm%flagstruct%c2l_ord ndims => Atm%flagstruct%ndims - + dx_const => Atm%flagstruct%dx_const dy_const => Atm%flagstruct%dy_const deglon_start => Atm%flagstruct%deglon_start @@ -1334,5 +1307,5 @@ subroutine setup_pointers(Atm) compute_coords_locally => Atm%flagstruct%compute_coords_locally end subroutine setup_pointers - + end module fv_control_mod diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90 index 667049fec..034531022 100644 --- a/model/fv_dynamics.F90 +++ b/model/fv_dynamics.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -116,7 +116,12 @@ module fv_dynamics_mod ! ! - use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor use dyn_core_mod, only: dyn_core, del2_cubed, init_ijk_mem use fv_mapz_mod, only: compute_total_energy, Lagrangian_to_Eulerian, moist_cv, moist_cp use fv_tracer2d_mod, only: tracer_2d, tracer_2d_1L, tracer_2d_nested @@ -158,7 +163,7 @@ module fv_dynamics_mod !----------------------------------------------------------------------- ! 
fv_dynamics :: FV dynamical core driver !----------------------------------------------------------------------- - + subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & kappa, cp_air, zvir, ptop, ks, ncnst, & k_split, n_split, & @@ -207,7 +212,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & real, intent(inout), dimension(bd%is :bd%ie ,bd%js :bd%je ,npz) :: dtdt_rf ! Temp tendency from Rayleigh friction !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -217,7 +222,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je) !< ln(pe) real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) !< finite-volume mean pk real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:) - + !----------------------------------------------------------------------- ! Others: !----------------------------------------------------------------------- @@ -259,7 +264,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & ! Local Courant number arrays real :: cxL(bd%is:bd%ie+1, bd%jsd:bd%jed, npz) real :: cyL(bd%isd:bd%ied ,bd%js:bd%je+1, npz) -! More Local arrays +! More Local arrays real:: ws(bd%is:bd%ie,bd%js:bd%je) real(kind=8):: te_2d(bd%is:bd%ie,bd%js:bd%je) real:: aam(bd%is:bd%ie,bd%js:bd%je) @@ -312,7 +317,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & nq = nq_tot - flagstruct%dnats rdg = -rdgas * agrav allocate ( dp1(isd:ied, jsd:jed, 1:npz) ) - + #ifdef MOIST_CAPPA allocate ( cappa(isd:ied,jsd:jed,npz) ) call init_ijk_mem(isd,ied, jsd,jed, npz, cappa, 0.) @@ -320,7 +325,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & allocate ( cappa(isd:isd,jsd:jsd,1) ) cappa = 0. #endif - !We call this BEFORE converting pt to virtual potential temperature, + !We call this BEFORE converting pt to virtual potential temperature, !since we interpolate on (regular) temperature rather than theta. if (gridstruct%nested .or. 
ANY(neststruct%child_grids)) then call timing_on('NEST_BCs') @@ -340,11 +345,11 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & #ifdef USE_COND call nested_grid_BC_apply_intT(q_con, & 0, 0, npx, npy, npz, bd, 1., 1., & - neststruct%q_con_BC, bctype=neststruct%nestbctype ) + neststruct%q_con_BC, bctype=neststruct%nestbctype ) #ifdef MOIST_CAPPA call nested_grid_BC_apply_intT(cappa, & 0, 0, npx, npy, npz, bd, 1., 1., & - neststruct%cappa_BC, bctype=neststruct%nestbctype ) + neststruct%cappa_BC, bctype=neststruct%nestbctype ) #endif #endif endif @@ -438,7 +443,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & enddo else !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,q,q_con,sphum,liq_wat, & -!$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, & +!$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, & !$OMP cappa,kappa,rdg,delp,pt,delz,nwat) & !$OMP private(cvm) do k=1,npz @@ -643,7 +648,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & call timing_on('DYN_CORE') call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, k_split, n_split, zvir, cp_air, akap, cappa, grav, hydrostatic, & - u, v, w, delz, pt, q, delp, pe, pk, phis, varflt, ws, omga, ptop, pfull, ua, va, & + u, v, w, delz, pt, q, delp, pe, pk, phis, varflt, ws, omga, ptop, pfull, ua, va, & dudt_rf, dvdt_rf, dwdt_rf, uc, vc, & #ifdef SINGLE_FV mfxR8, mfyR8, cxR8, cyR8, & @@ -660,7 +665,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & mfxL=mfxR8 mfyL=mfyR8 cxL= cxR8 - cyL= cyR8 + cyL= cyR8 #endif ! if ( flagstruct%range_warn ) then ! call range_check('CX_dyn', cxL(is:ie,js:je,:)/real(n_split), is, ie, js, je, 0, npz, gridstruct%agrid, & @@ -716,7 +721,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & enddo enddo #else - if( .not. flagstruct%inline_q .and. nq /= 0 ) then + if( .not. flagstruct%inline_q .and. nq /= 0 ) then !-------------------------------------------------------- ! Perform large-time-step scalar transport using the accumulated CFL and ! mass fluxes @@ -758,7 +763,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & #endif if( last_step .and. idiag%id_divg>0 ) then - used = send_data(idiag%id_divg, dp1, fv_time) + used = send_data(idiag%id_divg, dp1, fv_time) if(flagstruct%fv_debug) call prt_mxm('divg', dp1, is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain) endif endif @@ -909,14 +914,14 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & if( idiag%id_aam>0 ) then used = send_data(idiag%id_aam, aam, fv_time) if ( prt_minmax ) then - gam = g_sum( domain, aam, is, ie, js, je, ng, gridstruct%area_64, 0) + gam = g_sum( domain, aam, is, ie, js, je, ng, gridstruct%area_64, 0) if( is_master() ) write(6,*) 'Total AAM =', gam endif endif endif if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then -!$OMP parallel do default(none) shared(is,ie,js,je,aam,teq,dt2,ps2,ps,idiag) +!$OMP parallel do default(none) shared(is,ie,js,je,aam,teq,dt2,ps2,ps,idiag) do j=js,je do i=is,ie ! Note: the mountain torque computation contains also numerical error @@ -927,7 +932,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & if( idiag%id_amdt>0 ) used = send_data(idiag%id_amdt, aam/bdt, fv_time) if ( flagstruct%consv_am .or. 
prt_minmax ) then - amdt = g_sum( domain, aam, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=flagstruct%exact_sum) + amdt = g_sum( domain, aam, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=flagstruct%exact_sum) u0 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=flagstruct%exact_sum) if(is_master() .and. prt_minmax) & write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u0*86400./bdt @@ -1146,8 +1151,8 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) !< D grid meridional wind (m/s) real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) !< cell center vertical wind (m/s) real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !< temp - real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: ) !< delta-height (m); non-hydrostatic only real, intent(in) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2) real, intent(in) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed) !< Surface geopotential (g*Z_surf) @@ -1317,8 +1322,8 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) !< D grid meridional wind (m/s) real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) !< cell center vertical wind (m/s) real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !< temp - real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: ) !< delta-height (m); non-hydrostatic only type(fv_grid_type), intent(IN) :: gridstruct type(domain2d), intent(INOUT) :: domain @@ -1332,7 +1337,7 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + is = bd%is ie = bd%ie js = bd%js @@ -1462,7 +1467,7 @@ subroutine compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, integer i, j, k call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested) - + !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,aam,m_fac,ps,ptop,delp,agrav,ua) & !$OMP private(r1, r2, dm) do j=js,je diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index f51170300..6ac5d3956 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -75,7 +75,12 @@ module fv_grid_utils_mod #include - use constants_mod, only: omega, pi=>pi_8, cnst_radius=>radius +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: omega, pi=>pi_8, cnst_radius=>radius use mpp_mod, only: FATAL, mpp_error, WARNING use external_sst_mod, only: i_sst, j_sst, sst_ncep, sst_anom use mpp_domains_mod, only: mpp_update_domains, DGRID_NE, mpp_global_sum diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90 index 6ca2c4059..1ce53ac63 100644 --- a/model/fv_mapz.F90 +++ b/model/fv_mapz.F90 @@ -81,7 +81,12 @@ module fv_mapz_mod ! ! 
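The fv_mapz changes below are import and whitespace cleanups, but this is the module where Lagrangian_to_Eulerian remaps each column from the deformed Lagrangian layers back to the Eulerian coordinate. As a rough stand-in for the PPM-based map_scalar/mappm machinery, here is a first-order, mass-conserving overlap remap (illustrative only; the real scheme reconstructs subgrid profiles and remaps temperature in log-p):

   program remap_demo
      implicit none
      real :: pe1(4), q1(3), pe2(3), q2(2)
      pe1 = [100., 300., 600., 1000.]   ! source interface pressures (hPa)
      q1  = [1., 2., 3.]                ! source layer means
      pe2 = [100., 500., 1000.]         ! target interfaces, same total range
      call remap_pc(3, pe1, q1, 2, pe2, q2)
      write(*,*) q2                     ! 1.5 and 2.8; total mass is conserved
   end program remap_demo

   subroutine remap_pc(km, pe1, q1, kn, pe2, q2)
      ! first-order (piecewise-constant) conservative overlap remap
      implicit none
      integer, intent(in)  :: km, kn
      real,    intent(in)  :: pe1(km+1), q1(km), pe2(kn+1)
      real,    intent(out) :: q2(kn)
      integer :: k, l
      real :: overlap
      do k = 1, kn
         q2(k) = 0.
         do l = 1, km
            overlap = min(pe1(l+1), pe2(k+1)) - max(pe1(l), pe2(k))
            if (overlap > 0.) q2(k) = q2(k) + overlap * q1(l)
         end do
         q2(k) = q2(k) / (pe2(k+1) - pe2(k))
      end do
   end subroutine remap_pc

Totals are conserved because every source increment lands in exactly one target layer.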
- use constants_mod, only: radius, pi=>pi_8, rvgas, rdgas, grav, hlv, hlf, hls, cp_air, cp_vapor +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: radius, pi=>pi_8, rvgas, rdgas, grav, hlv, hlf, hls, cp_air, cp_vapor use tracer_manager_mod,only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS use fv_grid_utils_mod, only: ptop_min @@ -176,7 +181,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real, intent(inout):: delp(isd:ied,jsd:jed,km) !< pressure thickness real, intent(inout):: pe(is-1:ie+1,km+1,js-1:je+1) !< pressure at layer edges real, intent(inout):: ps(isd:ied,jsd:jed) !< surface pressure - + ! u-wind will be ghosted one latitude to the north upon exit real, intent(inout):: u(isd:ied ,jsd:jed+1,km) !< u-wind (m/s) real, intent(inout):: v(isd:ied+1,jsd:jed ,km) !< v-wind (m/s) @@ -219,7 +224,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real, dimension(is:ie,km) :: dp2, dpe1,dpe2, dpn1,dpn2 real, dimension(is:ie,km+1) :: pe1, pe2, pk2, pn1, pn2, phis real, dimension(is:ie+1,km+1):: pe0, pe3 - real, dimension(is:ie+1,km) :: dpe0,dpe3 + real, dimension(is:ie+1,km) :: dpe0,dpe3 real, dimension(is:ie):: gz, cvm real(kind=8):: tesum, zsum, dtmp real :: rcp, rg, tmp, tpe, rrg, bkh, k1k, dlnp @@ -372,7 +377,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & ! Transform "density pt" to "density temp" !$OMP parallel do default(none) shared(is,ie,isd,ied,js,je,jsd,jed,km,nwat, & !$OMP sphum,liq_wat,rainwat,ice_wat,snowwat,graupel,& -!$OMP q,cappa,r_vir,pt,rrg,delp,delz,k1k) & +!$OMP q,cappa,r_vir,pt,rrg,delp,delz,k1k) & !$OMP private(i,j,k,gz,cvm) do k=1,km do j=js,je @@ -431,7 +436,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & ! TE using 3D winds (pt is virtual potential temperature): !$OMP parallel do default(none) shared(is,ie,isd,ied,js,je,jsd,jed,km,hs,nwat, & !$OMP sphum,liq_wat,rainwat,ice_wat,snowwat,graupel,& -!$OMP te,q,cappa,r_vir,pt,pe,pkz,rrg,delp,delz,k1k,& +!$OMP te,q,cappa,r_vir,pt,pe,pkz,rrg,delp,delz,k1k,& !$OMP gridstruct,u,v,w) & !$OMP private(i,j,k,phis,gz,cvm) do j=js,je @@ -517,7 +522,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & pn2(i,2: km ) = log(peO(i,2:km,j)) enddo dpn1(is:ie,1:km) = pn1(is:ie,2:km+1)-pn1(is:ie,1:km) - dpn2(is:ie,1:km) = pn2(is:ie,2:km+1)-pn2(is:ie,1:km) + dpn2(is:ie,1:km) = pn2(is:ie,2:km+1)-pn2(is:ie,1:km) if (remap_te) then call map_scalar(km, pn1, te, & km, pn2, q2, & @@ -535,9 +540,9 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & endif endif -1000 continue +1000 continue - call timing_off('Remap_T') + call timing_off('Remap_T') call timing_on('Remap_Q') @@ -589,7 +594,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & !---------------- if ( .not. hydrostatic ) then - call timing_on('Remap_NH') + call timing_on('Remap_NH') ! 
Remap delz for hybrid sigma-p coordinate !$OMP parallel do default(none) shared(is,ie,isd,ied,js,je,jsd,jed,km,kord,ikord_wz, & @@ -664,7 +669,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & w(is:ie,j,:) = w2 1002 continue - call timing_off('Remap_NH') + call timing_off('Remap_NH') endif @@ -1111,7 +1116,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & zsum = mpp_global_sum(domain, tmp_2D, flags=sflag) dtmp = E_Flux*(grav*pdt*4.*pi*radius**2) / (cp*zsum) else - tmp_2D = zsum1*gridstruct%area_64(is:ie,js:je) + tmp_2D = zsum1*gridstruct%area_64(is:ie,js:je) zsum = mpp_global_sum(domain, tmp_2D, flags=sflag) dtmp = E_Flux*(grav*pdt*4.*pi*radius**2) / (cv_air*zsum) endif @@ -1211,7 +1216,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & elseif ( last_step .and. adiabatic ) then !$OMP do - do k=1,km + do k=1,km do j=js,je do i=is,ie pt(i,j,k) = (pt(i,j,k)+dtmp*pkz(i,j,k)) @@ -1553,7 +1558,7 @@ subroutine map_scalar( km, pe1, q1, & real, allocatable :: q4(:,:,:) real qsum, pl, pr, pfac0, pfac1, pfac2, dp, esl integer i, k, l, m, k0 - integer LM1,LP0,LP1 + integer LM1,LP0,LP1 logical gmao_bot, gmao_top gmao_bot=.false. diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index 03d5d640e..1a840e21b 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -1,25 +1,25 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** -!>@brief The module 'fv_nesting' is a collection of routines pertaining to grid nesting +!>@brief The module 'fv_nesting' is a collection of routines pertaining to grid nesting !! \cite harris2013two. module fv_nesting_mod @@ -45,7 +45,7 @@ module fv_nesting_mod ! ! ! fv_arrays_mod -! fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, +! fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, ! fv_nest_BC_type_3D,allocate_fv_nest_BC_type, fv_atmos_type, fv_grid_bounds_type ! ! @@ -82,7 +82,7 @@ module fv_nesting_mod ! ! ! mpp_domains_mod/td> -! mpp_update_domains, mpp_get_data_domain, mpp_get_compute_domain, +! mpp_update_domains, mpp_get_data_domain, mpp_get_compute_domain, ! mpp_get_global_domain, DGRID_NE, mpp_update_domains, domain2D ! ! 
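The hunk below applies the same constants switch used throughout this patch: when the core is compiled single precision (SINGLE_FV defined), physical constants come from FMS's constantsr4_mod instead of constants_mod. Because both modules export the same names, only the use line changes; a minimal sketch of the idiom in a preprocessed .F90 file:

   module my_consts_user
   #if defined (SINGLE_FV)
      use constantsr4_mod, only: grav, rdgas
   #else
      use constants_mod,   only: grav, rdgas
   #endif
      implicit none
   contains
      real function scale_height(t)        ! H = R T / g, about 7.3 km at 250 K
         real, intent(in) :: t
         scale_height = rdgas * t / grav
      end function scale_height
   end module my_consts_user

Precision is decided entirely at preprocess time, so no calling code changes between the r4 and r8 builds.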
@@ -110,7 +110,12 @@ module fv_nesting_mod use fv_arrays_mod, only: allocate_fv_nest_BC_type, fv_atmos_type, fv_grid_bounds_type use fv_grid_utils_mod, only: ptop_min, g_sum, cubed_to_latlon, f_p use init_hydro_mod, only: p_var - use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa use fv_mapz_mod, only: mappm use fv_timing_mod, only: timing_on, timing_off use fv_mp_mod, only: is_master @@ -143,7 +148,7 @@ module fv_nesting_mod !!!! NOTE: Many of the routines here and in boundary.F90 have a lot of !!!! redundant code, which could be cleaned up and simplified. -!>@brief The subroutine 'setup_nested_grid_BCs' fetches data from the coarse grid +!>@brief The subroutine 'setup_nested_grid_BCs' fetches data from the coarse grid !! to set up the nested-grid boundary conditions. subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & u, v, w, pt, delp, delz,q, uc, vc, pkz, & @@ -152,7 +157,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & nest_timestep, tracer_nest_timestep, & domain, bd, nwat) - + type(fv_grid_bounds_type), intent(IN) :: bd real, intent(IN) :: zvir @@ -202,7 +207,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & jed = bd%jed child_grids => neststruct%child_grids - + !IF nested, set up nested grid BCs for time-interpolation !(actually applying the BCs is done in dyn_core @@ -213,7 +218,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & if (neststruct%nested .and. (.not. (neststruct%first_step) .or. make_nh) ) then do_pd = .true. - call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) + call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) else !On first timestep the t0 BCs are not initialized and may contain garbage do_pd = .false. @@ -246,7 +251,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & else call divergence_corner(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd) endif - end do + end do endif #ifndef SW_DYNAMICS @@ -260,7 +265,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & enddo enddo endif -#endif +#endif !! Nested grid: receive from parent grid if (neststruct%nested) then if (.not. allocated(q_buf)) then @@ -318,7 +323,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & else call nested_grid_BC_send(w, neststruct%nest_domain_all(p), 0, 0) call nested_grid_BC_send(delz, neststruct%nest_domain_all(p), 0, 0) - endif + endif #endif call nested_grid_BC_send(u, neststruct%nest_domain_all(p), 0, 1) call nested_grid_BC_send(vc, neststruct%nest_domain_all(p), 0, 1) @@ -327,7 +332,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & call nested_grid_BC_send(divg, neststruct%nest_domain_all(p), 1, 1) endif enddo - + !Nested grid: do computations if (nested) then call nested_grid_BC_save_proc(neststruct%nest_domain, & @@ -355,8 +360,8 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & neststruct%w_BC, w_buf) call nested_grid_BC_save_proc(neststruct%nest_domain, & neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%delz_BC, delz_buf) !Need a negative-definite method? - + neststruct%delz_BC, delz_buf) !Need a negative-definite method? 
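setup_pt_NH_BC below repeatedly diagnoses the layer-mean pk from the nonhydrostatic state. A worked one-layer example of the #else branch (dry kappa, with the condensate loading q_con taken as zero; the constants are the usual values, assumed here):

   program pkz_demo
      implicit none
      real, parameter :: rdgas = 287.04, rvgas = 461.5, grav = 9.80665
      real, parameter :: kappa = 2./7.
      real :: rdg, delp, delz, pt, q, zvir, pkz
      rdg  = -rdgas / grav
      delp = 5000.      ! layer thickness in pressure (Pa)
      delz = -400.      ! layer thickness in height (m); negative by convention
      pt   = 285.       ! layer temperature (K)
      q    = 5.e-3      ! specific humidity
      zvir = rvgas/rdgas - 1.
      ! rdg*delp/delz equals rdgas*rho, so the argument of log() is the full
      ! pressure rho*rdgas*Tv for this layer
      pkz = exp( kappa * log( rdg * delp * pt * (1. + zvir*q) / delz ) )
      write(*,*) 'pkz =', pkz   ! about 27.2, i.e. (roughly 1046 hPa)**kappa
   end program pkz_demo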
+ call setup_pt_NH_BC(neststruct%pt_BC, neststruct%delp_BC, neststruct%delz_BC, & neststruct%q_BC(sphum), neststruct%q_BC, ncnst, & #ifdef USE_COND @@ -389,10 +394,10 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & if (neststruct%first_step) then if (neststruct%nested) call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) neststruct%first_step = .false. - if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false. + if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false. else if (flagstruct%make_nh) then if (neststruct%nested) call set_NH_BCs_t0(neststruct) - flagstruct%make_nh= .false. + flagstruct%make_nh= .false. endif !Unnecessary? @@ -400,7 +405,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & !!$ neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1 !!$ neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1 !!$ neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1 -!!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1 +!!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1 !!$ neststruct%divg_BC%initialized = .true. !!$ endif @@ -432,7 +437,7 @@ subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) ied = bd%ied jsd = bd%jsd jed = bd%jed - + if (is == 1) then ptBC => pt_BC%west_t1 pkzBC => pkz_BC%west_t1 @@ -514,7 +519,7 @@ subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) end do end do end if - + end subroutine setup_pt_BC subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & @@ -541,7 +546,7 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & real, intent(IN) :: zvir real, parameter:: c_liq = 4185.5 !< heat capacity of water at 0C - real, parameter:: c_ice = 1972. !< heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) + real, parameter:: c_ice = 1972. 
!< heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) real, parameter:: cv_vap = cp_vapor - rvgas !< 1384.5 real, dimension(:,:,:), pointer :: ptBC, sphumBC, qconBC, delpBC, delzBC, cappaBC @@ -568,7 +573,7 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & ied = bd%ied jsd = bd%jsd jed = bd%jed - + rdg = -rdgas / grav cv_air = cp_air - rdgas @@ -583,7 +588,7 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') - graupel = get_tracer_index (MODEL_ATMOS, 'graupel') + graupel = get_tracer_index (MODEL_ATMOS, 'graupel') #endif if (is == 1) then @@ -723,13 +728,13 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & q_liq = liq_watBC_west(i,j,k) + rainwatBC_west(i,j,k) q_sol = ice_watBC_west(i,j,k) + snowwatBC_west(i,j,k) + graupelBC_west(i,j,k) q_con = q_liq + q_sol -#endif +#endif qconBC(i,j,k) = q_con #ifdef MOIST_CAPPA cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) #else pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) @@ -786,13 +791,13 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & q_liq = liq_watBC_south(i,j,k) + rainwatBC_south(i,j,k) q_sol = ice_watBC_south(i,j,k) + snowwatBC_south(i,j,k) + graupelBC_south(i,j,k) q_con = q_liq + q_sol -#endif +#endif qconBC(i,j,k) = q_con #ifdef MOIST_CAPPA cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) #else pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) @@ -837,13 +842,13 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & q_liq = liq_watBC_east(i,j,k) + rainwatBC_east(i,j,k) q_sol = ice_watBC_east(i,j,k) + snowwatBC_east(i,j,k) + graupelBC_east(i,j,k) q_con = q_liq + q_sol -#endif +#endif qconBC(i,j,k) = q_con #ifdef MOIST_CAPPA cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) #else pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) @@ -898,13 +903,13 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & q_liq = liq_watBC_north(i,j,k) + rainwatBC_north(i,j,k) q_sol = ice_watBC_north(i,j,k) + snowwatBC_north(i,j,k) + graupelBC_north(i,j,k) q_con = q_liq + q_sol -#endif +#endif qconBC(i,j,k) = q_con #ifdef MOIST_CAPPA cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) #else pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) @@ -1037,7 +1042,7 @@ end subroutine set_BCs_t0 !! 
unless flux nested grid BCs are specified, or if a quantity is !! not updated at all. This ability has not been implemented. ! -!>@brief The subroutine'twoway_nesting' performs a two-way update +!>@brief The subroutine'twoway_nesting' performs a two-way update !! of nested-grid data onto the parent grid. subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) @@ -1048,12 +1053,12 @@ subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) integer :: n, p, sphum - + if (ngrids > 1) then do n=ngrids,2,-1 !loop backwards to allow information to propagate from finest to coarsest grids - !two-way updating + !two-way updating if (Atm(n)%neststruct%twowaynest ) then if (grids_on_this_pe(n) .or. grids_on_this_pe(Atm(n)%parent_grid%grid_number)) then sphum = get_tracer_index (MODEL_ATMOS, 'sphum') @@ -1131,7 +1136,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & real, allocatable :: qdp_coarse(:,:,:) real(kind=f_p), allocatable :: q_diff(:,:,:) real :: L_sum_b(npz), L_sum_a(npz) - + integer :: upoff integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -1184,7 +1189,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & do i=isd_p,ied_p parent_grid%ps(i,j) = & - parent_grid%delp(i,j,1)/grav + parent_grid%delp(i,j,1)/grav end do end do @@ -1300,7 +1305,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & parent_grid%q(i,j,k,n) = parent_grid%q(i,j,k,n)/parent_grid%delp(i,j,k) enddo enddo - enddo + enddo enddo endif @@ -1369,7 +1374,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & call mpp_sync!self end if - + end if !Neststruct%nestupdate /= 3 #endif @@ -1449,7 +1454,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & call mpp_sync!self - if (parent_grid%tile == neststruct%parent_tile) then + if (parent_grid%tile == neststruct%parent_tile) then if (neststruct%parent_proc) then @@ -1514,9 +1519,9 @@ subroutine level_sum(q, area, domain, bd, npz, L_sum) type(fv_grid_bounds_type), intent(IN) :: bd real, intent(in) :: area( bd%isd:bd%ied ,bd%jsd:bd%jed) real, intent(in) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - real, intent(OUT) :: L_sum( npz ) + real, intent(OUT) :: L_sum( npz ) type(domain2d), intent(IN) :: domain - + integer :: i, j, k, n real :: qA!(bd%is:bd%ie, bd%js:bd%je) @@ -1556,7 +1561,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: ) !< delta-height (m); non-hydrostatic only !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -1565,7 +1570,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1) !< pe**cappa real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je) !< ln(pe) real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) !< finite-volume mean pk - + !----------------------------------------------------------------------- ! 
Others: !----------------------------------------------------------------------- @@ -1580,7 +1585,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + is = bd%is ie = bd%ie js = bd%js @@ -1627,7 +1632,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & end subroutine after_twoway_nest_update -!>@brief The subroutine 'update_remap_tqw' remaps (interpolated) nested-grid data +!>@brief The subroutine 'update_remap_tqw' remaps (interpolated) nested-grid data !! to the coarse-grid's vertical coordinate. !This does not yet do anything for the tracers subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & @@ -1660,13 +1665,13 @@ subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & pe0(i,k) = ak(k) + bk(k)*ps0(i,j) pn0(i,k) = log(pe0(i,k)) enddo - enddo + enddo do k=1,kmd+1 do i=is,ie pe1(i,k) = ak(k) + bk(k)*ps(i,j) pn1(i,k) = log(pe1(i,k)) enddo - enddo + enddo if (do_q) then do iq=1,nq do k=1,kmd @@ -1690,7 +1695,7 @@ subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & enddo !Remap T using logp call mappm(kmd, pn0, tp, npz, pn1, qn1, is,ie, 1, abs(kord_tm), ptop) - + do k=1,npz do i=is,ie t(i,j,k) = qn1(i,k) diff --git a/model/fv_sg.F90 b/model/fv_sg.F90 index ba560ff8c..546763bf2 100644 --- a/model/fv_sg.F90 +++ b/model/fv_sg.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -51,7 +51,12 @@ module fv_sg_mod !----------------------------------------------------------------------- ! FV sub-grid mixing !----------------------------------------------------------------------- - use constants_mod, only: rdgas, rvgas, cp_air, cp_vapor, hlv, hlf, kappa, grav +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: rdgas, rvgas, cp_air, cp_vapor, hlv, hlf, kappa, grav use tracer_manager_mod, only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS use fv_mp_mod, only: mp_reduce_min, is_master @@ -86,7 +91,7 @@ module fv_sg_mod real, parameter:: t2_max = 315. real, parameter:: t3_max = 325. 
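
Note on the recurring edit above: every module this PR touches swaps constants_mod for constantsr4_mod the same way, keeping a single only-list after the #endif so the two preprocessor branches differ only in the module name. The sketch below is a standalone illustration, not code from this PR; it assumes a preprocessed build (.F90 suffix or -cpp) against an FMS installation whose constantsr4_mod exports the same public names as constants_mod at single precision.

   program single_fv_demo
      ! Minimal analogue of the SINGLE_FV switch used throughout this PR.
      ! The '&' continuation lets both branches share one only-list, so
      ! adding or removing an imported constant touches a single line
      ! regardless of precision mode.
#if defined (SINGLE_FV)
      use constantsr4_mod, &
#else
      use constants_mod, &
#endif
           only: rdgas, grav
      implicit none
      print *, 'rdgas =', rdgas, '  grav =', grav
   end program single_fv_demo

Compiling once with and once without -DSINGLE_FV exercises both branches.
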
real, parameter:: Lv0 = hlv0 - dc_vap*t_ice !< = 3.147782e6 - real, parameter:: Li0 = hlf0 - dc_ice*t_ice !< = -2.431928e5 + real, parameter:: Li0 = hlf0 - dc_ice*t_ice !< = -2.431928e5 real, parameter:: zvir = rvgas/rdgas - 1. !< = 0.607789855 real, allocatable:: table(:),des(:) @@ -107,26 +112,26 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & integer, intent(in):: isd, ied, jsd, jed integer, intent(in):: tau !< Relaxation time scale real, intent(in):: dt !< model time step - real, intent(in):: pe(is-1:ie+1,km+1,js-1:je+1) + real, intent(in):: pe(is-1:ie+1,km+1,js-1:je+1) real, intent(in):: peln(is :ie, km+1,js :je) real, intent(in):: delp(isd:ied,jsd:jed,km) !< Delta p at each model level real, intent(in):: delz(isd:,jsd:,1:) !< Delta z at each model level real, intent(in):: pkz(is:ie,js:je,km) logical, intent(in):: hydrostatic integer, intent(in), optional:: k_bot -! +! real, intent(inout):: ua(isd:ied,jsd:jed,km) real, intent(inout):: va(isd:ied,jsd:jed,km) real, intent(inout):: w(isd:,jsd:,1:) real, intent(inout):: ta(isd:ied,jsd:jed,km) !< Temperature real, intent(inout):: qa(isd:ied,jsd:jed,km,nq) !< Specific humidity & tracers - real, intent(inout):: u_dt(is:ie,js:je,km) - real, intent(inout):: v_dt(is:ie,js:je,km) + real, intent(inout):: u_dt(is:ie,js:je,km) + real, intent(inout):: v_dt(is:ie,js:je,km) real, intent(inout):: t_dt(is:ie,js:je,km) real, intent(inout):: w_dt(is:ie,js:je,km) !---------------------------Local variables----------------------------- real, dimension(is:ie,km):: u0, v0, w0, t0, hd, te, gz, tvm, pm, den - real q0(is:ie,km,nq), qcon(is:ie,km) + real q0(is:ie,km,nq), qcon(is:ie,km) real, dimension(is:ie):: gzh, lcp2, icp2, cvm, cpm, qs real ri_ref, ri, pt1, pt2, ratio, tv, cv, tmp, q_liq, q_sol real tv1, tv2, g2, h0, mc, fra, rk, rz, rdt, tvd, tv_surf @@ -193,9 +198,9 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & if ( pe(is,1,js) < 2. ) then t_min = t1_min else - t_min = t2_min + t_min = t2_min endif - if ( k_bot < min(km,24) ) then + if ( k_bot < min(km,24) ) then t_max = t2_max else t_max = t3_max @@ -230,7 +235,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & !$OMP private(kk,lcp2,icp2,tcp3,dh,dq,den,qs,qsw,dqsdt,qcon,q0, & !$OMP t0,u0,v0,w0,h0,pm,gzh,tvm,tmp,cpm,cvm,q_liq,q_sol, & !$OMP tv,gz,hd,te,ratio,pt1,pt2,tv1,tv2,ri_ref, ri,mc,km1) - do 1000 j=js,je + do 1000 j=js,je do iq=1, nq do k=1,kbot @@ -283,7 +288,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & enddo elseif ( nwat==3 ) then do i=is,ie - q_liq = q0(i,k,liq_wat) + q_liq = q0(i,k,liq_wat) q_sol = q0(i,k,ice_wat) cpm(i) = (1.-(q0(i,k,sphum)+q_liq+q_sol))*cp_air + q0(i,k,sphum)*cp_vapor + q_liq*c_liq + q_sol*c_ice cvm(i) = (1.-(q0(i,k,sphum)+q_liq+q_sol))*cv_air + q0(i,k,sphum)*cv_vap + q_liq*c_liq + q_sol*c_ice @@ -393,7 +398,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & !!! ri_ref = min(ri_max, ri_min + (ri_max-ri_min)*dim(500.e2,pm(i,k))/250.e2 ) ri_ref = min(ri_max, ri_min + (ri_max-ri_min)*dim(400.e2,pm(i,k))/200.e2 ) -#ifdef MAPL_MODE +#ifdef MAPL_MODE ! Enhancing mixing at the model top if ( k==2 ) then ri_ref = 4.*ri_ref @@ -454,7 +459,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & endif enddo -!-------------- +!-------------- ! 
Retrive Temp: !-------------- if ( hydrostatic ) then @@ -516,7 +521,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & cvm(i) = (1.-(q0(i,kk,sphum)+q_liq+q_sol))*cv_air + q0(i,kk,sphum)*cv_vap + q_liq*c_liq + q_sol*c_ice enddo endif - + do i=is,ie tv = gz(i,kk) + 0.5*(u0(i,kk)**2+v0(i,kk)**2+w0(i,kk)**2) t0(i,kk) = (te(i,kk)- tv) / cvm(i) @@ -582,7 +587,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & end subroutine fv_subgrid_z subroutine qsmith_init - integer, parameter:: length=2621 + integer, parameter:: length=2621 integer i if( .not. allocated(table) ) then @@ -598,7 +603,7 @@ subroutine qsmith_init enddo des(length) = des(length-1) endif - + end subroutine qsmith_init @@ -618,7 +623,7 @@ subroutine qsmith(im, km, k1, t, p, q, qs, dqdt) eps10 = 10.*esl if( .not. allocated(table) ) call qsmith_init - + do k=k1,km do i=1,im ap1 = 10.*DIM(t(i,k), Tmin) + 1. @@ -639,15 +644,15 @@ subroutine qsmith(im, km, k1, t, p, q, qs, dqdt) enddo enddo endif - + end subroutine qsmith - + subroutine qs_table(n,table) integer, intent(in):: n real table (n) real:: dt=0.1 - real esbasw, tbasw, esbasi, tbasi, Tmin, tem, aa, b, c, d, e, esh20 + real esbasw, tbasw, esbasi, tbasi, Tmin, tem, aa, b, c, d, e, esh20 real wice, wh2o integer i ! Constants @@ -675,7 +680,7 @@ subroutine qs_table_m(n,table) real table (n) real esupc(200) real:: dt=0.1 - real esbasw, tbasw, esbasi, tbasi, Tmin, tem, aa, b, c, d, e, esh20 + real esbasw, tbasw, esbasi, tbasi, Tmin, tem, aa, b, c, d, e, esh20 real wice, wh2o integer i @@ -897,7 +902,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & qr2(i,j) = qr2(i,j) + dq dq1 = min( dq, qs2(i,j) ) qs2(i,j) = qs2(i,j) - dq1 - qi2(i,j) = qi2(i,j) + dq1 - dq + qi2(i,j) = qi2(i,j) + dq1 - dq pt2(i,j) = pt2(i,j) - dq*icpk(i,j) endif ! fix negative rain water with available vapor @@ -980,8 +985,8 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & do i=is, ie if( qv(i,j,k) < 0. .and. qv(i,j,k-1) > 0. ) then dq = min(-qv(i,j,k)*dp(i,j,k), qv(i,j,k-1)*dp(i,j,k-1)) - qv(i,j,k-1) = qv(i,j,k-1) - dq/dp(i,j,k-1) - qv(i,j,k ) = qv(i,j,k ) + dq/dp(i,j,k ) + qv(i,j,k-1) = qv(i,j,k-1) - dq/dp(i,j,k-1) + qv(i,j,k ) = qv(i,j,k ) + dq/dp(i,j,k ) endif if( qv(i,j,k) < 0. ) then qv(i,j,k+1) = qv(i,j,k+1) + qv(i,j,k)*dp(i,j,k)/dp(i,j,k+1) @@ -990,7 +995,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo enddo enddo - + ! Bottom layer; Borrow from above !$OMP parallel do default(none) shared(is,ie,js,je,kbot,qv,dp) private(dq) do j=js, je @@ -1000,8 +1005,8 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & if ( qv(i,j,kbot)>=0. ) goto 123 if ( qv(i,j,k) > 0. ) then dq = min(-qv(i,j,kbot)*dp(i,j,kbot), qv(i,j,k)*dp(i,j,k)) - qv(i,j,k ) = qv(i,j,k ) - dq/dp(i,j,k) - qv(i,j,kbot) = qv(i,j,kbot) + dq/dp(i,j,kbot) + qv(i,j,k ) = qv(i,j,k ) - dq/dp(i,j,k) + qv(i,j,kbot) = qv(i,j,kbot) + dq/dp(i,j,kbot) endif enddo ! k-loop 123 continue @@ -1009,7 +1014,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo ! i-loop enddo ! j-loop - + if (present(qa)) then !----------------------------------- ! Fix negative cloud fraction @@ -1026,7 +1031,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo enddo enddo - + ! Bottom layer; Borrow from above !$OMP parallel do default(none) shared(is,ie,js,je,qa,kbot,dp) & !$OMP private(dq) @@ -1034,8 +1039,8 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & do i=is, ie if( qa(i,j,kbot) < 0. .and. 
qa(i,j,kbot-1)>0.) then dq = min(-qa(i,j,kbot)*dp(i,j,kbot), qa(i,j,kbot-1)*dp(i,j,kbot-1)) - qa(i,j,kbot-1) = qa(i,j,kbot-1) - dq/dp(i,j,kbot-1) - qa(i,j,kbot ) = qa(i,j,kbot ) + dq/dp(i,j,kbot ) + qa(i,j,kbot-1) = qa(i,j,kbot-1) - dq/dp(i,j,kbot-1) + qa(i,j,kbot ) = qa(i,j,kbot ) + dq/dp(i,j,kbot ) endif ! if qa is still < 0 qa(i,j,kbot) = max(0., qa(i,j,kbot)) diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90 index 22a50c48d..603db010b 100644 --- a/model/fv_update_phys.F90 +++ b/model/fv_update_phys.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -88,7 +88,12 @@ module fv_update_phys_mod ! ! - use constants_mod, only: kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius use field_manager_mod, only: MODEL_ATMOS use mpp_domains_mod, only: mpp_update_domains, domain2d use mpp_parameter_mod, only: AGRID_PARAM=>AGRID @@ -137,7 +142,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, intent(in) :: dt, ptop integer, intent(in):: is, ie, js, je, ng integer, intent(in):: isd, ied, jsd, jed - integer, intent(in):: nq ! tracers modified by physics + integer, intent(in):: nq ! tracers modified by physics ! ncnst is the total nmber of tracers logical, intent(in):: moist_phys logical, intent(in):: hydrostatic @@ -176,7 +181,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, intent(inout):: qdiag(isd:ied,jsd:jed,npz,nq+1:flagstruct%ncnst) !< diagnostic tracers !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -216,8 +221,8 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, dimension(1,1,1) :: parent_u_dt, parent_v_dt ! 
dummy variables for nesting -!f1p -!account for change in air molecular weight because of H2O change +!f1p +!account for change in air molecular weight because of H2O change logical, dimension(nq) :: conv_vmr_mmr real :: adj_vmr(is:ie,js:je,npz) character(len=32) :: tracer_units, tracer_name @@ -276,7 +281,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, end if end do end if - + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') @@ -367,7 +372,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ! Update tracers: !---------------- do m=1,nq - if( m /= w_diff ) then + if( m /= w_diff ) then do j=js,je do i=is,ie q(i,j,k,m) = q(i,j,k,m) + dt*q_dt(i,j,k,m) @@ -392,12 +397,12 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, enddo !----------------------------------------- -! Adjust mass mixing ratio of all tracers +! Adjust mass mixing ratio of all tracers !----------------------------------------- if ( nwat /=0 ) then do m=1,flagstruct%ncnst !-- check to query field_table to determine if tracer needs mass adjustment - if( m /= cld_amt .and. m /= w_diff .and. adjust_mass(MODEL_ATMOS,m)) then + if( m /= cld_amt .and. m /= w_diff .and. adjust_mass(MODEL_ATMOS,m)) then if (m <= nq) then q(is:ie,js:je,k,m) = q(is:ie,js:je,k,m) / ps_dt(is:ie,js:je) if (conv_vmr_mmr(m)) & @@ -478,7 +483,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, call get_atmos_nudge ( Time, dt, is, ie, js, je, & npz, ng, ps(is:ie,js:je), ua(is:ie, js:je,:), & va(is:ie,js:je,:), pt(is:ie,js:je,:), & - q(is:ie,js:je,:,:), ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), & + q(is:ie,js:je,:,:), ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), & v_dt(is:ie,js:je,:), t_dt(is:ie,js:je,:), & q_dt(is:ie,js:je,:,:) ) @@ -543,7 +548,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ! All fields will be updated except winds; wind tendencies added !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ps) do j=js,je - do k=2,npz+1 + do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) enddo @@ -556,18 +561,18 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, zvir, ptop, ak, bk, ts, ps, delp, ua, va, pt, & nwat, q, phis, gridstruct, bd, domain ) #endif - endif ! end nudging + endif ! end nudging if ( .not.flagstruct%dwind_2d ) then call timing_on('COMM_TOTAL') - if ( gridstruct%square_domain ) then + if ( gridstruct%square_domain ) then call start_group_halo_update(i_pack(1), u_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.true.) else call start_group_halo_update(i_pack(1), u_dt, domain, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, complete=.true.) 
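
For readers skimming the whitespace-only do k=2,npz+1 hunks in fv_update_phys above: those loops rebuild the auxiliary edge-pressure arrays after physics has changed delp. A single-column sketch of that computation, with made-up layer thicknesses (the real code carries the extra i/j dimensions and also refreshes pk and pkz):

   program pe_rebuild_demo
      ! Edge pressures are rebuilt from the model top by accumulating
      ! delp downward; peln then follows as log(pe). Values are invented
      ! for the demo.
      implicit none
      integer, parameter :: npz = 4
      real :: ptop, delp(npz), pe(npz+1), peln(npz+1)
      integer :: k
      ptop = 1.0
      delp = (/ 100.0, 200.0, 300.0, 400.0 /)
      pe(1) = ptop
      peln(1) = log(pe(1))
      do k = 2, npz+1
         pe(k) = pe(k-1) + delp(k-1)   ! hydrostatic accumulation
         peln(k) = log(pe(k))
      end do
      print '(a,5f10.2)', ' pe =', pe
   end program pe_rebuild_demo

e.g. gfortran pe_rebuild_demo.f90 && ./a.out prints a monotonically increasing edge-pressure column.
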
- endif + endif call timing_off('COMM_TOTAL') endif @@ -582,7 +587,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,peln,pk,ps,u_srf,v_srf, & !$OMP ua,va,pkz,hydrostatic) do j=js,je - do k=2,npz+1 + do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) peln(i,k,j) = log( pe(i,k,j) ) @@ -658,7 +663,7 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & real, intent(inout):: qdt(is-ngc:ie+ngc,js-ngc:je+ngc,km) type(fv_grid_type), intent(IN), target :: gridstruct type(domain2d), intent(INOUT) :: domain - + real, pointer, dimension(:,:) :: rarea, dx, dy, sina_u, sina_v, rdxc, rdyc real, pointer, dimension(:,:,:) :: sin_sg ! @@ -724,7 +729,7 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & (mask(is,j)+mask(is,j+1))*dy(is,j)*(q(is-1,j,k)-q(is,j,k))*rdxc(is,j)* & 0.5*(sin_sg(1,j,1) + sin_sg(0,j,3)) if (ie+1==npx .and. .not. gridstruct%nested) fx(i,j) = & - (mask(ie+1,j)+mask(ie+1,j+1))*dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & + (mask(ie+1,j)+mask(ie+1,j+1))*dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & 0.5*(sin_sg(npx,j,1) + sin_sg(npx-1,j,3)) enddo do j=js,je+1 @@ -750,12 +755,12 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & end subroutine del2_phys -!>@brief The subroutine 'update_dwinds_phys' transforms the wind tendencies from +!>@brief The subroutine 'update_dwinds_phys' transforms the wind tendencies from !! the A grid to the D grid for the final update. subroutine update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) ! Purpose; Transform wind tendencies on A grid to D grid for the final update - + integer, intent(in) :: is, ie, js, je integer, intent(in) :: isd, ied, jsd, jed integer, intent(IN) :: npx,npy, npz @@ -933,12 +938,12 @@ subroutine update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt enddo ! Update: endif ! end grid_type - + enddo ! k-loop - end subroutine update_dwinds_phys + end subroutine update_dwinds_phys -!>@brief The subroutine 'update2d_dwinds_phys' transforms the wind tendencies from +!>@brief The subroutine 'update2d_dwinds_phys' transforms the wind tendencies from !! the A grid to the D grid for the final update. 
subroutine update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) diff --git a/model/mapz-driver/CMakeLists.txt b/model/mapz-driver/CMakeLists.txt index 8571675bc..1d1b212e8 100644 --- a/model/mapz-driver/CMakeLists.txt +++ b/model/mapz-driver/CMakeLists.txt @@ -21,18 +21,10 @@ set(srcs output/output.f90 main.F90) -if (FV_PRECISION STREQUAL R4) - set(FMS FMS::fms_r4) -elseif (FV_PRECISION STREQUAL R4R8) # FV is R4 but FMS is R8 - set(FMS FMS::fms_r8) -elseif (FV_PRECISION STREQUAL R8) - set(FMS FMS::fms_r8) -endif () - ecbuild_add_executable( TARGET mapz-driver SOURCES ${srcs} - LIBS ${FMS} MAPL) + LIBS FMS::fms MAPL) target_compile_definitions(mapz-driver PRIVATE MAPL_MODE SPMD TIMING) target_compile_options(mapz-driver PRIVATE ${TRACEBACK}) set_target_properties(${this} PROPERTIES Fortran_MODULE_DIRECTORY ${esma_include}/${this}) diff --git a/model/nh_core.F90 b/model/nh_core.F90 index a8fc838d2..9583ec7e6 100644 --- a/model/nh_core.F90 +++ b/model/nh_core.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -46,7 +46,12 @@ module nh_core_mod ! ! - use constants_mod, only: rdgas, cp_air, grav +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: rdgas, cp_air, grav use tp_core_mod, only: fv_tp_2d use nh_utils_mod, only: update_dz_c, update_dz_d, nest_halo_nh use nh_utils_mod, only: sim_solver, sim1_solver, sim3_solver @@ -59,7 +64,7 @@ module nh_core_mod public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nest_halo_nh real, parameter:: r3 = 1./3. -CONTAINS +CONTAINS subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & isd, ied, jsd, jed, akap, cappa, cp, & diff --git a/model/nh_utils.F90 b/model/nh_utils.F90 index ba60f9b97..ab25d20da 100644 --- a/model/nh_utils.F90 +++ b/model/nh_utils.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. 
!* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -50,7 +50,12 @@ module nh_utils_mod ! ! - use constants_mod, only: rdgas, cp_air, grav +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: rdgas, cp_air, grav use tp_core_mod, only: fv_tp_2d use sw_core_mod, only: fill_4corners, del6_vt_flux use fv_arrays_mod, only: fv_grid_bounds_type, fv_grid_type @@ -65,7 +70,7 @@ module nh_utils_mod real, parameter:: r3 = 1./3. -CONTAINS +CONTAINS subroutine update_dz_c(is, ie, js, je, km, ng, dt, dz_min, dp0, zs, area, ut, vt, gz, ws, & npx, npy, sw_corner, se_corner, ne_corner, nw_corner, bd, grid_type) @@ -246,7 +251,7 @@ subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, damp(km+1) = damp(km) ndif(km+1) = ndif(km) - + isd = is - ng; ied = ie + ng jsd = js - ng; jed = je + ng @@ -311,7 +316,7 @@ subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, do j=js, je do i=is,ie ws(i,j) = ( zs(i,j) - zh(i,j,km+1) ) * rdt - enddo + enddo do k=km, 1, -1 do i=is, ie ! Enforce monotonicity of height to prevent blowup @@ -334,7 +339,7 @@ subroutine Riem_Solver_c(ms, dt, is, ie, js, je, km, ng, & real, intent(in), dimension(is-ng:,js-ng:,1:):: q_con, cappa real, intent(in):: hs(is-ng:ie+ng,js-ng:je+ng) real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: w3 -! OUTPUT PARAMETERS +! OUTPUT PARAMETERS real, intent(inout), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: gz real, intent( out), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: pef ! Local: @@ -648,7 +653,7 @@ subroutine imp_diff_w(j, is, ie, js, je, ng, km, cd, delz, ws, w, w3) wt(i,km) = (w(i,km) + 2.*ws(i)*cd/delz(i,km)**2 & + a*wt(i,km-1))/(1. + a + (cd+cd)/delz(i,km)**2 + a*gam(i,km)) enddo - + do k=km-1,1,-1 do i=is,ie wt(i,k) = wt(i,k) - gam(i,k+1)*wt(i,k+1) @@ -687,7 +692,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & integer:: i, k, n, ke, kt1, ktop integer:: ks0, ks1 - grg = gama * rgas + grg = gama * rgas rdt = 1. / bdt dt = bdt / real(ms) @@ -724,7 +729,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & dts(k) = -dz(k)/sqrt(grg*pf1(k)/rden) #endif if ( bdt > dts(k) ) then - ks0 = k-1 + ks0 = k-1 goto 222 endif enddo @@ -833,7 +838,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & m_top(ke) = m_top(ke) + z_frac*dm(k) r_top(ke) = r_top(ke) + z_frac*r_hi(k) go to 444 ! 
next level
- endif 
+ endif
 enddo
444 continue

@@ -849,7 +854,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, &
 time_left = time_left - dts(k)
 m_bot(ke) = m_bot(ke) + dm(k)
 r_bot(ke) = r_bot(ke) + r_lo(k)
- else 
+ else
 z_frac = time_left/dts(k)
 m_bot(ke) = m_bot(ke) + z_frac* dm(k)
 r_bot(ke) = r_bot(ke) + z_frac*r_lo(k)
@@ -1502,7 +1507,7 @@ subroutine edge_scalar(q1, qe, i1, i2, km, id)
 real, intent(out), dimension(i1:i2,km+1):: qe
 !-----------------------------------------------------------------------
 real, parameter:: r2o3 = 2./3.
- real, parameter:: r4o3 = 4./3. 
+ real, parameter:: r4o3 = 4./3.
 real gak(km)
 real bet
 integer i, k
@@ -1612,7 +1617,7 @@ subroutine edge_profile(q1, q2, q1e, q2e, i1, i2, j1, j2, j, km, dp0, uniform_gr
 gam(i,k) = gk / bet
 enddo
 enddo
- 
+
 a_bot = 1. + gk*(gk+1.5)
 xt1 = 2.*gk*(gk+1.)
 do i=i1,i2
diff --git a/model/nwp_nudge.F90 b/model/nwp_nudge.F90
index 3df8eebb6..f78d2a31c 100755
--- a/model/nwp_nudge.F90
+++ b/model/nwp_nudge.F90
@@ -10,12 +10,23 @@ module nwp_nudge_mod
 use fv_mapz_mod, only: mappm
 use fv_mp_mod, only: is,js,ie,je, isd,jsd,ied,jed, gid, masterproc, domain, mp_reduce_sum
 use fv_timing_mod, only: timing_on, timing_off
- use constants_mod, only: pi, grav, rdgas, cp_air, kappa, radius
+#if defined (SINGLE_FV)
+ use constantsr4_mod, &
+#else
+ use constants_mod, &
+#endif
+ only: pi, grav, rdgas, cp_air, kappa, radius
 use time_manager_mod, only: time_type, get_time, get_date
- use mpp_mod, only: mpp_error, FATAL, stdlog
- use fms_mod, only: write_version_number, open_namelist_file, &
- check_nml_error, file_exist, close_file, &
- read_data, field_exist
+ use mpp_mod, only: mpp_error, FATAL, stdlog, input_nml_file
+ use fms_mod, only: write_version_number, &
+ check_nml_error
+#if defined (FMS1_IO)
+ use fms_mod, only: open_namelist_file, &
+ file_exists => file_exist, &
+ close_file, read_data, field_exist
+#else
+ use fms2_io_mod, only: file_exists, close_file, variable_exists, read_data
+#endif
 use fms_io_mod, only: field_size
 use mpp_domains_mod, only: mpp_update_domains

@@ -56,12 +67,12 @@ module nwp_nudge_mod
 character(len=128):: file_names(nfile_max)
 character(len=128):: track_file_name
 integer :: nfile_total = 0 ! =5 for 1-day (if datasets are 6-hr apart)
- real(FVPRC) :: p_wvp = 100.E2 ! cutoff level for specific humidity nudging 
+ real(FVPRC) :: p_wvp = 100.E2 ! cutoff level for specific humidity nudging
 integer :: kord_data = 8
 logical :: tc_mask = .false.
- logical :: strong_mask = .true. 
- logical :: ibtrack = .false. 
+ logical :: strong_mask = .true.
+ logical :: ibtrack = .false.
 logical :: nudge_debug = .false.
 logical :: nudge_t = .false.
 logical :: nudge_q = .false.
@@ -79,22 +90,22 @@ module nwp_nudge_mod
 real(FVPRC) :: tau_tpw = 86400. ! 1-day
 real(FVPRC) :: tau_winds = 21600. ! 6-hr
 real(FVPRC) :: tau_t = 86400.
- real(FVPRC) :: tau_virt = 86400. 
+ real(FVPRC) :: tau_virt = 86400.
 real(FVPRC) :: tau_hght = 86400.
 real(FVPRC) :: q_min = 1.E-8
 real(FVPRC) :: q_rat

- integer :: nf_uv = 0 
- integer :: nf_t = 2 
+ integer :: nf_uv = 0
+ integer :: nf_t = 2

! starting layer (top layer is sponge layer and is skipped)
- integer :: kstart = 2 
+ integer :: kstart = 2

!
skip "kbot" layers - integer :: kbot_winds = 0 - integer :: kbot_t = 0 - integer :: kbot_q = 1 + integer :: kbot_winds = 0 + integer :: kbot_t = 0 + integer :: kbot_q = 1 !-- Tropical cyclones -------------------------------------------------------------------- @@ -129,7 +140,7 @@ module nwp_nudge_mod r_min, r_inc, ibtrack, track_file_name, file_names contains - + subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & ak, bk, ts, ps, delp, ua, va, pt, nwat, q, phis ) @@ -164,7 +175,7 @@ subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & real(FVPRC) :: dbk, rdt, press(npz), profile(npz), prof_t(npz), prof_q(npz), du, dv - if ( .not. module_is_initialized ) then + if ( .not. module_is_initialized ) then call mpp_error(FATAL,'==> Error from do_nwp_nudge: module not initialized') endif @@ -184,7 +195,7 @@ subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & do k=1,npz press(k) = 0.5*(ak(k) + ak(k+1)) + 0.5*(bk(k)+bk(k+1))*1.E5 if ( press(k) < 30.E2 ) then - profile(k) = max(0.01, press(k)/30.E2) + profile(k) = max(0.01, press(k)/30.E2) endif enddo profile(1) = 0. @@ -193,16 +204,16 @@ subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & prof_t(:) = 1. do k=1,npz if ( press(k) < 30.E2 ) then - prof_t(k) = max(0.01, press(k)/30.E2) + prof_t(k) = max(0.01, press(k)/30.E2) endif enddo prof_t(1) = 0. - + ! Water vapor: prof_q(:) = 1. do k=1,npz if ( press(k) < 300.E2 ) then - prof_q(k) = max(0., press(k)/300.E2) + prof_q(k) = max(0., press(k)/300.E2) endif enddo prof_q(1) = 0. @@ -214,7 +225,7 @@ subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & ptmp = ak(k+1) + bk(k+1)*1.E5 if ( ptmp > p_trop ) then k_trop = k - exit + exit endif enddo endif @@ -297,7 +308,7 @@ subroutine do_nwp_nudge ( Time, dt, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, & rdt = dt / (tau_hght/factor + dt) do j=js,je - + do i=is,ie pe1(i) = ak(1) peln(i,1) = log(pe1(i)) @@ -582,17 +593,17 @@ subroutine get_obs(Time, dt, zvir, ak, bk, ps, ts, ps_obs, delp, u_obs, v_obs, t call get_int_hght(h2, npz, ak, bk, ps(is:ie,js:je), delp, ps_dat(is:ie,js:je,2), t_dat(:,:,:,2)) ! if(nudge_debug) call prt_maxmin('H_2', h2, is, ie, js, je, 0, 1, 1./grav, master) - gz_int(:,:) = alpha*h1(:,:) + beta*h2(:,:) + gz_int(:,:) = alpha*h1(:,:) + beta*h2(:,:) endif - deallocate ( ut ) - deallocate ( vt ) + deallocate ( ut ) + deallocate ( vt ) end subroutine get_obs subroutine nwp_nudge_init(npz, zvir, ak, bk, ts, phis) - integer, intent(in):: npz ! vertical dimension + integer, intent(in):: npz ! vertical dimension real(FVPRC), intent(in):: zvir real(FVPRC), intent(in), dimension(isd:ied,jsd:jed):: phis real(FVPRC), intent(in), dimension(npz+1):: ak, bk @@ -612,15 +623,8 @@ subroutine nwp_nudge_init(npz, zvir, ak, bk, ts, phis) track_file_name = "No_File_specified" - if( file_exist( 'input.nml' ) ) then - unit = open_namelist_file () - io = 1 - do while ( io .ne. 
0 )
- read( unit, nml = nwp_nudge_nml, iostat = io, end = 10 )
- ierr = check_nml_error(io,'nwp_nudge_nml')
- end do
-10 call close_file ( unit )
- end if
+ read (input_nml_file, nml=nwp_nudge_nml, iostat=io)
+ ierr = check_nml_error (io, 'nwp_nudge_nml')
 call write_version_number (version, tagname)
 if ( master ) then
 write( stdlog(), nml = nwp_nudge_nml )
 endif
@@ -672,7 +676,7 @@ subroutine nwp_nudge_init(npz, zvir, ak, bk, ts, phis)
 do j=1,jm
 lat(j) = lat(j) * deg2rad
 enddo
- 
+
 allocate ( ak0(km+1) )
 allocate ( bk0(km+1) )
@@ -682,7 +686,7 @@ subroutine nwp_nudge_init(npz, zvir, ak, bk, ts, phis)
 ! Note: definition of NCEP hybrid is p(k) = a(k)*1.E5 + b(k)*ps
 ak0(:) = ak0(:) * 1.E5
-! Limiter to prevent NAN at top during remapping 
+! Limiter to prevent NAN at top during remapping
 ak0(1) = max(1.e-8, ak0(1))
 if ( master ) then
@@ -717,7 +721,7 @@ subroutine nwp_nudge_init(npz, zvir, ak, bk, ts, phis)
 module_is_initialized = .true.
- 
+
 end subroutine nwp_nudge_init
@@ -738,10 +742,10 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname )
 logical:: read_ts = .true.
 logical:: land_ts = .false.
- if( .not. file_exist(fname) ) then
+ if( .not. file_exists(fname) ) then
 call mpp_error(FATAL,'==> Error from get_ncep_analysis: file not found')
 else
- if(master) write(*,*) 'Reading NCEP anlysis file:', fname
+ if(master) write(*,*) 'Reading NCEP analysis file:', fname
 endif
 !----------------------------------
@@ -793,7 +797,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname )
 endif
 enddo
 !-------------------------------------------------------
-! Replace TS over interior land with zonal mean SST/Ice 
+! Replace TS over interior land with zonal mean SST/Ice
 !-------------------------------------------------------
 if ( npt /= 0 ) then
 tmean= tmean / real(npt)
@@ -838,7 +842,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname )
 endif ! read_ts
- deallocate ( wk2 ) 
+ deallocate ( wk2 )
 ! Read in temperature:
 allocate ( wk3(im,jm,km) )
@@ -913,7 +917,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname )
 endif
- deallocate ( wk3 ) 
+ deallocate ( wk3 )
 nfile = nfile + 1
@@ -1018,8 +1022,8 @@ subroutine ncep2fms( sst )
 ! lon: 0.5, 1.5, ..., 359.5
 ! lat: -89.5, -88.5, ... , 88.5, 89.5
- delx = 360./real(i_sst) 
- dely = 180./real(j_sst) 
+ delx = 360./real(i_sst)
+ dely = 180./real(j_sst)
 jt = 1
 do 5000 j=1,j_sst
@@ -1100,7 +1104,7 @@ subroutine get_int_hght(h_int, npz, ak, bk, ps, delp, ps0, tv)
 do i=is,ie
 pn0(i,k) = log( ak0(k) + bk0(k)*ps0(i,j) )
 enddo
- enddo 
+ enddo
 !------
 ! Model
 !------
@@ -1163,7 +1167,7 @@ subroutine remap_tq( npz, ak, bk, ps, delp, t, q, &
 pe0(i,k) = ak0(k) + bk0(k)*ps0(i,j)
 pn0(i,k) = log(pe0(i,k))
 enddo
- enddo 
+ enddo
 !------
 ! Model
 !------
@@ -1317,10 +1321,10 @@ subroutine nwp_nudge_end
 deallocate ( ak0 )
 deallocate ( bk0 )
- deallocate ( lat ) 
- deallocate ( lon ) 
+ deallocate ( lat )
+ deallocate ( lon )
- deallocate ( gz0 ) 
+ deallocate ( gz0 )
 end subroutine nwp_nudge_end
@@ -1353,7 +1357,7 @@ subroutine get_tc_mask(time, mask)
 do j=js, je
 do i=is, ie
 dist = great_circle_dist(pos, agrid(i,j,1:2), radius)
- if( dist < 5.*r_vor ) then 
+ if( dist < 5.*r_vor ) then
 if ( strong_mask ) then
 mask(i,j) = mask(i,j) * ( 1. - exp(-(0.5*dist/r_vor)**2)*min(1.,(slp_env-slp_o)/5.E2) )
 else
@@ -1419,7 +1423,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u,
!
Advance (local) time call get_date(fv_time, year, month, day, hour, minute, second) if ( year /= year_track_data ) then - if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' + if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' return endif time = fv_time @@ -1527,9 +1531,9 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u, p_sum = 0. do j=js, je do i=is, ie - if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<200.*grav ) then + if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<200.*grav ) then p_count = p_count + 1. - p_sum = p_sum + slp(i,j) + p_sum = p_sum + slp(i,j) endif enddo enddo @@ -1591,7 +1595,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u, p_hi = p_env - (p_env-slp_o) * exp( -5.0*f1**2 ) ! upper bound p_lo = p_env - (p_env-slp_o) * exp( -2.0*f1**2 ) ! lower bound - if ( ps(i,j) > p_hi ) then + if ( ps(i,j) > p_hi ) then ! Under-development: delps = relx*(ps(i,j) - p_hi) ! Note: ps is used here to prevent ! over deepening over terrain @@ -1600,7 +1604,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u, delps = relx*(slp(i,j) - p_lo) ! Note: slp is used here else goto 400 ! do nothing; proceed to next storm - endif + endif mass_sink = mass_sink + delps*area(i,j) @@ -1633,7 +1637,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u, !========================================================================================== endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop @@ -1644,7 +1648,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, delp, u, do j=js, je do i=is, ie if( dist(i,j)<6.*r_vor .and. dist(i,j)>r_vor+del_r ) then - p_sum = p_sum + area(i,j) + p_sum = p_sum + area(i,j) endif enddo enddo @@ -1813,9 +1817,9 @@ subroutine breed_slp(time, dt, npz, ak, bk, ps, phis, delp, ua, va, u_dt, v_dt, p_sum = 0. do j=js, je do i=is, ie - if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<250.*grav ) then + if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<250.*grav ) then p_count = p_count + 1. - p_sum = p_sum + slp(i,j) + p_sum = p_sum + slp(i,j) endif enddo enddo @@ -1874,7 +1878,7 @@ subroutine breed_slp(time, dt, npz, ak, bk, ps, phis, delp, ua, va, u_dt, v_dt, relx = relx0 * exp( -4.*f1**2 ) ! Compute p_obs: assuming local radial distributions of slp are Gaussian - if ( ps(i,j) > p_hi ) then + if ( ps(i,j) > p_hi ) then ! under-development delps = relx*(ps(i,j) - p_hi) ! Note: ps is used here to prevent ! over deepening over terrain @@ -1888,7 +1892,7 @@ subroutine breed_slp(time, dt, npz, ak, bk, ps, phis, delp, ua, va, u_dt, v_dt, else ! Leave the model alone If the ps/slp is in between [p_lo,p_hi] goto 400 ! do nothing; proceed to next storm - endif + endif !=============================================================================================== mass_sink = mass_sink + delps*area(i,j) @@ -1910,7 +1914,7 @@ subroutine breed_slp(time, dt, npz, ak, bk, ps, phis, delp, ua, va, u_dt, v_dt, va(i,j,k) = va(i,j,k) * ratio enddo endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop @@ -1921,7 +1925,7 @@ subroutine breed_slp(time, dt, npz, ak, bk, ps, phis, delp, ua, va, u_dt, v_dt, do j=js, je do i=is, ie if( dist(i,j)<(6.*r_vor+del_r) .and. 
dist(i,j)>r_vor+del_r ) then - p_sum = p_sum + area(i,j) + p_sum = p_sum + area(i,j) endif enddo enddo @@ -1971,12 +1975,12 @@ subroutine get_slp_obs(time, nobs, lon_obs, lat_obs, mslp, slp_out, r_out, time_ real*4, intent(in):: lat_obs(nobs) real*4, intent(in):: mslp(nobs) ! observed SLP in pa real*4, intent(in):: slp_out(nobs) ! slp at r_out - real*4, intent(in):: r_out(nobs) ! + real*4, intent(in):: r_out(nobs) ! real*4, intent(in):: time_obs(nobs) real(FVPRC), optional, intent(in):: stime real(FVPRC), optional, intent(out):: fact ! Output - real(FVPRC), intent(out):: x_o , y_o ! position of the storm center + real(FVPRC), intent(out):: x_o , y_o ! position of the storm center real(FVPRC), intent(out):: slp_o ! Observed sea-level-pressure (pa) real(FVPRC), intent(out):: r_vor, p_vor ! Internal: @@ -1997,7 +2001,7 @@ subroutine get_slp_obs(time, nobs, lon_obs, lat_obs, mslp, slp_out, r_out, time_ call get_date(time, year, month, day, hour, minute, second) if ( year /= year_track_data ) then - if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' + if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' return endif @@ -2018,7 +2022,7 @@ subroutine get_slp_obs(time, nobs, lon_obs, lat_obs, mslp, slp_out, r_out, time_ ! Linear in (lon,lat) space x_o = lon_obs(n) + (lon_obs(n+1)-lon_obs(n)) * fac y_o = lat_obs(n) + (lat_obs(n+1)-lat_obs(n)) * fac -#else +#else p1(1) = lon_obs(n); p1(2) = lat_obs(n) p2(1) = lon_obs(n+1); p2(2) = lat_obs(n+1) call intp_great_circle(fac, p1, p2, x_o, y_o) @@ -2086,7 +2090,7 @@ subroutine slp_obs_init call mpp_error(FATAL,'==> Error in reading best track data') endif - do while ( ts_name=='start' ) + do while ( ts_name=='start' ) nstorms = nstorms + 1 nobs_tc(nstorms) = nobs ! observation count for this storm @@ -2135,7 +2139,7 @@ subroutine slp_obs_init y_obs(nobs,nstorms) = lat_deg * deg2rad if ( GMT == 'GMT' ) then ! Transfrom x from (-180 , 180) to (0, 360) then to radian - if ( lon_deg < 0 ) then + if ( lon_deg < 0 ) then x_obs(nobs,nstorms) = (360.+lon_deg) * deg2rad else x_obs(nobs,nstorms) = (360.-lon_deg) * deg2rad @@ -2151,7 +2155,7 @@ subroutine slp_obs_init close(unit) - if(master) then + if(master) then write(*,*) 'TC vortex breeding: total storms=', nstorms if ( nstorms/=0 ) then do n=1,nstorms @@ -2180,7 +2184,7 @@ real(FVPRC) function calday(year, month, day, hour, minute, sec) if( month /= 1 ) then do m=1, month-1 - if( m==2 .and. leap_year(year) ) then + if( m==2 .and. leap_year(year) ) then ds = ds + 29 else ds = ds + days(m) @@ -2208,7 +2212,7 @@ logical function leap_year(ny) ! ! No leap years prior to 0000 ! - parameter ( ny00 = 0000 ) ! The threshold for starting leap-year + parameter ( ny00 = 0000 ) ! The threshold for starting leap-year if( ny >= ny00 ) then if( mod(ny,100) == 0. .and. mod(ny,400) == 0. 
) then diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index 6c3966fac..c4faf091d 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -5,18 +5,30 @@ module external_ic_mod #endif #ifndef DYCORE_SOLO +#if defined (FMS1_IO) use amip_interp_mod, only: i_sst, j_sst, sst_ncep +#else + use external_sst_mod, only: i_sst, j_sst, sst_ncep +#endif #endif use fv_arrays_mod, only: REAL4, REAL8, FVPRC, R_GRID - use fms_mod, only: file_exist, read_data, field_exist +#if defined (FMS1_IO) + use fms_mod, only: file_exists => file_exist, read_data, variable_exists => field_exist use fms_io_mod, only: get_tile_string, field_size +#else + use fms2_io_mod, only: file_exists, variable_exists, read_data +#endif use mpp_mod, only: mpp_error, FATAL, NOTE, mpp_broadcast,mpp_npes use mpp_parameter_mod, only: AGRID_PARAM=>AGRID use mpp_domains_mod, only: mpp_get_tile_id, domain2d, mpp_update_domains, mpp_get_boundary, DGRID_NE use tracer_manager_mod, only: get_tracer_names, get_number_tracers, get_tracer_index use field_manager_mod, only: MODEL_ATMOS - - use constants_mod, only: pi=>pi_8, omega, grav, kappa, rdgas, rvgas, cp_air +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: pi=>pi_8, omega, grav, kappa, rdgas, rvgas, cp_air #ifdef MAPL_MODE use MAPL #endif @@ -115,7 +127,7 @@ subroutine get_external_ic( Atm, fv_domain, use_geos_latlon_restart, use_geos_cu call timing_on('NCEP_IC') call get_ncep_ic( Atm, fv_domain, nq ) call timing_off('NCEP_IC') -#ifndef MAPL_MODE +#ifndef MAPL_MODE #ifndef NO_FV_TRACERS call fv_io_read_tracers( fv_domain, Atm ) if(is_master()) write(6,*) 'All tracers except sphum replaced by FV IC' @@ -169,9 +181,11 @@ subroutine get_cubed_sphere_terrain( Atm, fv_domain ) do n=1,ntileMe +#if defined (FMS1_IO) call get_tile_string(fname, 'INPUT/fv_core.res.tile', tile_id(n), '.nc' ) +#endif - if( file_exist(fname) ) then + if( file_exists(fname) ) then call read_data(fname, 'phis', Atm(n)%phis(is:ie,js:je), & domain=fv_domain, tile_count=n) else @@ -227,7 +241,7 @@ subroutine get_diag_ic( Atm, fv_domain, nq ) fname = Atm(1)%res_latlon_dynamics - if( file_exist(fname) ) then + if( file_exists(fname) ) then call open_ncfile( fname, ncid ) ! open the file call get_ncdim1( ncid, 'lon', tsize(1) ) call get_ncdim1( ncid, 'lat', tsize(2) ) @@ -272,7 +286,7 @@ subroutine get_diag_ic( Atm, fv_domain, nq ) do j=js,je do i=is,ie j1 = jdc(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -323,7 +337,7 @@ subroutine get_diag_ic( Atm, fv_domain, nq ) qp = 0. do tr_ind=1, nq call get_tracer_names(MODEL_ATMOS, tr_ind, tracer_name) - if (field_exist(fname,tracer_name)) then + if (variable_exists(fname,tracer_name)) then call get_var3_r4( ncid, tracer_name, 1,im, jbeg,jend, 1,km, wk3 ) do k=1,km do j=js,je @@ -376,7 +390,7 @@ subroutine get_diag_ic( Atm, fv_domain, nq ) deallocate ( va ) if ( .not. Atm(1)%hydrostatic ) then - if (field_exist(fname,'w')) then + if (variable_exists(fname,'w')) then allocate ( wa(is:ie,js:je,km) ) call get_var3_r4( ncid, 'w', 1,im, jbeg,jend, 1,km, wk3 ) do k=1,km @@ -396,7 +410,7 @@ subroutine get_diag_ic( Atm, fv_domain, nq ) Atm(1)%w(:,:,:) = 0. endif ! delz: - if (field_exist(fname,'delz')) then + if (variable_exists(fname,'delz')) then allocate ( wa(is:ie,js:je,km) ) call get_var3_r4( ncid, 'delz', 1,im, jbeg,jend, 1,km, wk3 ) do k=1,km @@ -517,7 +531,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) ! 
Read input FV core restart file fname = "fvcore_internal_restart_in" - if( file_exist(fname) ) then + if( file_exists(fname) ) then call MAPL_NCIOGetFileType(fname,filetype) if (filetype >=0 ) then @@ -534,7 +548,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) im =cfg(1)%get_dimension('lon',rc=status) jm =cfg(1)%get_dimension('lat',rc=status) km =cfg(1)%get_dimension('lev',rc=status) - + allocate(gslice_r8(im,jm)) else @@ -553,7 +567,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) if(is_master()) write(*,*) 'Using GEOS restart:', fname - if ( file_exist(fname) ) then + if ( file_exists(fname) ) then if(is_master()) write(*,*) 'External IC dimensions:', im , jm , km if(is_master()) write(*,*) 'Interpolating to :', npx-1, (npy-1)*6, npz else @@ -621,7 +635,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) ! Read U allocate ( u0(isd_i:ied_i,jsd_i:jed_i+1,km) ) - u0(:,:,:) = 0.0 + u0(:,:,:) = 0.0 if (isNC4) then tileoff = (tile-1)*(jm/ntiles) do k=1,km @@ -658,7 +672,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) sbufferx=sbuffer, nbufferx=nbuffer, & gridtype=DGRID_NE ) do k=1,km - do i=is_i,ie_i + do i=is_i,ie_i u0(i,je_i+1,k) = nbuffer(i,k) enddo do j=js_i,je_i @@ -747,7 +761,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) jmc = adjustl(jmc) write(fname1, "('topo_DYN_ave_',a,'x',a,'.data')") trim(imc), trim(jmc) - if (.not. file_exist(fname1)) then + if (.not. file_exists(fname1)) then call mpp_error(FATAL,'get_geos_cubed_ic: cannot find topo_DYN_ave file') endif call print_memuse_stats('get_geos_cubed_ic: '//TRIM(fname1)//' being read') @@ -763,13 +777,13 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) Atm(1)%phis = Atm(1)%phis*grav call print_memuse_stats('get_geos_cubed_ic: phis') -! Horiz Interp for surface pressure +! Horiz Interp for surface pressure call prt_maxmin('PS_geos', ps0, is_i, ie_i, js_i, je_i, ng_i, 1, 1.0_FVPRC) do j=js,je do i=is,ie ic=index_c2c(1,i,j,tile) jc=index_c2c(2,i,j,tile) - psc(i,j)=weight_c2c(1,i,j,tile)*ps0(ic ,jc ) & + psc(i,j)=weight_c2c(1,i,j,tile)*ps0(ic ,jc ) & +weight_c2c(2,i,j,tile)*ps0(ic ,jc+1) & +weight_c2c(3,i,j,tile)*ps0(ic+1,jc+1) & +weight_c2c(4,i,j,tile)*ps0(ic+1,jc ) @@ -782,7 +796,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) do i=is,ie ic=index_c2c(1,i,j,tile) jc=index_c2c(2,i,j,tile) - gzc(i,j)=weight_c2c(1,i,j,tile)*gz0(ic ,jc ) & + gzc(i,j)=weight_c2c(1,i,j,tile)*gz0(ic ,jc ) & +weight_c2c(2,i,j,tile)*gz0(ic ,jc+1) & +weight_c2c(3,i,j,tile)*gz0(ic+1,jc+1) & +weight_c2c(4,i,j,tile)*gz0(ic+1,jc ) @@ -799,7 +813,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) ! Horiz Interp for moist tracers ! is there a moist restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("moist_internal_restart_in") .and. ntracers(1) > 0 ) then + if( file_exists("moist_internal_restart_in") .and. 
ntracers(1) > 0 ) then if (is_master()) print*, 'Trying to interpolate moist_internal_restart_in' call MAPL_NCIOGetFileType("moist_internal_restart_in",filetype) @@ -813,7 +827,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) call formatter%open("moist_internal_restart_in",pFIO_READ,rc=status) cfg(1) = formatter%read(rc=status) call MAPL_IOCountNonDimVars(cfg(1),nvars,rc=status) - if (nVars /= iq_moist1-iq_moist0+1) call mpp_error(FATAL,'Wrong number of variables in moist file') + if (nVars /= iq_moist1-iq_moist0+1) call mpp_error(FATAL,'Wrong number of variables in moist file') tileoff = (tile-1)*(jm/ntiles) end if @@ -856,7 +870,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) ! Horiz Interp for GOCART tracers ! is there a gocart restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("gocart_internal_restart_in") .and. ntracers(2) > 0 ) then + if( file_exists("gocart_internal_restart_in") .and. ntracers(2) > 0 ) then if (is_master()) print*, 'Trying to interpolate gocart_internal_restart_in' call MAPL_NCIOGetFileType("gocart_internal_restart_in",filetype) @@ -864,20 +878,20 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) if (filetype /= 0) then offset=4 else - lvar_cnt = 0 + lvar_cnt = 0 allocate(gslice_r4(im,jm)) allocate(cfg(1)) call formatter%open("gocart_internal_restart_in",pFIO_READ,rc=status) cfg(1) = formatter%read(rc=status) call MAPL_IOCountNonDimVars(cfg(1),nvars,rc=status) - if (nVars /= iq_gocart1-iq_gocart0+1) call mpp_error(FATAL,'Wrong number of variables in gocart file') + if (nVars /= iq_gocart1-iq_gocart0+1) call mpp_error(FATAL,'Wrong number of variables in gocart file') tileoff = (tile-1)*(jm/ntiles) allocate(vnames(nVars)) vars => cfg(1)%get_variables() iter = vars%begin() - - lvar_cnt=0 + + lvar_cnt=0 do while(iter /= vars%end()) var_name => iter%key() if (.not.cfg(1)%is_coordinate_variable(var_name)) then @@ -926,7 +940,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) ! Horiz Interp for pchem tracers ! is there a gocart restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("pchem_internal_restart_in") .and. ntracers(3) > 0 ) then + if( file_exists("pchem_internal_restart_in") .and. ntracers(3) > 0 ) then if (is_master()) print*, 'Trying to interpolate pchem_internal_restart_in' call MAPL_NCIOGetFileType("pchem_internal_restart_in",filetype) @@ -934,20 +948,20 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) if (filetype /= 0) then offset=4 else - lvar_cnt = 0 + lvar_cnt = 0 allocate(gslice_r4(im,jm)) allocate(cfg(1)) call formatter%open("pchem_internal_restart_in",pFIO_READ,rc=status) cfg(1) = formatter%read(rc=status) call MAPL_IOCountNonDimVars(cfg(1),nvars,rc=status) - if (nVars /= iq_pchem1-iq_pchem0+1) call mpp_error(FATAL,'Wrong number of variables in pchem file') + if (nVars /= iq_pchem1-iq_pchem0+1) call mpp_error(FATAL,'Wrong number of variables in pchem file') tileoff = (tile-1)*(jm/ntiles) allocate(vnames(nVars)) vars => cfg(1)%get_variables() iter = vars%begin() - - lvar_cnt=0 + + lvar_cnt=0 do while(iter /= vars%end()) var_name => iter%key() if (.not.cfg(1)%is_coordinate_variable(var_name)) then @@ -993,7 +1007,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) end if end if - + ! Horiz Interp for T deallocate ( q0 ) call mpp_update_domains(t0, domain_i) @@ -1016,7 +1030,7 @@ subroutine get_geos_cubed_ic( Atm, fv_domain, nq, ntracers ) deallocate( weight_c2c ) ! 
Horz/Vert remap for scalars - nqmap = nmoist + ngocart + npchem + nqmap = nmoist + ngocart + npchem call remap_scalar(im, jm, km, npz, nqmap, nqmap, ak0, bk0, psc, gzc, tp, qp, Atm(1)) @@ -1082,7 +1096,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) real(FVPRC):: s2c(is:ie,js:je,4) integer, dimension(is:ie,js:je):: id1, id2, jdc real(FVPRC) psc(is:ie,js:je) - real(FVPRC) gzc(is:ie,js:je) + real(FVPRC) gzc(is:ie,js:je) real(FVPRC), allocatable:: tp(:,:,:), qp(:,:,:,:) real(FVPRC), allocatable:: ua(:,:,:), va(:,:,:) @@ -1131,7 +1145,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) ! Read in lat-lon FV core restart file fname = "fvcore_internal_restart_in" - if( file_exist(fname) ) then + if( file_exists(fname) ) then call MAPL_NCIOGetFileType(fname,filetype) @@ -1173,7 +1187,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) enddo allocate ( lat(jm) ) do j=1,jm - lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP + lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP enddo call remap_coef( im, jm, lon, lat, id1, id2, jdc, s2c , Atm(1)%gridstruct%agrid, Atm(1)%bd) @@ -1273,7 +1287,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) enddo call print_memuse_stats('get_geos_latlon_ic: read t') ! Read PE - do k=1,km+1 + do k=1,km+1 if (isNC4) then call MAPL_VarRead(formatter,"PE",r8latlon,lev=k) else @@ -1313,8 +1327,8 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) jmc = adjustl(jmc) write(fname1, "('topo_DYN_ave_',a,'x',a,'_DC.data')") trim(imc), trim(jmc) - if (.not. file_exist(fname1)) then - CALL mpp_error(FATAL,'get_geos_latlon_ic: cannot find topo_DYN_ave file') + if (.not. file_exists(fname1)) then + CALL mpp_error(FATAL,'get_geos_latlon_ic: cannot find topo_DYN_ave file') endif call print_memuse_stats('get_geos_latlon_ic: '//TRIM(fname1)//' being read') allocate ( r4latlon(im,jm) ) @@ -1335,7 +1349,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) jmc = adjustl(jmc) write(fname1, "('topo_DYN_ave_',a,'x',a,'.data')") trim(imc), trim(jmc) - if (.not. file_exist(fname1)) then + if (.not. file_exists(fname1)) then call mpp_error(FATAL,'get_geos_latlon_ic: cannot find topo_DYN_ave file') endif allocate( phis_r4(Atm(1)%npx-1,6*(Atm(1)%npy-1)) ) @@ -1347,7 +1361,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) deallocate( phis_r4 ) call print_memuse_stats('get_geos_latlon_ic: phis') -! Horiz Interp for surface pressure +! Horiz Interp for surface pressure if(is_master()) call pmaxmin( 'PS_geos', ps0, im, jm, 0.01_FVPRC) do j=js,je do i=is,ie @@ -1381,7 +1395,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) ! Horiz Interp for moist tracers ! is there a moist restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("moist_internal_restart_in") .and. ntracers(1) > 0 ) then + if( file_exists("moist_internal_restart_in") .and. ntracers(1) > 0 ) then if (is_master()) print*, 'Trying to interpolate moist_internal_restart_in' allocate ( r4latlon(im,jm) ) @@ -1434,7 +1448,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) ! Horiz Interp for GOCART tracers ! is there a gocart restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("gocart_internal_restart_in") .and. ntracers(2) > 0 ) then + if( file_exists("gocart_internal_restart_in") .and. 
ntracers(2) > 0 ) then if (is_master()) print*, 'Trying to interpolate gocart_internal_restart_in' allocate ( r4latlon(im,jm) ) call MAPL_NCIOGetFileType("gocart_internal_restart_in",filetype) @@ -1501,7 +1515,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) ! Horiz Interp for pchem tracers ! is there a pchem restart file to interpolate? ! Read in tracers: only sphum at this point - if( file_exist("pchem_internal_restart_in") .and. ntracers(3) > 0 ) then + if( file_exists("pchem_internal_restart_in") .and. ntracers(3) > 0 ) then if (is_master()) print*, 'Trying to interpolate pchem_internal_restart_in' allocate ( r4latlon(im,jm) ) call MAPL_NCIOGetFileType("pchem_internal_restart_in",filetype) @@ -1568,7 +1582,7 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) deallocate ( q0 ) ! Horiz Interp for T - if(is_master()) call pmaxmin( 'T_geos', t0, im*jm, km, 1.0_FVPRC) + if(is_master()) call pmaxmin( 'T_geos', t0, im*jm, km, 1.0_FVPRC) allocate ( tp(is:ie,js:je,km) ) do k=1,km do j=js,je @@ -1586,12 +1600,12 @@ subroutine get_geos_latlon_ic( Atm, fv_domain, nq, ntracers) ! Horz/Vert remap for MOIST, GOCART, and PCHEM scalars (Assuming Total Number is divisible by KM) ! ----------------------------------------------------------------------------------------------- - nqmap = nmoist + ngocart + npchem + nqmap = nmoist + ngocart + npchem call remap_scalar(im, jm, km, npz, nqmap, nqmap, ak0, bk0, psc, gzc, tp, qp, Atm(1)) deallocate ( tp ) - deallocate ( qp ) + deallocate ( qp ) call print_memuse_stats('get_geos_latlon_ic: remap_scalar') ! Horz/Vert remap for U/V @@ -1679,7 +1693,7 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) fname = Atm(1)%res_latlon_dynamics - if( file_exist(fname) ) then + if( file_exists(fname) ) then call open_ncfile( fname, ncid ) ! open the file call get_ncdim1( ncid, 'lon', tsize(1) ) call get_ncdim1( ncid, 'lat', tsize(2) ) @@ -1727,7 +1741,7 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) do j=js,je do i=is,ie j1 = jdc(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -1938,9 +1952,11 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) ! Read in lat-lon FV core restart file fname = Atm(1)%res_latlon_dynamics - if( file_exist(fname) ) then + if( file_exists(fname) ) then +#if defined (FMS1_IO) call field_size(fname, 'T', tsize, field_found=found) - if(is_master()) write(*,*) 'Using lat-lon FV restart:', fname +#endif + if(is_master()) write(*,*) 'Using lat-lon FV restart:', fname if ( found ) then im = tsize(1); jm = tsize(2); km = tsize(3) @@ -1958,7 +1974,7 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) enddo do j=1,jm - lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP + lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP enddo allocate ( ak0(1:km+1) ) @@ -1993,15 +2009,15 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) ! Read in tracers: only AM2 "physics tracers" at this point fname = Atm(1)%res_latlon_tracers - if( file_exist(fname) ) then - if(is_master()) write(*,*) 'Using lat-lon tracer restart:', fname + if( file_exists(fname) ) then + if(is_master()) write(*,*) 'Using lat-lon tracer restart:', fname allocate ( q0(im,jm,km,Atm(1)%ncnst) ) q0 = 0. 
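
The tracer loop just below is typical of how external_ic.F90 now queries files: one spelling, file_exists/variable_exists, in both I/O modes. A sketch of the shim this relies on (module name is illustrative, not from this PR; only the existence checks are shown because read_data and friends have different signatures in fms2_io_mod, which generally takes a file object rather than a path):

   module ic_io_names
      ! Under FMS1_IO the legacy fms_mod names are renamed on use;
      ! under FMS2 the same spellings come from fms2_io_mod directly.
#if defined (FMS1_IO)
      use fms_mod,     only: file_exists => file_exist, &
                             variable_exists => field_exist
#else
      use fms2_io_mod, only: file_exists, variable_exists
#endif
      implicit none
      public :: file_exists, variable_exists
   end module ic_io_names

Renaming on use keeps the preprocessor confined to the import block instead of scattering #ifdefs through every call site.
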
do tr_ind = 1, nq call get_tracer_names(MODEL_ATMOS, tr_ind, tracer_name) - if (field_exist(fname,tracer_name)) then + if (variable_exists(fname,tracer_name)) then call read_data(fname, tracer_name, q0(1:im,1:jm,1:km,tr_ind)) call mpp_error(NOTE,'==> Have read tracer '//trim(tracer_name)//' from '//trim(fname)) cycle @@ -2017,8 +2033,8 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) call d2a3d(u0, v0, ua, va, im, jm, km, lon) - deallocate ( u0 ) - deallocate ( v0 ) + deallocate ( u0 ) + deallocate ( v0 ) if(is_master()) call pmaxmin( 'UA', ua, im*jm, km, 1._FVPRC) if(is_master()) call pmaxmin( 'VA', va, im*jm, km, 1._FVPRC) @@ -2045,17 +2061,17 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) call remap_xyz( im, 1, jm, jm, km, npz, nq, Atm(1)%ncnst, lon, lat, ak0, bk0, & ps0, gz0, ua, va, t0, q0, Atm ) - deallocate ( ak0 ) - deallocate ( bk0 ) - deallocate ( ps0 ) - deallocate ( gz0 ) - deallocate ( t0 ) - deallocate ( q0 ) - deallocate ( dp0 ) - deallocate ( ua ) - deallocate ( va ) - deallocate ( lat ) - deallocate ( lon ) + deallocate ( ak0 ) + deallocate ( bk0 ) + deallocate ( ps0 ) + deallocate ( gz0 ) + deallocate ( t0 ) + deallocate ( q0 ) + deallocate ( dp0 ) + deallocate ( ua ) + deallocate ( va ) + deallocate ( lat ) + deallocate ( lon ) #endif end subroutine get_fv_ic @@ -2207,7 +2223,7 @@ subroutine remap_coef( im, jm, lon, lat, id1, id2, jdc, s2c, agrid, bd ) endif enddo endif -111 continue +111 continue if ( agrid(i,j,2) file_exist +#else +use fms2_io_mod, only: file_exists, close_file +#endif use mpp_mod, only: input_nml_file use diag_manager_mod, only: register_diag_field, send_data, & register_static_field @@ -113,20 +119,8 @@ subroutine fv_climate_nudge_init ( Time, axes, flag ) if (module_is_initialized) return ! read namelist -#ifdef INTERNAL_FILE_NML read (input_nml_file, nml=fv_climate_nudge_nml, iostat=io) ierr = check_nml_error (io, 'fv_climate_nudge_nml') -#else - if (file_exist('input.nml') ) then - unit = open_namelist_file() - ierr=1 - do while (ierr /= 0) - read (unit, nml=fv_climate_nudge_nml, iostat=io, end=10) - ierr = check_nml_error (io, 'fv_climate_nudge_nml') - enddo -10 call close_file (unit) - endif -#endif !----- write version and namelist to log file ----- @@ -325,7 +319,7 @@ subroutine fv_climate_nudge (Time, dt, is, ie, js, je, npz, pfull, & ! vertically dependent factor call get_factor (npz,pfull, factor) - ! first time allocate state + ! first time allocate state if (do_state_alloc) then call var_state_init ( is, ie, js, je, npz, State(1) ) call var_state_init ( is, ie, js, je, npz, State(2) ) @@ -618,7 +612,7 @@ subroutine get_factor (nlev,pfull,factor) factor(k,2) = 0. enddo endif - + ! Specific humidity if (skip_top_q > 0) then do k = 1, skip_top_q @@ -808,7 +802,7 @@ subroutine remap_coef( isd, ied, jsd, jed, lon_in, lat_in, & ! integer, intent(out), dimension(is:ie,js:je ):: id1, id2, jdc real(FVPRC), intent(out), dimension(is:ie,js:je,4):: s2c - + !=============================================================================================== ! 
local: @@ -817,7 +811,7 @@ subroutine remap_coef( isd, ied, jsd, jed, lon_in, lat_in, & real(FVPRC):: a1, b1 integer i, j, i1, i2, jc, i0, j0 - !pk0(1) = ak_in(1)**KAPPA + !pk0(1) = ak_in(1)**KAPPA !pn_top = log(ak_in(1)) do i=isd,ied-1 @@ -991,7 +985,7 @@ subroutine remap_ps( is, ie, js, je, km, & gz(km+1) = gz_dat(i,j) pk0(km+1) = ph_dat(i,j,km+1)**KAPPA do k=km,1,-1 - gz(k) = gz(k+1) + RDGAS*tp_dat(i,j,k)*(pn_dat(i,j,k+1)-pn_dat(i,j,k)) + gz(k) = gz(k+1) + RDGAS*tp_dat(i,j,k)*(pn_dat(i,j,k+1)-pn_dat(i,j,k)) pk0(k) = ph_dat(i,j,k)**KAPPA enddo if ( phis(i,j) .gt. gz_dat(i,j) ) then diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index d46eb935a..a258d0373 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** @@ -55,7 +55,7 @@ module fv_diagnostics_mod ! ! ! fv_arrays_mod -! fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, +! fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, ! R_GRID, max_step ! ! @@ -112,15 +112,22 @@ module fv_diagnostics_mod ! ! - use constants_mod, only: grav, rdgas, rvgas, pi=>pi_8, radius, kappa, WTMAIR, WTMCO2, & +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, rdgas, rvgas, pi=>pi_8, radius, kappa, WTMAIR, WTMCO2, & omega, hlv, cp_air, cp_vapor use fms_mod, only: write_version_number +#if defined (FMS1_IO) use fms_io_mod, only: set_domain, nullify_domain +#endif use time_manager_mod, only: time_type, get_date, get_time use mpp_domains_mod, only: domain2d, mpp_update_domains, DGRID_NE use diag_manager_mod, only: diag_axis_init, register_diag_field, & register_static_field, send_data, diag_grid_init - use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, & + use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, & R_GRID !!! CLEANUP needs removal?
use fv_mapz_mod, only: E_Flux, moist_cv @@ -136,7 +143,7 @@ module fv_diagnostics_mod use mpp_mod, only: mpp_error, FATAL, stdlog, mpp_pe, mpp_root_pe, mpp_sum, mpp_max, NOTE use sat_vapor_pres_mod, only: compute_qs, lookup_es - use fv_arrays_mod, only: max_step + use fv_arrays_mod, only: max_step use ieee_arithmetic @@ -232,7 +239,9 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) ncnst = Atm(1)%ncnst m_calendar = Atm(1)%flagstruct%moist_phys +#if defined (FMS1_IO) call set_domain(Atm(1)%domain) ! Set domain so that diag_manager can access tile information +#endif sphum = get_tracer_index (MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') @@ -270,7 +279,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) if ( pfull(k) > 30.e2 ) then mp_top = k exit - endif + endif enddo if ( is_master() ) write(*,*) 'mp_top=', mp_top, 'pfull=', pfull(mp_top) @@ -508,8 +517,8 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) allocate(idiag%id_tracer_dvmr(ncnst)) allocate(idiag%w_mr(ncnst)) idiag%id_tracer(:) = 0 - idiag%id_tracer_dmmr(:) = 0 - idiag%id_tracer_dvmr(:) = 0 + idiag%id_tracer_dmmr(:) = 0 + idiag%id_tracer_dvmr(:) = 0 idiag%w_mr(:) = 0.E0 allocate(idiag%id_u(nplev)) @@ -621,7 +630,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) !------------------------------------------ idiag%id_pmaskv2 = register_diag_field(TRIM(field), 'pmaskv2', axes(1:2), Time,& & 'masking pressure at lowest level', 'mb', missing_value=missing_value) - + !------------------- ! Hurricane scales: !------------------- @@ -674,7 +683,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) if ( .not. Atm(n)%flagstruct%hydrostatic ) & idiag%id_delz = register_diag_field ( trim(field), 'delz', axes(1:3), Time, & 'height thickness', 'm', missing_value=missing_value ) - if( Atm(n)%flagstruct%hydrostatic ) then + if( Atm(n)%flagstruct%hydrostatic ) then idiag%id_pfhy = register_diag_field ( trim(field), 'pfhy', axes(1:3), Time, & 'hydrostatic pressure', 'pa', missing_value=missing_value ) else @@ -846,7 +855,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) idiag%id_srh25 = register_diag_field ( trim(field), 'srh25', axes(1:2), Time, & '2-5 km Storm Relative Helicity', 'm/s**2', missing_value=missing_value ) - + if( .not. Atm(n)%flagstruct%hydrostatic ) then idiag%id_uh03 = register_diag_field ( trim(field), 'uh03', axes(1:2), Time, & '0-3 km Updraft Helicity', 'm/s**2', missing_value=missing_value ) @@ -1000,7 +1009,9 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) Atm(n)%ps, Atm(n)%delp, Atm(n)%q, Atm(n)%gridstruct%area_64, Atm(n)%domain) #endif +#if defined (FMS1_IO) call nullify_domain() ! Nullify set_domain info +#endif module_is_initialized=.true. istep = 0 @@ -1104,7 +1115,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) character(len=128) :: tname real, parameter:: ws_0 = 16. ! minimum max_wind_speed within the 7x7 search box real, parameter:: ws_1 = 20. 
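A pattern worth noting in the hunks above: symbols that exist only in the deprecated FMS1 I/O layer (fms_io_mod's set_domain and nullify_domain) are fenced with FMS1_IO at every call site, while the constants module is swapped wholesale under SINGLE_FV. A minimal sketch of the guard idiom, compiled with the Fortran preprocessor (-cpp/-fpp); the program body is illustrative, not model code:

  program fms_guard_demo
    implicit none
#if defined (SINGLE_FV)
    print *, 'r4 build: would use constantsr4_mod'
#else
    print *, 'default build: would use constants_mod'
#endif
#if defined (FMS1_IO)
    print *, 'FMS1 I/O: fms_io_mod set_domain/nullify_domain calls compiled in'
#else
    print *, 'FMS2 I/O: domain bookkeeping calls compiled out'
#endif
  end program fms_guard_demo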
- real, parameter:: vort_c0= 2.2e-5 + real, parameter:: vort_c0= 2.2e-5 logical, allocatable :: storm(:,:), cat_crt(:,:) real :: tmp2, pvsum, e2, einf, qm, mm, maxdbz, allmax, rgrav integer :: Cl, Cl2 @@ -1149,7 +1160,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif fv_time = Time +#if defined (FMS1_IO) call set_domain(Atm(1)%domain) +#endif if ( m_calendar ) then call get_date(fv_time, yr, mon, dd, hr, mn, seconds) @@ -1330,7 +1343,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) do i=isc,iec if ( storm(i,j) ) & storm(i,j) = (Atm(n)%gridstruct%agrid(i,j,2)>0. .and. wk(i,j,npz)> vort_c0) .or. & - (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. wk(i,j,npz)<-vort_c0) + (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. wk(i,j,npz)<-vort_c0) enddo enddo endif @@ -1350,14 +1363,14 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) call interpolate_vertical(isc, iec, jsc, jec, npz, & 850.e2, Atm(n)%peln, wk, a2) used=send_data(idiag%id_vort850, a2, Time) - if ( idiag%id_x850>0 ) x850(:,:) = a2(:,:) + if ( idiag%id_x850>0 ) x850(:,:) = a2(:,:) if(idiag%id_c15>0) then do j=jsc,jec do i=isc,iec if ( storm(i,j) ) & storm(i,j) = (Atm(n)%gridstruct%agrid(i,j,2)>0. .and. a2(i,j)> vort_c0) .or. & - (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. a2(i,j)<-vort_c0) + (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. a2(i,j)<-vort_c0) enddo enddo endif @@ -1483,7 +1496,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif - + !!$ if ( idiag%id_srh > 0 ) then !!$ call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & !!$ Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & @@ -1605,7 +1618,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) do k=1,npz do j=jsc,jec do i=isc,iec - tmp = ( log(max(wk(i,j,k)*1.e-2,1.e-2)) + 17.27 * ( Atm(n)%pt(i,j,k) - 273.14 )/ ( -35.84 + Atm(n)%pt(i,j,k)) ) / 17.27 + tmp = ( log(max(wk(i,j,k)*1.e-2,1.e-2)) + 17.27 * ( Atm(n)%pt(i,j,k) - 273.14 )/ ( -35.84 + Atm(n)%pt(i,j,k)) ) / 17.27 a3(i,j,k) = 273.14 + 237.3*tmp/ ( 1. - tmp ) enddo enddo @@ -1776,7 +1789,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif call get_height_given_pressure(isc, iec, jsc, jec, npz, wz, nplev, idg, plevs, Atm(n)%peln, a3) - ! reset + ! reset idg(minloc(abs(levs-300))) = idiag%id_h(minloc(abs(levs-300))) idg(minloc(abs(levs-500))) = idiag%id_h(minloc(abs(levs-500))) @@ -1791,7 +1804,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif if( prt_minmax ) then - + if(all(idiag%id_h(minloc(abs(levs-100)))>0)) & call prt_mxm('Z100',a3(isc:iec,jsc:jec,11),isc,iec,jsc,jec,0,1,1.E-3,Atm(n)%gridstruct%area_64,Atm(n)%domain) @@ -2063,7 +2076,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo used = send_data(idiag%id_mq, a2, Time) if( prt_minmax ) then - tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 0, quicksum=.true.) + tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 0, quicksum=.true.) idiag%mtq_sum = idiag%mtq_sum + tot_mq if ( idiag%steps <= max_step ) idiag%mtq(idiag%steps) = tot_mq if(master) write(*,*) 'Total (global) mountain torque (Hadleys)=', tot_mq @@ -2247,7 +2260,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) do i=isc,iec do k=2,npz tmp = atm(n)%q(i,j,k,liq_wat)+atm(n)%q(i,j,k,rainwat)+atm(n)%q(i,j,k,ice_wat)+ & - atm(n)%q(i,j,k,snowwat)+atm(n)%q(i,j,k,graupel) + atm(n)%q(i,j,k,snowwat)+atm(n)%q(i,j,k,graupel) if( tmp>5.e-6 ) then a2(i,j) = Atm(n)%pt(i,j,k) var1(i,j) = 0.01*Atm(n)%pe(i,k,j) @@ -2398,7 +2411,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo enddo enddo -! 
Mass weighted KE +! Mass weighted KE do j=jsc,jec do i=isc,iec a2(i,j) = 0.5*a2(i,j)/(Atm(n)%ps(i,j)-ptop) @@ -2406,8 +2419,8 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo used=send_data(idiag%id_ke, a2, Time) if(prt_minmax) then - tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 1, quicksum=.true.) - if (master) write(*,*) 'SQRT(2.*KE; m/s)=', sqrt(2.*tot_mq) + tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 1, quicksum=.true.) + if (master) write(*,*) 'SQRT(2.*KE; m/s)=', sqrt(2.*tot_mq) endif endif @@ -2416,7 +2429,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if(idiag%id_delp > 0 .or. idiag%id_cape > 0 .or. idiag%id_cin > 0 .or. ((.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0)) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-sum(Atm(n)%q(i,j,k,2:Atm(n)%flagstruct%nwat))) enddo enddo @@ -2427,9 +2440,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if( ( (.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0) .or. idiag%id_cape > 0 .or. idiag%id_cin > 0) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = -wk(i,j,k)/(Atm(n)%delz(i,j,k)*grav)*rdgas* & - Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) + Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) enddo enddo enddo @@ -2457,7 +2470,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if( Atm(n)%flagstruct%hydrostatic .and. (idiag%id_pfhy > 0 .or. idiag%id_cape > 0 .or. idiag%id_cin > 0) ) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = 0.5 *(Atm(n)%pe(i,k,j)+Atm(n)%pe(i,k+1,j)) enddo enddo @@ -2514,7 +2527,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo used=send_data(idiag%id_delz, wk, Time) endif - + ! pressure for masking p-level fields ! incorrectly defines a2 to be ps (in mb). @@ -2828,7 +2841,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) used=send_data(idiag%id_w850, a2, Time) if ( idiag%id_x850>0 .and. idiag%id_vort850>0 ) then - x850(:,:) = x850(:,:)*a2(:,:) + x850(:,:) = x850(:,:)*a2(:,:) used=send_data(idiag%id_x850, x850, Time) deallocate ( x850 ) endif @@ -2862,7 +2875,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if(idiag%id_pt > 0) used=send_data(idiag%id_pt , Atm(n)%pt (isc:iec,jsc:jec,:), Time) if(idiag%id_omga > 0) used=send_data(idiag%id_omga, Atm(n)%omga(isc:iec,jsc:jec,:), Time) - + allocate( a3(isc:iec,jsc:jec,npz) ) if(idiag%id_theta_e > 0 ) then @@ -2918,7 +2931,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) #ifdef TEST_GWAVES call gw_1d(npz, 1000.E2, Atm(n)%ak, Atm(n)%ak, Atm(n)%ak(1), 10.E3, idiag%pt1) #else - idiag%pt1 = 0. + idiag%pt1 = 0. #endif do k=1,npz do j=jsc,jec @@ -2975,7 +2988,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if( prt_minmax ) then call prt_maxmin(trim(tname)//'_dmmr', dmmr, & isc, iec, jsc, jec, 0, npz, 1.) - call prt_maxmin(trim(tname)//'_dvmr', dvmr, & + call prt_maxmin(trim(tname)//'_dvmr', dvmr, & isc, iec, jsc, jec, 0, npz, 1.) endif endif @@ -3011,8 +3024,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if (allocated(dmmr)) deallocate(dmmr) if (allocated(dvmr)) deallocate(dvmr) +#if defined (FMS1_IO) call nullify_domain() - +#endif end subroutine fv_diag @@ -3133,7 +3147,7 @@ subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_ real qmin, qmax integer i,j,k - if ( present(bad_range) ) bad_range = .false. + if ( present(bad_range) ) bad_range = .false. 
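The pfnh/delp loops above amount to the ideal-gas law with virtual temperature: the moisture-free layer mass delp*(1-sum(q)) gives density rho = -dp/(g*dz) (delz is negative by FV3 convention), and the full pressure follows as p = rho*R_d*T_v with T_v = T*(1+zvir*q_sphum). A one-cell numeric sketch, with all values illustrative (zvir = rvgas/rdgas - 1):

  program pfnh_demo
    implicit none
    real, parameter :: grav = 9.80665, rdgas = 287.04, zvir = 0.6077
    real :: delp, delz, pt, sphum, rho, pfnh
    delp  = 800.     ! dry layer mass as pressure thickness, Pa
    delz  = -65.     ! layer depth, m (negative by convention)
    pt    = 285.     ! layer temperature, K
    sphum = 5.e-3    ! specific humidity, kg/kg
    rho   = -delp/(delz*grav)             ! density from layer mass
    pfnh  = rho*rdgas*pt*(1.+zvir*sphum)  ! nonhydrostatic full pressure
    print *, 'rho (kg/m3) =', rho, '  pfnh (Pa) =', pfnh
  end program pfnh_demo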
if (any(.not.ieee_is_finite(q))) then qmax = huge(1.0) qmin = -qmax @@ -3160,7 +3174,7 @@ subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_ if( qmin<q_low .or. qmax>q_hi ) then if ( present(bad_range) ) then - bad_range = .true. + bad_range = .true. else if(master) write(*,*) 'Range_check Warning:', qname, ' max = ', qmax, ' min = ', qmin endif @@ -3259,7 +3273,7 @@ subroutine prt_mxm(qname, q, is, ie, js, je, n_g, km, fac, area, domain) call mp_reduce_min(qmin) call mp_reduce_max(qmax) - gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1, quicksum=.true.) + gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1, quicksum=.true.) if(master) write(6,*) qname//trim(gn), qmax*fac, qmin*fac, gmean*fac @@ -3294,12 +3308,12 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain if ( nwat==0 ) then psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1, quicksum=.true.) if( master ) write(*,*) 'Total surface pressure (mb)', trim(gn), ' = ', 0.01*psmo - call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,1 ), psqv(is,js)) + call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,1 ), psqv(is,js)) return endif psq(:,:,:) = 0. - call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,sphum ), psq(is,js,sphum )) + call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,sphum ), psq(is,js,sphum )) if (liq_wat > 0) & call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,liq_wat), psq(is,js,liq_wat)) @@ -3324,7 +3338,7 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain if ( idiag%phalf(k+1) > 75. ) exit kstrat = k enddo - call z_sum(is, ie, js, je, kstrat, n_g, delp, q(is-n_g,js-n_g,1,sphum), q_strat(is,js)) + call z_sum(is, ie, js, je, kstrat, n_g, delp, q(is-n_g,js-n_g,1,sphum), q_strat(is,js)) psmo = g_sum(domain, q_strat, is, ie, js, je, n_g, area, 1, quicksum=.true.) * 1.e6 & / p_sum(is, ie, js, je, kstrat, n_g, delp, area, domain) if(master) write(*,*) 'Mean specific humidity (mg/kg) above 75 mb', trim(gn), '=', psmo @@ -3334,10 +3348,10 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain !------------------- ! Check global means !------------------- - psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1, quicksum=.true.) + psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1, quicksum=.true.) do n=1,nwat - qtot(n) = g_sum(domain, psq(is:ie,js:je,n), is, ie, js, je, n_g, area, 1, quicksum=.true.) + qtot(n) = g_sum(domain, psq(is:ie,js:je,n), is, ie, js, je, n_g, area, 1, quicksum=.true.) enddo totw = sum(qtot(1:nwat)) @@ -3631,7 +3645,7 @@ subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, real:: s0, a6 integer:: i,j,k, n, k1 -!$OMP parallel do default(none) shared(iv,id,is,ie,js,je,km,kd,pout,qin,qout,pe,wz) & +!$OMP parallel do default(none) shared(iv,id,is,ie,js,je,km,kd,pout,qin,qout,pe,wz) & !$OMP private(k1,s0,a6,q2,dp,qe) do j=js,je @@ -3662,7 +3676,7 @@ subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, else qout(i,j,n) = qe(i,km+1) endif - else + else do k=k1,km if ( pout(n)>=pe(i,k,j) .and. pout(n) <= pe(i,k+1,j) ) then !
PPM distribution: f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) @@ -3696,7 +3710,7 @@ subroutine cs_interpolator(is, ie, js, je, km, qin, zout, wz, qout, qmin) real:: s0, a6 integer:: i,j,k -!$OMP parallel do default(none) shared(qmin,is,ie,js,je,km,zout,qin,qout,wz) & +!$OMP parallel do default(none) shared(qmin,is,ie,js,je,km,zout,qin,qout,wz) & !$OMP private(s0,a6,q2,dz,qe) do j=js,je @@ -3715,7 +3729,7 @@ subroutine cs_interpolator(is, ie, js, je, km, qin, zout, wz, qout, qmin) qout(i,j) = qe(i,1) elseif ( zout <= wz(i,j,km+1) ) then qout(i,j) = qe(i,km+1) - else + else do k=1,km if ( zout<=wz(i,j,k) .and. zout >= wz(i,j,k+1) ) then ! PPM distribution: f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) @@ -3762,7 +3776,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) gam(i,k) = d4(i) / bet enddo enddo - + do i=i1,i2 a_bot = 1. + d4(i)*(d4(i)+1.5) q(i,km+1) = (2.*d4(i)*(d4(i)+1.)*q2(i,km)+q2(i,km-1)-a_bot*q(i,km)) & @@ -3775,7 +3789,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) enddo enddo -! Apply *large-scale* constraints +! Apply *large-scale* constraints do i=i1,i2 q(i,2) = min( q(i,2), max(q2(i,1), q2(i,2)) ) q(i,2) = max( q(i,2), min(q2(i,1), q2(i,2)) ) @@ -3812,7 +3826,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) q(i,km) = min( q(i,km), max(q2(i,km-1), q2(i,km)) ) q(i,km) = max( q(i,km), min(q2(i,km-1), q2(i,km)) ) enddo - + end subroutine cs_prof @@ -3830,7 +3844,7 @@ subroutine interpolate_vertical(is, ie, js, je, km, plev, peln, a3, a2) logp = log(plev) -!$OMP parallel do default(none) shared(is,ie,js,je,km,peln,logp,a2,a3) & +!$OMP parallel do default(none) shared(is,ie,js,je,km,peln,logp,a2,a3) & !$OMP private(pm) do j=js,je do 1000 i=is,ie @@ -3843,7 +3857,7 @@ subroutine interpolate_vertical(is, ie, js, je, km, plev, peln, a3, a2) a2(i,j) = a3(i,j,1) elseif ( logp >= pm(km) ) then a2(i,j) = a3(i,j,km) - else + else do k=1,km-1 if( logp <= pm(k+1) .and. logp >= pm(k) ) then a2(i,j) = a3(i,j,k) + (a3(i,j,k+1)-a3(i,j,k))*(logp-pm(k))/(pm(k+1)-pm(k)) @@ -3879,7 +3893,7 @@ subroutine interpolate_z(is, ie, js, je, km, zl, hght, a3, a2) a2(i,j) = a3(i,j,1) elseif ( zl <= zm(km) ) then a2(i,j) = a3(i,j,km) - else + else do k=1,km-1 if( zl <= zm(k) .and. zl >= zm(k+1) ) then a2(i,j) = a3(i,j,k) + (a3(i,j,k+1)-a3(i,j,k))*(zm(k)-zl)/(zm(k)-zm(k+1)) @@ -3901,7 +3915,7 @@ subroutine helicity_relative(is, ie, js, je, ng, km, zvir, sphum, srh, & real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) logical, intent(in):: hydrostatic real, intent(out):: srh(is:ie,js:je) ! unit: (m/s)**2 ! real, parameter:: z_crit = 3.e3 ! lowest 3-km @@ -3986,7 +4000,7 @@ subroutine helicity_relative_CAPS(is, ie, js, je, ng, km, zvir, sphum, srh, uc, real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) real, intent(in):: uc(is:ie,js:je), vc(is:ie,js:je) logical, intent(in):: hydrostatic real, intent(out):: srh(is:ie,js:je) ! 
unit: (m/s)**2 @@ -4066,7 +4080,7 @@ subroutine bunkers_vector(is, ie, js, je, ng, km, zvir, sphum, uc, vc, & real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) logical, intent(in):: hydrostatic real, intent(out):: uc(is:ie,js:je), vc(is:ie,js:je) @@ -4145,7 +4159,7 @@ subroutine updraft_helicity(is, ie, js, je, ng, km, zvir, sphum, uh, & real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) logical, intent(in):: hydrostatic real, intent(out):: uh(is:ie,js:je) ! unit: (m/s)**2 ! Coded by S.-J. Lin for CONUS regional climate simulations @@ -4185,7 +4199,7 @@ subroutine updraft_helicity(is, ie, js, je, ng, km, zvir, sphum, uh, & elseif ( zh(i) < z_top ) then uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*dz(i) else - uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*(z_top - (zh(i)-dz(i)) ) + uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*(z_top - (zh(i)-dz(i)) ) goto 123 endif enddo @@ -4203,10 +4217,10 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) ! !INPUT PARAMETERS: integer, intent(in):: is, ie, js, je, ng, km real, intent(in):: grav - real, intent(in):: pt(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(in):: pkz(is:ie,js:je,km) + real, intent(in):: pt(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(in):: pkz(is:ie,js:je,km) real, intent(in):: delp(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(in):: f_d(is-ng:ie+ng,js-ng:je+ng) + real, intent(in):: f_d(is-ng:ie+ng,js-ng:je+ng) ! vort is relative vorticity as input. Becomes PV on output real, intent(inout):: vort(is:ie,js:je,km) @@ -4220,9 +4234,9 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) ! z-surface is not that different from the hybrid sigma-p coordinate. ! See page 39, Pedlosky 1979: Geophysical Fluid Dynamics ! -! The follwoing simplified form is strictly correct only if vort is computed on +! The following simplified form is strictly correct only if vort is computed on ! constant z surfaces. In addition hydrostatic approximation is made. -! EPV = - GRAV * (vort+f_d) / del(p) * del(pt) / pt +! EPV = - GRAV * (vort+f_d) / del(p) * del(pt) / pt ! where del() is the vertical difference operator. ! ! programmer: S.-J. Lin, shian-jiann.lin@noaa.gov @@ -4245,7 +4259,7 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) #else ! Compute PT at layer edges. !$OMP parallel do default(none) shared(is,ie,js,je,km,pt,pkz,w3d,delp,te2,te) & -!$OMP private(t2, delp2) +!$OMP private(t2, delp2) do j=js,je do k=1,km do i=is,ie @@ -4391,7 +4405,7 @@ subroutine ppme(p,qe,delp,im,km) end subroutine ppme subroutine rh_calc (pfull, t, qv, rh, do_cmip) - + real, intent (in), dimension(:,:) :: pfull, t, qv real, intent (out), dimension(:,:) :: rh real, dimension(size(t,1),size(t,2)) :: esat @@ -4422,7 +4436,7 @@ subroutine rh_calc (pfull, t, qv, rh, do_cmip) end subroutine rh_calc #ifdef SIMPLIFIED_THETA_E -!>@brief The subroutine 'eqv_pot' calculates the equivalent potential temperature using +!>@brief The subroutine 'eqv_pot' calculates the equivalent potential temperature using !! a simplified method.
!>@author Shian-Jiann Lin subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, npz, & @@ -4431,7 +4445,7 @@ subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, np real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,npz):: pt, delp, q real, intent(in), dimension(is-ng: ,js-ng: ,1: ):: delz real, intent(in), dimension(is:ie,npz+1,js:je):: peln - real, intent(in):: pkz(is:ie,js:je,npz) + real, intent(in):: pkz(is:ie,js:je,npz) logical, intent(in):: hydrostatic, moist ! Output: real, dimension(is:ie,js:je,npz), intent(out) :: theta_e !< eqv pot @@ -4516,7 +4530,7 @@ subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, np real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,npz):: pt, delp, q real, intent(in), dimension(is-ng: ,js-ng: ,1: ):: delz real, intent(in), dimension(is:ie,npz+1,js:je):: peln - real, intent(in):: pkz(is:ie,js:je,npz) + real, intent(in):: pkz(is:ie,js:je,npz) logical, intent(in):: hydrostatic, moist ! Output: real, dimension(is:ie,js:je,npz), intent(out) :: theta_e !< eqv pot @@ -4653,7 +4667,7 @@ subroutine nh_total_energy(is, ie, js, je, isd, ied, jsd, jed, km, & enddo enddo - psm = g_sum(domain, te, is, ie, js, je, 3, area_l, 1, quicksum=.true.) + psm = g_sum(domain, te, is, ie, js, je, 3, area_l, 1, quicksum=.true.) if( master ) write(*,*) 'TE ( Joule/m^2 * E9) =', psm * 1.E-9 end subroutine nh_total_energy @@ -4666,13 +4680,13 @@ end subroutine nh_total_energy !! model (i.e., the scheme known as "Reisner-2"). !! More information on the derivation of simulated reflectivity in RIP !! can be found in Stoelinga (2005, unpublished write-up). Contact -!! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. +!! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. !>@date 22 September, 2016 - modified for use with GFDL cloud microphysics parameters. subroutine dbzcalc(q, pt, delp, peln, delz, & dbz, maxdbz, allmax, bd, npz, ncnst, & hydrostatic, zvir, in0r, in0s, in0g, iliqskin) -! Code from Mark Stoelinga's dbzcalc.f from the RIP package. +! Code from Mark Stoelinga's dbzcalc.f from the RIP package. ! Currently just using values taken directly from that code, which is consistent for the MM5 Reisner-2 microphysics. From that file: @@ -4706,7 +4720,7 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & ! ! More information on the derivation of simulated reflectivity in RIP ! can be found in Stoelinga (2005, unpublished write-up). Contact -! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. +! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. ! 22sep16: Modifying to use the GFDL MP parameters. If doing so remember ! that the GFDL MP assumes a constant intercept (in0X = .false.) @@ -4753,7 +4767,7 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & real, parameter :: gamma_seven = 720. !The following values are also used in GFDL MP real, parameter :: rho_r = 1.0e3 ! LFO83 - real, parameter :: rho_s = 100. ! kg m^-3 + real, parameter :: rho_s = 100. ! kg m^-3 real, parameter :: rho_g0 = 400. ! kg m^-3 real, parameter :: rho_g = 500. ! graupel-hail mix ! real, parameter :: rho_g = 900. !
hail/frozen rain @@ -4842,13 +4856,13 @@ end subroutine dbzcalc subroutine fv_diag_init_gn(Atm) type(fv_atmos_type), intent(inout), target :: Atm - + if (Atm%grid_Number > 1) then write(gn,"(A2,I1)") " g", Atm%grid_number else gn = "" end if - + end subroutine fv_diag_init_gn !>@brief The subroutine 'getcape' calculates the Convective Available @@ -4913,11 +4927,11 @@ subroutine getcape( nk , p , t , dz, q, the, cape , cin, source_in ) real, parameter :: pinc = 10000.0 !< Pressure increment (Pa) ! (smaller number yields more accurate - ! results,larger number makes code + ! results, larger number makes code go faster) - real, parameter :: ml_depth = 200.0 !< depth (m) of mixed layer + real, parameter :: ml_depth = 200.0 !< depth (m) of mixed layer !! for source=3 integer, parameter :: adiabat = 1 !< Formulation of moist adiabat: @@ -5031,7 +5045,7 @@ subroutine getcape( nk , p , t , dz, q, the, cape , cin, source_in ) !!$ !!$ ELSEIF( z(1).lt.ml_depth )THEN !!$ ! the top-most level is within the mixed layer: just use the -!!$ ! upper-most level (not +!!$ ! upper-most level (not !!$ !!$ avgth = th(1) !!$ avgqv = q(1) diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90 index 3c49530f8..7e3ab56bc 100644 --- a/tools/fv_eta.F90 +++ b/tools/fv_eta.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** @@ -43,7 +43,12 @@ module fv_eta_mod ! ! - use constants_mod, only: kappa, grav, cp_air, rdgas +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: kappa, grav, cp_air, rdgas use fv_mp_mod, only: is_master use mpp_mod, only: FATAL, mpp_error implicit none @@ -228,7 +233,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) stretch_fac = 1.035 ! Hi-top: case (63) ! N = 8, M=4 - ptop = 1. + ptop = 1. ! c360 or c384 stretch_fac = 1.035 case (71) ! N = 9 @@ -253,7 +258,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) call mount_waves(km, ak, bk, ptop, ks, pint) #else if (s_rate > 0.)
then - call var_les(km, ak, bk, ptop, ks, pint, s_rate) + call var_les(km, ak, bk, ptop, ks, pint, s_rate) else if ( km > 79 ) then call var_hi2(km, ak, bk, ptop, ks, pint, stretch_fac) @@ -262,7 +267,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) ptop = 500.e2 ks = 0 do k=1,km+1 - bk(k) = real(k-1) / real (km) + bk(k) = real(k-1) / real (km) ak(k) = ptop*(1.-bk(k)) enddo else @@ -378,8 +383,8 @@ subroutine mount_waves(km, ak, bk, ptop, ks, pint) do k=1,km+1 eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -397,7 +402,7 @@ subroutine mount_waves(km, ak, bk, ptop, ks, pint) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -538,7 +543,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) ! Wilson's 32L settings: !--------------------- ! Top changed to 0.01 mb - data a32w/ 1.00, 26.6378, 84.5529, 228.8592, & + data a32w/ 1.00, 26.6378, 84.5529, 228.8592, & 539.9597, 1131.7087, 2141.8082, 3712.0454, & 5963.5317, 8974.1873, 12764.5388, 17294.5911, & 20857.7007, 22221.8651, 22892.7202, 22891.1641, & @@ -547,7 +552,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) 8073.9717, 6458.0824, 5027.9893, 3784.6104, & 2722.0093, 1828.9741, 1090.2397, 487.4575, & 0.0000 / - + data b32w/ 0.0000, 0.0000, 0.0000, 0.0000, & 0.0000, 0.0000, 0.0000, 0.0000, & 0.0000, 0.0000, 0.0000, 0.0000, & @@ -773,7 +778,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) 7.7938009300e+03, 6.1769062800e+03, 4.7874276000e+03, & 3.6050497500e+03, 2.6059860700e+03, 1.7668328200e+03, & 1.0656131200e+03, 4.8226201000e+02, 0.0000000000e+00, & - 0.0000000000e+00 / + 0.0000000000e+00 / data b60/ 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & @@ -803,7 +808,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) ! 3 layers data a63/64.247, 137.790, 221.958, & 318.266, 428.434, 554.424, & - 698.457, 863.05803, 1051.07995, & + 698.457, 863.05803, 1051.07995, & 1265.75194, 1510.71101, 1790.05098, & 2108.36604, 2470.78817, 2883.03811, & 3351.46002, 3883.05187, 4485.49315, & @@ -1161,7 +1166,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) 1.1164595563e+04, 9.4227665517e+03, 7.7259097899e+03, & 6.1538244381e+03, 4.7808126007e+03, 3.5967415552e+03, & 2.5886394104e+03, 1.7415964865e+03, 1.0393721271e+03, & - 4.6478852032e+02, 7.0308342481e-13, 0.0000000000e+00 / + 4.6478852032e+02, 7.0308342481e-13, 0.0000000000e+00 / data b104/ & @@ -1251,18 +1256,18 @@ subroutine set_eta(km, ks, ptop, ak, bk) case (24) - ks = 5 + ks = 5 do k=1,km+1 ak(k) = a24(k) bk(k) = b24(k) enddo case (26) - + ks = 7 do k=1,km+1 - ak(k) = a26(k) - bk(k) = b26(k) + ak(k) = a26(k) + bk(k) = b26(k) enddo case (32) @@ -1392,17 +1397,17 @@ subroutine set_eta(km, ks, ptop, ak, bk) do k=1,ks ak(k) = press(k) bk(k) = 0. - enddo - endif + enddo + endif pint = press(ks+1) - do k=ks+1,km - ak(k) = pint*(press(km)-press(k))/(press(km)-pint) - bk(k) = (press(k) - ak(k)) / press(km+1) - enddo - ak(km+1) = 0. - bk(km+1) = 1. - + do k=ks+1,km + ak(k) = pint*(press(km)-press(k))/(press(km)-pint) + bk(k) = (press(k) - ak(k)) / press(km+1) + enddo + ak(km+1) = 0. + bk(km+1) = 1. + ! do k=2,km ! bk(k) = real(k-1) / real(km) ! ak(k) = pt * ( 1. - bk(k) ) @@ -1495,7 +1500,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) bk(1) = 0. ak(2) = pint bk(2) = 0. 
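Throughout set_eta, the ak/bk tables define interface pressures as p(k) = ak(k) + bk(k)*ps: levels with bk = 0 are pure pressure surfaces (counted by ks, as set_external_eta infers above), and bk ramps to 1 at the surface where ak vanishes. A toy sketch consistent with the get_eta_level relation later in this diff, with coefficients invented for illustration:

  program akbk_demo
    implicit none
    integer, parameter :: npz = 4
    real :: ak(npz+1), bk(npz+1), ph(npz+1)
    real, parameter :: p_s = 1000.e2   ! surface pressure, Pa
    integer :: k
    ! top two interfaces pure pressure (bk=0), hybrid ramp below
    ak = (/ 100., 5000., 20000., 10000., 0. /)
    bk = (/ 0.,   0.,    0.05,   0.40,   1. /)
    do k = 1, npz+1
       ph(k) = ak(k) + bk(k)*p_s       ! same relation as get_eta_level
       print '(a,i2,f12.2)', ' interface', k, ph(k)
    end do
  end program akbk_demo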
- + do k=3,km+1 bk(k) = real(k-2) / real(km-1) ak(k) = pint - bk(k)*pint @@ -1510,7 +1515,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) end subroutine set_eta #endif -!>@brief The subroutine 'set_external_eta' sets 'ptop' (model top) and +!>@brief The subroutine 'set_external_eta' sets 'ptop' (model top) and !! 'ks' (first level of pure pressure coordinates given the coefficients !! 'ak' and 'bk' subroutine set_external_eta(ak, bk, ptop, ks) @@ -1526,12 +1531,12 @@ subroutine set_external_eta(ak, bk, ptop, ks) ptop = ak(1) ks = 1 do k = 1, size(bk(:)) - if (bk(k).lt.eps) ks = k + if (bk(k).lt.eps) ks = k enddo !--- change ks to layers from levels ks = ks - 1 if (is_master()) write(6,*) ' ptop & ks ', ptop, ks - + end subroutine set_external_eta @@ -1573,7 +1578,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) enddo s_fac(km-k_inc-1) = 0.5*(s_fac(km-k_inc) + s_rate) - + do k=km-k_inc-2, 5, -1 s_fac(k) = s_rate * s_fac(k+1) enddo @@ -1648,8 +1653,8 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -1667,7 +1672,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -1675,7 +1680,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) if ( is_master() ) then ! write(*,*) 'KS=', ks, 'PINT (mb)=', pint/100. ! do k=1,km - ! pm(k) = 0.5*(pe1(k)+pe1(k+1))/100. + ! pm(k) = 0.5*(pe1(k)+pe1(k+1))/100. ! write(*,*) k, pm(k), dz(k) ! enddo tmp1 = ak(ks+1) @@ -1721,7 +1726,7 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -1822,8 +1827,8 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -1841,7 +1846,7 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -1885,7 +1890,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -1897,7 +1902,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) enddo s_fac(km-k_inc-1) = 0.5*(s_fac(km-k_inc) + s_rate) - + #ifdef HIWPP do k=km-k_inc-2, 4, -1 s_fac(k) = s_rate * s_fac(k+1) @@ -1998,8 +2003,8 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2017,7 +2022,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2056,7 +2061,7 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. 
ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2065,13 +2070,13 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 s_fac(km-9) = 0.95 s_fac(km-10) = 0.5*(s_fac(km-9) + s_rate) - + do k=km-11, 8, -1 s_fac(k) = s_rate * s_fac(k+1) enddo @@ -2155,8 +2160,8 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2174,7 +2179,7 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2215,7 +2220,7 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2224,13 +2229,13 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 s_fac(km-9) = 0.95 s_fac(km-10) = 0.5*(s_fac(km-9) + s_rate) - + do k=km-11, 9, -1 s_fac(k) = min(10.0, s_rate * s_fac(k+1) ) enddo @@ -2316,8 +2321,8 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2335,7 +2340,7 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2376,7 +2381,7 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2479,8 +2484,8 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2498,7 +2503,7 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2557,35 +2562,35 @@ subroutine hybrid_z_dz(km, dz, ztop, s_rate) s_fac(1) = 1.6 *s_fac(2) sum1 = 0. - do k=1,km - sum1 = sum1 + s_fac(k) - enddo - - dz0 = ztop / sum1 - - do k=1,km - dz(k) = s_fac(k) * dz0 - enddo - - ze(km+1) = 0. - do k=km,1,-1 - ze(k) = ze(k+1) + dz(k) - enddo - - ze(1) = ztop - + do k=1,km + sum1 = sum1 + s_fac(k) + enddo + + dz0 = ztop / sum1 + + do k=1,km + dz(k) = s_fac(k) * dz0 + enddo + + ze(km+1) = 0. + do k=km,1,-1 + ze(k) = ze(k+1) + dz(k) + enddo + + ze(1) = ztop + call sm1_edge(1, 1, 1, 1, km, 1, 1, ze, 2) - - do k=1,km - dz(k) = ze(k) - ze(k+1) - enddo - end subroutine hybrid_z_dz + do k=1,km + dz(k) = ze(k) - ze(k+1) + enddo + + end subroutine hybrid_z_dz !>@brief The subroutine 'get_eta_level' returns the interface and !! layer-mean pressures for reference. 
subroutine get_eta_level(npz, p_s, pf, ph, ak, bk, pscale) - integer, intent(in) :: npz + integer, intent(in) :: npz real, intent(in) :: p_s !< unit: pascal real, intent(in) :: ak(npz+1) real, intent(in) :: bk(npz+1) @@ -2594,18 +2599,18 @@ subroutine get_eta_level(npz, p_s, pf, ph, ak, bk, pscale) real, intent(out) :: ph(npz+1) integer k - ph(1) = ak(1) + ph(1) = ak(1) do k=2,npz+1 ph(k) = ak(k) + bk(k)*p_s - enddo - + enddo + if ( present(pscale) ) then do k=1,npz+1 ph(k) = pscale*ph(k) enddo - endif + endif - if( ak(1) > 1.E-8 ) then + if( ak(1) > 1.E-8 ) then pf(1) = (ph(2) - ph(1)) / log(ph(2)/ph(1)) else pf(1) = (ph(2) - ph(1)) * kappa/(kappa+1.) @@ -2630,7 +2635,7 @@ subroutine compute_dz(km, ztop, dz) ! ztop = 30.E3 - dz(1) = ztop / real(km) + dz(1) = ztop / real(km) dz(km) = 0.5*dz(1) do k=2,km-1 @@ -2671,12 +2676,12 @@ subroutine compute_dz_var(km, ztop, dz) s_fac(km-1) = 0.20 s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 - s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 - s_fac(km-7) = 0.80 + s_fac(km-4) = 0.50 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 + s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 - s_fac(km-9) = 1. + s_fac(km-9) = 1. do k=km-10, 9, -1 s_fac(k) = s_rate * s_fac(k+1) @@ -2754,7 +2759,7 @@ subroutine compute_dz_L32(km, ztop, dz) ze(2) = dz(1) dz0 = 1.5*dz0 - dz(2) = dz0 + dz(2) = dz0 ze(3) = ze(2) + dz(2) @@ -2862,8 +2867,8 @@ subroutine set_hybrid_z(is, ie, js, je, ng, km, ztop, dz, rgrav, hs, ze, dz3) do j=js,je do i=is,ie - ze(i,j, 1) = ztop - ze(i,j,km+1) = hs(i,j) * rgrav + ze(i,j, 1) = ztop + ze(i,j,km+1) = hs(i,j) * rgrav enddo enddo @@ -3034,7 +3039,7 @@ subroutine gw_1d(km, p0, ak, bk, ptop, ztop, pt1) n2 = 0.0001 endif - s0 = grav*grav / (cp_air*n2) + s0 = grav*grav / (cp_air*n2) ze(km+1) = 0. do k=km,1,-1 @@ -3047,16 +3052,16 @@ subroutine gw_1d(km, p0, ak, bk, ptop, ztop, pt1) pe1(k) = p0*( (1.-s0/t0) + s0/t0*exp(-n2*ze(k)/grav) )**(1./kappa) enddo - ptop = pe1(1) + ptop = pe1(1) ! if ( is_master() ) write(*,*) 'GW_1D: computed model top (pa)=', ptop -! Set up "sigma" coordinate +! Set up "sigma" coordinate ak(1) = pe1(1) bk(1) = 0. do k=2,km bk(k) = (pe1(k) - pe1(1)) / (pe1(km+1)-pe1(1)) ! bk == sigma - ak(k) = pe1(1)*(1.-bk(k)) - enddo + ak(k) = pe1(1)*(1.-bk(k)) + enddo ak(km+1) = 0. bk(km+1) = 1. @@ -3085,9 +3090,9 @@ subroutine zflip(q, im, km) qtmp = q(i,k) q(i,k) = q(i,km+1-k) q(i,km+1-k) = qtmp - end do - end do - - end subroutine zflip + end do + end do + + end subroutine zflip end module fv_eta_mod diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index fce13f23c..f62acc62c 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -1,21 +1,21 @@ !!*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** module fv_grid_tools_mod @@ -35,7 +35,7 @@ module fv_grid_tools_mod ! ! ! fms_io_mod -! file_exist, field_exist, read_data, get_global_att_value, get_var_att_value +! file_exists, field_exist, read_data, get_global_att_value, get_var_att_value ! ! ! fv_arrays_mod @@ -60,7 +60,7 @@ module fv_grid_tools_mod ! ! fv_grid_utils_mod ! gnomonic_grids, great_circle_dist,mid_pt_sphere, spherical_angle, -! cell_center2, get_area, inner_prod, fill_ghost, direct_transform, +! cell_center2, get_area, inner_prod, fill_ghost, direct_transform, ! dist2side_latlon,spherical_linear_interpolation, big_number ! ! @@ -69,7 +69,7 @@ module fv_grid_tools_mod ! ! ! fv_mp_mod -! ng, is_master, fill_corners, XDir, YDir,mp_gather, +! ng, is_master, fill_corners, XDir, YDir,mp_gather, ! mp_bcst, mp_reduce_max, mp_stop ! ! @@ -82,15 +82,15 @@ module fv_grid_tools_mod ! ! ! mpp_mod -! mpp_error, FATAL, get_unit, mpp_chksum, mpp_pe, stdout, -! mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_npes, +! mpp_error, FATAL, get_unit, mpp_chksum, mpp_pe, stdout, +! mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_npes, ! mpp_sum, mpp_max, mpp_min, mpp_root_pe, mpp_broadcast, mpp_transmit ! ! ! mpp_domains_mod -! domain2d,mpp_update_domains, mpp_get_boundary,mpp_get_ntile_count, -! mpp_get_pelist, mpp_get_compute_domains, mpp_global_field, -! mpp_get_data_domain, mpp_get_compute_domain,mpp_get_global_domain, +! domain2d,mpp_update_domains, mpp_get_boundary,mpp_get_ntile_count, +! mpp_get_pelist, mpp_get_compute_domains, mpp_global_field, +! mpp_get_data_domain, mpp_get_compute_domain,mpp_get_global_domain, ! mpp_global_sum, mpp_global_max, mpp_global_min ! ! @@ -99,9 +99,9 @@ module fv_grid_tools_mod ! ! ! mpp_parameter_mod -! AGRID_PARAM=>AGRID,DGRID_NE_PARAM=>DGRID_NE, -! CGRID_NE_PARAM=>CGRID_NE,CGRID_SW_PARAM=>CGRID_SW, -! BGRID_NE_PARAM=>BGRID_NE,BGRID_SW_PARAM=>BGRID_SW, +! AGRID_PARAM=>AGRID,DGRID_NE_PARAM=>DGRID_NE, +! CGRID_NE_PARAM=>CGRID_NE,CGRID_SW_PARAM=>CGRID_SW, +! BGRID_NE_PARAM=>BGRID_NE,BGRID_SW_PARAM=>BGRID_SW, ! SCALAR_PAIR,CORNER, CENTER, XUPDATE ! ! @@ -114,15 +114,19 @@ module fv_grid_tools_mod ! ! 
- - use constants_mod, only: grav, omega, pi=>pi_8, cnst_radius=>radius +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, omega, pi=>pi_8, cnst_radius=>radius use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_grid_bounds_type, R_GRID use fv_grid_utils_mod, only: gnomonic_grids, gnomonic_grids_local, great_circle_dist, & mid_pt_sphere, spherical_angle, & cell_center2, get_area, inner_prod, fill_ghost, & direct_transform, dist2side_latlon, & spherical_linear_interpolation, big_number, & - project_sphere_v + project_sphere_v use fv_timing_mod, only: timing_on, timing_off use fv_mp_mod, only: ng, is_master, fill_corners, XDir, YDir use fv_mp_mod, only: mp_gather, mp_bcst, mp_reduce_max, mp_stop @@ -136,20 +140,27 @@ module fv_grid_tools_mod mpp_get_data_domain, mpp_get_compute_domain, & mpp_get_global_domain, mpp_global_sum, mpp_global_max, mpp_global_min use mpp_domains_mod, only: domain2d - use mpp_io_mod, only: mpp_get_att_value - - use mpp_parameter_mod, only: AGRID_PARAM=>AGRID, & +#if defined (FMS1_IO) + use mpp_io_mod, only: mpp_get_att_value +#endif + use mpp_parameter_mod, only: AGRID_PARAM=>AGRID, & DGRID_NE_PARAM=>DGRID_NE, & CGRID_NE_PARAM=>CGRID_NE, & CGRID_SW_PARAM=>CGRID_SW, & BGRID_NE_PARAM=>BGRID_NE, & - BGRID_SW_PARAM=>BGRID_SW, & + BGRID_SW_PARAM=>BGRID_SW, & SCALAR_PAIR, & CORNER, CENTER, XUPDATE +#if defined (FMS1_IO) use fms_mod, only: get_mosaic_tile_grid - use fms_io_mod, only: file_exist, field_exist, read_data, & + use fms_io_mod, only: file_exists => file_exist, field_exist, read_data, & get_global_att_value, get_var_att_value use mosaic_mod, only : get_mosaic_ntiles +#else + use fms2_io_mod, only: file_exists, get_global_attribute, get_variable_attribute, variable_exists, read_data, & + get_mosaic_tile_grid, FmsNetcdfFile_t, open_file, close_file + use mosaic2_mod, only : get_mosaic_ntiles +#endif implicit none private @@ -184,6 +195,9 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) integer, intent(IN) :: nregions integer, intent(IN) :: ng +#if !defined (FMS1_IO) + type(FmsNetcdfFile_t) :: Grid_input +#endif real, allocatable, dimension(:,:) :: tmpx, tmpy real(kind=R_GRID), pointer, dimension(:,:,:) :: grid character(len=128) :: units = "" @@ -191,7 +205,7 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) character(len=1024) :: attvalue integer :: ntiles, i, j, stdunit integer :: isc2, iec2, jsc2, jec2 - integer :: start(4), nread(4) + integer :: start(4), nread(4) integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -205,53 +219,90 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) jed = Atm%bd%jed grid => Atm%gridstruct%grid_64 - if(.not. file_exist(grid_file)) call mpp_error(FATAL, 'fv_grid_tools(read_grid): file '// & + if(.not. file_exists(grid_file)) call mpp_error(FATAL, 'fv_grid_tools(read_grid): file '// & trim(grid_file)//' does not exist') !--- make sure the grid file is mosaic file. +#if defined(FMS1_IO) if( field_exist(grid_file, 'atm_mosaic_file') .OR. field_exist(grid_file, 'gridfiles') ) then - stdunit = stdout() - write(stdunit,*) '==>Note from fv_grid_tools_mod(read_grid): read atmosphere grid from mosaic version grid' - else - call mpp_error(FATAL, 'fv_grid_tools(read_grid): neither atm_mosaic_file nor gridfiles exists in file ' & - //trim(grid_file)) - endif +#else + if ( open_file(Grid_input, grid_file, "read") ) then + if( variable_exists(Grid_input, 'atm_mosaic_file') .OR. 
variable_exists(Grid_input, 'gridfiles') ) then +#endif + stdunit = stdout() + write(stdunit,*) '==>Note from fv_grid_tools_mod(read_grid): read atmosphere grid from mosaic version grid' + else + call mpp_error(FATAL, 'fv_grid_tools(read_grid): neither atm_mosaic_file nor gridfiles exists in file ' & + //trim(grid_file)) + endif +#if defined (FMS1_IO) if(field_exist(grid_file, 'atm_mosaic_file')) then call read_data(grid_file, "atm_mosaic_file", atm_mosaic) - atm_mosaic = "INPUT/"//trim(atm_mosaic) - else - atm_mosaic = trim(grid_file) +#else + if(variable_exists(Grid_input, 'atm_mosaic_file')) then + call read_data(Grid_input, "atm_mosaic_file", atm_mosaic) +#endif + atm_mosaic = "INPUT/"//trim(atm_mosaic) + else + atm_mosaic = trim(grid_file) + endif +#if !defined (FMS1_IO) + call close_file(Grid_input) endif +#endif call get_mosaic_tile_grid(atm_hgrid, atm_mosaic, Atm%domain) - - grid_form = "none" +#if defined (FMS1_IO) + !FIXME: Doesn't work for a nested grid + ntiles = get_mosaic_ntiles(atm_mosaic) +#else + if (open_file(Grid_input, atm_mosaic, "read")) then + ntiles = get_mosaic_ntiles(Grid_input) + call close_file(Grid_input) + endif +#endif + grid_form = "none" +#if defined (FMS1_IO) if( get_global_att_value(atm_hgrid, "history", attvalue) ) then if( index(attvalue, "gnomonic_ed") > 0) grid_form = "gnomonic_ed" endif +#else + if (open_file(Grid_input, atm_hgrid, "read")) then + call get_global_attribute(Grid_input, "history", attvalue) + if( index(attvalue, "gnomonic_ed") > 0) grid_form = "gnomonic_ed" +#endif if(grid_form .NE. "gnomonic_ed") call mpp_error(FATAL, & "fv_grid_tools(read_grid): the grid should be 'gnomonic_ed' when reading from grid file, contact developer") - !FIXME: Doesn't work for a nested grid - ntiles = get_mosaic_ntiles(atm_mosaic) if(ntiles .NE. 6) call mpp_error(FATAL, & 'fv_grid_tools(read_grid): ntiles should be 6 in mosaic file '//trim(atm_mosaic) ) if(nregions .NE. 6) call mpp_error(FATAL, & 'fv_grid_tools(read_grid): nregions should be 6 when reading from mosaic file '//trim(grid_file) ) +#if defined (FMS1_IO) call get_var_att_value(atm_hgrid, 'x', 'units', units) +#else + call get_variable_attribute(Grid_input, 'x', 'units', units) +#endif !--- get the geographical coordinates of super-grid. isc2 = 2*is-1; iec2 = 2*ie+1 - jsc2 = 2*js-1; jec2 = 2*je+1 + jsc2 = 2*js-1; jec2 = 2*je+1 allocate(tmpx(isc2:iec2, jsc2:jec2) ) allocate(tmpy(isc2:iec2, jsc2:jec2) ) start = 1; nread = 1 start(1) = isc2; nread(1) = iec2 - isc2 + 1 start(2) = jsc2; nread(2) = jec2 - jsc2 + 1 +#if defined (FMS1_IO) call read_data(atm_hgrid, 'x', tmpx, start, nread, no_domain=.TRUE.) call read_data(atm_hgrid, 'y', tmpy, start, nread, no_domain=.TRUE.) +#else + call read_data(Grid_input, 'x', tmpx, corner=start, edge_lengths=nread) + call read_data(Grid_input, 'y', tmpy, corner=start, edge_lengths=nread) + call close_file(Grid_input) + endif +#endif !--- geographic grid at cell corner grid(isd: is-1, jsd:js-1,1:ndims)=0. 
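The read_grid rewrite above is the heart of the FMS2 migration: filename-based FMS1 calls (file_exist, field_exist, read_data on a path) become operations on an opened FmsNetcdfFile_t handle, with corner/edge_lengths replacing start/nread and explicit open_file/close_file bracketing. A condensed sketch of the two call styles, using only the routines this patch itself imports; the path argument and variable name 'x' are placeholders, and error handling is omitted:

  subroutine read_supergrid_x(path, tmpx, start, nread)
#if defined (FMS1_IO)
    use fms_io_mod, only: read_data
#else
    use fms2_io_mod, only: FmsNetcdfFile_t, open_file, close_file, read_data
#endif
    implicit none
    character(len=*), intent(in) :: path
    real, intent(out) :: tmpx(:,:)
    integer, intent(in) :: start(4), nread(4)
#if defined (FMS1_IO)
    ! FMS1: the file is named on every call
    call read_data(path, 'x', tmpx, start, nread, no_domain=.TRUE.)
#else
    ! FMS2: an explicit handle is opened, read from, and closed
    type(FmsNetcdfFile_t) :: fileobj
    if (open_file(fileobj, path, "read")) then
       call read_data(fileobj, 'x', tmpx, corner=start, edge_lengths=nread)
       call close_file(fileobj)
    endif
#endif
  end subroutine read_supergrid_x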
@@ -291,7 +342,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in) :: ishift, jshift, npes_x, npes_y real(kind=R_GRID), dimension(bd%is:bd%ie+ishift, bd%js:bd%je+jshift ), intent(in) :: data_in - real(kind=R_GRID), dimension(bd%is:bd%ie+jshift, bd%js:bd%je+ishift ), intent(out) :: data_out + real(kind=R_GRID), dimension(bd%is:bd%ie+jshift, bd%js:bd%je+ishift ), intent(out) :: data_out real(kind=R_GRID), dimension(:), allocatable :: send_buffer real(kind=R_GRID), dimension(:), allocatable :: recv_buffer integer, dimension(:), allocatable :: is_recv, ie_recv, js_recv, je_recv, pe_recv @@ -317,7 +368,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai ied = bd%ied jsd = bd%jsd jed = bd%jed - + !--- This routine will be called only for cubic sphere grid. so 6 tiles will be assumed !--- also number of processors on each tile will be the same. ntiles = mpp_get_ntile_count(domain) @@ -328,10 +379,10 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai npes_per_tile = npes/ntiles ! if(npes_x == npes_y) then ! even, simple communication - if(npes_x == npes_y .AND. mod(npx_g-1,npes_x) == 0 ) then ! even, + if(npes_x == npes_y .AND. mod(npx_g-1,npes_x) == 0 ) then ! even, msgsize = (ie-is+1+jshift)*(je-js+1+ishift) - pos = mod((mpp_pe()-mpp_root_pe()), npes_x*npes_y) + pos = mod((mpp_pe()-mpp_root_pe()), npes_x*npes_y) start_pe = mpp_pe() - pos ipos = mod(pos, npes_x) jpos = pos/npes_x @@ -361,7 +412,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai enddo enddo - call mpp_sync_self() + call mpp_sync_self() deallocate(send_buffer, recv_buffer) else @@ -380,8 +431,8 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai call mpp_get_pelist(domain, pelist) allocate(isl(0:npes-1), iel(0:npes-1), jsl(0:npes-1), jel(0:npes-1) ) call mpp_get_compute_domains(domain, xbegin=isl, xend=iel, ybegin=jsl, yend=jel) - !--- pre-post receiving - buffer_pos = 0 + !--- pre-post receiving + buffer_pos = 0 nrecv = 0 nsend = 0 recv_buf_size = 0 @@ -399,8 +450,8 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai is2 = isl(p); ie2 = iel(p) + ishift; js2 = jsl(p); je2 = jel(p) + jshift; is0 = max(is1,is2); ie0 = min(ie1,ie2) - js0 = max(js1,js2); je0 = min(je1,je2) - msgsize = 0 + js0 = max(js1,js2); je0 = min(je1,je2) + msgsize = 0 if(ie0 .GE. is0 .AND. je0 .GE. js0) then msgsize = (ie0-is0+1)*(je0-js0+1) recv_buf_size = recv_buf_size + msgsize @@ -462,7 +513,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai js0 = js_recv(p); je0 = je_recv(p) msgsize = (ie0-is0+1)*(je0-js0+1) call mpp_recv(recv_buffer(buffer_pos+1), glen=msgsize, from_pe=pe_recv(p), block=.FALSE. ) - buffer_pos = buffer_pos + msgsize + buffer_pos = buffer_pos + msgsize enddo !--- send the data @@ -480,7 +531,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai enddo enddo call mpp_send(send_buffer(buffer_pos+1), plen=msgsize, to_pe=pe_send(p) ) - buffer_pos = buffer_pos + msgsize + buffer_pos = buffer_pos + msgsize enddo call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed. 
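The get_symmetry exchange above follows mpp's usual recipe for irregular communication: size every domain intersection first, pre-post all receives non-blocking, then send, wait on EVENT_RECV before unpacking, and sync once more so send buffers can be reused. A skeleton of that ordering, reusing the patch's own mpp_mod calls; this is not standalone code, and it assumes an initialized mpp environment plus filled pe_recv/pe_send and msg_recv/msg_send lists:

  buffer_pos = 0
  do p = 0, nrecv-1                       ! pre-post receives
     call mpp_recv(recv_buffer(buffer_pos+1), glen=msg_recv(p), from_pe=pe_recv(p), block=.FALSE.)
     buffer_pos = buffer_pos + msg_recv(p)
  enddo
  buffer_pos = 0
  do p = 0, nsend-1                       ! then send
     call mpp_send(send_buffer(buffer_pos+1), plen=msg_send(p), to_pe=pe_send(p))
     buffer_pos = buffer_pos + msg_send(p)
  enddo
  call mpp_sync_self(check=EVENT_RECV)    ! receives complete: safe to unpack
  ! ... unpack recv_buffer into data_out ...
  call mpp_sync_self()                    ! sends complete: buffers reusable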
@@ -488,7 +539,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai !--- unpack buffer pos = 0 do p = 0, nrecv-1 - is0 = is_recv(p); ie0 = ie_recv(p) + is0 = is_recv(p); ie0 = ie_recv(p) js0 = js_recv(p); je0 = je_recv(p) do i = is0, ie0 @@ -544,11 +595,11 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, ! real(kind=R_GRID) :: grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions) integer :: ios, ip, jp - + integer :: igrid - + integer :: tmplun - character(len=80) :: tmpFile + character(len=80) :: tmpFile real(kind=R_GRID), dimension(Atm%bd%is:Atm%bd%ie) :: sbuffer, nbuffer real(kind=R_GRID), dimension(Atm%bd%js:Atm%bd%je) :: wbuffer, ebuffer @@ -575,7 +626,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + logical :: local_algorithm local_algorithm = atm%flagstruct%compute_coords_locally @@ -622,7 +673,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, allocate(grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions)) end if endif - + iinta => Atm%gridstruct%iinta jinta => Atm%gridstruct%jinta iintb => Atm%gridstruct%iintb @@ -674,11 +725,11 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, else cubed_sphere = .true. - + if (Atm%neststruct%nested) then call setup_aligned_nest(Atm) else - if(trim(grid_file) == 'INPUT/grid_spec.nc') then + if(trim(grid_file) == 'INPUT/grid_spec.nc') then call read_grid(Atm, grid_file, ndims, nregions, ng) else @@ -694,7 +745,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, grid_global(i,j,2,1) = ys(i,j) enddo enddo - ! mirror_grid assumes that the tile=1 is centered on equator and greenwich meridian Lon[-pi,pi] + ! mirror_grid assumes that the tile=1 is centered on equator and greenwich meridian Lon[-pi,pi] call mirror_grid(grid_global, ng, npx, npy, 2, 6) do n=1,nregions do j=1,npy @@ -716,25 +767,25 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, else call mpp_error(FATAL, "fv_grid_tools: reading of ASCII grid files no longer supported") endif - + grid_global( 1,1:npy,:,2)=grid_global(npx,1:npy,:,1) grid_global( 1,1:npy,:,3)=grid_global(npx:1:-1,npy,:,1) grid_global(1:npx,npy,:,5)=grid_global(1,npy:1:-1,:,1) grid_global(1:npx,npy,:,6)=grid_global(1:npx,1,:,1) - + grid_global(1:npx, 1,:,3)=grid_global(1:npx,npy,:,2) grid_global(1:npx, 1,:,4)=grid_global(npx,npy:1:-1,:,2) grid_global(npx,1:npy,:,6)=grid_global(npx:1:-1,1,:,2) - + grid_global( 1,1:npy,:,4)=grid_global(npx,1:npy,:,3) grid_global( 1,1:npy,:,5)=grid_global(npx:1:-1,npy,:,3) - + grid_global(npx,1:npy,:,3)=grid_global(1,1:npy,:,4) grid_global(1:npx, 1,:,5)=grid_global(1:npx,npy,:,4) grid_global(1:npx, 1,:,6)=grid_global(npx,npy:1:-1,:,4) - + grid_global( 1,1:npy,:,6)=grid_global(npx,1:npy,:,5) - + !------------------------ ! Schmidt transformation: !------------------------ @@ -787,7 +838,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, if (.not. Atm%neststruct%nested) call fill_corners(grid(:,:,1), npx, npy, FILL=XDir, BGRID=.true.) if (.not. Atm%neststruct%nested) call fill_corners(grid(:,:,2), npx, npy, FILL=XDir, BGRID=.true.) 
- !--- dx and dy + !--- dx and dy do j = js, je+1 do i = is, ie p1(1) = grid(i ,j,1) @@ -829,7 +880,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, call sorted_inta(isd, ied, jsd, jed, cubed_sphere, grid, iinta, jinta) agrid(:,:,:) = -1.e25 - + do j=js,je do i=is,ie if ( stretched_grid ) then @@ -1162,37 +1213,37 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, nullify(sina) nullify(cosa) - nullify(dx) - nullify(dy) - nullify(dxc) - nullify(dyc) - nullify(dxa) - nullify(dya) - nullify(rdx) - nullify(rdy) + nullify(dx) + nullify(dy) + nullify(dxc) + nullify(dyc) + nullify(dxa) + nullify(dya) + nullify(rdx) + nullify(rdy) nullify(rdxc) nullify(rdyc) nullify(rdxa) nullify(rdya) - nullify(e1) - nullify(e2) - - nullify(iinta) - nullify(jinta) - nullify(iintb) - nullify(jintb) - nullify(npx_g) - nullify(npy_g) - nullify(ntiles_g) - nullify(sw_corner) - nullify(se_corner) - nullify(ne_corner) - nullify(nw_corner) - nullify(latlon) - nullify(cubed_sphere) - nullify(have_south_pole) - nullify(have_north_pole) - nullify(stretched_grid) + nullify(e1) + nullify(e2) + + nullify(iinta) + nullify(jinta) + nullify(iintb) + nullify(jintb) + nullify(npx_g) + nullify(npy_g) + nullify(ntiles_g) + nullify(sw_corner) + nullify(se_corner) + nullify(ne_corner) + nullify(nw_corner) + nullify(latlon) + nullify(cubed_sphere) + nullify(have_south_pole) + nullify(have_north_pole) + nullify(stretched_grid) nullify(tile) @@ -1201,7 +1252,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, contains subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy real(kind=R_GRID), intent(IN) :: dx_const, dy_const, deglat @@ -1227,24 +1278,24 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) rdx(:,:) = 1./dx_const dy(:,:) = dy_const rdy(:,:) = 1./dy_const - + dxc(:,:) = dx_const rdxc(:,:) = 1./dx_const dyc(:,:) = dy_const rdyc(:,:) = 1./dy_const - + dxa(:,:) = dx_const rdxa(:,:) = 1./dx_const dya(:,:) = dy_const rdya(:,:) = 1./dy_const - + area(:,:) = dx_const*dy_const rarea(:,:) = 1./(dx_const*dy_const) - + area_c(:,:) = dx_const*dy_const rarea_c(:,:) = 1./(dx_const*dy_const) - -#ifndef MAPL_MODE + +#ifndef MAPL_MODE ! The following is a hack to get pass the am2 phys init: do j=max(1,jsd),min(jed,npy) do i=max(1,isd),min(ied,npx) @@ -1257,7 +1308,7 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) domain_rad = 1.e-2/npx do j=max(1,jsd),min(jed,npy) do i=max(1,isd),min(ied,npx) - grid(i,j,1) = (0.0 + FLOAT(i-1)*domain_rad)*pi/180.0 ! Radians + grid(i,j,1) = (0.0 + FLOAT(i-1)*domain_rad)*pi/180.0 ! Radians grid(i,j,2) = (deglat + FLOAT(j-1)*domain_rad)*pi/180.0 ! Radians enddo enddo @@ -1265,7 +1316,7 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) agrid(:,:,1) = lon_rad agrid(:,:,2) = lat_rad - + sina(:,:) = 1. cosa(:,:) = 0. 
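! [Editor note] Context for the long nullify() block reformatted above:
! init_grid works through local pointer aliases into Atm%gridstruct, so the
! nullify calls only detach those aliases on exit; the target arrays are
! untouched. Schematically (dx stands in for any of the aliased fields):

      real(kind=R_GRID), pointer :: dx(:,:)

      dx => Atm%gridstruct%dx   ! local alias used throughout the routine
      ! ... metric terms are filled through dx ...
      nullify(dx)               ! detach the alias; Atm%gridstruct%dx is unaffected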
@@ -1286,7 +1337,7 @@ subroutine setup_aligned_nest(Atm) integer :: isd_p, ied_p, jsd_p, jed_p integer :: isg, ieg, jsg, jeg integer :: ic, jc, imod, jmod - + real(kind=R_GRID), allocatable, dimension(:,:,:) :: p_grid_u, p_grid_v, pa_grid, p_grid, c_grid_u, c_grid_v integer :: p_ind(1-ng:npx +ng,1-ng:npy +ng,4) !< First two entries along dim 3 are @@ -1503,7 +1554,7 @@ subroutine setup_aligned_nest(Atm) ind_b(i,j,1) = ic ind_b(i,j,2) = jc - + ind_b(i,j,3) = imod ind_b(i,j,4) = jmod enddo @@ -1513,7 +1564,7 @@ subroutine setup_aligned_nest(Atm) ind_u = -99999999 !New BCs for wind components: - ! For aligned grid segments (mod(j-1,R) == 0) set + ! For aligned grid segments (mod(j-1,R) == 0) set ! identically equal to the coarse-grid value ! Do linear interpolation in the y-dir elsewhere @@ -1627,7 +1678,7 @@ subroutine setup_aligned_nest(Atm) do j=jsd,jed+1 do i=isd,ied+1 - + ic = ind_b(i,j,1) jc = ind_b(i,j,2) @@ -1791,7 +1842,7 @@ subroutine setup_aligned_nest(Atm) write(*,'(A, 2I5, 4F10.4)') 'NE CORNER: ', ic, jc, grid_global(npx,npy,:,1)*90./pi ic = p_ind(npx,1,1) ; jc = p_ind(npx,1,1) write(*,'(A, 2I5, 4F10.4)') 'SE CORNER: ', ic, jc, grid_global(npx,1,:,1)*90./pi - else + else write(*,*) 'PARENT GRID ', Atm%parent_grid%grid_number, Atm%parent_grid%tile ic = p_ind(1,1,1) ; jc = p_ind(1,1,1) write(*,'(A, 2I5, 4F10.4)') 'SW CORNER: ', ic, jc, Atm%parent_grid%grid_global(ic,jc,:,parent_tile)*90./pi @@ -1829,7 +1880,7 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd lon_start = deglon_start*pi/180. lat_start = deglat_start*pi/180. - + do j=jsd,jed+1 do i=isd,ied+1 grid(i,j,1) = lon_start + real(i-1)*dl @@ -1866,7 +1917,7 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd rdya(i,j) = 1./dya(i,j) enddo enddo - + do j=jsd,jed+1 do i=isd,ied dx(i,j) = dl*radius*cos(grid(i,j,2)) @@ -1915,20 +1966,20 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd sina(:,:) = 1. cosa(:,:) = 0. - + e1(1,:,:) = 1. e1(2,:,:) = 0. e1(3,:,:) = 0. - + e2(1,:,:) = 0. e2(2,:,:) = 1. e2(3,:,:) = 0. end subroutine setup_latlon - + end subroutine init_grid - subroutine cartesian_to_spherical(x, y, z, lon, lat, r) + subroutine cartesian_to_spherical(x, y, z, lon, lat, r) real(kind=R_GRID) , intent(IN) :: x, y, z real(kind=R_GRID) , intent(OUT) :: lon, lat, r @@ -1937,7 +1988,7 @@ subroutine cartesian_to_spherical(x, y, z, lon, lat, r) lon = 0. else lon = ATAN2(y,x) ! range: [-pi,pi] - endif + endif #ifdef RIGHT_HAND lat = asin(z/r) @@ -1966,10 +2017,10 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c real(kind=R_GRID) , intent(IN) :: x1in, y1in, z1in real(kind=R_GRID) , intent(INOUT) :: angle !< angle to rotate in radians real(kind=R_GRID) , intent(OUT) :: x2out, y2out, z2out - integer, intent(IN), optional :: degrees !< if present convert angle + integer, intent(IN), optional :: degrees !< if present convert angle !! from degrees to radians integer, intent(IN), optional :: convert !< if present convert input point - !! from spherical to cartesian, rotate, + !! from spherical to cartesian, rotate, !! 
and convert back real(kind=R_GRID) :: c, s @@ -1991,7 +2042,7 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c s = SIN(angle) SELECT CASE(axis) - + CASE(1) x2 = x1 y2 = c*y1 + s*z1 @@ -2006,7 +2057,7 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c z2 = z1 CASE DEFAULT write(*,*) "Invalid axis: must be 1 for X, 2 for Y, 3 for Z." - + END SELECT if ( present(convert) ) then @@ -2022,15 +2073,15 @@ end subroutine rot_3d !>brief The function 'get_area_tri' gets the surface area of a cell defined as a triangle -!!on the sphere. +!!on the sphere. !>@details The area is computed as the spherical excess [area units are based on the units of radius] real(kind=R_GRID) function get_area_tri(ndims, p_1, p_2, p_3) & result (myarea) - + integer, intent(IN) :: ndims !< 2=lat/lon, 3=xyz - real(kind=R_GRID) , intent(IN) :: p_1(ndims) ! - real(kind=R_GRID) , intent(IN) :: p_2(ndims) ! - real(kind=R_GRID) , intent(IN) :: p_3(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_1(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_2(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_3(ndims) ! real(kind=R_GRID) :: angA, angB, angC @@ -2071,12 +2122,12 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd, ti integer :: i,j,n, nreg integer :: nh = 0 - real(kind=R_GRID), allocatable :: p_R8(:,:,:) + real(kind=R_GRID), allocatable :: p_R8(:,:,:) real(kind=R_GRID), pointer, dimension(:,:,:) :: grid, agrid integer, pointer, dimension(:,:,:) :: iinta, jinta, iintb, jintb real(kind=R_GRID), pointer, dimension(:,:) :: area, area_c - + integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -2161,7 +2212,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd, ti !!$ deallocate( p_R8 ) !!$ !!$ call mp_reduce_max(maxarea) -!!$ minarea = -minarea +!!$ minarea = -minarea !!$ call mp_reduce_max(minarea) !!$ minarea = -minarea @@ -2305,8 +2356,8 @@ real(kind=R_GRID) function get_angle(ndims, p1, p2, p3, rad) result (angle) endif end function get_angle - -!>@brief The subroutine 'mirror_grid' mirrors the grid across the 0-longitude line + +!>@brief The subroutine 'mirror_grid' mirrors the grid across the 0-longitude line subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) integer, intent(IN) :: ng,npx,npy,ndims,nregions real(kind=R_GRID) , intent(INOUT) :: grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions) @@ -2326,7 +2377,7 @@ subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) grid_global(i ,npy-(j-1),1,nreg) = SIGN(x1,grid_global(i ,npy-(j-1),1,nreg)) grid_global(npx-(i-1),npy-(j-1),1,nreg) = SIGN(x1,grid_global(npx-(i-1),npy-(j-1),1,nreg)) - y1 = 0.25d0 * (ABS(grid_global(i ,j ,2,nreg)) + & + y1 = 0.25d0 * (ABS(grid_global(i ,j ,2,nreg)) + & ABS(grid_global(npx-(i-1),j ,2,nreg)) + & ABS(grid_global(i ,npy-(j-1),2,nreg)) + & ABS(grid_global(npx-(i-1),npy-(j-1),2,nreg))) @@ -2334,7 +2385,7 @@ subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) grid_global(npx-(i-1),j ,2,nreg) = SIGN(y1,grid_global(npx-(i-1),j ,2,nreg)) grid_global(i ,npy-(j-1),2,nreg) = SIGN(y1,grid_global(i ,npy-(j-1),2,nreg)) grid_global(npx-(i-1),npy-(j-1),2,nreg) = SIGN(y1,grid_global(npx-(i-1),npy-(j-1),2,nreg)) - + ! force dateline/greenwich-meridion consitency if (mod(npx,2) /= 0) then if ( (i==1+(npx-1)/2.0d0) ) then @@ -2463,7 +2514,7 @@ end subroutine get_unit_vector_3pts subroutine get_unit_vector_2pts( p1, p2, uvect ) real(kind=R_GRID), intent(in):: p1(2), p2(2) ! 
input position unit vectors (spherical coordinates) real(kind=R_GRID), intent(out):: uvect(3) ! output unit vspherical cartesian -! local +! local integer :: n real(kind=R_GRID) :: xyz1(3), xyz2(3) real :: dp_dot_xyz1 @@ -2504,7 +2555,7 @@ end subroutine normalize_vect ! These only require allocation of arrays that span local domain and ! are far more accurate due to careful construction of symmetric ! loops. - + subroutine mirror_grid_local(local_tile, tileno) real(R_GRID) , intent(INOUT) :: local_tile(:,:,:) integer, intent(IN) :: tileno @@ -2609,7 +2660,7 @@ subroutine rot_3d_new(axis, x1in, y1in, z1in, sa, ca, x2out, y2out, z2out, conve real(R_GRID) , intent(IN) :: sa, ca ! sin and cos of angle to rotate in radians real(R_GRID) , intent(OUT) :: x2out, y2out, z2out integer, intent(IN), optional :: convert ! if present convert input point - ! from spherical to cartesian, rotate, + ! from spherical to cartesian, rotate, ! and convert back real(R_GRID) :: x1,y1,z1, x2,y2,z2 diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index 4c83b1408..1fda61f6f 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -44,14 +44,14 @@ module fv_io_mod ! ! ! fms_mod -! file_exist +! file_exists ! ! ! fms_io_mod -! fms_io_exit, get_tile_string,restart_file_type, -! register_restart_field, save_restart, restore_state, -! set_domain, nullify_domain, set_filename_appendix, -! get_mosaic_tile_file, get_instance_filename, +! fms_io_exit, get_tile_string,restart_file_type, +! register_restart_field, save_restart, restore_state, +! nullify_domain, set_filename_appendix, +! get_mosaic_tile_file, get_instance_filename, ! save_restart_border, restore_state_border, ! free_restart_type,field_exist ! @@ -75,18 +75,19 @@ module fv_io_mod ! ! mpp_domains_mod ! domain2d, EAST, WEST, NORTH, CENTER, SOUTH, CORNER, -! mpp_get_compute_domain, mpp_get_data_domain, +! mpp_get_compute_domain, mpp_get_data_domain, ! mpp_get_layout, mpp_get_ntile_count,mpp_get_global_domain ! ! ! tracer_manager_mod -! tr_get_tracer_names=>get_tracer_names, -! get_tracer_names, get_number_tracers, +! tr_get_tracer_names=>get_tracer_names, +! get_tracer_names, get_number_tracers, ! set_tracer_profile, get_tracer_index ! ! 
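! [Editor note] The hunk below swaps the FMS1 imports for their FMS2
! equivalents. The correspondence assumed throughout this file, inferred from
! the paired use statements rather than from an exhaustive FMS2 listing:
!
!   FMS1 (fms_mod / fms_io_mod)       FMS2 (fms2_io_mod)
!   file_exist                   ->   file_exists
!   field_exist                  ->   variable_exists
!   restart_file_type            ->   FmsNetcdfFile_t / FmsNetcdfDomainFile_t
!   restore_state                ->   read_restart
!   save_restart                 ->   write_restart
!   restore_state_border         ->   read_restart_bc
!   save_restart_border          ->   write_restart_bc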
-  use fms_mod,            only: file_exist
+#if defined (FMS1_IO)
+  use fms_mod,            only: file_exists => file_exist
   use fms_io_mod,         only: fms_io_exit, get_tile_string, &
                                 restart_file_type, register_restart_field, &
                                 save_restart, restore_state, &
@@ -94,17 +95,25 @@ module fv_io_mod
                                 get_mosaic_tile_file, get_instance_filename, &
                                 save_restart_border, restore_state_border, free_restart_type, &
                                 field_exist
+#else
+  use fms2_io_mod,        only: FmsNetcdfFile_t, FmsNetcdfDomainFile_t, &
+                                register_restart_field, register_axis, unlimited, &
+                                open_file, read_restart, read_restart_bc, write_restart, &
+                                write_restart_bc, close_file, register_field, write_data, &
+                                get_global_io_domain_indices, register_variable_attribute, &
+                                variable_exists, read_data, set_filename_appendix, file_exists
+#endif
   use mpp_mod,            only: mpp_error, FATAL, NOTE, WARNING, mpp_root_pe, &
                                 mpp_sync, mpp_pe, mpp_declare_pelist
   use mpp_domains_mod,    only: domain2d, EAST, WEST, NORTH, CENTER, SOUTH, CORNER, &
-                                mpp_get_compute_domain, mpp_get_data_domain, &
+                                mpp_get_compute_domain, mpp_get_data_domain, &
                                 mpp_get_layout, mpp_get_ntile_count, &
                                 mpp_get_global_domain
   use tracer_manager_mod, only: tr_get_tracer_names=>get_tracer_names, &
                                 get_tracer_names, get_number_tracers, &
                                 set_tracer_profile, &
                                 get_tracer_index
-  use field_manager_mod,  only: MODEL_ATMOS
+  use field_manager_mod,  only: MODEL_ATMOS
   use external_sst_mod,   only: sst_ncep, sst_anom, use_ncep_sst
   use fv_arrays_mod,      only: fv_atmos_type, fv_nest_BC_type_3D
   use fv_eta_mod,         only: set_external_eta
@@ -124,7 +133,7 @@ module fv_io_mod
   integer ::grid_xtdimid, grid_ytdimid, haloid, pfullid !For writing BCs
   integer ::grid_xtstagdimid, grid_ytstagdimid, oneid

-contains
+contains

!>@brief Initialize the fv core restart facilities

@@ -139,7 +148,7 @@ subroutine fv_io_exit
 end subroutine fv_io_exit

-  !>@brief Write the fv core restart quantities
+  !>@brief Read the fv core restart quantities
  subroutine fv_io_read_restart(fv_domain,Atm)
    type(domain2d), intent(inout) :: fv_domain
    type(fv_atmos_type), intent(inout) :: Atm(:)
@@ -155,7 +164,11 @@ subroutine fv_io_read_restart(fv_domain,Atm)
    ntileMe = size(Atm(:))
! This will need mods for more than 1 tile per pe
+! NOTE (MAT): The restore_state calls below have no direct FMS2 IO equivalent in this
+! refactoring; they are skipped for now, as GEOS does not appear to need them.
+#if defined (FMS1_IO)
    call restore_state(Atm(1)%Fv_restart)
+#endif
    if (Atm(1)%flagstruct%external_eta) then
       call set_external_eta(Atm(1)%ak, Atm(1)%bk, Atm(1)%ptop, Atm(1)%ks)
    endif
@@ -172,22 +185,28 @@ subroutine fv_io_read_restart(fv_domain,Atm)
    else
       stile_name = ''
    endif
-
+
    do n = 1, ntileMe
+#if defined (FMS1_IO)
      call restore_state(Atm(n)%Fv_tile_restart)
+#endif
      !--- restore data for fv_tracer - if it exists
      fname = 'INPUT/fv_tracer.res'//trim(stile_name)//'.nc'
-     if (file_exist(fname)) then
+     if (file_exists(fname)) then
+#if defined (FMS1_IO)
        call restore_state(Atm(n)%Tra_restart)
+#endif
      else
        call mpp_error(NOTE,'==> Warning from fv_read_restart: Expected file '//trim(fname)//' does not exist')
      endif
      !--- restore data for surface winds - if it exists
      fname = 'INPUT/fv_srf_wnd.res'//trim(stile_name)//'.nc'
-     if (file_exist(fname)) then
+     if (file_exists(fname)) then
+#if defined (FMS1_IO)
        call restore_state(Atm(n)%Rsf_restart)
+#endif
        Atm(n)%flagstruct%srf_init = .true.
else call mpp_error(NOTE,'==> Warning from fv_read_restart: Expected file '//trim(fname)//' does not exist') @@ -197,15 +216,19 @@ subroutine fv_io_read_restart(fv_domain,Atm) if ( Atm(n)%flagstruct%fv_land ) then !--- restore data for mg_drag - if it exists fname = 'INPUT/mg_drag.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + if (file_exists(fname)) then +#if defined (FMS1_IO) call restore_state(Atm(n)%Mg_restart) +#endif else call mpp_error(NOTE,'==> Warning from fv_read_restart: Expected file '//trim(fname)//' does not exist') endif !--- restore data for fv_land - if it exists fname = 'INPUT/fv_land.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + if (file_exists(fname)) then +#if defined (FMS1_IO) call restore_state(Atm(n)%Lnd_restart) +#endif else call mpp_error(NOTE,'==> Warning from fv_read_restart: Expected file '//trim(fname)//' does not exist') endif @@ -216,10 +239,10 @@ subroutine fv_io_read_restart(fv_domain,Atm) return end subroutine fv_io_read_restart - + !>@brief The subroutine 'fv_io_read_tracers' reads in only tracers from restart files. - !>@details This subroutine is useful when initializing a cycled or nudged model - !! from an analysis that does not have a whole set of microphysical, aerosol, or + !>@details This subroutine is useful when initializing a cycled or nudged model + !! from an analysis that does not have a whole set of microphysical, aerosol, or !! chemical tracers subroutine fv_io_read_tracers(fv_domain,Atm) type(domain2d), intent(inout) :: fv_domain @@ -227,7 +250,11 @@ subroutine fv_io_read_tracers(fv_domain,Atm) integer :: n, ntracers, ntprog, nt, isc, iec, jsc, jec, id_restart character(len=6) :: stile_name character(len=64):: fname, tracer_name +#if defined (FMS1_IO) type(restart_file_type) :: Tra_restart_r +#else + type(FmsNetcdfDomainFile_t) :: Tra_restart_r +#endif integer :: ntiles n = 1 @@ -249,18 +276,24 @@ subroutine fv_io_read_tracers(fv_domain,Atm) do nt = 2, ntprog call get_tracer_names(MODEL_ATMOS, nt, tracer_name) call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%q(isc:iec,jsc:jec,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Tra_restart_r, fname, tracer_name, Atm(n)%q(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo do nt = ntprog+1, ntracers call get_tracer_names(MODEL_ATMOS, nt, tracer_name) call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%qdiag(isc:iec,jsc:jec,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Tra_restart_r, fname, tracer_name, Atm(n)%qdiag(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo - if (file_exist('INPUT'//trim(fname))) then + if (file_exists('INPUT'//trim(fname))) then +#if defined (FMS1_IO) call restore_state(Tra_restart_r) call free_restart_type(Tra_restart_r) +#endif else call mpp_error(NOTE,'==> Warning from fv_io_read_tracers: Expected file '//trim(fname)//' does not exist') endif @@ -268,8 +301,8 @@ subroutine fv_io_read_tracers(fv_domain,Atm) return end subroutine fv_io_read_tracers - - !>@brief The subroutine 'remap_restart' remaps the model state from remap files + + !>@brief The subroutine 'remap_restart' remaps the model state from remap files !! to a new set of Eulerian coordinates. 
!>@details Use if npz (run time z-dimension) /= npz_rst (restart z-dimension) subroutine remap_restart(fv_domain,Atm) @@ -283,7 +316,12 @@ subroutine remap_restart(fv_domain,Atm) integer :: isc, iec, jsc, jec, n, nt, nk, ntracers, ntprog, ntdiag integer :: isd, ied, jsd, jed integer :: ntiles +#if defined (FMS1_IO) type(restart_file_type) :: FV_restart_r, FV_tile_restart_r, Tra_restart_r +#else + type(FmsNetcdfDomainFile_t) :: FV_tile_restart_r, Tra_restart_r + type(FmsNetcdfFile_t) :: FV_restart_r +#endif integer :: id_restart ! @@ -332,10 +370,12 @@ subroutine remap_restart(fv_domain,Atm) endif fname = 'fv_core.res.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Fv_restart_r, fname, 'ak', ak_r(:), no_domain=.true.) id_restart = register_restart_field(Fv_restart_r, fname, 'bk', bk_r(:), no_domain=.true.) call restore_state(Fv_restart_r) call free_restart_type(Fv_restart_r) +#endif ! fix for single tile runs where you need fv_core.res.nc and fv_core.res.tile1.nc ntiles = mpp_get_ntile_count(fv_domain) @@ -348,20 +388,27 @@ subroutine remap_restart(fv_domain,Atm) ! do n = 1, ntileMe n = 1 fname = 'fv_core.res'//trim(stile_name)//'.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'u', u_r, & domain=fv_domain, position=NORTH,tile_count=n) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'v', v_r, & domain=fv_domain, position=EAST,tile_count=n) +#endif if (.not.Atm(n)%flagstruct%hydrostatic) then +#if defined (FMS1_IO) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'W', w_r, & domain=fv_domain, mandatory=.false., tile_count=n) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'DZ', delz_r, & domain=fv_domain, mandatory=.false., tile_count=n) +#endif if ( Atm(n)%flagstruct%hybrid_z ) then +#if defined (FMS1_IO) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'ZE0', ze0_r, & domain=fv_domain, mandatory=.false., tile_count=n) +#endif endif endif +#if defined (FMS1_IO) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'T', pt_r, & domain=fv_domain, tile_count=n) id_restart = register_restart_field(Fv_tile_restart_r, fname, 'delp', delp_r, & @@ -370,9 +417,12 @@ subroutine remap_restart(fv_domain,Atm) domain=fv_domain, tile_count=n) call restore_state(FV_tile_restart_r) call free_restart_type(FV_tile_restart_r) +#endif fname = 'INPUT/fv_srf_wnd.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + if (file_exists(fname)) then +#if defined (FMS1_IO) call restore_state(Atm(n)%Rsf_restart) +#endif Atm(n)%flagstruct%srf_init = .true. 
else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') @@ -382,36 +432,46 @@ subroutine remap_restart(fv_domain,Atm) if ( Atm(n)%flagstruct%fv_land ) then !--- restore data for mg_drag - if it exists fname = 'INPUT/mg_drag.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + if (file_exists(fname)) then +#if defined (FMS1_IO) call restore_state(Atm(n)%Mg_restart) +#endif else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') endif !--- restore data for fv_land - if it exists fname = 'INPUT/fv_land.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + if (file_exists(fname)) then +#if defined (FMS1_IO) call restore_state(Atm(n)%Lnd_restart) +#endif else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') endif endif fname = 'fv_tracer.res'//trim(stile_name)//'.nc' - if (file_exist('INPUT'//trim(fname))) then + if (file_exists('INPUT'//trim(fname))) then do nt = 1, ntprog call get_tracer_names(MODEL_ATMOS, nt, tracer_name) call set_tracer_profile (MODEL_ATMOS, nt, q_r(isc:iec,jsc:jec,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Tra_restart_r, fname, tracer_name, q_r(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo do nt = ntprog+1, ntracers call get_tracer_names(MODEL_ATMOS, nt, tracer_name) call set_tracer_profile (MODEL_ATMOS, nt, qdiag_r(isc:iec,jsc:jec,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Tra_restart_r, fname, tracer_name, qdiag_r(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo +#if defined (FMS1_IO) call restore_state(Tra_restart_r) call free_restart_type(Tra_restart_r) +#endif else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') endif @@ -469,9 +529,9 @@ subroutine fv_io_register_restart(fv_domain,Atm) integer :: id_restart integer :: n, nt, ntracers, ntprog, ntdiag, ntileMe, ntiles - ntileMe = size(Atm(:)) - ntprog = size(Atm(1)%q,4) - ntdiag = size(Atm(1)%qdiag,4) + ntileMe = size(Atm(:)) + ntprog = size(Atm(1)%q,4) + ntdiag = size(Atm(1)%qdiag,4) ntracers = ntprog+ntdiag !--- set the 'nestXX' appendix for all files using fms_io @@ -501,41 +561,54 @@ subroutine fv_io_register_restart(fv_domain,Atm) #endif fname = 'fv_core.res.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(1)%Fv_restart, fname, 'ak', Atm(1)%ak(:), no_domain=.true.) id_restart = register_restart_field(Atm(1)%Fv_restart, fname, 'bk', Atm(1)%bk(:), no_domain=.true.) 
+#endif do n = 1, ntileMe fname = 'fv_core.res'//trim(stile_name)//'.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'u', Atm(n)%u, & domain=fv_domain, position=NORTH,tile_count=n) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'v', Atm(n)%v, & domain=fv_domain, position=EAST,tile_count=n) +#endif if (.not.Atm(n)%flagstruct%hydrostatic) then +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'W', Atm(n)%w, & domain=fv_domain, mandatory=.false., tile_count=n) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'DZ', Atm(n)%delz, & domain=fv_domain, mandatory=.false., tile_count=n) +#endif if ( Atm(n)%flagstruct%hybrid_z ) then +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'ZE0', Atm(n)%ze0, & domain=fv_domain, mandatory=.false., tile_count=n) +#endif endif endif +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'T', Atm(n)%pt, & domain=fv_domain, tile_count=n) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'delp', Atm(n)%delp, & domain=fv_domain, tile_count=n) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'phis', Atm(n)%phis, & domain=fv_domain, tile_count=n) +#endif - !--- include agrid winds in restarts for use in data assimilation + !--- include agrid winds in restarts for use in data assimilation if (Atm(n)%flagstruct%agrid_vel_rst) then +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'ua', Atm(n)%ua, & domain=fv_domain, tile_count=n, mandatory=.false.) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'va', Atm(n)%va, & domain=fv_domain, tile_count=n, mandatory=.false.) +#endif endif fname = 'fv_srf_wnd.res'//trim(stile_name)//'.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Rsf_restart, fname, 'u_srf', Atm(n)%u_srf, & domain=fv_domain, tile_count=n) id_restart = register_restart_field(Atm(n)%Rsf_restart, fname, 'v_srf', Atm(n)%v_srf, & @@ -543,18 +616,22 @@ subroutine fv_io_register_restart(fv_domain,Atm) #ifdef SIM_PHYS id_restart = register_restart_field(Rsf_restart(n), fname, 'ts', Atm(n)%ts, & domain=fv_domain, tile_count=n) +#endif #endif if ( Atm(n)%flagstruct%fv_land ) then !------------------------------------------------------------------------------------------------- ! Optional terrain deviation (sgh) and land fraction (oro) fname = 'mg_drag.res'//trim(stile_name)//'.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Mg_restart, fname, 'ghprime', Atm(n)%sgh, & domain=fv_domain, tile_count=n) - +#endif fname = 'fv_land.res'//trim(stile_name)//'.nc' +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Lnd_restart, fname, 'oro', Atm(n)%oro, & domain=fv_domain, tile_count=n) +#endif endif fname = 'fv_tracer.res'//trim(stile_name)//'.nc' @@ -562,15 +639,19 @@ subroutine fv_io_register_restart(fv_domain,Atm) call get_tracer_names(MODEL_ATMOS, nt, tracer_name) ! set all tracers to an initial profile value call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%q(:,:,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Tra_restart, fname, tracer_name, Atm(n)%q(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo do nt = ntprog+1, ntracers call get_tracer_names(MODEL_ATMOS, nt, tracer_name) ! 
set all tracers to an initial profile value call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%qdiag(:,:,:,nt) ) +#if defined (FMS1_IO) id_restart = register_restart_field(Atm(n)%Tra_restart, fname, tracer_name, Atm(n)%qdiag(:,:,:,nt), & domain=fv_domain, mandatory=.false., tile_count=n) +#endif enddo enddo @@ -591,24 +672,32 @@ subroutine fv_io_write_restart(Atm, grids_on_this_pe, timestamp) call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') !call save_restart(Atm(1)%SST_restart, timestamp) endif - + do n = 1, ntileMe if (.not. grids_on_this_pe(n)) cycle if ( (use_ncep_sst .or. Atm(n)%flagstruct%nudge) .and. .not. Atm(n)%gridstruct%nested ) then +#if defined (FMS1_IO) call save_restart(Atm(n)%SST_restart, timestamp) +#endif endif - + +#if defined (FMS1_IO) call save_restart(Atm(n)%Fv_restart, timestamp) call save_restart(Atm(n)%Fv_tile_restart, timestamp) call save_restart(Atm(n)%Rsf_restart, timestamp) +#endif if ( Atm(n)%flagstruct%fv_land ) then +#if defined (FMS1_IO) call save_restart(Atm(n)%Mg_restart, timestamp) call save_restart(Atm(n)%Lnd_restart, timestamp) +#endif endif - call save_restart(Atm(n)%Tra_restart, timestamp) +#if defined (FMS1_IO) + !call save_restart(Atm(n)%Tra_restart, timestamp) +#endif end do @@ -617,7 +706,11 @@ end subroutine fv_io_write_restart subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & var_name, var, var_bc, istag, jstag) type(fv_atmos_type), intent(in) :: Atm +#if defined (FMS1_IO) type(restart_file_type), intent(inout) :: BCfile_ne, BCfile_sw +#else + type(FmsNetcdfFile_t), intent(inout) :: BCfile_ne, BCfile_sw +#endif character(len=120), intent(in) :: fname_ne, fname_sw character(len=*), intent(in) :: var_name real, dimension(:,:), intent(in), optional :: var @@ -632,8 +725,8 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & integer, allocatable, dimension(:) :: x2_pelist, y2_pelist logical :: is_root_pe - i_stag = 0 - j_stag = 0 + i_stag = 0 + j_stag = 0 if (present(istag)) i_stag = i_stag if (present(jstag)) j_stag = j_stag call mpp_get_global_domain(Atm%domain, xsize = npx, ysize = npy, position=CORNER ) @@ -675,6 +768,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & is_root_pe = .FALSE. if (is.eq.1 .and. js.eq.1) is_root_pe = .TRUE. !register west halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_west_t1', & var_bc%west_t1, & @@ -685,26 +779,30 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & trim(var_name)//'_west', & var, indices, global_size, & y2_pelist, is_root_pe, jshift=y_halo) +#endif !define east root_pe is_root_pe = .FALSE. if (ie.eq.npx-1 .and. je.eq.npy-1) is_root_pe = .TRUE. 
!register east halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east_t1', & var_bc%east_t1, & indices, global_size, y1_pelist, & is_root_pe, jshift=y_halo) - +#endif !reset indices for prognostic variables in the east halo indices(1) = ied-x_halo+1+i_stag indices(2) = ied+i_stag !register east prognostic halo data +#if defined (FMS1_IO) if (present(var)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east', & var, indices, global_size, & y1_pelist, is_root_pe, jshift=y_halo, & x_halo=(size(var,1)-x_halo), ishift=-(ie+i_stag)) +#endif !NORTH & SOUTH !set defaults for north/south halo regions @@ -724,6 +822,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & is_root_pe = .FALSE. if (is.eq.1 .and. js.eq.1) is_root_pe = .TRUE. !register south halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_south_t1', & var_bc%south_t1, & @@ -734,26 +833,31 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & trim(var_name)//'_south', & var, indices, global_size, & x2_pelist, is_root_pe, x_halo=x_halo_ns) +#endif !define north root_pe is_root_pe = .FALSE. if (ie.eq.npx-1 .and. je.eq.npy-1) is_root_pe = .TRUE. !register north halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north_t1', & var_bc%north_t1, & indices, global_size, x1_pelist, & is_root_pe, x_halo=x_halo_ns) +#endif !reset indices for prognostic variables in the north halo indices(3) = jed-y_halo+1+j_stag indices(4) = jed+j_stag !register north prognostic halo data +#if defined (FMS1_IO) if (present(var)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north', & var, indices, global_size, & x1_pelist, is_root_pe, x_halo=x_halo_ns, & y_halo=(size(var,2)-y_halo), jshift=-(je+j_stag)) +#endif end subroutine register_bcs_2d @@ -761,7 +865,11 @@ end subroutine register_bcs_2d subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & var_name, var, var_bc, istag, jstag, mandatory) type(fv_atmos_type), intent(in) :: Atm +#if defined (FMS1_IO) type(restart_file_type), intent(inout) :: BCfile_ne, BCfile_sw +#else + type(FmsNetcdfFile_t), intent(inout) :: BCfile_ne, BCfile_sw +#endif character(len=120), intent(in) :: fname_ne, fname_sw character(len=*), intent(in) :: var_name real, dimension(:,:,:), intent(in), optional :: var @@ -821,6 +929,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & is_root_pe = .FALSE. if (is.eq.1 .and. js.eq.1) is_root_pe = .TRUE. !register west halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_west_t1', & var_bc%west_t1, & @@ -831,26 +940,31 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & trim(var_name)//'_west', & var, indices, global_size, & y2_pelist, is_root_pe, jshift=y_halo, mandatory=mandatory) +#endif !define east root_pe is_root_pe = .FALSE. if (ie.eq.npx-1 .and. je.eq.npy-1) is_root_pe = .TRUE. 
!register east halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east_t1', & var_bc%east_t1, & indices, global_size, y1_pelist, & is_root_pe, jshift=y_halo, mandatory=mandatory) +#endif !reset indices for prognostic variables in the east halo indices(1) = ied-x_halo+1+i_stag indices(2) = ied+i_stag !register east prognostic halo data +#if defined (FMS1_IO) if (present(var)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east', & var, indices, global_size, & y1_pelist, is_root_pe, jshift=y_halo, & x_halo=(size(var,1)-x_halo), ishift=-(ie+i_stag), mandatory=mandatory) +#endif !NORTH & SOUTH !set defaults for north/south halo regions @@ -871,6 +985,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & is_root_pe = .FALSE. if (is.eq.1 .and. js.eq.1) is_root_pe = .TRUE. !register south halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_south_t1', & var_bc%south_t1, & @@ -881,30 +996,35 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & trim(var_name)//'_south', & var, indices, global_size, & x2_pelist, is_root_pe, x_halo=x_halo_ns, mandatory=mandatory) +#endif !define north root_pe is_root_pe = .FALSE. if (ie.eq.npx-1 .and. je.eq.npy-1) is_root_pe = .TRUE. !register north halo data in t1 +#if defined (FMS1_IO) if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north_t1', & var_bc%north_t1, & indices, global_size, x1_pelist, & is_root_pe, x_halo=x_halo_ns, mandatory=mandatory) +#endif !reset indices for prognostic variables in the north halo indices(3) = jed-y_halo+1+j_stag indices(4) = jed+j_stag !register north prognostic halo data +#if defined (FMS1_IO) if (present(var)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north', & var, indices, global_size, & x1_pelist, is_root_pe, x_halo=x_halo_ns, & y_halo=(size(var,2)-y_halo), jshift=-(je+j_stag), mandatory=mandatory) +#endif end subroutine register_bcs_3d -!>@brief The subroutine 'fv_io_register_restart_BCs' registers restarts for +!>@brief The subroutine 'fv_io_register_restart_BCs' registers restarts for !! nested-grid boundary conditions. subroutine fv_io_register_restart_BCs(Atm) type(fv_atmos_type), intent(inout) :: Atm @@ -919,7 +1039,9 @@ subroutine fv_io_register_restart_BCs(Atm) ntdiag=size(Atm%qdiag,4) ntracers=ntprog+ntdiag +#if defined (FMS1_IO) call set_domain(Atm%domain) +#endif call register_bcs_2d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'phis', var=Atm%phis) @@ -964,8 +1086,9 @@ subroutine fv_io_register_restart_BCs(Atm) fname_ne, fname_sw, 'vc', var_bc=Atm%neststruct%vc_BC, jstag=1) call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'divg', var_bc=Atm%neststruct%divg_BC, istag=1,jstag=1, mandatory=.false.) 
+#if defined (FMS1_IO) Atm%neststruct%divg_BC%initialized = field_exist(fname_ne, 'divg_north_t1', Atm%domain) - +#endif return end subroutine fv_io_register_restart_BCs @@ -980,7 +1103,9 @@ subroutine fv_io_register_restart_BCs_NH(Atm) fname_ne = 'fv_BC_ne.res.nc' fname_sw = 'fv_BC_sw.res.nc' +#if defined (FMS1_IO) call set_domain(Atm%domain) +#endif if (is_master()) print*, 'fv_io_register_restart_BCs_NH: REGISTERING NH BCs', Atm%flagstruct%hydrostatic, Atm%flagstruct%make_nh #ifndef SW_DYNAMICS @@ -999,8 +1124,10 @@ subroutine fv_io_write_BCs(Atm, timestamp) type(fv_atmos_type), intent(inout) :: Atm character(len=*), intent(in), optional :: timestamp +#if defined (FMS1_IO) call save_restart_border(Atm%neststruct%BCfile_ne, timestamp) call save_restart_border(Atm%neststruct%BCfile_sw, timestamp) +#endif return end subroutine fv_io_write_BCs @@ -1009,9 +1136,10 @@ end subroutine fv_io_write_BCs subroutine fv_io_read_BCs(Atm) type(fv_atmos_type), intent(inout) :: Atm +#if defined (FMS1_IO) call restore_state_border(Atm%neststruct%BCfile_ne) call restore_state_border(Atm%neststruct%BCfile_sw) - +#endif return end subroutine fv_io_read_BCs diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index 184f58154..e6579e13d 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . 
!*********************************************************************** @@ -83,7 +83,7 @@ module fv_mp_mod use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain use mpp_domains_mod, only : mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min use mpp_domains_mod, only : mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain - use mpp_domains_mod, only : mpp_check_field, mpp_define_layout + use mpp_domains_mod, only : mpp_check_field, mpp_define_layout use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_mosaic, mpp_define_io_domain use mpp_domains_mod, only : NORTH, NORTH_EAST, EAST, SOUTH_EAST use mpp_domains_mod, only : SOUTH, SOUTH_WEST, WEST, NORTH_WEST @@ -93,7 +93,9 @@ module fv_mp_mod use mpp_domains_mod, only : group_halo_update_type => mpp_group_update_type use mpp_parameter_mod, only : WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE use fv_arrays_mod, only: fv_atmos_type +#if defined (FMS1_IO) use fms_io_mod, only: set_domain +#endif use mpp_mod, only : mpp_get_current_pelist use mpp_domains_mod, only : mpp_define_domains use mpp_domains_mod, only : mpp_define_nest_domains, nest_domain_type @@ -125,13 +127,13 @@ module fv_mp_mod type(nest_domain_type), allocatable, dimension(:) :: nest_domain integer :: this_pe_grid = 0 - integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads + integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads integer :: npes_this_grid !! CLEANUP: these are currently here for convenience !! Right now calling switch_current_atm sets these to the value on the "current" grid - !! (as well as changing the "current" domain) + !! (as well as changing the "current" domain) integer :: is, ie, js, je integer :: isd, ied, jsd, jed integer :: isc, iec, jsc, jec @@ -181,7 +183,7 @@ module fv_mp_mod MODULE PROCEDURE fill_corners_dgrid_r8 END INTERFACE - !> The interface 'mp_bcast contains routines that call SPMD broadcast + !> The interface 'mp_bcast contains routines that call SPMD broadcast !! (one-to-many communication). INTERFACE mp_bcst MODULE PROCEDURE mp_bcst_i @@ -201,17 +203,17 @@ module fv_mp_mod MODULE PROCEDURE mp_bcst_4d_i END INTERFACE - !> The interface 'mp_reduce_min' contains routines that call SPMD_REDUCE. + !> The interface 'mp_reduce_min' contains routines that call SPMD_REDUCE. !! The routines compute the minima of values and place the - !! absolute minimum value in a result. + !! absolute minimum value in a result. INTERFACE mp_reduce_min MODULE PROCEDURE mp_reduce_min_r4 MODULE PROCEDURE mp_reduce_min_r8 END INTERFACE - !> The interface 'mp_reduce_max' contains routines that call SPMD_REDUCE. + !> The interface 'mp_reduce_max' contains routines that call SPMD_REDUCE. !! The routines compute the maxima of values and place the - !! absolute maximum value in a result. + !! absolute maximum value in a result. INTERFACE mp_reduce_max MODULE PROCEDURE mp_reduce_max_r4_1d MODULE PROCEDURE mp_reduce_max_r4 @@ -220,10 +222,10 @@ module fv_mp_mod MODULE PROCEDURE mp_reduce_max_i END INTERFACE - - !> The interface 'mp_reduce_sum' contains routines that call SPMD_REDUCE. + + !> The interface 'mp_reduce_sum' contains routines that call SPMD_REDUCE. !! The routines compute the sums of values and place the - !! net sum in a result. + !! net sum in a result. 
INTERFACE mp_reduce_sum MODULE PROCEDURE mp_reduce_sum_r4 MODULE PROCEDURE mp_reduce_sum_r4_1d @@ -234,11 +236,11 @@ module fv_mp_mod MODULE PROCEDURE mp_reduce_sum_r8_1darr MODULE PROCEDURE mp_reduce_sum_r8_2darr END INTERFACE - - !> The interface 'mp_gather contains routines that call SPMD Gather. - !! The routines aggregate elements from many processes into one process. + + !> The interface 'mp_gather contains routines that call SPMD Gather. + !! The routines aggregate elements from many processes into one process. ! WARNING only works with one level (ldim == 1) - INTERFACE mp_gather + INTERFACE mp_gather MODULE PROCEDURE mp_gather_4d_r4 MODULE PROCEDURE mp_gather_3d_r4 MODULE PROCEDURE mp_gather_3d_r8 @@ -306,7 +308,7 @@ subroutine setup_master(pelist_local) integer, intent(IN) :: pelist_local(:) if (ANY(gid == pelist_local)) then - + masterproc = pelist_local(1) master = (gid == masterproc) @@ -318,11 +320,11 @@ end subroutine setup_master ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! !>@brief The subroutine 'mp_barrier' waits for all SPMD processes subroutine mp_barrier() - + call MPI_BARRIER(commglobal, ierror) - + end subroutine mp_barrier -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -356,12 +358,12 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) integer :: nx,ny,n,num_alloc character(len=32) :: type = "unknown" - logical :: is_symmetry + logical :: is_symmetry logical :: debug=.false. integer, allocatable :: tile_id(:) integer i - integer :: npes_x, npes_y + integer :: npes_x, npes_y integer, pointer :: pelist(:), grid_number, num_contact, npes_per_tile logical, pointer :: square_domain @@ -403,7 +405,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) is_symmetry = .true. call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout ) - if ( npes_x == 0 ) then + if ( npes_x == 0 ) then npes_x = layout(1) endif if ( npes_y == 0 ) then @@ -417,7 +419,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) call mp_stop call exit(1) endif - + layout = (/npes_x,npes_y/) case (3) ! Lat-Lon "cyclic" type="Lat-Lon: cyclic" @@ -475,7 +477,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) endif call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout ) - if ( npes_x == 0 ) then + if ( npes_x == 0 ) then npes_x = layout(1) endif if ( npes_y == 0 ) then @@ -490,7 +492,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) call mp_stop call exit(1) endif - + layout = (/npes_x,npes_y/) case default call mpp_error(FATAL, 'domain_decomp: no such test: '//type) @@ -509,7 +511,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) allocate(tile1(num_alloc), tile2(num_alloc) ) allocate(istart1(num_alloc), iend1(num_alloc), jstart1(num_alloc), jend1(num_alloc) ) allocate(istart2(num_alloc), iend2(num_alloc), jstart2(num_alloc), jend2(num_alloc) ) - + is_symmetry = .true. 
select case(nregions) case ( 1 ) @@ -659,13 +661,13 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) deallocate(istart2, iend2, jstart2, jend2) !--- find the tile number - Atm%tile = (gid-pelist(1))/npes_per_tile+1 + Atm%tile = (gid-pelist(1))/npes_per_tile+1 if (ANY(pelist == gid)) then npes_this_grid = npes_per_tile*nregions tile = Atm%tile call mpp_get_compute_domain( domain, is, ie, js, je ) call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - + Atm%bd%is = is Atm%bd%js = js Atm%bd%ie = ie @@ -689,7 +691,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) endif 200 format(i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ') else - + Atm%bd%is = 0 Atm%bd%js = 0 Atm%bd%ie = -1 @@ -723,18 +725,18 @@ subroutine start_var_group_update_2d(group, array, domain, flags, position, whal !! should be initiated immediately or wait for second pass_..._start call real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! (in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -746,7 +748,7 @@ subroutine start_var_group_update_2d(group, array, domain, flags, position, whal is_complete = .TRUE. if(present(complete)) is_complete = complete - if(is_complete .and. halo_update_type == 1) then + if(is_complete .and. halo_update_type == 1) then call mpp_start_group_update(group, domain, d_type) endif @@ -757,7 +759,7 @@ subroutine start_var_group_update_3d(group, array, domain, flags, position, whal type(group_halo_update_type), intent(inout) :: group !< The data type that store information for group update real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points exchanged type(domain2D), intent(inout) :: domain !< contains domain information - integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent + integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent integer, optional, intent(in) :: position !< An optional argument indicating the position integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo logical, optional, intent(in) :: complete !< Optional argument indicating whether the halo updates @@ -765,18 +767,18 @@ subroutine start_var_group_update_3d(group, array, domain, flags, position, whal real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! 
(in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -798,7 +800,7 @@ subroutine start_var_group_update_4d(group, array, domain, flags, position, whal type(group_halo_update_type), intent(inout) :: group !< The data type that store information for group update real, dimension(:,:,:,:), intent(inout) :: array !< The array which is having its halos points exchanged type(domain2D), intent(inout) :: domain !< contains domain information - integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent + integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent integer, optional, intent(in) :: position !< An optional argument indicating the position !! This is may be CORNER, but is CENTER by default integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo @@ -807,18 +809,18 @@ subroutine start_var_group_update_4d(group, array, domain, flags, position, whal real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! (in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. integer :: dirflag @@ -843,12 +845,12 @@ end subroutine start_var_group_update_4d subroutine start_vector_group_update_2d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete) type(group_halo_update_type), intent(inout) :: group !< The data type that store information for group update real, dimension(:,:), intent(inout) :: u_cmpt, v_cmpt !< The nominal zonal (u) and meridional (v) - !! components of the vector pair that + !! components of the vector pair that !! is having its halos points exchanged type(domain2d), intent(inout) :: domain !< Contains domain decomposition information - integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent + integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent integer, optional, intent(in) :: gridtype !< An optional flag, which may be one of A_GRID, BGRID_NE, - !! CGRID_NE or DGRID_NE, indicating where the two components of th + !! 
CGRID_NE or DGRID_NE, indicating where the two components of th !! vector are discretized integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo logical, optional, intent(in) :: complete !< Optional argument indicating whether the halo updates @@ -856,22 +858,22 @@ subroutine start_vector_group_update_2d(group, u_cmpt, v_cmpt, domain, flags, gr real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which ! is having its halos points exchanged. ! (inout) v_cmpt - The nominal meridional (v) component of the vector pair -! which is having its halos points exchanged. +! which is having its halos points exchanged. ! (in) domain - Contains domain decomposition information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE, ! CGRID_NE or DGRID_NE, indicating where the two components of the -! vector are discretized. +! vector are discretized. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -893,12 +895,12 @@ end subroutine start_vector_group_update_2d subroutine start_vector_group_update_3d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete) type(group_halo_update_type), intent(inout) :: group !< The data type that store information for group update real, dimension(:,:,:), intent(inout) :: u_cmpt, v_cmpt !! The nominal zonal (u) and meridional (v) - !! components of the vector pair that + !! components of the vector pair that !! is having its halos points exchanged. type(domain2d), intent(inout) :: domain !< Contains domain decomposition information integer, optional, intent(in) :: flags !< Optional integer indicating which directions the data should be sent integer, optional, intent(in) :: gridtype !< An optional flag, which may be one of A_GRID, BGRID_NE, - !! CGRID_NE or DGRID_NE, indicating where the two components of th + !! CGRID_NE or DGRID_NE, indicating where the two components of th !! vector are discretized integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo logical, optional, intent(in) :: complete !< Optional argument indicating whether the halo updates @@ -906,22 +908,22 @@ subroutine start_vector_group_update_3d(group, u_cmpt, v_cmpt, domain, flags, gr real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which ! is having its halos points exchanged. ! (inout) v_cmpt - The nominal meridional (v) component of the vector pair -! which is having its halos points exchanged. +! which is having its halos points exchanged. ! (in) domain - Contains domain decomposition information. ! (in) flags - An optional integer indicating which directions the -! 
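The vector variants carry u and v together so mpp can rotate and sign-flip the components across cubed-sphere edges. A hedged sketch of the calling pattern for D-grid winds, under the same generic-interface assumption as above (DGRID_NE comes from mpp_domains_mod):

    type(group_halo_update_type) :: halo_uv
    call start_group_halo_update(halo_uv, u, v, domain, gridtype=DGRID_NE, complete=.true.)
    ! ... overlapped work ...
    call complete_group_halo_update(halo_uv, domain)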
data should be sent. +! data should be sent. ! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE, ! CGRID_NE or DGRID_NE, indicating where the two components of the -! vector are discretized. +! vector are discretized. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -946,8 +948,8 @@ subroutine complete_group_halo_update(group, domain) type(domain2d), intent(inout) :: domain !< Contains domain decomposition information real :: d_type -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! (in) domain - Contains domain decomposition information. if( halo_update_type == 1 ) then @@ -962,7 +964,7 @@ end subroutine complete_group_halo_update subroutine broadcast_domains(Atm) - + type(fv_atmos_type), intent(INOUT) :: Atm(:) integer :: n, i1, i2, j1, j2, i @@ -991,7 +993,7 @@ subroutine switch_current_domain(new_domain,new_domain_for_coupler) logical, parameter :: debug = .FALSE. !--- find the tile number - !tile = mpp_pe()/npes_per_tile+1 + !tile = mpp_pe()/npes_per_tile+1 !ntiles = mpp_get_ntile_count(new_domain) call mpp_get_compute_domain( new_domain, is, ie, js, je ) isc = is ; jsc = js @@ -1002,8 +1004,9 @@ subroutine switch_current_domain(new_domain,new_domain_for_coupler) ! if (debug .AND. (gid==masterproc)) write(*,200) tile, is, ie, js, je !200 format('New domain: ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ') +#if defined (FMS1_IO) call set_domain(new_domain) - +#endif end subroutine switch_current_domain @@ -1029,12 +1032,12 @@ end subroutine switch_current_Atm !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !! -! +! subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: q integer, intent(IN):: npx,npy - integer, intent(IN):: FILL !< X-Dir or Y-Dir - logical, OPTIONAL, intent(IN) :: AGRID, BGRID + integer, intent(IN):: FILL !< X-Dir or Y-Dir + logical, OPTIONAL, intent(IN) :: AGRID, BGRID integer :: i,j if (present(BGRID)) then @@ -1043,7 +1046,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -1052,7 +1055,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner if ((ie==npx-1) .and. 
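The FMS1_IO guard just added around set_domain exists because FMS2 abandons the global "current domain": every fms2_io call carries its domain inside the file object instead. A sketch of the FMS2-side equivalent, with file and variable names purely hypothetical:

    #if !defined (FMS1_IO)
       use fms2_io_mod, only: FmsNetcdfDomainFile_t, open_file, read_data, close_file
       type(FmsNetcdfDomainFile_t) :: fileobj
       ! The domain is bound to the file handle rather than set globally.
       if (open_file(fileobj, 'INPUT/example.res.nc', 'read', new_domain)) then
          call read_data(fileobj, 'ps', ps)
          call close_file(fileobj)
       end if
    #endif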
(je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner @@ -1061,7 +1064,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case default do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -1075,7 +1078,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner @@ -1084,7 +1087,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner @@ -1092,13 +1095,13 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) enddo case default do j=1,ng - do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + do i=1,ng + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner enddo - enddo + enddo end select endif endif @@ -1109,12 +1112,12 @@ end subroutine fill_corners_2d_r4 !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !! -! +! subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: q integer, intent(IN):: npx,npy - integer, intent(IN):: FILL ! 6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & +!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & !!$ dest_gid, send_tag, & !!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & +!!$ src_gid, recv_tag, & !!$ commglobal, Stats, ierror ) !!$ nsend=nsend-1 !!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & +!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & !!$ send_tag, commglobal, sqest(nsend), ierror ) !!$ nrecv=nrecv+1 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & @@ -1694,7 +1697,7 @@ end subroutine fill_corners_agrid_r8 !!$ recv_tag, commglobal, rqest(nrecv), ierror ) !!$ endif !!$ -!!$! wait for comm to complete +!!$! 
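The corner-fill assignments are easiest to read by evaluating one branch. Taking the B-grid XDir south-west case above, q(1-i,1-j) = q(1-j,i+1), the first few ghost cells work out to:

    ! i=1, j=1:  q( 0, 0) = q( 0, 2)
    ! i=2, j=1:  q(-1, 0) = q( 0, 3)
    ! i=1, j=2:  q( 0,-1) = q(-1, 2)
    ! i.e. the corner halo is a 90-degree rotation of the adjacent edge
    ! halo, which keeps fields single-valued at cubed-sphere corners.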
wait for comm to complete !!$ if (npes==6) then !!$ if (nsend>0) then !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) @@ -1748,10 +1751,10 @@ end subroutine fill_corners_agrid_r8 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & !!$ dest_gid, send_tag, & !!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & +!!$ src_gid, recv_tag, & !!$ commglobal, Stats, ierror ) !!$ nsend=nsend-1 -!!$ else +!!$ else !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & !!$ send_tag, commglobal, sqest(nsend), ierror ) !!$ nrecv=nrecv+1 @@ -1770,13 +1773,13 @@ end subroutine fill_corners_agrid_r8 !!$ src_gid = (tile+1)*npes_x*npes_y !!$ if (src_gid+1 > npes) src_gid=src_gid-npes !!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & +!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & !!$ dest_gid, send_tag, & !!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & +!!$ src_gid, recv_tag, & !!$ commglobal, Stats, ierror ) !!$ nsend=nsend-1 -!!$ else +!!$ else !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & !!$ send_tag, commglobal, sqest(nsend), ierror ) !!$ nrecv=nrecv+1 @@ -1784,7 +1787,7 @@ end subroutine fill_corners_agrid_r8 !!$ recv_tag, commglobal, rqest(nrecv), ierror ) !!$ endif !!$ endif -!!$! wait for comm to complete +!!$! wait for comm to complete !!$ if (npes==6) then !!$ if (nsend>0) then !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) @@ -1800,7 +1803,7 @@ end subroutine fill_corners_agrid_r8 !!$ endif !!$ nsend=0 ; nrecv=0 !!$ endif -!!$ +!!$ !!$! Odd Face UL 1 pair ; 1 1-way !!$ if ( (tile==1) .and. (is==1) .and. (je==npy-1) ) then !!$ nsend=nsend+1 @@ -1829,22 +1832,22 @@ end subroutine fill_corners_agrid_r8 !!$ qsend(nsend) = q(is+1,je+1) !!$ send_tag = 400+tile !!$ dest_gid = npes_x*(npes_y-1) -!!$ recv_tag = 400+(tile-2) +!!$ recv_tag = 400+(tile-2) !!$ src_gid = dest_gid !!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & +!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & !!$ dest_gid, send_tag, & !!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & +!!$ src_gid, recv_tag, & !!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 +!!$ nsend=nsend-1 !!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & +!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & !!$ send_tag, commglobal, sqest(nsend), ierror ) !!$ nrecv=nrecv+1 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & !!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif +!!$ endif !!$ nsend=nsend+1 !!$ qsend(nsend) = q(is,je) !!$ send_tag = 400+tile @@ -1855,12 +1858,12 @@ end subroutine fill_corners_agrid_r8 !!$ if ( (tile==5) .and. (is==1) .and. (je==npy-1) ) then !!$ recv_tag = 400+(tile-2) !!$ src_gid = (tile-3)*npes_x*npes_y + npes_x*(npes_y-1) -!!$ nrecv=nrecv+1 +!!$ nrecv=nrecv+1 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) +!!$ recv_tag, commglobal, rqest(nrecv), ierror ) !!$ endif !!$ -!!$! wait for comm to complete +!!$! wait for comm to complete !!$ if (npes==6) then !!$ if (nsend>0) then !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) @@ -1877,7 +1880,7 @@ end subroutine fill_corners_agrid_r8 !!$ nsend=0 ; nrecv=0 !!$ endif !!$ -!!$! Send to Even face UL 3 1-way +!!$! Send to Even face UL 3 1-way !!$ if ( (ie==npx-1) .and. 
(je==npy-1) ) then !!$ nsend=nsend+1 !!$ qsend(nsend) = q(ie,je+1) @@ -1890,7 +1893,7 @@ end subroutine fill_corners_agrid_r8 !!$! Receive Odd Face LR 3 1-way !!$ if ( (ie==npx-1) .and. (js==1) ) then !!$ recv_tag = 200+(tile+1) -!!$ src_gid = (tile-1)*npes_x*npes_y + npes_x*npes_y +!!$ src_gid = (tile-1)*npes_x*npes_y + npes_x*npes_y !!$ nrecv=nrecv+1 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & !!$ recv_tag, commglobal, rqest(nrecv), ierror ) @@ -1919,35 +1922,35 @@ end subroutine fill_corners_agrid_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -!>@brief The subroutine 'mp_gather_4d_r4' calls SPMD Gather. +!>@brief The subroutine 'mp_gather_4d_r4' calls SPMD Gather. subroutine mp_gather_4d_r4(q, i1,i2, j1,j2, idim, jdim, kdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=4), intent(INOUT):: q(idim,jdim,kdim,ldim) - integer :: i,j,k,l,n,icnt + integer :: i,j,k,l,n,icnt integer :: Lsize, Lsize_buf(1) integer :: Gsize integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid) integer :: Ldims(5), Gdims(5*npes_this_grid) real(kind=4), allocatable, dimension(:) :: larr, garr - + Ldims(1) = i1 Ldims(2) = i2 Ldims(3) = j1 Ldims(4) = j2 - Ldims(5) = tile + Ldims(5) = tile do l=1,npes_this_grid cnts(l) = 5 Ldispl(l) = 5*(l-1) - enddo + enddo call mpp_gather(Ldims, Gdims) ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror) - + Lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) ) * kdim do l=1,npes_this_grid cnts(l) = 1 Ldispl(l) = l-1 - enddo + enddo LsizeS(:)=1 Lsize_buf(1) = Lsize call mpp_gather(Lsize_buf, LsizeS) @@ -2004,18 +2007,18 @@ end subroutine mp_gather_4d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_gather_3d_r4 :: Call SPMD Gather +! mp_gather_3d_r4 :: Call SPMD Gather ! subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 integer, intent(IN) :: idim, jdim, ldim real(kind=4), intent(INOUT):: q(idim,jdim,ldim) - integer :: i,j,l,n,icnt + integer :: i,j,l,n,icnt integer :: Lsize, Lsize_buf(1) integer :: Gsize integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid) integer :: Ldims(5), Gdims(5*npes_this_grid) - real(kind=4), allocatable, dimension(:) :: larr, garr + real(kind=4), allocatable, dimension(:) :: larr, garr Ldims(1) = i1 Ldims(2) = i2 @@ -2033,7 +2036,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) do l=1,npes_this_grid cnts(l) = 1 Ldispl(l) = l-1 - enddo + enddo LsizeS(:)=1 Lsize_buf(1) = Lsize call mpp_gather(Lsize_buf, LsizeS) @@ -2043,7 +2046,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) icnt = 1 do j=j1,j2 do i=i1,i2 - larr(icnt) = q(i,j,tile) + larr(icnt) = q(i,j,tile) icnt=icnt+1 enddo enddo @@ -2063,7 +2066,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) do n=2,npes_this_grid icnt=1 do l=Gdims( (n-1)*5 + 5 ), Gdims( (n-1)*5 + 5 ) - do j=Gdims( (n-1)*5 + 3 ), Gdims( (n-1)*5 + 4 ) + do j=Gdims( (n-1)*5 + 3 ), Gdims( (n-1)*5 + 4 ) do i=Gdims( (n-1)*5 + 1 ), Gdims( (n-1)*5 + 2 ) q(i,j,l) = garr(Ldispl(n)+icnt) icnt=icnt+1 @@ -2083,7 +2086,7 @@ end subroutine mp_gather_3d_r4 !------------------------------------------------------------------------------- ! 
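mp_gather_4d_r4 and its 3-D siblings below share a three-phase protocol. A condensed sketch, using the variable names from these routines (the data-phase call shape is an assumption extrapolated from the first two phases):

    ! Phase 1: gather every rank's index box (i1,i2,j1,j2,tile).
    call mpp_gather(Ldims, Gdims)
    ! Phase 2: gather every rank's packed-buffer length.
    Lsize_buf(1) = Lsize
    call mpp_gather(Lsize_buf, LsizeS)
    ! Phase 3: pack q row-major into the 1-D buffer larr, gather into
    ! garr, then on the root unpack each contributor's box at offset
    ! Ldispl(n), the running sum of the lengths collected in LsizeS.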
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_gather_3d_r8 :: Call SPMD Gather +! mp_gather_3d_r8 :: Call SPMD Gather ! subroutine mp_gather_3d_r8(q, i1,i2, j1,j2, idim, jdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 @@ -2163,7 +2166,7 @@ end subroutine mp_gather_3d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_i :: Call SPMD broadcast +! mp_bcst_i :: Call SPMD broadcast ! subroutine mp_bcst_i(q) integer, intent(INOUT) :: q @@ -2178,7 +2181,7 @@ end subroutine mp_bcst_i !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_r4 :: Call SPMD broadcast +! mp_bcst_r4 :: Call SPMD broadcast ! subroutine mp_bcst_r4(q) real(kind=4), intent(INOUT) :: q @@ -2193,7 +2196,7 @@ end subroutine mp_bcst_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_r8 :: Call SPMD broadcast +! mp_bcst_r8 :: Call SPMD broadcast ! subroutine mp_bcst_r8(q) real(kind=8), intent(INOUT) :: q @@ -2208,7 +2211,7 @@ end subroutine mp_bcst_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_1d_r4 :: Call SPMD broadcast +! mp_bcst_1d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_1d_r4(q, idim) integer, intent(IN) :: idim @@ -2224,7 +2227,7 @@ end subroutine mp_bcst_1d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_1d_r8 :: Call SPMD broadcast +! mp_bcst_1d_r8 :: Call SPMD broadcast ! subroutine mp_bcst_1d_r8(q, idim) integer, intent(IN) :: idim @@ -2240,7 +2243,7 @@ end subroutine mp_bcst_1d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_2d_r4 :: Call SPMD broadcast +! mp_bcst_2d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_2d_r4(q, idim, jdim) integer, intent(IN) :: idim, jdim @@ -2256,7 +2259,7 @@ end subroutine mp_bcst_2d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_2d_r8 :: Call SPMD broadcast +! mp_bcst_2d_r8 :: Call SPMD broadcast ! subroutine mp_bcst_2d_r8(q, idim, jdim) integer, intent(IN) :: idim, jdim @@ -2272,7 +2275,7 @@ end subroutine mp_bcst_2d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_3d_r4 :: Call SPMD broadcast +! mp_bcst_3d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_3d_r4(q, idim, jdim, kdim) integer, intent(IN) :: idim, jdim, kdim @@ -2288,7 +2291,7 @@ end subroutine mp_bcst_3d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_3d_r8 :: Call SPMD broadcast +! mp_bcst_3d_r8 :: Call SPMD broadcast ! 
subroutine mp_bcst_3d_r8(q, idim, jdim, kdim) integer, intent(IN) :: idim, jdim, kdim @@ -2303,33 +2306,33 @@ end subroutine mp_bcst_3d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_bcst_4d_r4 :: Call SPMD broadcast +! +! mp_bcst_4d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_4d_r4(q, idim, jdim, kdim, ldim) integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=4), intent(INOUT) :: q(idim,jdim,kdim,ldim) call MPI_BCAST(q, idim*jdim*kdim*ldim, MPI_REAL, masterproc, commglobal, ierror) - + end subroutine mp_bcst_4d_r4 -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_bcst_4d_r8 :: Call SPMD broadcast +! +! mp_bcst_4d_r8 :: Call SPMD broadcast ! subroutine mp_bcst_4d_r8(q, idim, jdim, kdim, ldim) integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=8), intent(INOUT) :: q(idim,jdim,kdim,ldim) call MPI_BCAST(q, idim*jdim*kdim*ldim, MPI_DOUBLE_PRECISION, masterproc, commglobal, ierror) - + end subroutine mp_bcst_4d_r8 -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -2399,44 +2402,44 @@ end subroutine mp_bcst_4d_i !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX +! +! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r4_1d(mymax,npts) integer, intent(IN) :: npts real(kind=4), intent(INOUT) :: mymax(npts) - + real(kind=4) :: gmax(npts) - + call MPI_ALLREDUCE( mymax, gmax, npts, MPI_REAL, MPI_MAX, & commglobal, ierror ) - + mymax = gmax - + end subroutine mp_reduce_max_r4_1d -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX +! +! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r8_1d(mymax,npts) integer, intent(IN) :: npts real(kind=8), intent(INOUT) :: mymax(npts) - + real(kind=8) :: gmax(npts) - + call MPI_ALLREDUCE( mymax, gmax, npts, MPI_DOUBLE_PRECISION, MPI_MAX, & commglobal, ierror ) - + mymax = gmax - + end subroutine mp_reduce_max_r8_1d -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -2444,7 +2447,7 @@ end subroutine mp_reduce_max_r8_1d !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX +! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r4(mymax) real(kind=4), intent(INOUT) :: mymax @@ -2461,7 +2464,7 @@ end subroutine mp_reduce_max_r4 !------------------------------------------------------------------------------- ! 
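The mp_bcst_* shims differ only in rank, kind, and MPI datatype, so extending the family is mechanical. A hypothetical 2-D integer instance in the same style (not part of the patch):

    subroutine mp_bcst_2d_i(q, idim, jdim)
       integer, intent(IN)    :: idim, jdim
       integer, intent(INOUT) :: q(idim,jdim)
       ! masterproc, commglobal, ierror are the module-level SPMD handles.
       call MPI_BCAST(q, idim*jdim, MPI_INTEGER, masterproc, commglobal, ierror)
    end subroutine mp_bcst_2d_i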
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX +! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r8(mymax) real(kind=8), intent(INOUT) :: mymax @@ -2505,7 +2508,7 @@ end subroutine mp_reduce_min_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_4d_i :: Call SPMD REDUCE_MAX +! mp_bcst_4d_i :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_i(mymax) integer, intent(INOUT) :: mymax @@ -2525,7 +2528,7 @@ end subroutine mp_reduce_max_i !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r4(mysum) real(kind=4), intent(INOUT) :: mysum @@ -2545,7 +2548,7 @@ end subroutine mp_reduce_sum_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r8(mysum) real(kind=8), intent(INOUT) :: mysum @@ -2612,7 +2615,7 @@ end subroutine mp_reduce_sum_r4_2darr !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts) integer, intent(in) :: npts @@ -2625,7 +2628,7 @@ subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts) mysum = 0.0 do i=1,npts mysum = mysum + sum1d(i) - enddo + enddo call MPI_ALLREDUCE( mysum, gsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & commglobal, ierror ) @@ -2640,7 +2643,7 @@ end subroutine mp_reduce_sum_r4_1d !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM ! 
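Despite the REDUCE_* naming, these wrappers call MPI_ALLREDUCE, so every rank receives the global result, not just the root. A typical (hypothetical) use is a global stability diagnostic:

    real(kind=8) :: cfl_max
    cfl_max = maxval(abs(cfl_local))   ! cfl_local: hypothetical per-rank array
    call mp_reduce_max_r8(cfl_max)     ! all ranks now hold the global maximum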
subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts) integer, intent(in) :: npts @@ -2653,7 +2656,7 @@ subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts) mysum = 0.0 do i=1,npts mysum = mysum + sum1d(i) - enddo + enddo call MPI_ALLREDUCE( mysum, gsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & commglobal, ierror ) diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index 8e15a065d..85c86e746 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -102,11 +102,22 @@ module fv_nwp_nudge_mod use external_sst_mod, only: i_sst, j_sst, sst_ncep, sst_anom, forecast_mode use diag_manager_mod, only: register_diag_field, send_data - use constants_mod, only: pi=>pi_8, grav, rdgas, cp_air, kappa, cnst_radius =>radius - use fms_mod, only: write_version_number, open_namelist_file, & - check_nml_error, file_exist, close_file +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: pi=>pi_8, grav, rdgas, cp_air, kappa, cnst_radius =>radius + use fms_mod, only: write_version_number, & + check_nml_error +#if defined (FMS1_IO) + use fms_mod, only: open_namelist_file, & + check_nml_error, file_exists => file_exist, close_file +#else + use fms2_io_mod, only: file_exists, close_file +#endif !use fms_io_mod, only: field_size - use mpp_mod, only: mpp_error, FATAL, stdlog, get_unit, mpp_pe + use mpp_mod, only: mpp_error, FATAL, stdlog, get_unit, mpp_pe, input_nml_file use mpp_domains_mod, only: mpp_update_domains, domain2d use time_manager_mod, only: time_type, get_time, get_date @@ -1255,15 +1266,8 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct track_file_name = "No_File_specified" - if( file_exist( 'input.nml' ) ) then - unit = open_namelist_file () - io = 1 - do while ( io .ne. 0 ) - read( unit, nml = fv_nwp_nudge_nml, iostat = io, end = 10 ) - ierr = check_nml_error(io,'fv_nwp_nudge_nml') - end do -10 call close_file ( unit ) - end if + read (input_nml_file, nml=fv_nwp_nudge_nml, iostat=io) + ierr = check_nml_error (io, 'fv_nwp_nudge_nml') call write_version_number ( 'FV_NUDGE_MOD', version ) if ( master ) then f_unit=stdlog() @@ -1447,7 +1451,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) #include - if( .not. file_exist(fname) ) then + if( .not. file_exists(fname) ) then call mpp_error(FATAL,'==> Error from get_ncep_analysis: file not found') else call open_ncfile( fname, ncid ) ! open the file diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index ea38ef3e7..2df5609c3 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
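The fv_nudge change above replaces the open_namelist_file/read/close loop with the namelist image that FMS preloads into input_nml_file. Any module can adopt the same pattern; a sketch for a hypothetical my_mod_nml:

    use mpp_mod, only: input_nml_file
    use fms_mod, only: check_nml_error
    integer :: io, ierr
    logical :: some_flag = .false.      ! hypothetical namelist entry
    namelist /my_mod_nml/ some_flag
    read (input_nml_file, nml=my_mod_nml, iostat=io)
    ierr = check_nml_error(io, 'my_mod_nml')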
+!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** module fv_restart_mod @@ -68,14 +68,14 @@ module fv_restart_mod ! ! ! fv_grid_utils_mod -! ptop_min, fill_ghost, g_sum, +! ptop_min, fill_ghost, g_sum, ! make_eta_level, cubed_to_latlon, great_circle_dist ! ! ! fv_io_mod -! fv_io_init, fv_io_read_restart, fv_io_write_restart, -! remap_restart, fv_io_register_restart, fv_io_register_nudge_restart, -! fv_io_register_restart_BCs, fv_io_register_restart_BCs_NH, fv_io_write_BCs, +! fv_io_init, fv_io_read_restart, fv_io_write_restart, +! remap_restart, fv_io_register_restart, fv_io_register_nudge_restart, +! fv_io_register_restart_BCs, fv_io_register_restart_BCs_NH, fv_io_write_BCs, ! fv_io_read_BCs ! ! @@ -105,12 +105,12 @@ module fv_restart_mod ! ! mpp_mod ! mpp_chksum, stdout, mpp_error, FATAL, NOTE, get_unit, mpp_sum, -! mpp_get_current_pelist, mpp_set_current_pelist, mpp_send, mpp_recv, +! mpp_get_current_pelist, mpp_set_current_pelist, mpp_send, mpp_recv, ! mpp_sync_self, mpp_npes, mpp_pe, mpp_sync ! ! ! mpp_domains_mod -! mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain, +! mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain, ! mpp_update_domains, domain2d, DGRID_NE, CENTER, CORNER, NORTH, EAST, ! mpp_get_C2F_index, WEST, SOUTH, mpp_global_field ! @@ -132,8 +132,12 @@ module fv_restart_mod ! ! - - use constants_mod, only: kappa, pi=>pi_8, omega, rdgas, grav, rvgas, cp_air, radius +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: kappa, pi=>pi_8, omega, rdgas, grav, rvgas, cp_air, radius use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_bounds_type, R_GRID use fv_io_mod, only: fv_io_init, fv_io_read_restart, fv_io_write_restart, & remap_restart, fv_io_register_restart, fv_io_register_nudge_restart, & @@ -159,10 +163,14 @@ module fv_restart_mod use field_manager_mod, only: MODEL_ATMOS use external_ic_mod, only: get_external_ic, get_cubed_sphere_terrain use fv_eta_mod, only: compute_dz_var, compute_dz_L32, set_hybrid_z - use boundary_mod, only: fill_nested_grid, nested_grid_BC, update_coarse_grid + use boundary_mod, only: fill_nested_grid, nested_grid_BC, update_coarse_grid use field_manager_mod, only: MODEL_ATMOS use fv_timing_mod, only: timing_on, timing_off - use fms_mod, only: file_exist +#if defined (FMS1_IO) + use fms_mod, only: file_exists => file_exist +#else + use fms2_io_mod, only: file_exists +#endif use fv_treat_da_inc_mod, only: read_da_inc implicit none @@ -175,7 +183,7 @@ module fv_restart_mod !--- private data type logical :: module_is_initialized = .FALSE. -contains +contains subroutine fv_restart_init() @@ -183,9 +191,9 @@ subroutine fv_restart_init() module_is_initialized = .TRUE. end subroutine fv_restart_init -!>@brief The subroutine 'fv_restart' initializes the model state, including +!>@brief The subroutine 'fv_restart' initializes the model state, including !! 
prognaostic variables and several auxiliary pressure variables -!>@details The modules also writes out restart files at the end of the +!>@details The modules also writes out restart files at the end of the !! model run, and prints out diagnostics of the initial state. !! There are several options to control the initialization process. subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_type, grids_on_this_pe) @@ -240,15 +248,17 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ Atm(n)%flagstruct%warm_start = .false. !resetting warm_start flag to avoid FATAL error below else if (is_master()) print*, 'Searching for nested grid restart file ', trim(fname) - cold_start_grids(n) = .not. file_exist(fname, Atm(n)%domain) - Atm(n)%flagstruct%warm_start = file_exist(fname, Atm(n)%domain)!resetting warm_start flag to avoid FATAL error below +#if defined (FMS1_IO) + cold_start_grids(n) = .not. file_exists(fname, Atm(n)%domain) + Atm(n)%flagstruct%warm_start = file_exists(fname, Atm(n)%domain)!resetting warm_start flag to avoid FATAL error below +#endif endif endif if (.not. grids_on_this_pe(n)) then - + !Even if this grid is not on this PE, if it has child grids we must send - !along the data that is needed. + !along the data that is needed. !This is a VERY complicated bit of code that attempts to follow the entire decision tree ! of the initialization without doing anything. This could very much be cleaned up. @@ -260,24 +270,26 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ call fill_nested_grid_topo_halo(Atm(n), .false.) call nested_grid_BC(Atm(n)%ps, Atm(n)%parent_grid%ps, Atm(n)%neststruct%nest_domain, & Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.false.) - call setup_nested_boundary_halo(Atm(n),.false.) + Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.false.) + call setup_nested_boundary_halo(Atm(n),.false.) else call fill_nested_grid_topo(Atm(n), .false.) - call setup_nested_boundary_halo(Atm(n),.false.) + call setup_nested_boundary_halo(Atm(n),.false.) if ( Atm(n)%flagstruct%external_ic .and. grid_type < 4 ) call fill_nested_grid_data(Atm(n:n), .false.) endif else if (is_master()) print*, 'Searching for nested grid BC files ', trim(fname_ne), ' ', trim (fname_sw) - !!!! PROBLEM: file_exist doesn't know to look for fv_BC_ne.res.nest02.nc instead of fv_BC_ne.res.nc on coarse grid - if (file_exist(fname_ne, Atm(n)%domain) .and. file_exist(fname_sw, Atm(n)%domain)) then + !!!! PROBLEM: file_exists doesn't know to look for fv_BC_ne.res.nest02.nc instead of fv_BC_ne.res.nc on coarse grid +#if defined (FMS1_IO) + if (file_exists(fname_ne, Atm(n)%domain) .and. file_exists(fname_sw, Atm(n)%domain)) then else if ( is_master() ) write(*,*) 'BC files not found, re-generating nested grid boundary conditions' call fill_nested_grid_topo_halo(Atm(n), .false.) call setup_nested_boundary_halo(Atm(n), .false.) - Atm(N)%neststruct%first_step = .true. + Atm(N)%neststruct%first_step = .true. endif +#endif end if if (.not. Atm(n)%flagstruct%hydrostatic .and. Atm(n)%flagstruct%make_nh .and. 
& @@ -345,12 +357,12 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ !Fill nested grid halo with ps call nested_grid_BC(Atm(n)%ps, Atm(n)%parent_grid%ps, Atm(n)%neststruct%nest_domain, & Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.true.) + Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.true.) endif endif if ( Atm(n)%flagstruct%external_ic ) then if( is_master() ) write(*,*) 'Calling get_external_ic' - call get_external_ic(Atm(n:n), Atm(n)%domain, cold_start_grids(n)) + call get_external_ic(Atm(n:n), Atm(n)%domain, cold_start_grids(n)) if( is_master() ) write(*,*) 'IC generated from the specified external source' endif @@ -373,11 +385,12 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ Atm(N)%neststruct%first_step = .false. if (Atm(n)%neststruct%nested) then if ( Atm(n)%flagstruct%npz_rst /= 0 .and. Atm(n)%flagstruct%npz_rst /= Atm(n)%npz ) then - call setup_nested_boundary_halo(Atm(n)) + call setup_nested_boundary_halo(Atm(n)) else !If BC file is found, then read them in. Otherwise we need to initialize the BCs. if (is_master()) print*, 'Searching for nested grid BC files ', trim(fname_ne), ' ', trim (fname_sw) - if (file_exist(fname_ne, Atm(n)%domain) .and. file_exist(fname_sw, Atm(n)%domain)) then +#if defined (FMS1_IO) + if (file_exists(fname_ne, Atm(n)%domain) .and. file_exists(fname_sw, Atm(n)%domain)) then call fv_io_read_BCs(Atm(n)) else if ( is_master() ) write(*,*) 'BC files not found, re-generating nested grid boundary conditions' @@ -385,6 +398,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ call setup_nested_boundary_halo(Atm(n), .true.) Atm(N)%neststruct%first_step = .true. endif +#endif !Following line to make sure u and v are consistent across processor subdomains call mpp_update_domains(Atm(n)%u, Atm(n)%v, Atm(n)%domain, gridtype=DGRID_NE, complete=.true.) endif @@ -479,7 +493,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if ( .not. Atm(n)%flagstruct%external_ic ) then call init_case(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt,Atm(n)%delp,Atm(n)%q, & Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & - Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & + Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & Atm(n)%ak, Atm(n)%bk, Atm(n)%gridstruct, Atm(n)%flagstruct,& Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%ng, & ncnst, Atm(n)%flagstruct%nwat, & @@ -495,7 +509,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ call init_double_periodic(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt, & Atm(n)%delp,Atm(n)%q,Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, & Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & - Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & + Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & Atm(n)%ak, Atm(n)%bk, & Atm(n)%gridstruct, Atm(n)%flagstruct, & Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%ng, & @@ -538,7 +552,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ !if (Atm(n)%neststruct%nested) then ! Only fill nested-grid data if external_ic is called for the cubed-sphere grid if (Atm(n)%neststruct%nested) then - call setup_nested_boundary_halo(Atm(n), .true.) + call setup_nested_boundary_halo(Atm(n), .true.) if (Atm(n)%flagstruct%external_ic .and. .not. Atm(n)%flagstruct%nggps_ic .and. 
grid_type < 4 ) then call fill_nested_grid_data(Atm(n:n)) endif @@ -692,7 +706,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if ( .not. Atm(n)%flagstruct%srf_init ) then call cubed_to_latlon(Atm(n)%u, Atm(n)%v, Atm(n)%ua, Atm(n)%va, & Atm(n)%gridstruct, & - Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, 1, & + Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, 1, & Atm(n)%gridstruct%grid_type, Atm(n)%domain, & Atm(n)%gridstruct%nested, Atm(n)%flagstruct%c2l_ord, Atm(n)%bd) do j=jsc,jec @@ -740,7 +754,7 @@ subroutine setup_nested_boundary_halo(Atm, proc_in) ncnst = Atm%ncnst isc = Atm%bd%isc; iec = Atm%bd%iec; jsc = Atm%bd%jsc; jec = Atm%bd%jec is = Atm%bd%is ; ie = Atm%bd%ie ; js = Atm%bd%js ; je = Atm%bd%je - npz = Atm%npz + npz = Atm%npz nwat = Atm%flagstruct%nwat #ifdef MAPL_MODE @@ -823,7 +837,7 @@ subroutine setup_nested_boundary_halo(Atm, proc_in) call nested_grid_BC(Atm%pt, Atm%parent_grid%pt, Atm%neststruct%nest_domain, & Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) + Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) if (.not. Atm%flagstruct%hydrostatic) then @@ -894,13 +908,13 @@ subroutine fill_nested_grid_topo_halo(Atm, proc_in) call nested_grid_BC(Atm%phis, Atm%parent_grid%phis, Atm%neststruct%nest_domain, & Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & Atm%npx, Atm%npy, Atm%bd, isg, ieg, jsg, jeg, proc_in=proc_in) - + end subroutine fill_nested_grid_topo_halo -!>@brief The subroutine 'fill_nested_grid_topo' fills the nested grid with topo +!>@brief The subroutine 'fill_nested_grid_topo' fills the nested grid with topo !! to enable boundary smoothing. !>@details Interior topography is then over-written in get_external_ic. - subroutine fill_nested_grid_topo(Atm, proc_in) + subroutine fill_nested_grid_topo(Atm, proc_in) type(fv_atmos_type), intent(INOUT) :: Atm logical, intent(IN), OPTIONAL :: proc_in real, allocatable :: g_dat(:,:,:) @@ -937,7 +951,7 @@ subroutine fill_nested_grid_topo(Atm, proc_in) call mpp_global_field( & Atm%parent_grid%domain, & Atm%parent_grid%phis(isd_p:ied_p,jsd_p:jed_p), g_dat(isg:,jsg:,1), position=CENTER) - if (mpp_pe() == sending_proc) then + if (mpp_pe() == sending_proc) then do p=1,size(Atm%pelist) call mpp_send(g_dat,size(g_dat),Atm%pelist(p)) enddo @@ -987,8 +1001,8 @@ subroutine fill_nested_grid_data(Atm, proc_in) jed = Atm(1)%bd%jed ncnst = Atm(1)%ncnst isc = Atm(1)%bd%isc; iec = Atm(1)%bd%iec; jsc = Atm(1)%bd%jsc; jec = Atm(1)%bd%jec - npz = Atm(1)%npz - + npz = Atm(1)%npz + gid = mpp_pe() sending_proc = Atm(1)%parent_grid%pelist(1) + (Atm(1)%neststruct%parent_tile-1)*Atm(1)%parent_grid%npes_per_tile @@ -1000,8 +1014,8 @@ subroutine fill_nested_grid_data(Atm, proc_in) call mpp_get_global_domain( Atm(1)%parent_grid%domain, & isg, ieg, jsg, jeg, xsize=npx_p, ysize=npy_p) - if (process) then - + if (process) then + call mpp_error(NOTE, "FILLING NESTED GRID DATA") else @@ -1067,7 +1081,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) end do - !Note that we do NOT fill in phis (surface geopotential), which should + !Note that we do NOT fill in phis (surface geopotential), which should !be computed exactly instead of being interpolated. 
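The two preprocessor idioms introduced across these files follow a single template: physical constants come from an r4 clone of constants_mod when FV is built single-precision, and file_exists is either aliased to the deprecated FMS1 spelling or imported from fms2_io. The template (the only-list here is illustrative):

    #if defined (SINGLE_FV)
       use constantsr4_mod, &        ! r4 copies of the physical constants
    #else
       use constants_mod, &          ! default r8 constants
    #endif
          only: grav, kappa
    #if defined (FMS1_IO)
       use fms_mod,     only: file_exists => file_exist
    #else
       use fms2_io_mod, only: file_exists
    #endif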
@@ -1131,7 +1145,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call mpp_sync_self call timing_off('COMM_TOTAL') - if (process) then + if (process) then allocate(pt_coarse(isd:ied,jsd:jed,npz)) call fill_nested_grid(pt_coarse, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & @@ -1258,7 +1272,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) end if #endif - deallocate(g_dat) + deallocate(g_dat) !u @@ -1322,10 +1336,10 @@ subroutine fill_nested_grid_data(Atm, proc_in) end subroutine fill_nested_grid_data - !>@brief The subroutine ' fill_nested_grid_data_end' + !>@brief The subroutine ' fill_nested_grid_data_end' !! actually sets up the coarse-grid TOPOGRAPHY. subroutine fill_nested_grid_data_end(Atm, proc_in) - type(fv_atmos_type), intent(INOUT) :: Atm + type(fv_atmos_type), intent(INOUT) :: Atm logical, intent(IN), OPTIONAL :: proc_in real, allocatable :: g_dat(:,:,:), pt_coarse(:,:,:) integer :: i,j,k,nq, sphum, ncnst, istart, iend, npz @@ -1350,8 +1364,8 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) jed = Atm%bd%jed ncnst = Atm%ncnst isc = Atm%bd%isc; iec = Atm%bd%iec; jsc = Atm%bd%jsc; jec = Atm%bd%jec - npz = Atm%npz - + npz = Atm%npz + isd_p = Atm%parent_grid%bd%isd ied_p = Atm%parent_grid%bd%ied jsd_p = Atm%parent_grid%bd%jsd @@ -1414,7 +1428,7 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) Atm%flagstruct%moist_phys, .true., Atm%flagstruct%nwat, Atm%domain) #endif - + end subroutine fill_nested_grid_data_end @@ -1538,7 +1552,7 @@ subroutine fv_restart_end(Atm, grids_on_this_pe) do n=1,steps write(file_unit) Atm(1)%idiag%efx(n) write(file_unit) Atm(1)%idiag%mtq(n) ! time series global mountain torque - !write(file_unit) Atm(1)%idiag%efx_nest(n) + !write(file_unit) Atm(1)%idiag%efx_nest(n) enddo close(unit=file_unit) endif @@ -1568,7 +1582,7 @@ subroutine d2c_setup(u, v, & real, intent(in) :: cosa_s(isd:ied,jsd:jed) real, intent(in) :: rsin2(isd:ied,jsd:jed) -! Local +! Local real, dimension(isd:ied,jsd:jed):: utmp, vtmp real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28. real, parameter:: a1 = 0.5625 @@ -1591,7 +1605,7 @@ subroutine d2c_setup(u, v, & npt = -2 endif - if ( nested) then + if ( nested) then do j=jsd+1,jed-1 do i=isd,ied @@ -1610,7 +1624,7 @@ subroutine d2c_setup(u, v, & vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j)) enddo i = isd - vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) + vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) i = ied vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) enddo @@ -1739,7 +1753,7 @@ subroutine d2c_setup(u, v, & ! Xdir: if( is==1 .and. .not. nested ) then do j=js-1,je+1 - uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j) + uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j) uc(1,j) = ( t14*(utmp( 0,j)+utmp(1,j)) & + t12*(utmp(-1,j)+utmp(2,j)) & + t15*(utmp(-2,j)+utmp(3,j)) )*rsin_u(1,j) @@ -1749,11 +1763,11 @@ subroutine d2c_setup(u, v, & if( (ie+1)==npx .and. .not. nested ) then do j=js-1,je+1 - uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j) + uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j) uc(npx,j) = (t14*(utmp(npx-1,j)+utmp(npx,j))+ & t12*(utmp(npx-2,j)+utmp(npx+1,j)) & + t15*(utmp(npx-3,j)+utmp(npx+2,j)))*rsin_u(npx,j) - uc(npx+1,j) = c3*utmp(npx,j)+c2*utmp(npx+1,j)+c1*utmp(npx+2,j) + uc(npx+1,j) = c3*utmp(npx,j)+c2*utmp(npx+1,j)+c1*utmp(npx+2,j) enddo endif @@ -1839,7 +1853,7 @@ subroutine d2a_setup(u, v, ua, va, dord4, & real, intent(in) :: rsin2(isd:ied,jsd:jed) logical, intent(in) :: nested -! Local +! 
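The a1/a2 stencil in d2c_setup (and d2a_setup below) is the standard fourth-order midpoint interpolation. With a1 = 0.5625 = 9/16 and, assuming the usual companion coefficient, a2 = -1/16:

    ! vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
    !           ~ v at the cell midpoint, fourth-order accurate.
    ! The weights sum to one (2*(9/16) - 2*(1/16) = 1), so constant
    ! fields are interpolated exactly; the one-sided t11..t15 stencils
    ! take over where the centered stencil would reach past the edge.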
Local real, dimension(isd:ied,jsd:jed):: utmp, vtmp real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28. real, parameter:: a1 = 0.5625 @@ -1862,7 +1876,7 @@ subroutine d2a_setup(u, v, ua, va, dord4, & npt = -2 endif - if ( nested) then + if ( nested) then do j=jsd+1,jed-1 do i=isd,ied @@ -1881,7 +1895,7 @@ subroutine d2a_setup(u, v, ua, va, dord4, & vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j)) enddo i = isd - vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) + vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) i = ied vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) enddo diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90 index cde5a0ca9..e94a12532 100644 --- a/tools/fv_surf_map.F90 +++ b/tools/fv_surf_map.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -63,12 +63,23 @@ module fv_surf_map_mod ! ! - use fms_mod, only: file_exist, check_nml_error, & - open_namelist_file, close_file, stdlog, & + use fms_mod, only: check_nml_error, & + stdlog, & mpp_pe, mpp_root_pe, FATAL, error_mesg +#if defined (FMS1_IO) + use fms_mod, only: file_exists => file_exist, & + open_namelist_file, close_file +#else + use fms2_io_mod, only: file_exists, close_file +#endif use mpp_mod, only: get_unit, input_nml_file, mpp_error use mpp_domains_mod, only: mpp_update_domains, domain2d - use constants_mod, only: grav, radius, pi=>pi_8 +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, radius, pi=>pi_8 use fv_grid_utils_mod, only: great_circle_dist, latlon2xyz, v_prod, normalize_vect use fv_grid_utils_mod, only: g_sum, global_mx, vect_cross @@ -97,8 +108,8 @@ module fv_surf_map_mod ! New NASA SRTM30 data: SRTM30.nc ! nlon = 43200 ! nlat = 21600 - logical:: zs_filter = .true. - logical:: zero_ocean = .true. ! if true, no diffusive flux into water/ocean area + logical:: zs_filter = .true. + logical:: zero_ocean = .true. ! 
if true, no diffusive flux into water/ocean area integer :: nlon = 21600 integer :: nlat = 10800 real:: cd4 = 0.15 !< Dimensionless coeff for del-4 diffusion (with FCT) @@ -109,13 +120,13 @@ module fv_surf_map_mod integer:: n_del2_weak = 12 integer:: n_del2_strong = -1 integer:: n_del4 = -1 - + character(len=128):: surf_file = "INPUT/topo1min.nc" character(len=6) :: surf_format = 'netcdf' logical :: namelist_read = .false. - real(kind=R_GRID) da_min + real(kind=R_GRID) da_min real cos_grid character(len=3) :: grid_string = '' @@ -218,12 +229,12 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ ! ! surface file must be in NetCDF format ! - if ( file_exist(surf_file) ) then + if ( file_exists(surf_file) ) then if (surf_format == "netcdf") then status = nf_open (surf_file, NF_NOWRITE, ncid) if (status .ne. NF_NOERR) call handle_err(status) - + status = nf_inq_dimid (ncid, 'lon', lonid) if (status .ne. NF_NOERR) call handle_err(status) status = nf_inq_dimlen (ncid, lonid, londim) @@ -243,7 +254,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ write(*,*) 'Opening USGS datset file:', surf_file, surf_format, nlon, nlat endif endif - + else call error_mesg ( 'surfdrv','Raw IEEE data format no longer supported !!!', FATAL ) endif @@ -466,7 +477,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ ! nested grids this allows us to do the smoothing near the boundary ! without having to fill the boundary halo from the coarse grid - !ALSO for nesting: note that we are smoothing the terrain using + !ALSO for nesting: note that we are smoothing the terrain using ! the nested-grid's outer halo filled with the terrain computed ! directly from the input file computed here, and then ! replacing it with interpolated topography in fv_restart, so @@ -636,7 +647,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s else m_slope = 10. endif - + do 777 nt=1, ntmax call mpp_update_domains(q, domain) @@ -645,13 +656,13 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s if ( nt==1 .and. check_slope ) then do j=js,je do i=is,ie+1 - ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) + ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) ddx(i,j) = abs(ddx(i,j)) enddo enddo do j=js,je+1 do i=is,ie - ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) + ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) ddy(i,j) = abs(ddy(i,j)) enddo enddo @@ -833,13 +844,13 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s call mpp_update_domains(q, domain) do j=js,je do i=is,ie+1 - ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) + ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) ddx(i,j) = abs(ddx(i,j)) enddo enddo do j=js,je+1 do i=is,ie - ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) + ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) ddy(i,j) = abs(ddy(i,j)) enddo enddo @@ -974,11 +985,11 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, type(domain2d), intent(INOUT) :: domain ! diffusivity real :: diff(bd%is-3:bd%ie+2,bd%js-3:bd%je+2) -! diffusive fluxes: +! 
diffusive fluxes: real :: fx1(bd%is:bd%ie+1,bd%js:bd%je), fy1(bd%is:bd%ie,bd%js:bd%je+1) real :: fx2(bd%is:bd%ie+1,bd%js:bd%je), fy2(bd%is:bd%ie,bd%js:bd%je+1) real :: fx4(bd%is:bd%ie+1,bd%js:bd%je), fy4(bd%is:bd%ie,bd%js:bd%je+1) - real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: d2, win, wou + real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: d2, win, wou real, dimension(bd%is:bd%ie,bd%js:bd%je):: qlow, qmin, qmax, q0 real, parameter:: esl = 1.E-20 integer i,j, n @@ -1149,18 +1160,18 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, do j=js,je do i=is,ie+1 if ( fx4(i,j) > 0. ) then - fx4(i,j) = min(1., wou(i-1,j), win(i,j)) * fx4(i,j) + fx4(i,j) = min(1., wou(i-1,j), win(i,j)) * fx4(i,j) else - fx4(i,j) = min(1., win(i-1,j), wou(i,j)) * fx4(i,j) + fx4(i,j) = min(1., win(i-1,j), wou(i,j)) * fx4(i,j) endif enddo enddo do j=js,je+1 do i=is,ie if ( fy4(i,j) > 0. ) then - fy4(i,j) = min(1., wou(i,j-1), win(i,j)) * fy4(i,j) + fy4(i,j) = min(1., wou(i,j-1), win(i,j)) * fy4(i,j) else - fy4(i,j) = min(1., win(i,j-1), wou(i,j)) * fy4(i,j) + fy4(i,j) = min(1., win(i,j-1), wou(i,j)) * fy4(i,j) endif enddo enddo @@ -1269,7 +1280,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & if(is_master()) write(*,*) 'surf_map: Search started ....' ! stretch_fac * pi5/(npx-1) / (pi/nlat) - lat_crit = nint( stretch_fac*real(nlat)/real(npx_global-1) ) + lat_crit = nint( stretch_fac*real(nlat)/real(npx_global-1) ) lat_crit = min( jt, max( 4, lat_crit ) ) if ( jstart==1 ) then @@ -1299,7 +1310,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & hsum = 0. np = 0 do j=1,lat_crit - do i=1,im + do i=1,im np = np + 1 hsum = hsum + (qsp-zs(i,j))**2 enddo @@ -1330,7 +1341,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & np = 0 do jp=jend-lat_crit+1, jend j = jp - jstart + 1 - do i=1,im + do i=1,im np = np + 1 hsum = hsum + (qnp-zs(i,j))**2 enddo @@ -1386,7 +1397,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & j1 = j1 - jstart + 1 j2 = j2 - jstart + 1 - lon_w = min( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) + lon_w = min( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) lon_e = max( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) if ( (lon_e-lon_w) > pi ) then @@ -1422,7 +1433,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & pc(k) = p1(k) + p2(k) + p3(k) + p4(k) enddo call normalize_vect( pc ) - + th0 = min( v_prod(p1,p3), v_prod(p2, p4) ) th1 = min( cos_grid, cos(0.25*acos(max(v_prod(p1,p3), v_prod(p2, p4))))) @@ -1544,7 +1555,7 @@ end subroutine handle_err subroutine remove_ice_sheets (lon, lat, lfrac, bd ) !--------------------------------- ! Bruce Wyman's fix for Antarctic -!--------------------------------- +!--------------------------------- type(fv_grid_bounds_type), intent(IN) :: bd real(kind=R_GRID), intent(in) :: lon(bd%isd:bd%ied,bd%jsd:bd%jed), lat(bd%isd:bd%ied,bd%jsd:bd%jed) real, intent(inout) :: lfrac(bd%isd:bd%ied,bd%jsd:bd%jed) @@ -1555,10 +1566,10 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ! lon = longitude in radians ! lat = latitude in radians ! lfrac = land-sea mask (land=1, sea=0) - + integer :: i, j real :: dtr, phs, phn - + is = bd%is ie = bd%ie js = bd%js @@ -1567,12 +1578,12 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ied = bd%ied jsd = bd%jsd jed = bd%jed - + dtr = acos(0.)/90. - phs = -83.9999*dtr + phs = -83.9999*dtr ! 
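The fx4/fy4 clipping in del4_cubed_sphere above is a flux-corrected-transport limiter: each flux is scaled by the tighter of the donor cell's allowed outflow (wou) and the receiver's allowed inflow (win), and the leading 1. guarantees it is never amplified, which keeps the filtered field inside the qmin/qmax bounds. Restated with the donor/receiver roles made explicit:

    ! For fx4(i,j) > 0 (donor i-1,j -> receiver i,j):
    !    fx4(i,j) = min(1., wou(i-1,j), win(i,j)) * fx4(i,j)
    ! For fx4(i,j) < 0 (donor i,j -> receiver i-1,j):
    !    fx4(i,j) = min(1., win(i-1,j), wou(i,j)) * fx4(i,j)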
phn = -78.9999*dtr phn = -76.4*dtr - + do j = jsd, jed do i = isd, ied if ( lat(i,j) < phn ) then @@ -1584,14 +1595,14 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ! replace between 270 and 360 deg if ( sin(lon(i,j)) < 0. .and. cos(lon(i,j)) > 0.) then lfrac(i,j) = 1.0 - cycle + cycle endif endif enddo enddo end subroutine remove_ice_sheets -!>@brief The subroutine 'read_namelis' reads the namelist file, +!>@brief The subroutine 'read_namelis' reads the namelist file, !! writes the namelist to log file, and initializes constants. subroutine read_namelist integer :: unit, ierr, io @@ -1600,18 +1611,8 @@ subroutine read_namelist ! read namelist if (namelist_read) return -#ifdef INTERNAL_FILE_NML read (input_nml_file, nml=surf_map_nml, iostat=io) ierr = check_nml_error(io,'surf_map_nml') -#else - unit = open_namelist_file ( ) - ierr=1 - do while (ierr /= 0) - read (unit, nml=surf_map_nml, iostat=io, end=10) - ierr = check_nml_error(io,'surf_map_nml') - enddo - 10 call close_file (unit) -#endif ! write version and namelist to log file diff --git a/tools/fv_treat_da_inc.F90 b/tools/fv_treat_da_inc.F90 index f53e93404..e0b5701db 100644 --- a/tools/fv_treat_da_inc.F90 +++ b/tools/fv_treat_da_inc.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -24,7 +24,7 @@ !>@details This module includes functions to read in the externally calculated increments !! and applies the increments to the restart variables. Specifically, if the increments are !! zero, FV3 should reproduce directly from the restart files. -!>@note Please treat the following subroutines as API interfaces, and consult the FV3 team +!>@note Please treat the following subroutines as API interfaces, and consult the FV3 team !! code modification proposal. !>@warning Expanding the list of increments without the proper knowledge of the FV3 dynamical !! core is EXTREMELY RISKY, especially for the non-hydrostatic scenario. Such a modification @@ -37,7 +37,7 @@ !------------------------------------------------------------------------------- #ifdef OVERLOAD_R4 -#define _GET_VAR1 get_var1_real +#define _GET_VAR1 get_var1_real #else #define _GET_VAR1 get_var1_double #endif @@ -61,7 +61,7 @@ module fv_treat_da_inc_mod ! fms_mod ! 
file_exist, open_namelist_file,close_file, error_mesg, FATAL, ! check_nml_error, stdlog,write_version_number,set_domain, -! mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_SUBCOMPONENT, +! mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_SUBCOMPONENT, ! clock_flag_default, nullify_domain ! ! @@ -96,7 +96,7 @@ module fv_treat_da_inc_mod ! ! ! sim_nc_mod -! open_ncfile, close_ncfile, get_ncdim1, get_var1_double, +! open_ncfile, close_ncfile, get_ncdim1, get_var1_double, ! get_var2_real, get_var3_r4, get_var1_real ! ! @@ -105,8 +105,13 @@ module fv_treat_da_inc_mod ! ! - use fms_mod, only: file_exist, read_data, & - field_exist, write_version_number + use fms_mod, only: write_version_number +#if defined (FMS1_IO) + use fms_mod, only: file_exists => file_exist, read_data, & + field_exist +#else + use fms2_io_mod, only: file_exists, read_data +#endif use mpp_mod, only: mpp_error, FATAL, NOTE, mpp_pe use mpp_domains_mod, only: mpp_get_tile_id, & domain2d, & @@ -121,7 +126,12 @@ module fv_treat_da_inc_mod #ifdef MAPL_MODE use MAPL #else - use constants_mod, only: pi=>pi_8, omega, grav, kappa, & +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: pi=>pi_8, omega, grav, kappa, & rdgas, rvgas, cp_air #endif @@ -172,7 +182,7 @@ module fv_treat_da_inc_mod #ifdef MAPL_MODE subroutine geos_get_da_increments(Atm, fv_domain, lon,lat,im,jm,km, & u_amb, v_amb, t_amb, dp_amb, q_amb, o3_amb, & - u_inc, v_inc, t_inc, dp_inc, q_inc, o3_inc) + u_inc, v_inc, t_inc, dp_inc, q_inc, o3_inc) type(fv_atmos_type), intent(inout) :: Atm(:) type(domain2d), intent(inout) :: fv_domain real, intent(inout) :: lon(im), lat(jm) @@ -217,7 +227,7 @@ subroutine geos_get_da_increments(Atm, fv_domain, lon,lat,im,jm,km, & ! FV3 code wants lon 0:360 IMsplit = IM/2 ! Lons - tmp1d( 1:IMsplit) = lon(IMsplit+1:IM ) + tmp1d( 1:IMsplit) = lon(IMsplit+1:IM ) tmp1d(IMsplit+1:IM ) = 2.0*PI + lon( 1:IMsplit) lon = tmp1d ! ANA-BKG @@ -388,7 +398,7 @@ end subroutine geos_get_da_increments #endif !============================================================================= - !>@brief The subroutine 'read_da_inc' reads the increments of the diagnostic variables + !>@brief The subroutine 'read_da_inc' reads the increments of the diagnostic variables !! from the DA-generated files. !>@details Additional support of prognostic variables such as tracers can be assessed !! and added upon request. @@ -447,7 +457,7 @@ subroutine read_da_inc(Atm, fv_domain) fname = 'INPUT/'//Atm(1)%flagstruct%res_latlon_dynamics - if( file_exist(fname) ) then + if( file_exists(fname) ) then call open_ncfile( fname, ncid ) ! 
open the file call get_ncdim1( ncid, 'lon', tsize(1) ) call get_ncdim1( ncid, 'lat', tsize(2) ) @@ -492,7 +502,7 @@ subroutine read_da_inc(Atm, fv_domain) do j=js,je do i=is,ie j1 = jdc(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -539,7 +549,7 @@ subroutine read_da_inc(Atm, fv_domain) do j=js,je do i=is,ie+1 j1 = jdc_c(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -592,7 +602,7 @@ subroutine read_da_inc(Atm, fv_domain) do j=js,je+1 do i=is,ie j1 = jdc_d(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -682,7 +692,7 @@ end subroutine apply_inc_on_3d_scalar !--------------------------------------------------------------------------- #endif end subroutine read_da_inc - + !============================================================================= subroutine remap_coef( is, ie, js, je, isd, ied, jsd, jed, & im, jm, lon, lat, id1, id2, jdc, s2c, agrid ) @@ -698,7 +708,7 @@ subroutine remap_coef( is, ie, js, je, isd, ied, jsd, jed, & real :: rdlat(jm) real:: a1, b1 integer i,j, i1, i2, jc, i0, j0 - + do i=1,im-1 rdlon(i) = 1. / (lon(i+1) - lon(i)) enddo diff --git a/tools/init_hydro.F90 b/tools/init_hydro.F90 index ce3db26cb..0e902c5e2 100644 --- a/tools/init_hydro.F90 +++ b/tools/init_hydro.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -41,7 +41,7 @@ module init_hydro_mod ! ! mpp_mod ! mpp_chksum, stdout, mpp_error, FATAL, NOTE,get_unit, mpp_sum, mpp_broadcast, -! mpp_get_current_pelist, mpp_npes, mpp_set_current_pelist, mpp_send, mpp_recv, +! mpp_get_current_pelist, mpp_npes, mpp_set_current_pelist, mpp_send, mpp_recv, ! mpp_sync_self, mpp_npes, mpp_pe, mpp_sync ! ! @@ -54,8 +54,12 @@ module init_hydro_mod ! ! 
- - use constants_mod, only: grav, rdgas, rvgas +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: grav, rdgas, rvgas use fv_grid_utils_mod, only: g_sum use fv_mp_mod, only: is_master use field_manager_mod, only: MODEL_ATMOS @@ -72,7 +76,7 @@ module init_hydro_mod contains !------------------------------------------------------------------------------- -!>@brief the subroutine 'p_var' computes auxiliary pressure variables for +!>@brief the subroutine 'p_var' computes auxiliary pressure variables for !! a hydrostatic state. !>@details The variables are: surfce, interface, layer-mean pressure, exener function !! Given (ptop, delp) computes (ps, pk, pe, peln, pkz) @@ -137,7 +141,7 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & if ( adjust_dry_mass ) then do i=ifirst,ilast ratio(i) = 1. + dpd/(ps(i,j)-ptop) - enddo + enddo do k=1,km do i=ifirst,ilast delp(i,j,k) = delp(i,j,k) * ratio(i) @@ -185,7 +189,7 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & rdg = -rdgas / grav if ( present(make_nh) ) then if ( make_nh ) then - delz = 1.e25 + delz = 1.e25 !$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,delz,rdg,pt,peln) do k=1,km do j=jfirst,jlast @@ -238,14 +242,14 @@ end subroutine p_var - subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & + subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & cappa, ptop, ps, delp, q, nq, area, nwat, & dry_mass, adjust_dry_mass, moist_phys, dpd, domain) ! INPUT PARAMETERS: integer km integer ifirst, ilast !< Longitude strip - integer jfirst, jlast !< Latitude strip + integer jfirst, jlast !< Latitude strip integer nq, ng, nwat real, intent(in):: dry_mass real, intent(in):: ptop @@ -255,9 +259,9 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & real(kind=R_GRID), intent(IN) :: area(ifirst-ng:ilast+ng, jfirst-ng:jlast+ng) type(domain2d), intent(IN) :: domain -! INPUT/OUTPUT PARAMETERS: +! INPUT/OUTPUT PARAMETERS: real, intent(in):: q(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng,km,nq) - real, intent(in)::delp(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng,km) + real, intent(in)::delp(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng,km) real, intent(inout):: ps(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) !< surface pressure real, intent(out):: dpd ! Local @@ -265,7 +269,7 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & real psmo, psdry integer i, j, k -!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,ps,ptop,psd,delp,nwat,q) +!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,ps,ptop,psd,delp,nwat,q) do j=jfirst,jlast do i=ifirst,ilast @@ -293,9 +297,9 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & enddo ! Check global maximum/minimum - psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1) + psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1) psmo = g_sum(domain, ps(ifirst:ilast,jfirst:jlast), ifirst, ilast, jfirst, jlast, & - ng, area, 1) + ng, area, 1) #ifdef MAPL_MODE if( adjust_dry_mass ) Then @@ -332,7 +336,7 @@ end subroutine drymadj !! basic state from input heights. subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & pt, delz, area, ng, mountain, hydrostatic, hybrid_z, domain) -! Input: +! 
Input: integer, intent(in):: is, ie, js, je, km, ng real, intent(in):: ak(km+1), bk(km+1) real, intent(in):: hs(is-ng:ie+ng,js-ng:je+ng) @@ -353,7 +357,7 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & real mslp, z1, t1, p1, t0, a0, psm real ztop, c0 #ifdef INIT_4BYTE - real(kind=4) :: dps + real(kind=4) :: dps #else real dps ! note that different PEs will get differt dps during initialization ! this has no effect after cold start @@ -373,7 +377,7 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & c0 = t0/a0 if ( hybrid_z ) then - ptop = 100. ! *** hardwired model top *** + ptop = 100. ! *** hardwired model top *** else ptop = ak(1) endif @@ -408,8 +412,8 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & ps(i,j) = ps(i,j) + dps gz(i, 1) = ztop gz(i,km+1) = hs(i,j) - ph(i, 1) = ptop - ph(i,km+1) = ps(i,j) + ph(i, 1) = ptop + ph(i,km+1) = ps(i,j) enddo if ( hybrid_z ) then @@ -418,14 +422,14 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & !--------------- do k=km,2,-1 do i=is,ie - gz(i,k) = gz(i,k+1) - delz(i,j,k)*grav + gz(i,k) = gz(i,k+1) - delz(i,j,k)*grav enddo enddo ! Correct delz at the top: do i=is,ie delz(i,j,1) = (gz(i,2) - ztop) / grav enddo - + do k=2,km do i=is,ie if ( gz(i,k) >= z1 ) then diff --git a/tools/read_climate_nudge_data.F90 b/tools/read_climate_nudge_data.F90 index 0b9429bea..9a1cd6b0f 100644 --- a/tools/read_climate_nudge_data.F90 +++ b/tools/read_climate_nudge_data.F90 @@ -1,15 +1,25 @@ module read_climate_nudge_data_mod -use fms_mod, only: open_namelist_file, check_nml_error, close_file, & +use fms_mod, only: check_nml_error, & stdlog, mpp_pe, mpp_root_pe, write_version_number, & - string, error_mesg, FATAL, NOTE, file_exist + string, error_mesg, FATAL, NOTE +#if defined (FMS1_IO) +use fms_mod, only: open_namelist_file, close_file, file_exists => file_exist +#else +use fms2_io_mod, only: file_exists, close_file +#endif use mpp_mod, only: input_nml_file use mpp_io_mod, only: mpp_open, MPP_NETCDF, MPP_RDONLY,MPP_MULTI, MPP_SINGLE use mpp_io_mod, only: axistype, fieldtype, mpp_get_time_axis, mpp_get_atts use mpp_io_mod, only: mpp_get_fields, mpp_get_info, mpp_get_axes, mpp_get_times use mpp_io_mod, only: mpp_get_axis_data, mpp_read, mpp_close, mpp_get_default_calendar -use constants_mod, only: PI, GRAV, RDGAS, RVGAS +#if defined (SINGLE_FV) +use constantsr4_mod, & +#else +use constants_mod, & +#endif + only: PI, GRAV, RDGAS, RVGAS use fv_arrays_mod, only: REAL4, REAL8, FVPRC implicit none @@ -41,7 +51,7 @@ module read_climate_nudge_data_mod INDEX_U = 8, INDEX_V = 9 character(len=8), dimension(NUM_REQ_FLDS) :: required_field_names = & (/ 'P0 ', 'hyai', 'hybi', 'PHI ', 'PS ', 'T ', 'Q ', 'U ', 'V ' /) - + integer, parameter :: MAXFILES = 53 character(len=256) :: filenames(MAXFILES) character(len=256) :: filename_tails(MAXFILES) @@ -65,7 +75,7 @@ module read_climate_nudge_data_mod integer, dimension(NUM_REQ_FLDS) :: field_index ! varid for variables integer, dimension(NUM_REQ_AXES) :: axis_index ! 
varid for dimensions type(axistype), dimension(NUM_REQ_FLDS) :: axes - type(fieldtype), dimension(NUM_REQ_FLDS) :: fields + type(fieldtype), dimension(NUM_REQ_FLDS) :: fields end type type(filedata_type), allocatable :: Files(:) @@ -97,20 +107,8 @@ subroutine read_climate_nudge_data_init (nlon, nlat, nlev, ntime) enddo !----- read namelist ----- -#ifdef INTERNAL_FILE_NML read (input_nml_file, nml=read_climate_nudge_data_nml, iostat=io) ierr = check_nml_error (io, 'read_climate_nudge_data_nml') -#else - if (file_exist('input.nml') ) then - iunit = open_namelist_file() - ierr=1 - do while (ierr /= 0) - read (iunit, nml=read_climate_nudge_data_nml, iostat=io, end=10) - ierr = check_nml_error (io, 'read_climate_nudge_data_nml') - enddo -10 call close_file (iunit) - endif -#endif !----- write version and namelist to log file ----- @@ -280,7 +278,7 @@ subroutine read_grid ( lon, lat, ak, bk ) else ak = 0. endif - + call mpp_read(Files(1)%ncid, Files(1)%fields(INDEX_BK), bk) @@ -371,7 +369,7 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) call error_mesg ('read_climate_nudge_data_mod', 'itime out of range', FATAL) endif - ! check dimensions + ! check dimensions if (present(js)) then if (size(dat,1) .ne. global_axis_size(INDEX_LON) .or. & size(dat,2) .ne. sub_domain_latitude_size) then @@ -393,7 +391,7 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) else call error_mesg ('read_climate_nudge_data_mod', 'incorrect field requested in read_climate_nudge_data_2d', FATAL) endif - + ! file index and actual time index in file n = file_index(itime) atime = itime - Files(n)%time_offset @@ -406,9 +404,9 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) nread = 1 nread(1) = size(dat,1) nread(2) = size(dat,2) - + call mpp_read(Files(n)%ncid, Files(n)%fields(this_index), dat, start, nread) - + ! geopotential height (convert to m2/s2 if necessary) if (field .eq. 'phis') then if (maxval(dat) > 1000.*GRAV) then @@ -468,7 +466,7 @@ subroutine read_climate_nudge_data_3d (itime, field, dat, is, js) else call error_mesg ('read_climate_nudge_data_mod', 'incorrect field requested in read_climate_nudge_data_3d', FATAL) endif - + ! file index and actual time index in file n = file_index(itime) diff --git a/tools/test_cases.F90 b/tools/test_cases.F90 index 2386b34e4..64a0353ef 100644 --- a/tools/test_cases.F90 +++ b/tools/test_cases.F90 @@ -1,21 +1,21 @@ !*********************************************************************** -!* GNU Lesser General Public License +!* GNU Lesser General Public License !* !* This file is part of the FV3 dynamical core. !* -!* The FV3 dynamical core is free software: you can redistribute it +!* The FV3 dynamical core is free software: you can redistribute it !* and/or modify it under the terms of the !* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or +!* Free Software Foundation, either version 3 of the License, or !* (at your option) any later version. !* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. !* See the GNU General Public License for more details. 
!* !* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. +!* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** @@ -58,8 +58,8 @@ module test_cases_mod ! ! ! fv_mp_mod -! ng, is_master,is,js,ie,je, isd,jsd,ied,jed, -! domain_decomp, fill_corners, XDir, YDir, mp_stop, +! ng, is_master,is,js,ie,je, isd,jsd,ied,jed, +! domain_decomp, fill_corners, XDir, YDir, mp_stop, ! mp_reduce_sum, mp_reduce_max, mp_gather, mp_bcst ! ! @@ -97,7 +97,12 @@ module test_cases_mod ! ! - use constants_mod, only: cnst_radius=>radius, pi=>pi_8, omega, grav, kappa, rdgas, cp_air, rvgas +#if defined (SINGLE_FV) + use constantsr4_mod, & +#else + use constants_mod, & +#endif + only: cnst_radius=>radius, pi=>pi_8, omega, grav, kappa, rdgas, cp_air, rvgas use init_hydro_mod, only: p_var, hydro_eq use fv_mp_mod, only: ng, is_master, & is,js,ie,je, isd,jsd,ied,jed, & @@ -127,15 +132,15 @@ module test_cases_mod implicit none private -! Test Case Number +! Test Case Number ! -1 = Divergence conservation test ! 0 = Idealized non-linear deformational flow ! 1 = Cosine Bell advection ! 2 = Zonal geostrophically balanced flow -! 3 = non-rotating potential flow +! 3 = non-rotating potential flow ! 4 = Tropical cyclones (merger of Rankine vortices) ! 5 = Zonal geostrophically balanced flow over an isolated mountain -! 6 = Rossby Wave number 4 +! 6 = Rossby Wave number 4 ! 7 = Barotropic instability ! ! 8 = Potential flow (as in 5 but no rotation and initially at rest) ! 8 = "Soliton" propagation twin-vortex along equator @@ -164,7 +169,7 @@ module test_cases_mod ! 44 = Lock-exchange on the sphere; atm at rest with no mountain ! 45 = New test ! 51 = 3D tracer advection (deformational nondivergent flow) -! 55 = TC +! 55 = TC ! 101 = 3D non-hydrostatic Large-Eddy-Simulation (LES) with hybrid_z IC integer :: sphum, theta_d @@ -186,11 +191,11 @@ module test_cases_mod real, parameter :: pi_shift = 0.0 !< 3.0*pi/4. ! -1:null_op, 0:All-Grids, 1:C-Grid, 2:D-Grid, 3:A-Grid, 4:A-Grid then Rotate, 5:D-Grid with unit vectors then Rotate - integer, parameter :: initWindsCase0 =-1 + integer, parameter :: initWindsCase0 =-1 integer, parameter :: initWindsCase1 = 1 - integer, parameter :: initWindsCase2 = 5 + integer, parameter :: initWindsCase2 = 5 integer, parameter :: initWindsCase5 = 5 - integer, parameter :: initWindsCase6 =-1 + integer, parameter :: initWindsCase6 =-1 integer, parameter :: initWindsCase9 =-1 real, allocatable, dimension(:) :: pz0, zz0 @@ -265,11 +270,11 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre real(kind=R_GRID) :: p1(2), p2(2), p3(2), p4(2), pt(2) real(kind=R_GRID) :: e1(3), e2(3), ex(3), ey(3) - real :: dist, r, r0 + real :: dist, r, r0 integer :: i,j,k,n real :: utmp, vtmp - real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 + real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 integer :: is2, ie2, js2, je2 real(kind=R_GRID), pointer, dimension(:,:,:) :: agrid, grid @@ -309,7 +314,7 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre rdya => gridstruct%rdya dxc => gridstruct%dxc dyc => gridstruct%dyc - + cubed_sphere => gridstruct%cubed_sphere latlon => gridstruct%latlon @@ -428,19 +433,19 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre do i=is2,ie2+1 dist = dxc(i,j) v(i,j) = (psi(i,j)-psi(i-1,j))/dist - if (dist==0) v(i,j) = 0. 
+ if (dist==0) v(i,j) = 0. enddo enddo do j=js2,je2+1 do i=is2,ie2 dist = dyc(i,j) u(i,j) = -1.0*(psi(i,j)-psi(i,j-1))/dist - if (dist==0) u(i,j) = 0. + if (dist==0) u(i,j) = 0. enddo enddo call mp_update_dwinds(u, v, npx, npy, domain) call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested, domain) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested, domain) elseif ( (cubed_sphere) .and. (defOnGrid==3) ) then do j=js,je do i=is,ie @@ -465,7 +470,7 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre do i=is,ie ua(i,j) = Ubar * ( COS(agrid(i,j,2))*COS(alpha) + & SIN(agrid(i,j,2))*COS(agrid(i,j,1))*SIN(alpha) ) - va(i,j) = -Ubar * SIN(agrid(i,j,1))*SIN(alpha) + va(i,j) = -Ubar * SIN(agrid(i,j,1))*SIN(alpha) call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) @@ -541,7 +546,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, gridstruct, flagstruct, npx, npy, npz, ng, ncnst, nwat, ndims, nregions, & dry_mass, mountain, moist_phys, hydrostatic, hybrid_z, delz, ze0, adiabatic, & ks, npx_global, ptop, domain_in, tile_in, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) @@ -635,7 +640,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, real :: pmin, pmin1 real :: pmax, pmax1 real :: grad(bd%isd:bd%ied ,bd%jsd:bd%jed,2) - real :: div0(bd%isd:bd%ied ,bd%jsd:bd%jed ) + real :: div0(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: vor0(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: divg(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: vort(bd%isd:bd%ied ,bd%jsd:bd%jed ) @@ -661,7 +666,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! real sbuffer(npy+1,npz) real wbuffer(npy+2,npz) real sbuffer(npx+2,npz) - + real :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1), zt, zdist real :: zvir @@ -744,7 +749,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, rdya => gridstruct%rdya dxc => gridstruct%dxc dyc => gridstruct%dyc - + cubed_sphere => gridstruct%cubed_sphere latlon => gridstruct%latlon @@ -843,7 +848,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, write(*,201) 'Divergence L1_norm : ', L1_norm write(*,201) 'Divergence L2_norm : ', L2_norm write(*,201) 'Divergence Linf_norm : ', Linf_norm - endif + endif call init_winds(UBar, u,v,ua,va,uc,vc, 3, npx, npy, ng, ndims, nregions, gridstruct%nested, gridstruct, domain, tile) ! 
Test Divergence operator at cell centers @@ -911,14 +916,14 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=jsd,jed do i=isd,ied - x1 = agrid(i,j,1) + x1 = agrid(i,j,1) y1 = agrid(i,j,2) z1 = radius p = p0_c0 * cos(y1) Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) w_p = 0.0 - if (p /= 0.0) w_p = Vtx/p + if (p /= 0.0) w_p = Vtx/p delp(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*0.0) ) ua(i,j,1) = w_p*(sin(lat0)*cos(agrid(i,j,2)) + cos(lat0)*cos(agrid(i,j,1) - lon0)*sin(agrid(i,j,2))) va(i,j,1) = w_p*cos(lat0)*sin(agrid(i,j,1) - lon0) @@ -928,7 +933,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) - call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) + call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) if (cubed_sphere) call rotate_winds(ua(i,j,1),va(i,j,1), p1,p2,p3,p4, agrid(i,j,1:2), 2, 1) enddo @@ -1103,7 +1108,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ubar = 50. ! maxmium wind speed (m/s) r0 = 250.e3 ! RADIUS of the maximum wind of the Rankine vortex gh0 = grav * 1.e3 - + do j=jsd,jed do i=isd,ied delp(i,j,1) = gh0 @@ -1111,7 +1116,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo ! ddeg = 2.*r0/radius ! no merger - ddeg = 1.80*r0/radius ! merged + ddeg = 1.80*r0/radius ! merged p1(1) = pi*1.5 - ddeg p1(2) = pi/18. ! 10 N @@ -1145,7 +1150,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, case(5) - Ubar = 20. + Ubar = 20. gh0 = 5960.*Grav phis = 0.0 r0 = PI/9. @@ -1196,7 +1201,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call get_unit_vect2(p1, p2, e2) call get_latlon_vector(p3, ex, ey) utmp = radius*omg*cos(p3(2)) + & - radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) + radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) vtmp = -radius*rk*R*sin(p3(2))*sin(R*p3(1))*cos(p3(2))**(R-1) v(i,j,1) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) enddo @@ -1209,7 +1214,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call get_unit_vect2(p1, p2, e1) call get_latlon_vector(p3, ex, ey) utmp = radius*omg*cos(p3(2)) + & - radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) + radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) vtmp = -radius*rk*R*sin(p3(2))*sin(R*p3(1))*cos(p3(2))**(R-1) u(i,j,1) = utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey) enddo @@ -1233,7 +1238,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 pt1 = gh_jet(npy, agrid(i,j,2)) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), pa) @@ -1298,7 +1303,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo initWindsCase=initWindsCase6 ! shouldn't do anything with this -!initialize tracer with shallow-water PV +!initialize tracer with shallow-water PV !Compute vorticity call get_vorticity(is, ie, js, je, isd, ied, jsd, jed, npz, u, v, q(is:ie,js:je,:,1), dx, dy, rarea) do j=jsd,jed+1 @@ -1634,7 +1639,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 
call mpp_error(FATAL, 'You must provide a routine for hybrid_z') if ( is_master() ) write(*,*) 'Using const DZ' ztop = 45.E3 ! assuming ptop = 100. - dz1(1) = ztop / real(npz) + dz1(1) = ztop / real(npz) dz1(npz) = 0.5*dz1(1) do z=2,npz-1 dz1(z) = dz1(1) @@ -1665,7 +1670,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call checker_tracers(is,ie, js,je, isd,ied, jsd,jed, & ncnst, npz, q, agrid(is:ie,js:je,1), agrid(is:ie,js:je,2), 9., 9.) #else - !For consistency with earlier single-grid simulations use gh0 = 1.0e-6 and p1(1) = 195.*pi/180. + !For consistency with earlier single-grid simulations use gh0 = 1.0e-6 and p1(1) = 195.*pi/180. q(:,:,:,:) = 0. gh0 = 1.0e-3 r0 = radius/3. !RADIUS radius/3. @@ -1686,7 +1691,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo #endif - + #else q(:,:,:,:) = 0. @@ -1727,7 +1732,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) pk(i,j,k) = exp( kappa*log(pe(i,k,j)) ) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) enddo enddo enddo @@ -1752,9 +1757,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !Set up moisture sphum = get_tracer_index (MODEL_ATMOS, 'sphum') pcen(1) = PI/9. - pcen(2) = 2.0*PI/9. + pcen(2) = 2.0*PI/9. !$OMP parallel do default(none) shared(sphum,is,ie,js,je,npz,pe,q,agrid,pcen,delp,peln) & -!$OMP private(ptmp) +!$OMP private(ptmp) do k=1,npz do j=js,je do i=is,ie @@ -1770,11 +1775,11 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo endif - ! Initialize winds + ! Initialize winds Ubar = 35.0 r0 = 1.0 pcen(1) = PI/9. - pcen(2) = 2.0*PI/9. + pcen(2) = 2.0*PI/9. if (test_case == 13) then #ifdef ALT_PERT u1 = 0.0 @@ -1794,13 +1799,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*grid(i,j+1,2))**2.0 ! Perturbation if Case==13 r = great_circle_dist( pcen, grid(i,j+1,1:2), radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv1 = utmp*(ee2(2,i,j+1)*cos(grid(i,j+1,1)) - ee2(1,i,j+1)*sin(grid(i,j+1,1))) utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*grid(i,j,2))**2.0 ! Perturbation if Case==13 r = great_circle_dist( pcen, grid(i,j,1:2), radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv3 = utmp*(ee2(2,i,j)*cos(grid(i,j,1)) - ee2(1,i,j)*sin(grid(i,j,1))) ! Mid-point: p1(:) = grid(i ,j ,1:2) @@ -1809,7 +1814,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*pa(2))**2.0 ! Perturbation if Case==13 r = great_circle_dist( pcen, pa, radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv2 = utmp*(ew(2,i,j,2)*cos(pa(1)) - ew(1,i,j,2)*sin(pa(1))) ! 3-point average: v(i,j,z) = 0.25*(vv1 + 2.*vv2 + vv3) @@ -1877,7 +1882,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! 
call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p1) @@ -1928,7 +1933,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pt(i,j,z) = pt(i,j,z) + pt0*exp(-(r/r0)**2) endif #endif - + enddo enddo enddo @@ -1949,7 +1954,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p1) @@ -2169,15 +2174,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=jsd,jed do i=isd,ied - ps(i,j) = pe1(npz+1) + ps(i,j) = pe1(npz+1) enddo enddo do z=1,npz+1 do j=js,je do i=is,ie - pe(i,z,j) = pe1(z) - peln(i,z,j) = log(pe1(z)) + pe(i,z,j) = pe1(z) + peln(i,z,j) = log(pe1(z)) pk(i,j,z) = exp(kappa*peln(i,z,j)) enddo enddo @@ -2193,7 +2198,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if ( r 1.E-12 ) then - zeta = asin ( p4(2) / sqrt(p4(1)**2 + p4(2)**2) ) + zeta = asin ( p4(2) / sqrt(p4(1)**2 + p4(2)**2) ) else zeta = pi/2. endif @@ -2412,7 +2417,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, zeta = zeta + pi/6. v1 = r/uu1 * cos( zeta ) v2 = r/uu2 * sin( zeta ) - phis(i,j) = ftop / ( 1. + v1**2 + v2**2 ) + phis(i,j) = ftop / ( 1. + v1**2 + v2**2 ) else phis(i,j) = 0. endif @@ -2429,7 +2434,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, else if ( is_master() ) write(*,*) 'Using const DZ' ztop = 15.E3 - dz1(1) = ztop / real(npz) + dz1(1) = ztop / real(npz) do k=2,npz dz1(k) = dz1(1) enddo @@ -2463,23 +2468,23 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, t00 = 300. pt0 = t00/pk0 n2 = 1.E-4 - s0 = grav*grav / (cp_air*n2) + s0 = grav*grav / (cp_air*n2) ! For constant N2, Given z --> p do k=1,npz+1 pe1(k) = p00*( (1.-s0/t00) + s0/t00*exp(-n2*ze1(k)/grav) )**(1./kappa) enddo - ptop = pe1(1) + ptop = pe1(1) if ( is_master() ) write(*,*) 'Lee vortex testcase: model top (mb)=', ptop/100. -! Set up fake "sigma" coordinate +! Set up fake "sigma" coordinate ak(1) = pe1(1) bk(1) = 0. do k=2,npz bk(k) = (pe1(k) - pe1(1)) / (pe1(npz+1)-pe1(1)) ! bk == sigma - ak(k) = pe1(1)*(1.-bk(k)) - enddo + ak(k) = pe1(1)*(1.-bk(k)) + enddo ak(npz+1) = 0. bk(npz+1) = 1. 
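!-----------------------------------------------------------------------------
! For reference, a minimal self-contained sketch of the "fake sigma"
! coordinate set up in the hunk above: given edge pressures pe1 from model
! top to surface, ak/bk are chosen so that pe(k) = ak(k) + bk(k)*ps
! reproduces pe1 exactly when ps = pe1(npz+1). The program name and the
! sample pe1 profile are illustrative only; this sketch is not part of the
! patch.
program check_fake_sigma
   implicit none
   integer, parameter :: npz = 4
   real :: pe1(npz+1), ak(npz+1), bk(npz+1), ps
   integer :: k
   pe1 = (/ 100., 5000., 20000., 60000., 100000. /)   ! edge pressures (Pa)
   ak(1) = pe1(1)
   bk(1) = 0.
   do k=2,npz
      bk(k) = (pe1(k) - pe1(1)) / (pe1(npz+1) - pe1(1))   ! bk == sigma
      ak(k) = pe1(1)*(1. - bk(k))
   enddo
   ak(npz+1) = 0.
   bk(npz+1) = 1.
   ps = pe1(npz+1)
   do k=1,npz+1
      ! ak(k) + bk(k)*ps recovers pe1(k) at every interface
      print *, k, ak(k) + bk(k)*ps, pe1(k)
   enddo
end program check_fake_sigma
! Note that ak(1) = pe1(1) with bk(1) = 0 pins the top interface at a
! constant pressure, so the "sigma" surfaces relax toward pure pressure
! surfaces at the model top.
!-----------------------------------------------------------------------------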
@@ -2489,7 +2494,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=is,ie pk(i,j,k) = pk0 - (1.-exp(-n2/grav*ze0(i,j,k))) * (grav*grav)/(n2*cp_air*pt0) pe(i,k,j) = pk(i,j,k) ** (1./kappa) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) enddo enddo enddo @@ -2497,7 +2502,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = pe(i,1,j) ** kappa ps(i,j) = pe(i,npz+1,j) enddo @@ -2507,7 +2512,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) pt(i,j,k) = pkz(i,j,k)*grav*delz(i,j,k) / ( cp_air*(pk(i,j,k)-pk(i,j,k+1)) ) enddo enddo @@ -2533,7 +2538,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !NOTE: since we have an isothermal atmosphere and specify constant height-thickness layers we will disregard ak and bk and specify the initial pressures in a different way dz = 12000./real(npz) - + allocate(zz0(npz+1)) allocate(pz0(npz+1)) @@ -2660,11 +2665,11 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, case default call mpp_error(FATAL, 'Value of tracer_test not implemented ') end select - + else if (test_case == 52) then !Orography and steady-state test: DCMIP 20 - + f0 = 0. fC = 0. @@ -2712,7 +2717,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p1, p2, one ) + r = great_circle_dist( p1, p2, one ) if (r < r0) then phis(i,j) = grav*0.5*2000.*(1. + cos(pi*r/r0))*cos(pi*r/zetam)**2. pe(i,npz+1,j) = p00*(1.-gamma/T00*phis(i,j)/grav)**(1./exponent) @@ -2747,7 +2752,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !ANalytic layer-mean pt(i,j,k) = -grav*t00*p00/(rdgas*gamma + grav)/delp(i,j,k) * & ( (pe(i,k,j)/p00)**(exponent+1.) - (pe(i,k+1,j)/p00)**(exponent+1.) ) - + enddo enddo @@ -2795,8 +2800,8 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, w(:,:,:) = 0. q(:,:,:,:) = 0. - pp0(1) = 262.0/180.*pi ! OKC - pp0(2) = 35.0/180.*pi + pp0(1) = 262.0/180.*pi ! OKC + pp0(2) = 35.0/180.*pi do k=1,npz do j=js,je @@ -2829,7 +2834,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if (test_case > 0) then ! SRH = 40 if ( zm .le. 2.e3 ) then - utmp = 8.*(1.-cos(pi*zm/4.e3)) + utmp = 8.*(1.-cos(pi*zm/4.e3)) vtmp = 8.*sin(pi*zm/4.e3) elseif (zm .le. 6.e3 ) then utmp = 8. + (us0-8.)*(zm-2.e3)/4.e3 @@ -2861,7 +2866,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if( is_master() ) then write(6,*) k, utmp, vtmp endif - + do j=js,je do i=is,ie+1 p1(:) = grid(i ,j ,1:2) @@ -2945,7 +2950,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! pt = 0.25*pt1 + 0.125*(pt2+pt3+pt4+pt5) + 0.0625*(pt6+pt7+pt8+pt9) if ( test_case==35 ) then @@ -2985,7 +2990,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pt(:,:,:) = t00 endif - if( test_case==33 ) then + if( test_case==33 ) then ! 
NCAR Ridge-mountain Mods: do j=js,je do i=is,ie @@ -3033,35 +3038,35 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! pt = 0.25*pt1 + 0.125*(pt2+pt3+pt4+pt5) + 0.0625*(pt6+pt7+pt8+pt9) #ifdef USE_CELL_AVG - r = great_circle_dist( p0, agrid(i,j,1:2), radius ) + r = great_circle_dist( p0, agrid(i,j,1:2), radius ) pt1 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt2 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt3 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt4 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt5 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i,j,1:2), radius ) + r = great_circle_dist( p0, grid(i,j,1:2), radius ) pt6 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i+1,j,1:2), radius ) + r = great_circle_dist( p0, grid(i+1,j,1:2), radius ) pt7 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i+1,j+1,1:2), radius ) + r = great_circle_dist( p0, grid(i+1,j+1,1:2), radius ) pt8 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i,j+1,1:2), radius ) + r = great_circle_dist( p0, grid(i,j+1,1:2), radius ) pt9 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 phis(i,j) = grav*h0*(0.25*pt1+0.125*(pt2+pt3+pt4+pt5)+0.0625*(pt6+pt7+pt8+pt9)) #else - r = great_circle_dist( p0, agrid(i,j,1:2), radius ) + r = great_circle_dist( p0, agrid(i,j,1:2), radius ) pt1 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 phis(i,j) = grav*h0*exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 #endif @@ -3149,7 +3154,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !------------------------------------ ! HIWPP Super-Cell !------------------------------------ -! HIWPP SUPER_K; +! HIWPP SUPER_K; f0(:,:) = 0. fC(:,:) = 0. q(:,:,:,:) = 0. @@ -3313,17 +3318,17 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = exp(kappa*peln(i,1,j)) enddo do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) pk(i,j,k) = exp(kappa*peln(i,k,j)) enddo enddo @@ -3337,7 +3342,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - dist = great_circle_dist( p0, agrid(i,j,1:2), radius ) + dist = great_circle_dist( p0, agrid(i,j,1:2), radius ) if ( dist .le. r0 ) then pt(i,j,k) = 275. q(i,j,k,1) = 1. 
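!-----------------------------------------------------------------------------
! For reference, the hunks above repeatedly rebuild the pressure column from
! ptop and delp: pe by downward accumulation, peln = log(pe), pk = pe**kappa,
! and the layer mean pkz = (pk(k+1)-pk(k)) / (kappa*(peln(k+1)-peln(k))),
! i.e. the mean of p**kappa weighted by d(ln p). A single-column sketch of
! the same formulas, with illustrative names and sample values only (not
! part of the patch):
program column_pressure_demo
   implicit none
   integer, parameter :: npz = 5
   real, parameter :: kappa = 2./7.
   real :: ptop, delp(npz), pe(npz+1), peln(npz+1), pk(npz+1), pkz(npz)
   integer :: k
   ptop = 100.                                          ! model-top pressure (Pa)
   delp = (/ 2000., 8000., 20000., 30000., 39900. /)    ! layer thicknesses (Pa)
   pe(1)   = ptop
   peln(1) = log(pe(1))
   pk(1)   = exp(kappa*peln(1))
   do k=2,npz+1
      pe(k)   = pe(k-1) + delp(k-1)   ! accumulate pressure thickness downward
      peln(k) = log(pe(k))
      pk(k)   = exp(kappa*peln(k))
   enddo
   do k=1,npz
      ! layer mean of p**kappa, weighted by d(ln p)
      pkz(k) = (pk(k+1) - pk(k)) / (kappa*(peln(k+1) - peln(k)))
   enddo
   do k=1,npz
      print *, k, pe(k), pe(k+1), pkz(k)
   enddo
end program column_pressure_demo
!-----------------------------------------------------------------------------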
@@ -3387,17 +3392,17 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = exp(kappa*peln(i,1,j)) enddo do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) pk(i,j,k) = exp(kappa*peln(i,k,j)) enddo enddo @@ -3405,7 +3410,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! Initiate the westerly-wind-burst: ubar = soliton_Umax - r0 = soliton_size + r0 = soliton_size !!$ if (test_case == 46) then !!$ ubar = 200. !!$ r0 = 250.e3 @@ -3509,7 +3514,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps(i,j) = p00 - dp*exp(-(r/rp)**1.5) phis(i,j) = 0. enddo @@ -3525,7 +3530,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + !Pressure do j=js,je do i=is,ie @@ -3542,18 +3547,18 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie+1 p2(:) = 0.5*(grid(i,j,1:2)+grid(i,j+1,1:2)) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps_v(i,j) = p00 - dp*exp(-(r/rp)**1.5) enddo enddo do j=js,je+1 do i=is,ie p2(:) = 0.5*(grid(i,j,1:2)+grid(i+1,j,1:2)) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps_u(i,j) = p00 - dp*exp(-(r/rp)**1.5) enddo enddo - + !Pressure do j=js,je do i=is,ie+1 @@ -3584,7 +3589,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !endif p0 = (/ pi, pi/18. /) - + exppr = 1.5 exppz = 2. 
gamma = 0.007 @@ -3610,7 +3615,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, d2 = cos(p0(2))*sin(p3(1)-p0(1)) d = max(1.e-15,sqrt(d1**2+d2**2)) - r = great_circle_dist( p0, p3, radius ) + r = great_circle_dist( p0, p3, radius ) do k=1,npz ptmp = 0.5*(pe_v(i,k,j)+pe_v(i,k+1,j)) @@ -3624,7 +3629,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, +(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz))))) vtmp = utmp*d2 utmp = utmp*d1 - + v(i,j,k) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) endif @@ -3643,7 +3648,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, d2 = cos(p0(2))*sin(p3(1)-p0(1)) d = max(1.e-15,sqrt(d1**2+d2**2)) - r = great_circle_dist( p0, p3, radius ) + r = great_circle_dist( p0, p3, radius ) do k=1,npz ptmp = 0.5*(pe_u(i,k,j)+pe_u(i,k+1,j)) @@ -3683,7 +3688,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, else q(i,j,k,1) = q00*exp(-height/zq1)*exp(-(height/zq2)**exppz) p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) end if @@ -3723,9 +3728,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=isd,ied f0(i,j) = cor enddo - enddo + enddo endif - + else if ( test_case == -55 ) then @@ -3787,21 +3792,21 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, nullify(fC) nullify(f0) - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) - - nullify(ee1) - nullify(ee2) - nullify(ew) - nullify(es) - nullify(en1) - nullify(en2) + nullify(dx) + nullify(dy) + nullify(dxa) + nullify(dya) + nullify(rdxa) + nullify(rdya) + nullify(dxc) + nullify(dyc) + + nullify(ee1) + nullify(ee2) + nullify(ew) + nullify(es) + nullify(en1) + nullify(en2) nullify(latlon) nullify(cubed_sphere) @@ -3809,13 +3814,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, nullify(domain) nullify(tile) - nullify(have_south_pole) - nullify(have_north_pole) + nullify(have_south_pole) + nullify(have_north_pole) - nullify(ntiles_g) - nullify(acapN) - nullify(acapS) - nullify(globalarea) + nullify(ntiles_g) + nullify(acapN) + nullify(acapS) + nullify(globalarea) end subroutine init_case @@ -3849,9 +3854,9 @@ subroutine get_vorticity(isc, iec, jsc, jec ,isd, ied, jsd, jed, npz, u, v, vort enddo enddo enddo - + end subroutine get_vorticity - + subroutine checker_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & nq, km, q, lon, lat, nx, ny, rn) !-------------------------------------------------------------------- @@ -3986,7 +3991,7 @@ subroutine terminator_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & endif call mpp_sum(qcly0) if (is_master()) print*, ' qcly0 = ', qcly0 - + end subroutine terminator_tracers @@ -4013,7 +4018,7 @@ subroutine rankine_vortex(ubar, r0, p1, u, v, grid ) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) ! shift: p2(1) = p2(1) - p1(1) - cos_p = sin(p2(2))*sin(p1(2)) + cos(p2(2))*cos(p1(2))*cos(p2(1)) + cos_p = sin(p2(2))*sin(p1(2)) + cos(p2(2))*cos(p1(2))*cos(p2(1)) r = radius*acos(cos_p) ! great circle distance ! if( r<0.) 
call mpp_error(FATAL, 'radius negative!') if( r gridstruct%dyc period = real( 12*24*3600 ) !12 days - + l = 2.*pi/period dt2 = dt*0.5 @@ -4293,7 +4298,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4379,7 +4384,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4439,7 +4444,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, call dtoa( u(:,:,1), v(:,:,1),ua(:,:,1),va(:,:,1),dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) !! ABSOLUTELY NECESSARY!! call atoc(ua(:,:,1),va(:,:,1),uc(:,:,1),vc(:,:,1),dx,dy,dxa,dya,npx,npy,ng, gridstruct%nested, domain) - + do k=2,npz do j=js,je do i=is,ie @@ -4480,7 +4485,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4491,7 +4496,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, ubar = 40. - !Set lat-lon A-grid winds + !Set lat-lon A-grid winds k = 1 do j=js,je do i=is,ie @@ -4528,7 +4533,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, end subroutine case51_forcing !------------------------------------------------------------------------------- -! +! ! get_stats :: get L-1, L-2, and L-inf norms and other stats as defined ! in Williamson, 1994 (p.16) subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & @@ -4587,7 +4592,7 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & real, dimension(:,:,:), pointer :: grid, agrid real, dimension(:,:), pointer :: area, f0, dx, dy, dxa, dya, dxc, dyc - + grid => gridstruct%grid agrid=> gridstruct%agrid @@ -4731,13 +4736,13 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & endif ! Get PT Stats - pmax1 = -1.e25 - pmin1 = 1.e25 + pmax1 = -1.e25 + pmin1 = 1.e25 i0=-999 j0=-999 k0=-999 n0=-999 - do k=1,npz + do k=1,npz call pmxn(pt(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) pmin1 = min(pmin, pmin1) pmax1 = max(pmax, pmax1) @@ -4767,8 +4772,8 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & #endif ! Get DELP Stats - pmax1 = -1.e25 - pmin1 = 1.e25 + pmax1 = -1.e25 + pmin1 = 1.e25 i0=-999 j0=-999 k0=-999 @@ -4818,8 +4823,8 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & endif ! Get Q Stats - pmax1 = -1.e25 - pmin1 = 1.e25 + pmax1 = -1.e25 + pmin1 = 1.e25 i0=-999 j0=-999 k0=-999 @@ -4864,7 +4869,7 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & write(*,201) 'UV(850) L2_norm : ', L2_norm write(*,201) 'UV(850) Linf_norm : ', Linf_norm endif - endif + endif tmass = 0.0 tKE = 0.0 @@ -4889,7 +4894,7 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & u0 = u v0 = v endif - + !! UA is the PERTURBATION now up = u - u0 vp = v - v0 @@ -4909,7 +4914,7 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & ! Conservation of Energy do j=js,je do i=is,ie - temp(i,j) = 0.5 * (delp(i,j,k)/Grav) * temp(i,j) ! Include Previously calcullated KE + temp(i,j) = 0.5 * (delp(i,j,k)/Grav) * temp(i,j) ! 
Include Previously calculated KE temp(i,j) = temp(i,j) + & Grav*((delp(i,j,k)/Grav + phis(i,j))*(delp(i,j,k)/Grav + phis(i,j))) - & phis(i,j)*phis(i,j) @@ -4938,22 +4943,22 @@ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & tmass_orig = tmass tener_orig = tener tvort_orig = tvort - endif + endif arr_r4(1) = (tmass-tmass_orig)/tmass_orig arr_r4(2) = (tener-tener_orig)/tener_orig arr_r4(3) = (tvort-tvort_orig)/tvort_orig arr_r4(4) = tKE - if (test_case==12) arr_r4(4) = L2_norm + if (test_case==12) arr_r4(4) = L2_norm #if defined(SW_DYNAMICS) myRec = nt+1 #else - myRec = myDay*86400.0/dtout + 1 + myRec = myDay*86400.0/dtout + 1 #endif if (is_master()) write(consv_lun,rec=myRec) arr_r4(1:4) #if defined(SW_DYNAMICS) if ( (is_master()) .and. MOD(nt,monitorFreq)==0) then #else - if ( (is_master()) ) then + if ( (is_master()) ) then #endif write(*,201) 'MASS TOTAL : ', tmass write(*,201) 'NORMALIZED MASS : ', (tmass-tmass_orig)/tmass_orig @@ -4978,7 +4983,7 @@ end subroutine get_stats - subroutine get_pt_on_great_circle(p1, p2, dist, heading, p3) + subroutine get_pt_on_great_circle(p1, p2, dist, heading, p3) ! get_pt_on_great_circle :: Get the mid-point on a great circle given: ! -2 points (Lon/Lat) to define a great circle ! -Great Cirle distance between 2 defining points @@ -5000,7 +5005,7 @@ subroutine get_pt_on_great_circle(p1, p2, dist, heading, p3) p3(1) = MOD( (p1(1)-pi)-dp+pi , 2.*pi ) !- pi Leave at 0 to 360 end subroutine get_pt_on_great_circle - + ! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! @@ -5042,7 +5047,7 @@ subroutine get_scalar_stats(var, varT, npx, npy, ndims, nregions, & real, dimension(:,:,:), pointer :: grid, agrid real, dimension(:,:), pointer :: area - + grid => gridstruct%grid agrid=> gridstruct%agrid @@ -5136,7 +5141,7 @@ subroutine get_vector_stats(varU, varUT, varV, varVT, & real, dimension(:,:,:), pointer :: grid, agrid real, dimension(:,:), pointer :: area - + grid => gridstruct%grid agrid=> gridstruct%agrid @@ -5196,7 +5201,7 @@ end subroutine get_vector_stats !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! check_courant_numbers :: +! check_courant_numbers :: ! subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, tile, noPrint) @@ -5206,14 +5211,14 @@ subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, logical, OPTIONAL, intent(IN) :: noPrint real , intent(IN) :: uc(isd:ied+1,jsd:jed ,npz) real , intent(IN) :: vc(isd:ied ,jsd:jed+1,npz) - + real :: ideal_c=0.06 real :: tolerance= 1.e-3 - real :: dt_inc, dt_orig + real :: dt_inc, dt_orig real :: meanCy, minCy, maxCy, meanCx, minCx, maxCx real :: counter - logical :: ideal + logical :: ideal integer :: i,j,k real :: dt @@ -5233,7 +5238,7 @@ subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, ideal = .false. do while(.not.
ideal) - + counter = 0 minCy = missing maxCy = -1.*missing @@ -5250,7 +5255,7 @@ subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, if (ABS( (dt/dxc(i,j))*uc(i,j,k) ) > 1.0) then counter = counter+1 - write(*,300) i,j,k,tile, ABS( (dt/dxc(i,j))*uc(i,j,k) ), dt, dxc(i,j), uc(i,j,k), counter + write(*,300) i,j,k,tile, ABS( (dt/dxc(i,j))*uc(i,j,k) ), dt, dxc(i,j), uc(i,j,k), counter call exit(1) endif @@ -5285,10 +5290,10 @@ subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, meanCx = meanCx/(6.0*DBLE(npx)*DBLE(npy-1)) meanCy = meanCy/(6.0*DBLE(npx-1)*DBLE(npy)) - !if ( (ABS(maxCy-ideal_c) <= tolerance) .and. (ABS(maxCx-ideal_c) <= tolerance) ) then - ideal = .true. + !if ( (ABS(maxCy-ideal_c) <= tolerance) .and. (ABS(maxCx-ideal_c) <= tolerance) ) then + ideal = .true. !elseif (maxCy-ideal_c > 0) then - ! dt = dt - dt_inc + ! dt = dt - dt_inc !else ! dt = dt + dt_inc !endif @@ -5378,7 +5383,7 @@ subroutine pmxn(p, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) rdya => gridstruct%rdya dxc => gridstruct%dxc dyc => gridstruct%dyc - + cubed_sphere => gridstruct%cubed_sphere latlon => gridstruct%latlon @@ -5391,7 +5396,7 @@ subroutine pmxn(p, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) globalarea => gridstruct%globalarea pmax = -1.e25 - pmin = 1.e25 + pmin = 1.e25 i0 = -999 j0 = -999 n0 = tile @@ -5421,7 +5426,7 @@ subroutine pmxn(p, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) call mp_reduce_max(j0) call mp_reduce_max(n0) - pmin = -pmin + pmin = -pmin call mp_reduce_max(pmin) pmin = -pmin @@ -5479,10 +5484,10 @@ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, real, allocatable :: tmp(:,:,:) real, allocatable :: tmpA(:,:,:) -#if defined(SW_DYNAMICS) +#if defined(SW_DYNAMICS) real, allocatable :: ut(:,:,:) real, allocatable :: vt(:,:,:) -#else +#else real, allocatable :: ut(:,:,:,:) real, allocatable :: vt(:,:,:,:) real, allocatable :: tmpA_3d(:,:,:,:) @@ -5521,7 +5526,7 @@ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, allocate( tmp(npx ,npy ,nregions) ) allocate( tmpA(npx-1,npy-1,nregions) ) -#if defined(SW_DYNAMICS) +#if defined(SW_DYNAMICS) allocate( ut(npx-1,npy-1,nregions) ) allocate( vt(npx-1,npy-1,nregions) ) #else @@ -5529,7 +5534,7 @@ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, allocate( vt(npx-1,npy-1,npz,nregions) ) allocate( tmpA_3d(npx-1,npy-1,npz,nregions) ) #endif - allocate( vort(isd:ied,jsd:jed) ) + allocate( vort(isd:ied,jsd:jed) ) nout = nout + 1 @@ -5700,7 +5705,7 @@ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, deallocate( tmp ) deallocate( tmpA ) -#if defined(SW_DYNAMICS) +#if defined(SW_DYNAMICS) deallocate( ut ) deallocate( vt ) #else @@ -5715,14 +5720,14 @@ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, nullify(area) - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) + nullify(dx) + nullify(dy) + nullify(dxa) + nullify(dya) + nullify(rdxa) + nullify(rdya) + nullify(dxc) + nullify(dyc) end subroutine output_ncdf @@ -5849,7 +5854,7 @@ subroutine output(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & phi0 = 0.0 do j=jsd,jed do i=isd,ied - x1 = agrid(i,j,1) + x1 = agrid(i,j,1) y1 = agrid(i,j,2) z1 = radius p = p0_c0 * cos(y1) @@ -5922,7 +5927,7 @@ subroutine output(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, 
ua,va, & !if (tile==2) then ! do i=is,ie - ! print*, i, ps(i,35) + ! print*, i, ps(i,35) ! enddo !endif tmpA(is:ie,js:je,tile) = ps(is:ie,js:je) @@ -5967,14 +5972,14 @@ subroutine output(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & nullify(area) - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) + nullify(dx) + nullify(dy) + nullify(dxa) + nullify(dya) + nullify(rdxa) + nullify(rdya) + nullify(dxc) + nullify(dyc) nullify(cubed_sphere) @@ -5986,7 +5991,7 @@ end subroutine output !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! wrt2d_ncdf :: write out a 2d field -! +! subroutine wrtvar_ncdf(ncid, varid, nrec, i1,i2, j1,j2, npx, npy, npz, ntiles, p, ndims) #include integer, intent(IN) :: ncid, varid @@ -6019,7 +6024,7 @@ subroutine wrtvar_ncdf(ncid, varid, nrec, i1,i2, j1,j2, npx, npy, npz, ntiles, p if (ndims == 4) icount(4) = ntiles icount(ndims+1) = 1 - if (is_master()) then + if (is_master()) then error = NF_PUT_VARA_REAL(ncid, varid, istart, icount, p_R4) endif ! masterproc @@ -6054,7 +6059,7 @@ subroutine wrt2d(iout, nrec, i1,i2, j1,j2, npx, npy, nregions, p) enddo enddo - call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, nregions) + call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, nregions) if (is_master()) then write(iout,rec=nrec) p_R4(1:npx-1,1:npy-1,1:nregions) @@ -6073,7 +6078,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, gridstruct, flagstruct, npx, npy, npz, ng, ncnst, nwat, ndims, nregions, dry_mass, & mountain, moist_phys, hydrostatic, hybrid_z, delz, ze0, ks, ptop, domain_in, tile_in, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) @@ -6081,7 +6086,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real , intent(INOUT) :: pt(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: q(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) - + real , intent(INOUT) :: phis(bd%isd:bd%ied ,bd%jsd:bd%jed ) real , intent(INOUT) :: ps(bd%isd:bd%ied ,bd%jsd:bd%jed ) @@ -6095,15 +6100,15 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real , intent(INOUT) :: va(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(inout) :: delz(bd%isd:,bd%jsd:,1:) real , intent(inout) :: ze0(bd%is:,bd%js:,1:) - + real , intent(inout) :: ak(npz+1) real , intent(inout) :: bk(npz+1) - + integer, intent(IN) :: npx, npy, npz integer, intent(IN) :: ng, ncnst, nwat integer, intent(IN) :: ndims integer, intent(IN) :: nregions - + real, intent(IN) :: dry_mass logical, intent(IN) :: mountain logical, intent(IN) :: moist_phys @@ -6146,7 +6151,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real, pointer :: acapN, acapS, globalarea real(kind=R_GRID), pointer :: dx_const, dy_const - + integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -6210,7 +6215,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, vc(:,:,:)=10. pt(:,:,:)=1. delp(:,:,:)=0. - + do j=js,je if (j>0 .and. 
j<5) then do i=is,ie @@ -6273,7 +6278,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do k=1,npz prf = ak(k) + ps(i,j)*bk(k) if ( prf > 100.E2 ) then - pt(i,j,k) = pt(i,j,k) + 0.01*(1. - (dist/r0)) * prf/ps(i,j) + pt(i,j,k) = pt(i,j,k) + 0.01*(1. - (dist/r0)) * prf/ps(i,j) endif enddo enddo @@ -6400,7 +6405,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do i=is,ie delz(i,j,k) = ze1(k+1) - ze1(k) pk(i,j,k) = pk(i,j,k+1) + grav*delz(i,j,k)/(cp_air*t00)*pk0 - pe(i,k,j) = pk(i,j,k)**(1./kappa) + pe(i,k,j) = pk(i,j,k)**(1./kappa) enddo enddo enddo @@ -6411,7 +6416,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do k=1,npz+1 do j=js,je do i=is,ie - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) ze0(i,j,k) = ze1(k) enddo enddo @@ -6421,14 +6426,14 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) pt(i,j,k) = t00/pk0 ! potential temp enddo enddo enddo pturb = 15. - xmax = 51.2E3 + xmax = 51.2E3 xc = xmax / 2. do k=1,npz @@ -6436,11 +6441,11 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do j=js,je do i=is,ie ! Impose perturbation in potential temperature: pturb - xx = (dx_const * (0.5+real(i-1)) - xc) / 4.E3 + xx = (dx_const * (0.5+real(i-1)) - xc) / 4.E3 yy = (dy_const * (0.5+real(j-1)) - xc) / 4.E3 dist = sqrt( xx**2 + yy**2 + zm**2 ) if ( dist<=1. ) then - pt(i,j,k) = pt(i,j,k) - pturb/pkz(i,j,k)*(cos(pi*dist)+1.)/2. + pt(i,j,k) = pt(i,j,k) - pturb/pkz(i,j,k)*(cos(pi*dist)+1.)/2. endif ! Transform back to temperature: pt(i,j,k) = pt(i,j,k) * pkz(i,j,k) @@ -6594,7 +6599,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do k=1,npz zm = 0.5*(ze1(k)+ze1(k+1)) if ( zm .le. 2.e3 ) then - utmp = 8.*(1.-cos(pi*zm/4.e3)) + utmp = 8.*(1.-cos(pi*zm/4.e3)) vtmp = 8.*sin(pi*zm/4.e3) elseif (zm .le. 6.e3 ) then utmp = 8. + (us0-8.)*(zm-2.e3)/4.e3 @@ -6691,7 +6696,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, enddo -! Set up fake "sigma" coordinate +! Set up fake "sigma" coordinate call make_eta_level(npz, pe, area, ks, ak, bk, ptop, domain, bd) if ( is_master() ) write(*,*) 'LES testcase: computed model top (mb)=', ptop/100. @@ -6700,7 +6705,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) enddo enddo enddo @@ -6723,7 +6728,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ! *** Add perturbation *** r0 = 1.0e3 ! radius (m) - zc = 1.0e3 ! center of bubble + zc = 1.0e3 ! 
@@ -6723,7 +6728,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc,
 
 ! *** Add perturbation ***
       r0 = 1.0e3         ! radius (m)
-      zc = 1.0e3         ! center of bubble 
+      zc = 1.0e3         ! center of bubble
       icenter = npx/2
       jcenter = npy/2
@@ -6750,21 +6755,21 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc,
       nullify(fC)
       nullify(f0)
 
-      nullify(ee1) 
-      nullify(ee2) 
-      nullify(ew) 
-      nullify(es) 
-      nullify(en1) 
-      nullify(en2) 
-      
-      nullify(dx) 
-      nullify(dy) 
-      nullify(dxa) 
-      nullify(dya) 
-      nullify(rdxa) 
-      nullify(rdya) 
-      nullify(dxc) 
-      nullify(dyc) 
+      nullify(ee1)
+      nullify(ee2)
+      nullify(ew)
+      nullify(es)
+      nullify(en1)
+      nullify(en2)
+
+      nullify(dx)
+      nullify(dy)
+      nullify(dxa)
+      nullify(dya)
+      nullify(rdxa)
+      nullify(rdya)
+      nullify(dxc)
+      nullify(dyc)
 
       nullify(dx_const)
       nullify(dy_const)
@@ -6772,19 +6777,19 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc,
       nullify(domain)
       nullify(tile)
 
-      nullify(have_south_pole) 
-      nullify(have_north_pole) 
+      nullify(have_south_pole)
+      nullify(have_north_pole)
 
-      nullify(ntiles_g) 
-      nullify(acapN) 
-      nullify(acapS) 
-      nullify(globalarea) 
+      nullify(ntiles_g)
+      nullify(acapN)
+      nullify(acapS)
+      nullify(globalarea)
 
 end subroutine init_double_periodic
 
 !>@brief The subroutine 'SuperK_Sounding' gets the sounding at "equator"; the
 !! initial storm center.
-!>@details This is the z-coordinate version 
+!>@details This is the z-coordinate version
 !! (Morris Weisman & J. Klemp 2002 sounding)
 subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz)
       integer, intent(in):: km
@@ -6848,8 +6853,8 @@ subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz)
 ! Derive pressure fields from hydrostatic balance:
       do k=km,1,-1
          pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k)))
-         peln(k) = log(pk(k)) / kappa 
-         pe(k) = exp(peln(k)) 
+         peln(k) = log(pk(k)) / kappa
+         pe(k) = exp(peln(k))
      enddo
      do k=1, km
         pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k))
@@ -6869,8 +6874,8 @@ subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz)
 ! Derive "dry" pressure fields from hydrostatic balance:
      do k=km,1,-1
         pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k))
-        peln(k) = log(pk(k)) / kappa 
-        pe(k) = exp(peln(k)) 
+        peln(k) = log(pk(k)) / kappa
+        pe(k) = exp(peln(k))
      enddo
      do k=1, km
         dp0(k) = pe(k+1) - pe(k)
@@ -6885,7 +6890,7 @@ subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz)
      do k=1, km
         dp(k) = dp0(k)*(1. + qz(k))   ! moist air
-        pe(k+1) = pe(k) + dp(k) 
+        pe(k+1) = pe(k) + dp(k)
      enddo
 ! dry pressure, pt & height remain unchanged
      pk(km+1) = pe(km+1)**kappa
@@ -6894,8 +6899,8 @@
 ! Derive pressure fields from hydrostatic balance:
      do k=km,1,-1
        pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k)))
-       peln(k) = log(pk(k)) / kappa 
-       pe(k) = exp(peln(k)) 
+       peln(k) = log(pk(k)) / kappa
+       pe(k) = exp(peln(k))
      enddo
      do k=1, km
        pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k))
@@ -6907,7 +6912,7 @@
      enddo
   enddo
 #endif
-  
+
   if ( is_master() ) then
     write(*,*) 'Super_K: computed ptop (mb)=', 0.01*pe(1), ' PS=', 0.01*pe(km+1)
     call prt_m1('1D Sounding T0', temp, 1, km, 1, 1, 0, 1, 1.)
@@ -7102,7 +7107,7 @@ subroutine SuperK_u(km, zz, um, dudz)
 #endif
      enddo
 
- end subroutine superK_u 
+ end subroutine superK_u
 
 
 subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,&
@@ -7176,7 +7181,7 @@ subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,&
       enddo
    enddo
 
-   !delp 
+   !delp
    do k=1,npz
       do j=js,je
          do i=is,ie
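The SuperK_Sounding hunks above all touch the same hydrostatic recurrence: integrate pk upward from the surface, then recover peln and pe from it. The sketch below isolates that recurrence as a standalone program; the level count, spacing, and uniform theta profile are illustrative, and pt deliberately follows the file's convention of theta divided by p00**kappa (compare pt(i,j,k) = t00/pk0 in an earlier hunk).

   program hydro_sketch
      implicit none
      integer, parameter :: km = 32
      real, parameter :: grav = 9.80665, cp_air = 1004.6, kappa = 2./7.
      real, parameter :: p00 = 1000.e2, theta0 = 300.
      real :: pk(km+1), pe(km+1), peln(km+1), ze(km+1), pt(km), pk0
      integer :: k
      pk0 = p00**kappa                   ! p00**kappa, as in the test cases
      do k = 1, km+1
         ze(k) = real(km+1-k) * 500.     ! interfaces every 500 m; ze(km+1)=0 at the surface
      enddo
      pt(:) = theta0/pk0                 ! "potential temp" in the file's scaled units
      pe(km+1)   = p00
      pk(km+1)   = pe(km+1)**kappa
      peln(km+1) = log(pe(km+1))
      do k = km, 1, -1                   ! hydrostatic balance, integrated upward
         pk(k)   = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k))
         peln(k) = log(pk(k)) / kappa
         pe(k)   = exp(peln(k))
      enddo
      print *, 'computed ptop (mb) =', 0.01*pe(1)
   end program hydro_sketch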
@@ -7237,7 +7242,7 @@ subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,&
 !!$          endif
 !!$          !!! END DEBUG CODE
                if (abs(z - ziter) < zconv) exit
-            enddo 
+            enddo
             gz(i,j,k) = z
          enddo
       enddo
@@ -7293,7 +7298,7 @@ subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,&
             if (do_pert) then
                uu = uu + DCMIP16_BC_uwind_pert(0.5*(z+z0),lat_u(i,j),lon_u(i,j))
             endif
-            u(i,j,k) = u1(i,j)*uu 
+            u(i,j,k) = u1(i,j)*uu
 
             gz_u(i,j) = z
             p_u(i,j) = p
@@ -7394,7 +7399,7 @@ subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,&
 
 contains
 
- 
+
 real function DCMIP16_BC_temperature(z, lat)
 
    real, intent(IN) :: z
@@ -7456,7 +7461,7 @@ real function DCMIP16_BC_uwind_pert(z,lat,lon)
 
    pphere = (/ lon, lat /)
    dst = great_circle_dist(pphere, ppcenter, radius)
-   
+
    DCMIP16_BC_uwind_pert = max(0., up*ZZ*exp(-(dst/Rp)**2) )
 
 end function DCMIP16_BC_uwind_pert
@@ -7554,7 +7559,7 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,&
       enddo
    enddo
 
-   !delp 
+   !delp
    do k=1,npz
      do j=js,je
         do i=is,ie
@@ -7615,7 +7620,7 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,&
 !!$          endif
 !!$          !!! END DEBUG CODE
                if (abs(z - ziter) < zconv) exit
-            enddo 
+            enddo
             gz(i,j,k) = z
          enddo
       enddo
@@ -7778,7 +7783,7 @@ real function DCMIP16_TC_pressure(z, r)
 
    if (z <= zt) then
       DCMIP16_TC_pressure = pb*exp(grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) -dp* exp(-sqrt((r/rp)**3) - (z/zp)**2) * &
-         exp( grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) 
+         exp( grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) )
   else
      DCMIP16_TC_pressure = ptt*exp(grav*(zt-z)/(Rdgas*Tvt))
   endif
@@ -7806,7 +7811,7 @@ subroutine DCMIP16_TC_uwind_pert(z,r,lon,lat,uu,vv)
 
   vt = -fr5 + sqrt( fr5**2 - (1.5 * rfac * Tvrd) / &
        ( 1. + 2*Tvrd*z/(grav*zp**2) - pb/dp*exp( rfac + (z/zp)**2) ) )
-  
+
   d1 = sin(phip)*cos(lat) - cos(phip)*sin(lat)*cos(lon - lamp)
   d2 = cos(phip)*sin(lon - lamp)
   d  = max(1.e-25,sqrt(d1*d1 + d2*d2))
@@ -7838,7 +7843,7 @@ subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak,
      real , intent(INOUT) ::   pt(isd:ied  ,jsd:jed  ,npz)
      real , intent(INOUT) :: delp(isd:ied  ,jsd:jed  ,npz)
      real , intent(INOUT) ::    q(isd:ied  ,jsd:jed  ,npz, ncnst)
-     
+
      real , intent(INOUT) :: phis(isd:ied  ,jsd:jed  )
     real , intent(INOUT) ::    ps(isd:ied  ,jsd:jed  )
@@ -7852,16 +7857,16 @@ subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak,
      real , intent(INOUT) ::   va(isd:ied  ,jsd:jed  ,npz)
      real , intent(inout) :: delz(isd:,jsd:,1:)
      real , intent(inout) ::  ze0(is:,js:,1:)
-     
+
      real , intent(IN) :: ak(npz+1)
      real , intent(IN) :: bk(npz+1)
-     
+
      integer, intent(IN) :: npx, npy, npz
      integer, intent(IN) :: ng, ncnst
      integer, intent(IN) :: ndims
      integer, intent(IN) :: nregions
      integer,target,intent(IN):: tile_in
-     
+
      real, intent(IN) :: dry_mass
      logical, intent(IN) :: mountain
      logical, intent(IN) :: moist_phys
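The DCMIP16_TC_pressure hunk above carries the analytic vortex pressure: a hydrostatic background minus a radially and vertically decaying deficit below the tropopause, and an isothermal tail above it. The sketch below restates that piecewise formula as a standalone program; every parameter value here is a nominal stand-in, not taken from the file.

   program tc_pressure_sketch
      implicit none
      real, parameter :: grav = 9.80665, Rdgas = 287.04
      real, parameter :: pb = 1015.e2      ! background surface pressure (Pa)
      real, parameter :: dp = 11.15e2      ! central pressure deficit (Pa)
      real, parameter :: rp = 282.e3       ! radial decay scale (m)
      real, parameter :: zp = 7.e3         ! vertical decay scale (m)
      real, parameter :: zt = 15.e3        ! tropopause height (m)
      real, parameter :: Tv0 = 302.15, lapse = 7.e-3
      real :: z, r, Tvt, ptt, p
      z = 5.e3
      r = 100.e3
      Tvt = Tv0 - lapse*zt                             ! virtual T at the tropopause
      ptt = pb*( (Tvt/Tv0)**(grav/(Rdgas*lapse)) )     ! background pressure at zt
      if (z <= zt) then
         ! hydrostatic background minus the decaying vortex deficit
         p = pb*exp(grav/(Rdgas*lapse)*log((Tv0-lapse*z)/Tv0)) &
           - dp*exp(-sqrt((r/rp)**3) - (z/zp)**2) &
               *exp(grav/(Rdgas*lapse)*log((Tv0-lapse*z)/Tv0))
      else
         p = ptt*exp(grav*(zt-z)/(Rdgas*Tvt))          ! isothermal above zt
      endif
      print *, 'p(z,r) (mb) =', 0.01*p
   end program tc_pressure_sketch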
@@ -7965,7 +7970,7 @@ subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak,
 !!$       vc(:,:,:)=10.
 !!$       pt(:,:,:)=1.
 !!$       delp(:,:,:)=0.
-!!$ 
+!!$
 !!$       do j=js,je
 !!$          if (j>10 .and. j<15) then
 !!$             do i=is,ie
@@ -7987,25 +7992,25 @@ subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak,
       nullify(fC)
       nullify(f0)
 
-      nullify(dx) 
-      nullify(dy) 
-      nullify(dxa) 
-      nullify(dya) 
-      nullify(rdxa) 
-      nullify(rdya) 
-      nullify(dxc) 
-      nullify(dyc) 
+      nullify(dx)
+      nullify(dy)
+      nullify(dxa)
+      nullify(dya)
+      nullify(rdxa)
+      nullify(rdya)
+      nullify(dxc)
+      nullify(dyc)
 
       nullify(domain)
       nullify(tile)
 
-      
-      nullify(have_south_pole) 
-      nullify(have_north_pole) 
-      nullify(ntiles_g) 
-      nullify(acapN) 
-      nullify(acapS) 
-      nullify(globalarea) 
+      nullify(have_south_pole)
+      nullify(have_north_pole)
+
+      nullify(ntiles_g)
+      nullify(acapN)
+      nullify(acapS)
+      nullify(globalarea)
 
 end subroutine init_latlon
@@ -8026,11 +8031,11 @@ subroutine init_latlon_winds(UBar, u, v, ua, va, uc, vc, defOnGrid, gridstruct)
       real :: p1(2),p2(2),p3(2),p4(2), pt(2)
       real :: e1(3), e2(3), ex(3), ey(3)
 
-      real :: dist, r, r0 
+      real :: dist, r, r0
       integer :: i,j,k,n
       real :: utmp, vtmp
-      real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 
+      real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2
 
       real, dimension(:,:,:), pointer :: grid, agrid
       real, dimension(:,:),   pointer :: area, dx, dy, dxc, dyc
@@ -8058,7 +8063,7 @@ subroutine init_latlon_winds(UBar, u, v, ua, va, uc, vc, defOnGrid, gridstruct)
                                       cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) ) )
          enddo
       enddo
-      
+
       if ( defOnGrid == 1 ) then
          do j=jsd,jed+1
             do i=isd,ied
@@ -8075,23 +8080,23 @@ subroutine init_latlon_winds(UBar, u, v, ua, va, uc, vc, defOnGrid, gridstruct)
             enddo
          enddo
-         
+
          do j=js,je
             do i=is,ie+1
                dist = dxc(i,j)
               v(i,j) = (psi(i,j)-psi(i-1,j))/dist
-              if (dist==0) v(i,j) = 0. 
+              if (dist==0) v(i,j) = 0.
            enddo
         enddo
         do j=js,je+1
           do i=is,ie
              dist = dyc(i,j)
             u(i,j) = -1.0*(psi(i,j)-psi(i,j-1))/dist
-            if (dist==0) u(i,j) = 0. 
+            if (dist==0) u(i,j) = 0.
          enddo
        enddo
      endif
-     
+
 end subroutine init_latlon_winds
 
 subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, nested, &
@@ -8127,7 +8132,7 @@ subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, nested, &
      real , intent(inout) :: vc(isd:ied,jsd:jed+1)  !(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng)
 !--------------------------------------------------------------
-! Local 
+! Local
      real :: sinlon(im,jm)
     real :: coslon(im,jm)
@@ -8138,7 +8143,7 @@ subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, nested, &
      real :: tmp2(jsd:jed)
     real :: tmp3(jsd:jed)
 
-     real mag,mag1,mag2, ang,ang1,ang2 
+     real mag,mag1,mag2, ang,ang1,ang2
      real us, vs, un, vn
      integer i, j, k, im2
     integer js1g1
@@ -8191,7 +8196,7 @@ subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, nested, &
      rdya => gridstruct%rdya
      dxc => gridstruct%dxc
      dyc => gridstruct%dyc
-     
+
      cubed_sphere => gridstruct%cubed_sphere
      latlon       => gridstruct%latlon
@@ -8333,7 +8338,7 @@ subroutine atob_s(qin, qout, npx, npy, dxa, dya, nested, cubed_sphere, altInterp
      integer, intent(IN) :: npx, npy
      real ,   intent(IN) ::  qin(isd:ied  ,jsd:jed  )    !< A-grid field
      real ,  intent(OUT) :: qout(isd:ied+1,jsd:jed+1)    !< Output B-grid field
-     integer, OPTIONAL, intent(IN) :: altInterp 
+     integer, OPTIONAL, intent(IN) :: altInterp
      logical, intent(IN) :: nested, cubed_sphere
     real, intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya
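The init_latlon_winds hunks above derive nondivergent winds from a streamfunction psi by simple differencing: u = -dpsi/dy on one family of edges and v = +dpsi/dx on the other, with a guard for zero edge lengths. Here is a small sketch of that construction on a uniform patch; the grid size, spacing, and analytic psi are illustrative, not the file's.

   program psi_winds_sketch
      implicit none
      integer, parameter :: nx = 16, ny = 16
      real, parameter :: dx = 1.e5, dy = 1.e5      ! uniform edge lengths (m)
      real :: psi(0:nx, 0:ny), u(0:nx, ny), v(nx, 0:ny)
      integer :: i, j
      do j = 0, ny                                  ! an arbitrary smooth streamfunction
         do i = 0, nx
            psi(i,j) = sin(2.*3.14159265*i/nx) * cos(3.14159265*j/ny)
         enddo
      enddo
      do j = 1, ny            ! u sits between psi(i,j-1) and psi(i,j): u = -dpsi/dy
         do i = 0, nx
            u(i,j) = -1.0*(psi(i,j) - psi(i,j-1))/dy
         enddo
      enddo
      do j = 0, ny            ! v sits between psi(i-1,j) and psi(i,j): v = +dpsi/dx
         do i = 1, nx
            v(i,j) = (psi(i,j) - psi(i-1,j))/dx
         enddo
      enddo
      print *, 'max |u|, |v| =', maxval(abs(u)), maxval(abs(v))
   end program psi_winds_sketch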
@@ -8356,13 +8361,13 @@ subroutine atob_s(qin, qout, npx, npy, dxa, dya, nested, cubed_sphere, altInterp
      if (.not. nested) call fill_corners(tmpq , npx, npy, FILL=XDir, AGRID=.true.)
 ! ATOC
      do j=jsd,jed
-        call interp_left_edge_1d(tmpq1(:,j), tmpq(:,j), dxa(:,j), isd, ied, altInterp) 
+        call interp_left_edge_1d(tmpq1(:,j), tmpq(:,j), dxa(:,j), isd, ied, altInterp)
      enddo
 
      if (.not. nested) call fill_corners(tmpq , npx, npy, FILL=YDir, AGRID=.true.)
 ! ATOD
      do i=isd,ied
-        tmp1j(jsd:jed) = 0.0 
+        tmp1j(jsd:jed) = 0.0
         tmp2j(jsd:jed) = tmpq(i,jsd:jed)
         tmp3j(jsd:jed) = dya(i,jsd:jed)
         call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, altInterp)
@@ -8374,7 +8379,7 @@ subroutine atob_s(qin, qout, npx, npy, dxa, dya, nested, cubed_sphere, altInterp
         tmp1j(:) = tmpq1(i,:)
         tmp2j(:) = tmpq1(i,:)
         tmp3j(:) = 1.0  ! Uniform Weighting missing first value so will not reproduce
-        call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, altInterp) 
+        call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, altInterp)
         tmpq1(i,:) = tmp1j(:)
      enddo
@@ -8387,7 +8392,7 @@ subroutine atob_s(qin, qout, npx, npy, dxa, dya, nested, cubed_sphere, altInterp
         tmpq2(:,j) = tmp1i(:)
      enddo
 
-! Average 
+! Average
      do j=jsd,jed+1
        do i=isd,ied+1
          qout(i,j) = 0.5 * (tmpq1(i,j) + tmpq2(i,j))
@@ -8543,14 +8548,14 @@ subroutine dtoa(uin, vin, uout, vout, dx, dy, dxa, dya, dxc, dyc, npx, npy, ng)
        tmp1j(:) = 0.0
        tmp2j(:) = uin(i,:)*dyc(i,:)
        tmp3j(:) = dyc(i,:)
-       call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, interpOrder) 
+       call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, interpOrder)
       uout(i,jsd:jed) = tmp1j(jsd+1:jed+1)/dya(i,jsd:jed)
     enddo
     do j=jsd,jed
       tmp1i(:) = 0.0
      tmp2i(:) = vin(:,j)*dxc(:,j)
      tmp3i(:) = dxc(:,j)
-      call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, interpOrder) 
+      call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, interpOrder)
      vout(isd:ied,j) = tmp1i(isd+1:ied+1)/dxa(isd:ied,j)
     enddo
 #endif
@@ -8611,7 +8616,7 @@ subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, nested, do
         tmp3j(:) = dya(i,:)
        call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, interpOrder)
        vout(i,:) = tmp1j(:)
-     enddo 
+     enddo
 #endif
 
 #else
@@ -8685,7 +8690,7 @@ end subroutine atoc
 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
 !>@brief The subroutine 'ctoa' interpolates values from the C-Grid to the A-grid.
 subroutine ctoa(uin, vin, uout, vout, dx, dy, dxc, dyc, dxa, dya, npx, npy, ng)
-      integer, intent(IN) :: npx, npy, ng 
+      integer, intent(IN) :: npx, npy, ng
      real , intent(IN)  ::  uin(isd:ied+1,jsd:jed  )    !< C-grid u-wind field
     real , intent(IN)  ::  vin(isd:ied  ,jsd:jed+1)    !< C-grid v-wind field
     real , intent(OUT) :: uout(isd:ied  ,jsd:jed  )    !< A-grid u-wind field
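The atob_s, dtoa, atoc, and ctoa transforms above all funnel through interp_left_edge_1d. Its first two modes are plain averages: order=1 is a uniform two-point mean and order=2 weights by the cell widths (note that the Doxygen block later in this diff labels order=2 as "Uniform PPM", which does not match the branch comments in the code itself). A sketch of the two averaging modes with made-up data:

   program left_edge_sketch
      implicit none
      integer, parameter :: ifirst = 1, ilast = 8
      real :: qin(ifirst:ilast), qout(ifirst:ilast), dx(ifirst:ilast)
      integer :: i
      do i = ifirst, ilast
         qin(i) = real(i)**2           ! arbitrary smooth data
         dx(i)  = 1. + 0.1*real(i)     ! mildly non-uniform cell widths
      enddo
      qout(:) = 0.0
      do i = ifirst+1, ilast           ! order=1: uniform linear average
         qout(i) = 0.5 * (qin(i-1) + qin(i))
      enddo
      do i = ifirst+1, ilast           ! order=2: width-weighted (non-uniform) average
         qout(i) = (dx(i-1)*qin(i-1) + dx(i)*qin(i))/(dx(i-1)+dx(i))
      enddo
      print *, 'left-edge estimate at i=4:', qout(4)
   end program left_edge_sketch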
@@ -8740,11 +8745,11 @@ subroutine rotate_winds(myU, myV, p1, p2, p3, p4, t1, ndims, dir)
      integer, intent(IN)   :: ndims
      real , intent(INOUT)  :: myU        !< u-wind field
      real , intent(INOUT)  :: myV        !< v-wind field
-     real(kind=R_GRID) , intent(IN)    :: p1(ndims)       !     p4 
-     real(kind=R_GRID) , intent(IN)    :: p2(ndims)       ! 
+     real(kind=R_GRID) , intent(IN)    :: p1(ndims)       !     p4
+     real(kind=R_GRID) , intent(IN)    :: p2(ndims)       !
      real(kind=R_GRID) , intent(IN)    :: p3(ndims)       !  p1 t1 p3
-     real(kind=R_GRID) , intent(IN)    :: p4(ndims)       ! 
-     real(kind=R_GRID) , intent(IN)    :: t1(ndims)       !     p2 
+     real(kind=R_GRID) , intent(IN)    :: p4(ndims)       !
+     real(kind=R_GRID) , intent(IN)    :: t1(ndims)       !     p2
      integer, intent(IN) :: dir          !< Direction ; 1=>sphere-to-cube  2=> cube-to-sphere
 
      real(kind=R_GRID) :: ee1(3), ee2(3), ee3(3), elon(3), elat(3)
@@ -8771,7 +8776,7 @@ subroutine rotate_winds(myU, myV, p1, p2, p3, p4, t1, ndims, dir)
         newu = myU*g11 + myV*g12
         newv = myU*g21 + myV*g22
      else
-        newu = ( myU*g22 - myV*g12)/(g11*g22 - g21*g12) 
+        newu = ( myU*g22 - myV*g12)/(g11*g22 - g21*g12)
         newv = (-myU*g21 + myV*g11)/(g11*g22 - g21*g12)
      endif
      myU = newu
@@ -8871,9 +8876,9 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js
 
      allocate(p_r8(npx-1,npy-1,ntiles_g))
      gsum = 0.
-     
-     if (latlon) then 
-        j1 = 2 
+
+     if (latlon) then
+        j1 = 2
        j2 = npy-2
        !!! WARNING: acapS and acapN have NOT been initialized.
        gsum = gsum + p(1,1)*acapS
@@ -8885,7 +8890,7 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js
        enddo
 
     else
-       do n=tile,tile 
+       do n=tile,tile
          do j=jfirst,jlast
            do i=ifirst,ilast
              p_R8(i,j,n) = p(i,j)*area(i,j)
@@ -8908,7 +8913,7 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js
     endif
 
     deallocate(p_r8)
-    
+
 end function globalsum
@@ -8916,9 +8921,9 @@ subroutine get_unit_vector( p1, p2, p3, uvect )
      real(kind=R_GRID), intent(in):: p1(2), p2(2), p3(2) ! input position unit vectors (spherical coordinates)
      real(kind=R_GRID), intent(out):: uvect(3) ! output unit spherical cartesian
 ! local
-     integer :: n 
+     integer :: n
      real(kind=R_GRID) :: xyz1(3), xyz2(3), xyz3(3)
-     real :: dp(3) 
+     real :: dp(3)
 
      call spherical_to_cartesian(p1(1), p1(2), one, xyz1(1), xyz1(2), xyz1(3))
     call spherical_to_cartesian(p2(1), p2(2), one, xyz2(1), xyz2(2), xyz2(3))
@@ -8973,7 +8978,7 @@ subroutine mp_ghost_ew(im, jm, km, nq, ifirst, ilast, jfirst, jlast, &
 !
 ! !DESCRIPTION:
 !
-!     Ghost 4d east/west 
+!     Ghost 4d east/west
 !
 ! !REVISION HISTORY:
 !    2005.08.22   Putman
@@ -9015,7 +9020,7 @@ end subroutine mp_ghost_ew
 !>@brief The subroutine 'interp_left_edge_1d' interpolates to left edge of a cell.
 !>@details order = 1 -> Linear average
 !>order = 2 -> Uniform PPM
-!>order = 3 -> Non-Uniform PPM 
+!>order = 3 -> Non-Uniform PPM
 subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order)
      integer, intent(in):: ifirst,ilast
     real, intent(out) :: qout(ifirst:)
@@ -9025,26 +9030,26 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order)
      integer :: i
      real :: dm(ifirst:ilast),qmax,qmin
-     real :: r3, da1, da2, a6da, a6, al, ar 
+     real :: r3, da1, da2, a6da, a6, al, ar
      real :: qLa, qLb1, qLb2
      real :: x
 
      r3 = 1./3.
 
-     qout(:) = 0.0 
-     if (order==1) then 
+     qout(:) = 0.0
+     if (order==1) then
 ! 1st order Uniform linear averaging
        do i=ifirst+1,ilast
          qout(i) = 0.5 * (qin(i-1) + qin(i))
       enddo
     elseif (order==2) then
-! Non-Uniform 1st order average 
+! Non-Uniform 1st order average
       do i=ifirst+1,ilast
         qout(i) = (dx(i-1)*qin(i-1) + dx(i)*qin(i))/(dx(i-1)+dx(i))
       enddo
-    elseif (order==3) then 
+    elseif (order==3) then
 
-! PPM - Uniform 
+! PPM - Uniform
       do i=ifirst+1,ilast-1
         dm(i) = 0.25*(qin(i+1) - qin(i-1))
       enddo
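The rotate_winds hunk above switches between sphere and cube wind components with a single 2x2 metric matrix: dir=1 applies it directly, and dir=2 inverts it by Cramer's rule with determinant g11*g22 - g21*g12. A standalone restatement of that branch, with illustrative matrix entries in place of the projections computed from the grid unit vectors:

   program rotate_sketch
      implicit none
      real :: g11, g12, g21, g22, myU, myV, newu, newv, det
      integer :: dir
      g11 = 0.9;  g12 = 0.1       ! illustrative metric entries, not grid-derived
      g21 = -0.2; g22 = 0.95
      myU = 10.;  myV = 5.
      dir = 2
      if (dir == 1) then                       ! sphere -> cube: apply the matrix
         newu = myU*g11 + myV*g12
         newv = myU*g21 + myV*g22
      else                                     ! cube -> sphere: invert it (Cramer)
         det  = g11*g22 - g21*g12
         newu = ( myU*g22 - myV*g12)/det
         newv = (-myU*g21 + myV*g11)/det
      endif
      print *, 'rotated winds:', newu, newv
   end program rotate_sketch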
@@ -9100,12 +9105,12 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order)
       enddo
 
     elseif (order==5) then
-     
+
 ! Linear Spline
       do i=ifirst+1,ilast-1
-        x = FLOAT(i-(ifirst+1))*FLOAT(ilast-ifirst+1-1)/FLOAT(ilast-ifirst-1) 
+        x = FLOAT(i-(ifirst+1))*FLOAT(ilast-ifirst+1-1)/FLOAT(ilast-ifirst-1)
         qout(i) = qin(ifirst+NINT(x)) + (x - NINT(x)) * (qin(ifirst+NINT(x+1)) - qin(ifirst+NINT(x)))
-       ! if (tile==1) print*, ifirst+NINT(x+1), ifirst+NINT(x), (x - NINT(x)) 
+       ! if (tile==1) print*, ifirst+NINT(x+1), ifirst+NINT(x), (x - NINT(x))
        ! if (tile==1) print*, 0.5*(qin(i-1)+qin(i)), qout(i)
       enddo
@@ -9122,10 +9127,10 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order)
 end subroutine interp_left_edge_1d
 !------------------------------------------------------------------------------
 
-!----------------------------------------------------------------------- 
-!>@brief The subroutine 'vpol5' treats the V winds at the poles. 
-!>@details This requires an average of the U- and V-winds, 
-!!weighted by their angles of incidence at the pole points. 
+!-----------------------------------------------------------------------
+!>@brief The subroutine 'vpol5' treats the V winds at the poles.
+!>@details This requires an average of the U- and V-winds,
+!!weighted by their angles of incidence at the pole points.
 subroutine vpol5(u, v, im, jm, coslon, sinlon, cosl5, sinl5, &
                  ng_d, ng_s, jfirst, jlast)
 
 ! !INPUT PARAMETERS:
@@ -9261,7 +9266,7 @@ subroutine var_dz(km, ztop, ze)
       s_fac(km  ) = 0.25
       s_fac(km-1) = 0.30
       s_fac(km-2) = 0.50
-      s_fac(km-3) = 0.70 
+      s_fac(km-3) = 0.70
       s_fac(km-4) = 0.90
       s_fac(km-5) = 1.
       do k=km-6, 5, -1