diff --git a/tests/conftest.py b/tests/conftest.py
index 7e8d61410..07d508efb 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -39,8 +39,8 @@ def pytest_addoption(parser):
         "--config",
         required=True,
         action='store',
-        help=("List of model configurations to test, options are 'NWM'," +
-              "'Gridded',and 'Reach'")
+        help=("List of model configurations to test, options include all configs listed in "
+              "hydro_namelist.json keys")
     )

     parser.addoption(
@@ -67,6 +67,13 @@ def pytest_addoption(parser):
         help='Scheduler to use for testing, options are PBSCheyenne or do not specify for no '
              'scheduler')

+    parser.addoption(
+        '--nnodes',
+        default='2',
+        required=False,
+        action='store',
+        help='Number of nodes to use for testing if running on scheduler'
+    )
     parser.addoption(
         '--account',
         default='NRAL0017',
@@ -75,9 +82,10 @@ def pytest_addoption(parser):
         help='Account number to use if using a scheduler.')

 def _make_sim(domain_dir,
-              source_dir,
+              source_dir,
               configuration,
               ncores,
+              nnodes,
               scheduler,
               account):
     # model
@@ -102,7 +110,11 @@ def _make_sim(domain_dir,
     sim.add(job)

     if scheduler is not None and scheduler == 'pbscheyenne':
-        sim.add(schedulers.PBSCheyenne(account=account,nproc=int(ncores)))
+        sim.add(schedulers.PBSCheyenne(account=account, nproc=int(ncores), nnodes=int(nnodes)))
+
+    if configuration == 'nwm_channel-only':
+        # TODO: update the forcing here to be taken from the nwm_ana run dir.
+        # Placeholder only: this bare attribute access is currently a no-op.
+        sim.domain

     return sim

@@ -113,15 +125,17 @@ def candidate_sim(request):
     candidate_dir = request.config.getoption("--candidate_dir")
     configuration = request.config.getoption("--config")
     ncores = request.config.getoption("--ncores")
+    nnodes = request.config.getoption("--nnodes")
     scheduler = str(request.config.getoption("--scheduler")).lower()
     account = request.config.getoption("--account")

     candidate_sim = _make_sim(domain_dir = domain_dir,
-                             source_dir= candidate_dir,
-                             configuration=configuration,
-                             ncores = ncores,
-                             scheduler = scheduler,
-                             account = account)
+                              source_dir=candidate_dir,
+                              configuration=configuration,
+                              ncores=ncores,
+                              nnodes=nnodes,
+                              scheduler=scheduler,
+                              account=account)

     return candidate_sim

@@ -132,15 +146,17 @@ def reference_sim(request):
     reference_dir = request.config.getoption("--reference_dir")
     configuration = request.config.getoption("--config")
     ncores = request.config.getoption("--ncores")
+    nnodes = request.config.getoption("--nnodes")
     scheduler = str(request.config.getoption("--scheduler")).lower()
     account = request.config.getoption("--account")

     reference_sim = _make_sim(domain_dir = domain_dir,
-                             source_dir= reference_dir,
-                             configuration=configuration,
-                             ncores = ncores,
-                             scheduler = scheduler,
-                             account = account)
+                              source_dir=reference_dir,
+                              configuration=configuration,
+                              ncores=ncores,
+                              nnodes=nnodes,
+                              scheduler=scheduler,
+                              account=account)

     return reference_sim

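For orientation, the options above feed the candidate_sim/reference_sim fixtures, and the new --nnodes value now reaches schedulers.PBSCheyenne via _make_sim. A hypothetical invocation (all paths are placeholders; the option set is defined in pytest_addoption):

    pytest -v \
        --config nwm_ana \
        --compiler gfort \
        --domain_dir /path/to/example_case \
        --candidate_dir /path/to/candidate/trunk/NDHMS \
        --reference_dir /path/to/reference/trunk/NDHMS \
        --output_dir /path/to/test_out \
        --scheduler PBSCheyenne --ncores 72 --nnodes 2 --account NRAL0017
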
diff --git a/tests/local/core_take_test.sh b/tests/local/core_take_test.sh
deleted file mode 100755
index 7f195552b..000000000
--- a/tests/local/core_take_test.sh
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/bin/bash
-# TODO(JLM): I should have written this in python... or not, would require python whereas
-# docker has python. This only require bash on the host machine.
-# Purpose:
-# This script is the general pupose launching script for wrf_hydro_nwm_public
-# and wrf_hydro_nwm testing.
-# This script takes care of logging the tests.
-# This script handles launchin in docker and on known machines. Other
-# machines will cause an error.
-#
-# Only argument take for docker usage:
-#   -i : enters docker interactively.
-#
-# All other arguments are passed to take_test.py, please run
-#   ./take_test.sh --help
-# For complete information on the following arguments:
-#   -- domain
-#   -- config
-#   -- candidate_spec_file
-#   -- test_spec
-
-if [[ ${@} == *"--help"* ]]; then
-    echo
-    echo "Retrieving the help from take_test.py..."
-    echo
-    python take_test.py --help
-    echo
-    echo "take_test.sh notes: "
-    echo "  When using docker, the domain argument becomes the key which is the basename of the path."
-    echo
-    exit 0
-fi
-
-# #################################
-# Determine the path to this file, allowing for a symlink.
-#https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
-SOURCE="${BASH_SOURCE[0]}"
-while [ -L "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
-  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
-  SOURCE="$(readlink "$SOURCE")"
-  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
-done
-this_dir="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
-this_repo=$(dirname $this_dir)
-this_repo_name=$(basename $this_repo)
-#echo "Testing: $this_dir"
-
-
-# #################################
-# Collect some parameters...
-if [ $(which docker 2> /dev/null | wc -l) != 0 ]; then
-    docker_avail=0
-else
-    docker_avail=1
-fi
-#echo "docker_avail: $docker_avail"
-
-grep docker /proc/1/cgroup -qa 2> /dev/null
-in_docker=$?
-#echo "in docker: $in_docker"
-
-machine_spec_file=$this_dir/machine_spec.yaml
-machines_in_spec=`cat $machine_spec_file | egrep -e $'^[a-z]' | cut -d':' -f1`
-#echo "machines_in_spec:" $machines_in_spec
-
-known_machine=1
-for mm in ${machines_in_spec}; do
-    if [ $(echo ${HOSTNAME} | grep ${mm} | wc -l 2> /dev/null) -gt 0 ]; then
-        known_machine=0
-        #echo $mm
-    fi
-done
-#echo "known_machine: $known_machine"
-
-
-# #################################
-# TODO(JLM): Construct the passed options
-# domain
-# config
-# candidate_spec_file
-# test_spec
-
-# Eliminate '=' and condense spaces
-args_to_pass="${@}"
-args_to_parse=$(echo "${args_to_pass}" | tr -s ' ' | tr -d '=' | tr -d '-')
-#echo "args_to_pass: $args_to_pass"
-
-# The domain arg and the candidate spec file args are needed
-# by this script. Eschew getopt and do it manually for portability.
- -origIFS=${IFS} -IFS=' ' read -r -a array <<< "$args_to_parse" - -wh_domain=-1 -wh_candidate_spec=-1 -for index in "${!array[@]}" -do - if [[ "${array[index]}" == domain ]]; then - wh_domain=$((index+1)) - fi - if [[ "${array[index]}" == candidate_spec_file ]]; then - wh_candidate_spec=$((index+1)) - fi -done - -if [ $wh_domain != -1 ]; then - domain=${array[$wh_domain]} - #echo "domain: $domain" -fi -if [ -z $domain ]; then - domain=wrfhydro/domains:croton_NY - args_to_pass="${args_to_pass} --domain $domain" - #echo "domain: $domain" -fi - -if [ $wh_candidate_spec != -1 ]; then - candidate_spec_file=${array[$wh_candidate_spec]} - #echo "candidate_spec_file: $candidate_spec_file" -fi - -# ################################# -# Known Machine (this includes docker) - -if [[ $known_machine == 0 ]] || [[ $in_docker == 0 ]]; then - - if [[ $in_docker == 0 ]]; then - - # Deal with possibility of case-insensitive mounted file system - rm -f abcdefghijklmnop ABCDEFGHIJKLMNOP - touch abcdefghijklmnop ABCDEFGHIJKLMNOP &> /dev/null - rm -f ABCDEFGHIJKLMNOP - case_insensitive=$(ls abcdefghijklmnop &> /dev/null | wc -l) - #echo "case_insensitive: $case_insensitive" - rm -f abcdefghijklmnop ABCDEFGHIJKLMNOP - - if [[ $case_insensitive -eq 0 ]]; then - cp -r ${this_repo} ${this_repo}_case_sensitive - cd ${this_repo}_case_sensitive/tests - # Edit the candidate spec file for the above. - for ff in ~/.test_spec_dir/*yaml; do - sed -i "s|${this_repo}\$|${this_repo}_case_sensitive|g" $ff - #echo $this_repo - #echo $ff - done - # Also deal with local_paths in the candidate spec... - for ii in `egrep 'local_path.*:' ~/.test_spec_dir/*.yaml | tr -d ' ' | cut -d':' -f3`; do - #echo $ii - if [[ "$ii" == *"_case_sensitive" ]]; then - continue - fi - cp -r $ii ${ii}_case_sensitive - sed -i "s|$ii\$|${ii}_case_sensitive|g" ~/.test_spec_dir/*.yaml - done - - fi - - fi - - python take_test.py ${args_to_pass} - return_code=$? - -else - -# ################################# -# UnKnown Machine - - # ################################# - # Docker - if [[ $docker_avail != 0 ]]; then - - echo "This machine is not known to $machine_spec_file and " - echo "docker does not seem to be available. Exiting." - exit 1 - - else - - echo - echo "Using Docker." - echo - - echo "Refresh the wrfhydro/dev:conda container" - docker pull wrfhydro/dev:conda - echo - - # Is the domain a container or a path? - if [[ ${domain} == "wrfhydro/domains:"* ]]; then - - echo "Refresh the ${domain} container" - docker pull ${domain} - - # Have to edit args_to_pass to the correct location. - domain_tag=$(echo $domain | cut -d':' -f2) - args_to_pass=$(echo "$args_to_pass" | sed "s|${domain}|/home/docker/domain/${domain_tag}|") - # Dummy, hopefully untaken name... - domain_tmp_vol="${domain_tag}"_tmp_vol - docker create --name $domain_tmp_vol ${domain} || exit 1 - - else - - # Set the mount. - host_domain_dir=$domain - domain_tag=$(basename $domain) - docker_domain_dir=/home/docker/domain/$domain_tag - args_to_pass=$(echo "$args_to_pass" | sed "s|${domain}|/home/docker/domain/${domain_tag}|") - - fi - echo - - # Need the user and candidate specs in a mountable place. - host_spec_dir=/tmp/user_spec_dir/ - docker_spec_dir=/home/docker/.test_spec_dir - rm -rf ${host_spec_dir} - mkdir ${host_spec_dir} - - # User spec - if [ ! -z $WRF_HYDRO_TESTS_USER_SPEC ]; then - cp $WRF_HYDRO_TESTS_USER_SPEC ${host_spec_dir}/. - docker_user_spec=${docker_spec_dir}/$(basename $WRF_HYDRO_TESTS_USER_SPEC) - else - # TODO(JLM): is this handled by take_test.py? 
may not be necessary. - echo "Using default user specification file: ${this_dir}/template_user_spec.yaml" - echo - cp ${this_dir}/template_user_spec.yaml ${host_spec_dir}/. - docker_user_spec=${docker_spec_dir}/template_user_spec.yaml - fi - - # Candidate spec - if [ $wh_candidate_spec != -1 ]; then - cp ${candidate_spec_file} ${host_spec_dir}/. - can_spec_file_copy=${host_spec_dir}$(basename ${candidate_spec_file}) - can_spec_file_docker=$docker_spec_dir/$(basename ${candidate_spec_file}) - # Have to mount and edit - - args_to_pass=$(echo "$args_to_pass" | \ - sed "s|${candidate_spec_file}|$can_spec_file_docker|") - candidate_spec_mounts="" - for ii in `egrep 'local_path.*:' ${candidate_spec_file} | tr -d ' ' | cut -d':' -f2`; do - if [ -z $ii ]; then - continue - fi - #echo ---- - # SOURCE="$ii" - # while [ -L "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlnk - # echo bar - # DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - # SOURCE="$(readlink "$SOURCE")" - # [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" - # done - # the_path=$SOURCE - the_path=$ii - #echo "the_path: $the_path" - #echo "this_repo: $this_repo" - #echo "this_repo_name: $this_repo_name" - if [[ "$the_path" != "$this_repo" ]] && \ - [[ "$the_path" != *"$this_repo_name" ]]; then - # If the local_path is the testing repo (though full path is not - # in this case have to mount the repos - # and have to edit the copied candidate spec to reflect docker locn - rep_str="${the_path}:/home/docker/$(basename ${the_path})" - candidate_spec_mounts=$(echo "${candiate_spec_mounts} -v ${rep_str}") - fi - sed -i '' "s|${the_path}|/home/docker/$(basename ${the_path})|g" $can_spec_file_copy - done - - fi - - - # Use mount this repo to /home/docker - this_repo_name=$(basename $this_repo) - - # Candidate spec file - # needs mounted, may spcify alternate - # repos which need mounted. - - not_interactive=$(echo "$args_to_pass" | grep '\-i' | wc -l) - #args_to_pass=$(echo "$args_to_pass" | sed "s|-i||") - - docker_cmd="" - #docker_cmd=${docker_cmd}"cd /home/docker/wrf_hydro_py; pip uninstall -y wrfhydropy; " - #docker_cmd=${docker_cmd}" python setup.py install; pip install termcolor; " - docker_cmd=$(echo \ -"${docker_cmd} \ -cd /home/docker/${this_repo_name}/tests/; \ -./take_test.sh ${args_to_pass}; \ -if [ \\\$? -ne 0 ]; then cd /home/docker/take_test; /bin/bash; fi;") - - if [[ "$not_interactive" -eq 1 ]]; then - echo "Interactive docker requested at end of test." - docker_cmd=$(echo "${docker_cmd} cd /home/docker/take_test; /bin/bash ") - else - docker_cmd=$(echo "${docker_cmd} exit \\\$? ") - fi - - #echo "docker_cmd: $docker_cmd" - - echo "Starting the docker image." - echo - # -e establishes env variables in docker. - # -v mounts directories host:docker - # The GITHUB variables are used only for the private, wrf_hydro_nwm repo. - invoke_docker=$(echo \ -"docker run -it \ - -e USER=docker \ - -e GITHUB_AUTHTOKEN=$GITHUB_AUTHTOKEN \ - -e GITHUB_USERNAME=$GITHUB_USERNAME \ - -e WRF_HYDRO_TESTS_USER_SPEC=${docker_user_spec} \ -") - - if [[ ! -z $host_domain_dir ]]; then - invoke_docker=$(echo \ -"${invoke_docker} \ - -v ${host_domain_dir}:${docker_domain_dir} \ -") - fi - - invoke_docker=$(echo \ -"${invoke_docker} \ - -v ${host_spec_dir}:${docker_spec_dir} \ - -v ${this_repo}:/home/docker/${this_repo_name} \ - ${candidate_spec_mounts}") - - # May want the custom ability to mount wrf_hydro_py at some point. -# -v /Users/jamesmcc/WRF_Hydro/wrf_hydro_py:/home/docker/wrf_hydro_py \ - - if [[ ! 
-z $domain_tmp_vol ]]; then - invoke_docker=$(echo \ -"${invoke_docker} \ - --volumes-from ${domain_tmp_vol} \ -") - fi - - invoke_docker=$(echo \ -"${invoke_docker} \ - wrfhydro/dev:conda /bin/bash -c \"${docker_cmd}\"") - - #echo "$args_to_pass" - #echo "invoke_docker: $invoke_docker" - eval $invoke_docker - return_code=$? - - if [[ ${domain} == "wrfhydro/domains:"* ]]; then - echo "Tearing down the data container: " $(docker rm -v ${domain_tmp_vol}) - fi - - fi # Trying docker - -fi # Known machine else unknown machine - -exit $return_code diff --git a/tests/local/default_candidate_spec_cheyenne.yaml b/tests/local/default_candidate_spec_cheyenne.yaml deleted file mode 100644 index b5c8675bc..000000000 --- a/tests/local/default_candidate_spec_cheyenne.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Choices are currently 'GNU' and 'intel'. (currently case-sensitive). -compiler : 'gfort' - -# queue: None disables the scheduler. -queue : regular -wall_time : 00:02 -n_cores: - default : 2 - test : 1 - - -# Where temp repositories cloned from github shall be placed -# (in subfolders candidate/ and reference/) -repos_dir : /glade/scratch/${USER}/take_test/test_repos -test_dir : /glade/scratch/${USER}/take_test/test_dir - -candidate_repo: - fork : - commitish : - # The path to the repo from whence take_test was called. - local_path : '{this_repo_path}' - -reference_repo: - fork : NCAR/wrf_hydro_nwm_public - commitish : master - local_path : - -wrf_hydro_tests: - user_spec : - machine_spec : - diff --git a/tests/local/default_candidate_spec_docker.yaml b/tests/local/default_candidate_spec_docker.yaml deleted file mode 100644 index 1493ec394..000000000 --- a/tests/local/default_candidate_spec_docker.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Choices are currently 'GNU' and 'intel'. (currently case-sensitive). -compiler : 'gfort' - -# queue: None disables the scheduler. -queue : -wall_time : - -n_cores: - default : 2 - test : 1 - - -# Where temp repositories cloned from github shall be placed -# (in subfolders candidate/ and reference/) -repos_dir : /home/docker/take_test/test_repos -test_dir : /home/docker/take_test/test_dir - -candidate_repo: - fork : - commitish : - # The path to the repo from whence take_test was called. 
- local_path : '{this_repo_path}' - -reference_repo: - fork : NCAR/wrf_hydro_nwm_public - commitish : master - local_path : - -wrf_hydro_tests: - user_spec : - machine_spec : diff --git a/tests/local/gdrive_download.py b/tests/local/gdrive_download.py new file mode 100644 index 000000000..e92c19752 --- /dev/null +++ b/tests/local/gdrive_download.py @@ -0,0 +1,51 @@ +import requests +from argparse import ArgumentParser + +def download_file_from_google_drive(id, destination): + URL = "https://docs.google.com/uc?export=download" + + session = requests.Session() + + response = session.get(URL, params = { 'id' : id }, stream = True) + token = get_confirm_token(response) + + if token: + params = { 'id' : id, 'confirm' : token } + response = session.get(URL, params = params, stream = True) + + save_response_content(response, destination) + +def get_confirm_token(response): + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + + return None + +def save_response_content(response, destination): + CHUNK_SIZE = 32768 + + with open(destination, "wb") as f: + for chunk in response.iter_content(CHUNK_SIZE): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + +def main(): + + parser = ArgumentParser() + parser.add_argument("--file_id", + dest="file_id", + help="Google drive file ID. Get from shareable link") + parser.add_argument("--dest_file", + dest="dest_file", + help="Full path including filename for downloaded file.") + + args = parser.parse_args() + file_id = args.file_id + dest_file = args.dest_file + + download_file_from_google_drive(file_id, dest_file) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/local/machine_spec.yaml b/tests/local/machine_spec.yaml deleted file mode 100644 index c1e9bcb0a..000000000 --- a/tests/local/machine_spec.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# wrf_hydro_tests: machine configuration file. -# Purpose: Log all the static information for each machine in this file. This file -# is sourced after the candidate specification file and my rely on -# variables defined therein. -cheyenne: - - modules: - base : nco/4.6.2 python/3.6.2 - ifort: intel/16.0.3 ncarenv/1.2 ncarcompilers/0.4.1 mpt/2.15f netcdf/4.4.1 - gfort: gnu/7.1.0 ncarenv/1.2 ncarcompilers/0.4.1 mpt/2.15 netcdf/4.4.1.1 - -# This should Not be necessary. -# netcdf: -# intel: /glade/u/apps/ch/opt/netcdf/4.4.1/intel/16.0.1 -# GNU : /glade/u/apps/ch/opt/netcdf/4.4.1.1/gnu/7.1.0 - - scheduler: - name: "PBS" - max_walltime: '12:00' - - cores_per_node: 36 - - # The executable invocation on the machine - exe_cmd: - PBS : 'mpiexec_mpt ./wrf_hydro.exe' - default: 'mpirun -np {nproc} ./wrf_hydro.exe' - -docker: - modules: - scheduler: - cores_per_node: - exe_cmd: - default: 'mpirun -ppn {nproc} ./wrf_hydro.exe' diff --git a/tests/local/releaseapi.py b/tests/local/releaseapi.py new file mode 100644 index 000000000..fc8a05b45 --- /dev/null +++ b/tests/local/releaseapi.py @@ -0,0 +1,61 @@ +import urllib.request +import json +from argparse import ArgumentParser + +def get_release_asset(download_dir: str, + repo_name: str, + tag: str, + asset_name: str = 'testcase'): + """Function to download an asset from a specified public github repository + Args: + download_dir: The local directory to hold downloaded assets + repo_name: The repository name, e.g. NCAR/wrf_hydro_nwm_public + tag: The release tag, e.g. 
v5.0.1
+        asset_name: The name of the asset, can be partial
+    """
+    url = "https://api.github.com/repos/" + repo_name + "/releases/tags/" + tag
+
+    # Get json data from the api as a string
+    fp = urllib.request.urlopen(url)
+    json_string = fp.read().decode("utf8")
+    fp.close()
+
+    # load into a list of dicts
+    asset_list = json.loads(json_string)['assets']
+
+    # Iterate over assets and find the asset url matching the requested asset name
+    for asset in asset_list:
+        if asset_name in asset['name']:
+            asset_url = asset['browser_download_url']
+            full_asset_name = asset['name']
+
+    # download the asset
+    print('downloading asset ' + full_asset_name + ' to ' + download_dir)
+    download_filepath = str(download_dir) + '/' + full_asset_name
+    urllib.request.urlretrieve(asset_url, download_filepath)
+
+def main():
+    parser = ArgumentParser()
+    parser.add_argument("--download_dir",
+                        dest="download_dir",
+                        help="The local directory to hold downloaded assets")
+    parser.add_argument("--repo_name",
+                        dest="repo_name",
+                        help="The repository name, e.g. NCAR/wrf_hydro_nwm_public")
+    parser.add_argument("--tag",
+                        dest="tag",
+                        help="The release tag, e.g. v5.0.1")
+    parser.add_argument("--asset_name",
+                        dest="asset_name",
+                        default='testcase',
+                        help="The name of the asset, can be partial")
+
+    args = parser.parse_args()
+
+    get_release_asset(download_dir=args.download_dir,
+                      repo_name=args.repo_name,
+                      tag=args.tag,
+                      asset_name=args.asset_name)

+if __name__ == '__main__':
+    main()
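A quick way to exercise the new module by hand, using only the options defined in its argparse setup above (the tag and download directory are placeholder values):

    python releaseapi.py \
        --download_dir /tmp/assets \
        --repo_name NCAR/wrf_hydro_nwm_public \
        --tag v5.0.1 \
        --asset_name testcase
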
diff --git a/tests/local/run_tests.py b/tests/local/run_tests.py
new file mode 100644
index 000000000..fbb622149
--- /dev/null
+++ b/tests/local/run_tests.py
@@ -0,0 +1,210 @@
+import subprocess
+import socket
+import getpass
+
+import pathlib
+from argparse import ArgumentParser
+import shutil
+
+from releaseapi import get_release_asset
+from gdrive_download import download_file_from_google_drive
+
+def run_tests(config: str,
+              compiler: str,
+              domain_dir: str,
+              candidate_dir: str,
+              reference_dir: str,
+              output_dir: str,
+              ncores: int = 72,
+              nnodes: int = 2,
+              account: str = 'NRAL0017'):
+    """Function to run wrf_hydro_nwm pytests
+    Args:
+        config: The config(s) to run, must be listed in hydro_namelist.json keys,
+            e.g. nwm_ana gridded
+        compiler: The compiler to use, options are 'intel' or 'gfort'
+        domain_dir: The domain directory to use
+        candidate_dir: The wrf-hydro candidate code directory to use, e.g. wrf_hydro_nwm_public
+        reference_dir: The wrf-hydro reference code directory to use, e.g. wrf_hydro_nwm_public
+        output_dir: The directory to hold test outputs
+        ncores: Optional. The number of cores to use if running on cheyenne
+        nnodes: Optional. The number of nodes to use if running on cheyenne
+        account: Optional. The account number to use if running on cheyenne
+    """
+
+    # Pytest wants the actual source code directory, not the top level repo directory
+    candidate_source_dir = candidate_dir + '/trunk/NDHMS'
+    reference_source_dir = reference_dir + '/trunk/NDHMS'
+
+    pytest_cmd = "pytest -v --ignore=local"
+    pytest_cmd += " --config " + config.lower()
+    pytest_cmd += " --compiler " + compiler.lower()
+    pytest_cmd += " --domain_dir " + domain_dir
+    pytest_cmd += " --candidate_dir " + candidate_source_dir
+    pytest_cmd += " --reference_dir " + reference_source_dir
+    pytest_cmd += " --output_dir " + output_dir
+
+    # Get hostname to add scheduler options if running on cheyenne
+    if 'cheyenne' in socket.gethostname():
+        pytest_cmd += " --scheduler pbscheyenne"
+        pytest_cmd += " --ncores " + str(ncores)
+        pytest_cmd += " --nnodes " + str(nnodes)  # forward the node count consumed by conftest
+        pytest_cmd += " --account " + account
+
+    tests = subprocess.run(pytest_cmd, shell=True, cwd=candidate_dir)
+
+    return tests
+
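+# NOTE (illustrative only; paths and values are hypothetical): on cheyenne the
+# command assembled by run_tests() looks roughly like
+#   pytest -v --ignore=local --config nwm_ana --compiler gfort \
+#       --domain_dir /p/example_case --candidate_dir /p/candidate/trunk/NDHMS \
+#       --reference_dir /p/reference/trunk/NDHMS --output_dir /p/out \
+#       --scheduler pbscheyenne --ncores 72 --nnodes 2 --account NRAL0017
+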
+def main():
+    parser = ArgumentParser()
+
+    parser.add_argument("--config",
+                        required=True,
+                        nargs='+',
+                        help="The configuration(s) to test, "
+                             "must be listed in hydro_namelist.json keys.")
+
+    parser.add_argument('--compiler',
+                        required=True,
+                        help='The compiler to use, options are intel or gfort')
+
+    parser.add_argument('--output_dir',
+                        required=True,
+                        help='The test output directory')
+
+    parser.add_argument('--candidate_dir',
+                        required=True,
+                        help='The candidate model directory')
+
+    parser.add_argument('--reference_dir',
+                        required=True,
+                        help='The reference model directory')
+
+    parser.add_argument('--domain_dir',
+                        required=True,
+                        help='The domain directory')
+
+    parser.add_argument("--domain_tag",
+                        required=False,
+                        help="The release tag of the domain to retrieve, e.g. v5.0.1 or dev. If "
+                             "specified, a small test domain will be retrieved and placed in the "
+                             "specified output_dir and used for the testing domain")
+    # Optional args:
+    parser.add_argument('--ncores',
+                        default='2',
+                        required=False,
+                        help='Number of cores to use for testing')
+
+    parser.add_argument('--nnodes',
+                        default='2',
+                        required=False,
+                        help='Number of nodes to use for testing if running on scheduler')
+
+    parser.add_argument('--scheduler',
+                        required=False,
+                        help='Scheduler to use for testing, options are PBSCheyenne or do not '
+                             'specify for no scheduler')
+
+    parser.add_argument('--account',
+                        default='NRAL0017',
+                        required=False,
+                        action='store',
+                        help='Account number to use if using a scheduler.')
+
+    args = parser.parse_args()
+
+    # Make all directories pathlib objects
+    output_dir = pathlib.Path(args.output_dir)
+    candidate_dir = pathlib.Path(args.candidate_dir)
+    reference_dir = pathlib.Path(args.reference_dir)
+    domain_dir = pathlib.Path(args.domain_dir)
+
+    # Get other args
+    config = args.config
+    compiler = args.compiler
+    domain_tag = args.domain_tag
+    ncores = args.ncores
+    nnodes = args.nnodes
+    scheduler = args.scheduler
+    account = args.account
+
+    # Make output dir if it does not exist
+    if output_dir.is_dir():
+        raise IsADirectoryError('Output directory ' + str(output_dir) + ' already exists')
+    else:
+        output_dir.mkdir(parents=True)
+
+    # Get the domain if asked for
+    if domain_tag is not None:
+        # Reset domain dir to be the downloaded domain in the output dir
+        domain_dir = output_dir.joinpath('example_case')
+
+        if domain_tag == 'dev':
+            file_id = '1EHgWeM8k2-Y3jNMLri6C0u_fIUQIonO_'
+            download_file_from_google_drive(file_id, str(output_dir.joinpath(
+                'gdrive_testcase.tar.gz')))
+
+            # untar the test case
+            untar_cmd = 'tar -xf *testcase*.tar.gz'
+            subprocess.run(untar_cmd,
+                           shell=True,
+                           cwd=str(output_dir))
+        else:
+            get_release_asset(download_dir=str(output_dir),
+                              repo_name='NCAR/wrf_hydro_nwm_public',
+                              tag=domain_tag,
+                              asset_name='testcase')
+            # untar the test case
+            untar_cmd = 'tar -xf *testcase*.tar.gz'
+            subprocess.run(untar_cmd,
+                           shell=True,
+                           cwd=str(output_dir))
+
+    ## Make copy paths
+    candidate_copy = output_dir.joinpath(candidate_dir.name + '_can_pytest')
+    reference_copy = output_dir.joinpath(reference_dir.name + '_ref_pytest')
+
+    ## Remove if exist and make if not
+    if candidate_copy.is_dir():
+        shutil.rmtree(str(candidate_copy))
+    if reference_copy.is_dir():
+        shutil.rmtree(str(reference_copy))
+
+    ## copy directories to avoid polluting user source code directories
+    shutil.copytree(str(candidate_dir), str(candidate_copy), symlinks=True)
+    shutil.copytree(str(reference_dir), str(reference_copy), symlinks=True)
+
+    # run pytest for each supplied config
+    has_failure = False
+    for config in args.config:
+        print('\n\n############################')
+        print('### TESTING ' + config + ' ###')
+        print('############################\n\n', flush=True)
+
+        # Run the tests against the copies so the user source trees stay untouched
+        test_result = run_tests(config=config,
+                                compiler=compiler,
+                                domain_dir=str(domain_dir),
+                                candidate_dir=str(candidate_copy),
+                                reference_dir=str(reference_copy),
+                                output_dir=str(output_dir),
+                                ncores=ncores,
+                                nnodes=nnodes,
+                                account=account)
+        if test_result.returncode != 0:
+            has_failure = True
+
+    # Exit with 1 if failure
+    if has_failure:
+        print('\n\n############################')
+        print('### FAILED ###')
+        print('############################\n\n', flush=True)
+        exit(1)
+    else:
+        print('\n\n############################')
+        print('### PASSED ###')
+        print('############################\n\n', flush=True)
+        exit(0)
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/tests/local/take_test.py b/tests/local/take_test.py
deleted file mode 100644
index 558a47848..000000000
--- a/tests/local/take_test.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# take_test.py :
-# A python interface to wrf_hydro_nwm and wrfhydro_nwm_public testing
-# Purpose:
-# Set up all the desired prerequisites for testing and call pytest.
-
-# #######################################################
-# Basic orientation
-import os
-this_script = __file__
-this_script_path = os.path.dirname(os.path.realpath(this_script))
-this_repo_path = os.path.dirname(os.path.realpath(this_script_path))
-
-import wrfhydropy
-import warnings
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore")
-    machine_name = wrfhydropy.core.job_tools.get_machine()
-
-
-# ######################################################
-# Agruments
-# Do this up front for speed when help is requested.
-import argparse
-
-parser = argparse.ArgumentParser(
-    description='A WRF-Hydro candidate takes a test.'
-)
-
-parser.add_argument(
-    '--domain',
-    metavar='/path/to/domain/directory',
-    help='Path to the domain directory.',
-    default=None
-)
-
-parser.add_argument(
-    '--candidate_spec_file',
-    metavar='path/to/candidate_spec_file',
-    type=str,
-    help='The YAML candidate specification file.',
-    default=this_script_path + '/default_candidate_spec_' + machine_name + '.yaml'
-)
-
-# For the following, the defaults of None trigger the defaults set in conftest.
-
-parser.add_argument(
-    '--config',
-    nargs='*',
-    metavar='key',
-    help=('Zero or more keys separated by whitespace for model configuration selection ' +
-          '(no keys runs all configurations).'),
-    default = None # None is handled as all by pytest currently.
-) - -parser.add_argument( - '--test_spec', - nargs='*', - metavar='key', - help=('Zero or more keys separated by whitespace for specifying the desired tests. ' + - 'See pytest -k for details on keys including logical relations (note different ' + - 'format here).'), - default=None -) - -parser.add_argument( - '-i', - action='store_true', - help=('Keep output (for browsing) even when all tests successful.'), - default=None -) - - -args = parser.parse_args() -candidate_spec_file = args.candidate_spec_file -domain = args.domain - -config= args.config - -test_spec = args.test_spec -if test_spec is not None: - if type(test_spec) is list: - test_spec = ' '.join(test_spec) - -interactive = args.i - - -# ####################################################### -# Rest of the imports now. -import code -import copy -import json -import logging -import pathlib -from pprint import pprint, pformat -import pytest -import shutil -import sys - -sys.path.insert(0, this_script_path+'/toolbox/') -from color_logs import log -from establish_repo import * -from establish_specs import * -from log_boilerplate import log_boilerplate - - -# ###################################################### -# Preamble/overview/Help/docstring. - -if domain is None: - if machine_name is 'cheyenne': - domain = '/glade/p/work/jamesmcc/domains/public/croton_NY' - else : - domain = '/home/docker/domain/croton_NY' - -check = pathlib.PosixPath(domain).exists() - -# ###################################################### -# Logging setup. -# Right now handling the logging through the two-layer bash system. -# It's currently opaque to me how to get the pytest output into -# this kind of log. -log.setLevel(logging.DEBUG) - -stdout = logging.StreamHandler() -stdout.setLevel(logging.DEBUG) -log.addHandler(stdout) - -# log_file = "take_test.log" -# log_file_handler = logging.FileHandler(log_file, mode='w') -# log_file_handler.setLevel(logging.DEBUG) -# log.addHandler(log_file_handler) - -horiz_bar = '=================================================================' -log.info(horiz_bar) -log.info("*** take_test.py: A wrf_hydro candidate takes a test. ***") -log.debug('') - -# ###################################################### -# Specification files to dictionaries. -log.info(horiz_bar ) -log.info( "Setup the specifictions (specs):") - -env_vars = os.environ.copy() - -candidate_spec = establish_candidate(candidate_spec_file) -# The default candidate path is solved here based on the this_script_path. 
-if not pathlib.PosixPath(candidate_spec['candidate_repo']['local_path']).exists(): - candidate_spec['candidate_repo']['local_path'] = \ - candidate_spec['candidate_repo']['local_path'].format( - **{'this_repo_path': this_repo_path} - ) - -user_spec = establish_user_spec(candidate_spec, env_vars) -candidate_spec['machine_spec_file'] = this_script_path + '/machine_spec.yaml' -machine_spec = establish_machine_spec(candidate_spec, user_spec, env_vars) -log.debug('') - -# ################################# -if pathlib.PosixPath(candidate_spec['test_dir']).exists(): - raise FileExistsError("Exiting: the testing run directory already exists " + - candidate_spec['test_dir']) - -# ###################################################### -# Log boilerplate info - -candidate_spec['config'] = config -candidate_spec['domain'] = domain - -log.info(horiz_bar ) -log.info("Boilerplate:") -log_boilerplate(candidate_spec, user_spec, env_vars, horiz_bar, this_script_path) -log.debug('') - - -# ###################################################### -# Repos setup -log.info(horiz_bar ) -log.info("Establish repositories:") -establish_repo('candidate_repo', candidate_spec, user_spec) -establish_repo('reference_repo', candidate_spec, user_spec) -log.debug('') - -# Can I catch at this point: -# if a path, is the repo on a case-sensitive dir? -# if not copy and edit (this is in docker). - -# ################################### -log.info(horiz_bar ) -log.info("Establish jobs and scheduler:") - -if machine_name == 'docker': - default_scheduler = None -else: - if candidate_spec['queue'] is None or candidate_spec['queue'] == 'None': - default_scheduler = None - else: - default_scheduler = wrfhydropy.Scheduler( - job_name='default', - account=user_spec['PBS']['account'], - walltime=candidate_spec['wall_time'], - queue=candidate_spec['queue'], - nproc=candidate_spec['n_cores']['default'], - ppn=machine_spec[machine_name]['cores_per_node'] - ).__dict__ - -default_scheduler = json.dumps(default_scheduler) -job_default = wrfhydropy.Job(nproc=candidate_spec['n_cores']['default']) -job_ncores=copy.deepcopy(job_default) -job_ncores.nproc=candidate_spec['n_cores']['test'] -job_default = json.dumps(job_default.__dict__) -job_ncores = json.dumps(job_ncores.__dict__) - -log.debug('') -# ################################### -log.info(horiz_bar) -log.info("Calling pytest:") - -pytest_cmd = [ - #'--pdb', # for debugging the tests. May make this an option in the future... 
- '-v', - '--color', 'yes', - '--rootdir', str(candidate_spec['candidate_repo']['local_path']) + '/tests/' , - '--ignore', 'take_test.py', - '--ignore', 'toolbox/', - '--ignore', 'run_travis_yml_locally.py', - '--compiler', candidate_spec['compiler'], - '--domain_dir', domain, - '--output_dir', candidate_spec['test_dir'], - '--candidate_dir', str(candidate_spec['candidate_repo']['local_path']) + '/trunk/NDHMS', - '--reference_dir', str(candidate_spec['reference_repo']['local_path']) + '/trunk/NDHMS', - '--job_default', job_default, - '--job_ncores', job_ncores, - '--scheduler', default_scheduler -] - -if config is not None: - pytest_cmd = pytest_cmd + [ '--config' ] + config - -if test_spec is not None: - pytest_cmd = pytest_cmd + ['-k'] + [test_spec] - -log.debug('') -log.info('with arguments:') -pprint(pytest_cmd) -log.debug('') -log.debug('') - -pytest_return = pytest.main(pytest_cmd) - -log.debug('') - -# ###################################################### -# Tear down if success -log.info('=================================================================') -if pytest_return == 0: - if interactive: - log.info('All tests successful but -i leaves files for interactive browsing.') - else: - log.info('All tests successful: tear down test.') - log.debug('') - if pathlib.PosixPath(candidate_spec['repos_dir']).exists(): - shutil.rmtree(candidate_spec['repos_dir']) - shutil.rmtree(candidate_spec['test_dir']) - log.debug('') -else: - if pathlib.PosixPath(candidate_spec['repos_dir']).exists(): - log.info('Some tests failed: leaving tests and repos in:') - log.info('Repos: ' + candidate_spec['repos_dir']) - else: - log.info('Some tests failed: leaving tests in:') - - log.info('Tests: ' + candidate_spec['test_dir']) - log.debug('') - - -# ###################################################### -# Echo specs to log files -log.info('=================================================================') -log.info('*** take_test.py: Finished. ***') -log.debug('') -log.debug('Writing working specifications to ' + - this_script_path +'/take_test.log') -log.debug('') - - -log_file = this_script_path + "/take_test.log" -log_file_handler = logging.FileHandler(log_file, mode='a') -log_file_handler.setLevel(logging.DEBUG) -log.addHandler(log_file_handler) - -# Kill the 'stdout' handler. -log.removeHandler(stdout) - -log.info('*****************************************************************') -log.debug('') - -# Protect the authtoken from printing to log files. -if not user_spec['github']['authtoken'] is None: - user_spec['github']['authtoken'] = '*************************' - -def log_spec(spec, name): - log.info(horiz_bar) - log.info(name+' spec: ') - log.debug(pformat(spec)) - log.debug('') - -all_specs = { 'Candidate': candidate_spec, - 'User': user_spec, - 'Machine': machine_spec } - -for key, value in all_specs.items(): - log_spec(value, key) - - -sys.exit(pytest_return) diff --git a/tests/local/take_test.sh b/tests/local/take_test.sh deleted file mode 100755 index 5351ce097..000000000 --- a/tests/local/take_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# This is a wrapper on take_test_core.sh which handles the logging. - -log_file=take_test.log - -./core_take_test.sh "${@}" 2>&1 | tee $log_file - -## The following is how you get a return status in spite of tee. 
-exitValue=${PIPESTATUS[0]} - -exit $exitValue diff --git a/tests/local/template_candidate_spec.yaml b/tests/local/template_candidate_spec.yaml deleted file mode 100644 index 3a73a4a5d..000000000 --- a/tests/local/template_candidate_spec.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Choices are currently 'GNU' and 'intel'. (currently case-sensitive). -compiler : 'gfort' -queue : 'regular' -wall_time : 00:01 -n_cores: - default : 2 - test : 1 - - -# Where temp repositories cloned from github shall be placed -# (in subfolders candidate/ and reference/) -repos_dir : /home/docker/test_repos - -test_dir : /home/docker/take_test - -candidate_repo: - # Default = ${GITHUB_USERNAME}/wrf_hydro_nwm - fork : NCAR/wrf_hydro_nwm - commitish : master - # --- OR --- - # A path on local machine where the current state of the repo (potentially uncommitted) - # is compiled. This supercedes BOTH candidateFork and candidateBranchCommit if set. - local_path : /Users/jamesmcc/WRF_Hydro/wrf_hydro_nwm_public - -reference_repo: - # Default = ${GITHUB_USERNAME}/wrf_hydro_nwm - fork : ${GITHUB_USERNAME}/wrf_hydro_nwm - commitish : master - # --- OR --- - # A path on local machine where the current state of the repo (potentially uncommitted) - # is compiled. This supercedes BOTH referenceFork and referenceBranchCommit if set. - local_path : /Users/jamesmcc/WRF_Hydro/wrf_hydro_nwm_public_test_copy - -wrf_hydro_tests: -# * User spec file path * -# Default (if not set) = ~/.wrf_hydro_tests_user_spec.sh -# We recommend using the default by leaving blank. If using an -# alternative location, then variable consists of the path/file -# to the file. - user_spec : -# * Machine spec file path * -# Default (if not set) = {wrf_hydro_tests_dir}/machine_spec.sh -# Where wrf_hydro_test_dir is set in the user_spec. -# We recommend using the default by leaving blank. If using an -# alternative location, then variable consists of the path/file -# to the file. - machine_spec : - diff --git a/tests/local/template_user_spec.yaml b/tests/local/template_user_spec.yaml deleted file mode 100644 index 78c3dae4f..000000000 --- a/tests/local/template_user_spec.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# REQUIRED only if cloning any repositories from github. -# See wrf_hydro_tests/README.md for information and a suggestion on -# setting these. These can be inherited from the environment -github: - # Authtoken can either be a file or an env var. - authtoken: $GITHUB_AUTHTOKEN - authtoken_file: ~/.github_authtoken - username: jmccreight - # Note: setting ssh_priv_key overrides using authtoken. - # No value results in None. - ssh_priv_key: - #/Users/jamesmcc/.ssh/id_rsa_chimayo_ucar - -# Your PBS header fields. Edit to your needs Do NOT include others here. -# These appear commented, but PBS looks for lines beginning with #PBS -PBS: - email: - when: abe - who: $USER@ucar.edu - account: NRAL0017 - - -# ###################################################### -# Overrides to machine_spec.sh -# Place any environment variables you would like modified -# compared to the machine_spec.sh file. 
-machine_spec_overrides: diff --git a/tests/local/testing_readme.txt b/tests/local/testing_readme.txt deleted file mode 100644 index 2002a12a4..000000000 --- a/tests/local/testing_readme.txt +++ /dev/null @@ -1,30 +0,0 @@ -# To run the tests included in the wrf_hydro_nwm_public/tests directory: - -# Requirements: -All requirements for WRF-Hydro -Docker with the following docker images: - wrfhydro/dev:conda with wrfhydropy=0.0.2 - wrfhydro/domains:croton_NY - -# Commands to run testing -## Get docker images -docker pull wrfhydro/dev:conda -docker pull wrfhydro/domains:croton_NY - -## Create data volume -docker create --name croton_NY wrfhydro/domains:croton_NY - -## Start docker -docker run --volumes-from croton_NY -v YOUR_WRF_HYDRO_NWM_CODE_DIRECTORY:/home/docker/wrf_hydro_nwm_public -it wrfhydro/dev:conda - -### Running inside of docker issue the following commands -#### Install correct version of wrfhydropy -pip uninstall -y wrfhydropy -pip install wrfhydropy=0.0.2 - -#### Change directory to wrf_hydro_nwm and run tests -cd /home/docker/wrf_hydro_nwm_public -pytest -v --domain_dir=/home/docker/domain/croton_NY - --candidate_dir=YOUR_REFERENCE_CODE_DIRECTORY/trunk/NDHMS - --reference_dir=YOUR_CANDIDATE_CODE_DIRECTORY/trunk/NDHMS - --output_dir=/home/docker/test_out" diff --git a/tests/test_supp_1_channel_only.py b/tests/test_supp_1_channel_only.py new file mode 100644 index 000000000..45b518d4d --- /dev/null +++ b/tests/test_supp_1_channel_only.py @@ -0,0 +1,310 @@ +def test_order(): + pass + +# # ################################# +# # Channel-only tests below: +# +# # Channel-only Run +# def test_run_candidate_channel_only( +# candidate_setup, +# candidate_channel_only_setup, +# output_dir, +# job_default, +# scheduler, +# capsys +# ): +# +# with capsys.disabled(): +# print("\nQuestion: The candidate channel-only mode runs successfully?", end='') +# +# # Dont recompile the model, just use the candidate's model. +# candidate_channel_only_setup.model = candidate_setup.model +# +# # Set the forcing directory +# candidate_channel_only_setup.namelist_hrldas['noahlsm_offline']['indir'] = \ +# str(output_dir / 'run_candidate') +# +# # Set run directory +# run_dir = output_dir / 'run_candidate_channel_only' +# +# candidate_channel_only_job = job_default +# candidate_channel_only_job.scheduler = scheduler +# +# # Run +# candidate_channel_only_run = wrfhydropy.WrfHydroRun( +# wrf_hydro_setup=candidate_channel_only_setup, +# run_dir=run_dir, +# jobs=candidate_channel_only_job +# ) +# check_run_dir = candidate_channel_only_run.run_jobs() +# +# if scheduler is not None: +# # This function waits for the completed run. +# candidate_channel_only_run = \ +# wrfhydropy.job_tools.restore_completed_scheduled_job(check_run_dir) +# +# # Check subprocess and model run status +# assert candidate_channel_only_run.jobs_completed[0].exit_status == 0, \ +# "Candidate code run exited with non-zero status" +# assert candidate_channel_only_run.jobs_completed[0].job_status == 'completed success', \ +# "Candidate code run did not complete" +# +# +# # Channel-only matches full-model? 
+# def test_channel_only_matches_full( +# candidate_channel_only_setup, +# output_dir, +# capsys +# ): +# +# if candidate_channel_only_setup is None: +# pytest.skip("Unsupported configuration for channel-only.") +# +# with capsys.disabled(): +# print("\nQuestion: The candidate channel-only run restarts and CHRTOUT files match " + +# "those of the full model?", +# end="") +# +# # Check for existence of run objects +# candidate_run_file = output_dir / 'run_candidate' / 'WrfHydroRun.pkl' +# candidate_channel_only_run_file = output_dir / 'run_candidate_channel_only' / 'WrfHydroRun.pkl' +# +# if candidate_run_file.is_file() is False: +# pytest.skip('Candidate run object not found, skipping test') +# if candidate_channel_only_run_file.is_file() is False: +# pytest.skip('candidate_channel_only run object not found, skipping test') +# +# # Load run objects +# candidate_run_expected = pickle.load(open(candidate_run_file,"rb")) +# candidate_channel_only_run_expected = pickle.load(open(candidate_channel_only_run_file,"rb")) +# +# exclude_vars = [ +# 'stc1', +# 'smc1', +# 'sh2ox1', +# 'stc2', +# 'smc2', +# 'sh2ox2', +# 'stc3', +# 'smc3', +# 'sh2ox3', +# 'stc4', +# 'smc4', +# 'sh2ox4', +# 'infxsrt', +# 'soldrain', +# 'sfcheadrt', +# 'QBDRYRT', +# 'infxswgt', +# 'sfcheadsubrt', +# 'sh2owgt1', +# 'sh2owgt2', +# 'sh2owgt3', +# 'sh2owgt4', +# 'qstrmvolrt', +# 'hlink', +# 'lake_inflort' +# ] +# +# # We still compare these: +# # 'qlink1' +# # 'qlink2' +# # 'resht' +# # 'qlakeo' +# # 'z_gwsubbas' +# +# # Dont compare metadata in this case, there are different dimensions +# # in the files that always result in a return code of 1. +# nccmp_options = ['--data', '--force', '--quiet'] #, '--metadata'] +# +# # Check diffs +# regression_diffs = wrfhydropy.RestartDiffs( +# candidate_run_expected, +# candidate_channel_only_run_expected, +# nccmp_options=nccmp_options, +# exclude_vars=exclude_vars +# ) +# +# # Check hydro restarts +# for diff in regression_diffs.hydro: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff is None, \ +# "Candidate channel-only hydro restart files do not match full restart files" +# +# # Check nudging restarts +# for diff in regression_diffs.nudging: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff is None, \ +# "Candidate channel-only nudging restart files do not match full restart files" +# +# +# # Channel-only ncores question +# def test_ncores_candidate_channel_only( +# candidate_channel_only_setup, +# output_dir, +# job_ncores, +# scheduler, +# capsys +# ): +# +# if candidate_channel_only_setup is None: +# pytest.skip("unsupported configuration") +# +# with capsys.disabled(): +# print("\nQuestion: The candidate_channel-only restarts from a 1 core run match restarts from standard run?", +# end='') +# +# candidate_channel_only_run_file = output_dir / 'run_candidate_channel_only' / 'WrfHydroRun.pkl' +# if candidate_channel_only_run_file.is_file() is False: +# pytest.skip('candidate_channel_only run object not found, skipping test.') +# +# # Load initial run model object +# candidate_channel_only_run_expected = pickle.load(open(candidate_channel_only_run_file, "rb")) +# # Set run directory +# run_dir = output_dir.joinpath('ncores_candidate_channel_only') +# +# candidate_channel_only_ncores_job = job_ncores +# candidate_channel_only_ncores_job.scheduler = scheduler +# +# # Run +# candidate_channel_only_ncores_run = wrfhydropy.WrfHydroRun( +# wrf_hydro_setup=candidate_channel_only_setup, +# run_dir=run_dir, +# 
jobs=candidate_channel_only_ncores_job +# ) +# check_run_dir = candidate_channel_only_ncores_run.run_jobs() +# +# if scheduler is not None: +# candidate_channel_only_ncores_run = wrfhydropy.job_tools.restore_completed_scheduled_job(check_run_dir) +# +# #Check against initial run +# ncores_restart_diffs = wrfhydropy.RestartDiffs( +# candidate_channel_only_ncores_run, +# candidate_channel_only_run_expected +# ) +# +# ## Check hydro restarts +# for diff in ncores_restart_diffs.hydro: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff == None, "candidate_channel-only hydro restart files do not match when run with different number of cores" +# +# ## Check nudging restarts +# for diff in ncores_restart_diffs.nudging: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff == None, "candidate_channel-only nudging restart files do not match when run with different number of cores" +# +# +# # Channel-only perfect restarts question +# def test_perfrestart_candidate_channel_only( +# candidate_channel_only_setup, +# output_dir, +# job_default, +# scheduler, +# capsys +# ): +# +# if candidate_channel_only_setup is None: +# pytest.skip("unsupported configuration") +# +# with capsys.disabled(): +# print("\nQuestion: The candidate_channel_only restarts from a restart run match the restarts from standard run?", +# end='') +# +# candidate_channel_only_run_file = output_dir / 'run_candidate_channel_only' / 'WrfHydroRun.pkl' +# if candidate_channel_only_run_file.is_file() is False: +# pytest.skip('candidate_channel_only run object not found, skipping test') +# +# # Load initial run model object +# candidate_channel_only_run_expected = \ +# pickle.load(open(output_dir / 'run_candidate_channel_only' / 'WrfHydroRun.pkl', "rb")) +# +# #Make deep copy since changing namelist optoins +# perfrestart_setup = copy.deepcopy(candidate_channel_only_setup) +# +# # Set run directory +# run_dir = output_dir / 'restart_candidate_channel_only' +# +# # Establish the run (run after setting external files) +# candidate_channel_only_perfrestart_job = job_default +# # TODO(JLM): edit scheduler names +# candidate_channel_only_perfrestart_job.scheduler = scheduler +# +# # Add the jobs after determining the restart time. +# candidate_channel_only_perfrestart_run = wrfhydropy.WrfHydroRun( +# wrf_hydro_setup=perfrestart_setup, +# run_dir=run_dir, +# mode='r' +# ) +# +# # Symlink restarts files to new directory and modify namelistrestart files +# # Hydro +# hydro_rst = candidate_channel_only_run_expected.restart_hydro[0] +# new_hydro_rst_path = run_dir.joinpath(hydro_rst.name) +# new_hydro_rst_path.unlink() +# new_hydro_rst_path.symlink_to(hydro_rst) +# +# perfrestart_setup.hydro_namelist['hydro_nlist'].update( +# {'restart_file': str(new_hydro_rst_path)}) +# +# # Nudging +# if candidate_channel_only_run_expected.restart_nudging is not None and \ +# len(candidate_channel_only_run_expected.restart_nudging) > 0: +# nudging_rst = candidate_channel_only_run_expected.restart_nudging[0] +# new_nudging_rst_path = run_dir.joinpath(nudging_rst.name) +# new_nudging_rst_path.unlink() +# new_nudging_rst_path.symlink_to(nudging_rst) +# +# perfrestart_setup.hydro_namelist['nudging_nlist'].update( +# {'nudginglastobsfile': str(run_dir.joinpath(nudging_rst.name))}) +# +# +# # Setup the restart in the run. 
+# orig_start_time, orig_end_time = wrfhydropy.job_tools.solve_model_start_end_times( +# None, +# None, +# candidate_channel_only_perfrestart_run.setup +# ) +# +# restart_dt = hydro_rst.open() +# restart_time = dt.datetime.strptime(restart_dt.Restart_Time,'%Y-%m-%d_%H:%M:%S') +# +# candidate_channel_only_perfrestart_job.model_start_time = restart_time +# candidate_channel_only_perfrestart_job.model_end_time = orig_end_time +# +# # Run +# with warnings.catch_warnings(): +# warnings.simplefilter("ignore") +# candidate_channel_only_perfrestart_run.add_jobs(candidate_channel_only_perfrestart_job) +# check_run_dir = candidate_channel_only_perfrestart_run.run_jobs() +# if scheduler is not None: +# candidate_channel_only_perfrestart_run = \ +# wrfhydropy.job_tools.restore_completed_scheduled_job(check_run_dir) +# +# #Check against initial run +# perfstart_restart_diffs = wrfhydropy.RestartDiffs( +# candidate_channel_only_perfrestart_run, +# candidate_channel_only_run_expected +# ) +# ## Check hydro restarts +# for diff in perfstart_restart_diffs.hydro: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff is None, \ +# "candidate_channel_only hydro restart files do not match when starting from a restart" +# +# ## Check nudging restarts +# for diff in perfstart_restart_diffs.nudging: +# if diff is not None: +# with capsys.disabled(): +# print(diff) +# assert diff is None, \ +# "candidate_channel_only nudging restart files do not match when starting from a restart" \ No newline at end of file diff --git a/tests/toolbox/color_logs.py b/tests/toolbox/color_logs.py deleted file mode 100644 index 441a3e7f9..000000000 --- a/tests/toolbox/color_logs.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging -from termcolor import colored - -# from https://gist.github.com/brainsik/1238935 - - -class ColorLog(object): - - colormap = dict( - info=dict(color='green', attrs=['bold']), # success messges - debug=dict(color='white'), # info - warn=dict(color='yellow', attrs=['bold']), # warning - warning=dict(color='yellow', attrs=['bold']), # warning - error=dict(color='red'), # failure - critical=dict(color='red', attrs=['bold']), # failure - ) - - def __init__(self, logger): - self._log = logger - - def __getattr__(self, name): - if name in ['debug', 'info', 'warn', 'warning', 'error', 'critical']: - return lambda s, *args: getattr(self._log, name)( - colored(s, **self.colormap[name]), *args) - - return getattr(self._log, name) - - -log = ColorLog(logging.getLogger(__name__)) diff --git a/tests/toolbox/establish_job.py b/tests/toolbox/establish_job.py deleted file mode 100644 index 876e27518..000000000 --- a/tests/toolbox/establish_job.py +++ /dev/null @@ -1,118 +0,0 @@ -import os -#from pprint import pprint -import re -import socket -import sys -import warnings - - -home = os.path.expanduser("~/") -sys.path.insert(0, home + '/WRF_Hydro/wrf_hydro_tests/toolbox/') -from establish_specs import establish_spec, establish_default_files - - -def get_job_args_from_specs( - job_name: str=None, - mode: str='r', - nnodes: int=None, - nproc: int=None, - scheduler_name: str='', - machine_spec_file: str=None, - user_spec_file: str=None, - candidate_spec_file: str=None -): - - # The candidate comes first and can replace the machine and user specs files. - # The user spec can overrides parts of the machine spec. 
- # So, importance: candidate_spec_file > user_spec_file > machine_spec_file - - # Candidate and its overrides - if candidate_spec_file: - candidate_spec = establish_spec(candidate_spec_file) - #pprint(candidate_spec) - - machine_spec_file_from_candiate = candidate_spec['wrf_hydro_tests']['machine_spec'] - if machine_spec_file_from_candiate: - machine_spec_file = machine_spec_file_from_candiate - warnings.warn("WARNING: candidate spec_file is overriding machine_spec_file with file " + - machine_spec_file) - - user_spec_file_from_candiate = candidate_spec['wrf_hydro_tests']['user_spec'] - if user_spec_file_from_candiate: - user_spec_file = user_spec_file_from_candiate - warnings.warn("WARNING: candidate spec_file is overriding user_spec_file with file " + - user_spec_file) - - default_user_file, default_machine_file = establish_default_files() - - if not user_spec_file: - user_spec_file = default_user_file - - # TODO JLM: should probably be in a try catch. - user_spec = establish_spec(user_spec_file) - - if not machine_spec_file: - machine_spec_file = default_machine_file - - # TODO JLM: should probably be in a try catch. - machine_spec = establish_spec(machine_spec_file) - - # From least to most important - spec=machine_spec - spec.update(user_spec) - spec.update(candidate_spec) - - # Extract relevant information for the scheduler - machine = socket.gethostname() - if re.search('cheyenne',machine): - machine = 'cheyenne' - - if machine not in spec.keys(): - machine = 'docker' - warnings.warn("Machine not found in the machine_spec_file.yaml, using docker.") - - if scheduler_name == '': - if spec[machine]['scheduler'] is not None: - scheduler_name = spec[machine]['scheduler']['name'] - else: - scheduler_name = None - - compiler_name = spec['compiler'] - - sched_args_dict = {} - job_args_dict = {} - # sad alias for mutable container - sad = sched_args_dict - jad = job_args_dict - - # From optional arguments - if job_name: - sad['job_name'] = job_name - if nnodes: - sad['nnodes'] = nnodes - if nproc: - sad['nproc'] = nproc - jad['nproc'] = nproc - - #jad['machine'] = machine - - # From spec files. - if scheduler_name is not None: - sad['account'] = spec[scheduler_name]['account'] - sad['email_when'] = spec[scheduler_name]['email']['when'] - sad['email_who'] = spec[scheduler_name]['email']['who'] - sad['queue'] = spec['queue'] - sad['walltime'] = spec['wall_time'] - sad['ppn'] = spec[machine]['cores_per_node'] - jad['scheduler'] = sad - jad['exe_cmd'] = spec[machine]['exe_cmd'][scheduler_name] - else: - jad['exe_cmd'] = spec[machine]['exe_cmd']['default'] - - if spec[machine]['modules'] is not None: - jad['modules'] = spec[machine]['modules'][compiler_name] - if 'base' in spec[machine]['modules'].keys(): - jad['modules'] += ' ' + spec[machine]['modules']['base'] - - - return(job_args_dict) diff --git a/tests/toolbox/establish_repo.py b/tests/toolbox/establish_repo.py deleted file mode 100644 index bab9091a9..000000000 --- a/tests/toolbox/establish_repo.py +++ /dev/null @@ -1,85 +0,0 @@ -import sys -import subprocess -from pathlib import * -from color_logs import log -from multitool import delete_dir_and_contents - -def form_authtoken_url(repo_tag, candidate_spec, user_spec): - - # Allow the authtoken to be - # 1) a file or - # 2) actually in the dictionary. - # Handle a blank authoken too. - - # Assume it's in the dictionary. - authtoken = user_spec['github']['authtoken'] - - # If authoken is not blank, check if its a file. If so, get the - # authtoken from the file. 
- if authtoken is not None and Path(authtoken).exists(): - authtoken = subprocess.run(["cat", "/Users/james/.github_authtoken"]) - authtoken = authtoken.communicate()[0].decode("utf-8") - - if authtoken is None: - auth_info = user_spec['github']['username'] - else: - auth_info = user_spec['github']['username']+':'+authtoken - - url = 'https://'+auth_info+'@github.com/'+candidate_spec[repo_tag]['fork'] - return(url) - - -def clone_repo(repo_tag, candidate_spec, user_spec, dir_for_clone): - - # If ssh_priv_key is NOT supplied, use https, else ssh. - if user_spec['github']['ssh_priv_key'] is None: - protocol = 'https' - url = form_authtoken_url(repo_tag, candidate_spec, user_spec) - else: - protocol = 'ssh' - url = 'git@github.com:'+candidate_spec[repo_tag]['fork'] - - log.debug('Cloning ' + candidate_spec[repo_tag]['fork'] + - ' into ' + str(dir_for_clone) + ' using ' + protocol + ' ...') - - process = subprocess.run(['git', 'clone', url, dir_for_clone]) - - if process.returncode != 0: - return(False) - return(True) - - -def establish_repo(repo_tag, candidate_spec, user_spec): - - repo_tag_base = repo_tag.split('_')[0] - log.debug('') - log.info(repo_tag_base.title() + ' repo') - - if candidate_spec[repo_tag]['local_path'] is None: - - # The case when local_path is not set. - candidate_spec[repo_tag]['local_path_setby'] = 'fork & commitish' - dir_for_clone = Path(candidate_spec['repos_dir'] + '/' + repo_tag_base) - #print('dir_for_clone: ',dir_for_clone) - - candidate_spec[repo_tag]['local_path'] = dir_for_clone - - if dir_for_clone.exists(): - delete_dir_and_contents(dir_for_clone) - Path.mkdir(dir_for_clone, parents=True) - - clone_repo(repo_tag, candidate_spec, user_spec, dir_for_clone) - - # check out the commitish - commitish = candidate_spec[repo_tag]['commitish'] - if commitish is None: - commitish = 'master' - - log.debug('Checking out commitish: '+commitish) - subprocess.run(['git', 'checkout', commitish], cwd=dir_for_clone) - git_log = subprocess.run(['git', 'log', '-n1'], stdout=subprocess.PIPE, cwd=dir_for_clone) - log.debug(git_log.stdout.decode('utf-8')) - - else: - - candidate_spec[repo_tag]['local_path_setby'] = 'candidate spec' diff --git a/tests/toolbox/establish_specs.py b/tests/toolbox/establish_specs.py deleted file mode 100644 index 1512dabeb..000000000 --- a/tests/toolbox/establish_specs.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import yaml -from pathlib import Path -from boltons.iterutils import remap -from color_logs import log - -# A few generic utils followed by functions for individual spec files which -# can acommodate customization. -# The user, machine, and candidate specs are very similar. The test spec is it's own thing. - -# ###################################################### -# Remapping nested values -# http://sedimental.org/remap.html - -# These visit all levels of a nested dictionary and expand ${} and ~. - - -def visit_expand(path, key, value): - if isinstance(value, str): - return key, os.path.expanduser(os.path.expandvars(value)) - return key, value - - -def remap_vars(spec_file): - return(remap(spec_file, visit_expand)) - - -# These visit all levels of a nested dictionary and transform '' to None. 
diff --git a/tests/toolbox/establish_specs.py b/tests/toolbox/establish_specs.py
deleted file mode 100644
index 1512dabeb..000000000
--- a/tests/toolbox/establish_specs.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import os
-import yaml
-from pathlib import Path
-from boltons.iterutils import remap
-from color_logs import log
-
-# A few generic utils followed by functions for the individual spec files,
-# which can accommodate customization.
-# The user, machine, and candidate specs are very similar. The test spec is its own thing.
-
-# ######################################################
-# Remapping nested values
-# http://sedimental.org/remap.html
-
-# These visit all levels of a nested dictionary and expand ${} and ~.
-
-
-def visit_expand(path, key, value):
-    if isinstance(value, str):
-        return key, os.path.expanduser(os.path.expandvars(value))
-    return key, value
-
-
-def remap_vars(spec_file):
-    return(remap(spec_file, visit_expand))
-
-
-# These visit all levels of a nested dictionary and transform '' to None.
-
-
-def visit_blanks(path, key, value):
-    if isinstance(value, str):
-        if value == '':
-            return key, None
-    return key, value
-
-
-def remap_blanks(spec_file):
-    return(remap(spec_file, visit_blanks))
-
-
-# ######################################################
-# Generic spec establishment = YAML + remap_spec
-
-def establish_spec(spec_file):
-    """Parse YAML, expand ~ and ${}, and convert '' to None.
-    """
-    with open(spec_file) as ff:
-        spec_dict = yaml.safe_load(ff)
-
-    spec = remap_vars(spec_dict)
-    spec = remap_blanks(spec)
-
-    return(spec)
-
-
-# ######################################################
-# User spec
-
-def establish_user_spec(candidate_spec, env_vars):
-    log.debug('Establish user spec.')
-
-    user_spec_file = None
-    if ('wrf_hydro_tests' in candidate_spec) and \
-       ('user_spec_file' in candidate_spec):
-        user_spec_file = candidate_spec['user_spec_file']
-        candidate_spec['user_spec_setby'] = 'candidate spec'
-
-    if user_spec_file == '' or user_spec_file is None:
-        user_spec_file = os.environ['WRF_HYDRO_TESTS_USER_SPEC']
-        candidate_spec['user_spec_setby'] = 'env var'
-
-    candidate_spec['user_spec_file'] = user_spec_file
-    # TODO JLM: indicate in the candidate_spec how the user_spec_file was set.
-    # TODO JLM: warn if the file does not exist.
-    #print('user_spec_file: ', user_spec_file)
-    user_spec = establish_spec(user_spec_file)
-
-    return(user_spec)
-
-
-# ######################################################
-# Machine spec
-
-def establish_machine_spec(candidate_spec, user_spec, env_vars):
-    log.debug('Establish machine spec.')
-
-    # TODO JLM: indicate in the candidate_spec how the machine_spec_file was set.
-    machine_spec = establish_spec(candidate_spec['machine_spec_file'])
-    candidate_spec['machine_spec_setby'] = '__file__'
-
-    # TODO JLM: The user spec is supposed to allow overrides to the machine spec.
-    # Apply overrides from user_spec here.
-    return(machine_spec)
-
-
-# ######################################################
-# Candidate spec
-
-def establish_candidate(candidate_spec_file):
-    log.debug('Establish candidate spec.')
-    candidate_spec = establish_spec(candidate_spec_file)
-    candidate_spec['candidate_spec_file'] = candidate_spec_file
-    return(candidate_spec)
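# A small self-contained example of the boltons remap pattern used above,
# assuming boltons is installed; the env var and paths are illustrative.
# The visitor is called with (path, key, value) for every node of the
# nested dict and returns the (key, value) pair to keep.
import os
from boltons.iterutils import remap

def visit_expand(path, key, value):
    if isinstance(value, str):
        return key, os.path.expanduser(os.path.expandvars(value))
    return key, value

os.environ['TEST_ROOT'] = '/glade/scratch/jdoe'
spec = {'test_dir': '${TEST_ROOT}/take_test', 'github': {'authtoken': '~/.token'}}
print(remap(spec, visit_expand))
# {'test_dir': '/glade/scratch/jdoe/take_test',
#  'github': {'authtoken': '/home/jdoe/.token'}}  (home dir varies by user)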
diff --git a/tests/toolbox/log_boilerplate.py b/tests/toolbox/log_boilerplate.py
deleted file mode 100644
index 9a5114676..000000000
--- a/tests/toolbox/log_boilerplate.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import subprocess
-import os
-from datetime import datetime
-from color_logs import log
-
-def log_boilerplate(candidate_spec, user_spec, env_vars, horiz_bar, script_path):
-
-    log.debug("Date                  : " + datetime.now().strftime('%Y %h %d %H:%M:%S %Z'))
-
-    if 'USER' not in env_vars:
-        user = subprocess.Popen(["whoami"], stdout=subprocess.PIPE).communicate()[0]
-        env_vars['USER'] = user.decode('utf-8').replace("\n", '')
-    log.debug("User                  : " + env_vars['USER'])
-
-    if 'HOSTNAME' not in env_vars:
-        hostname = subprocess.Popen(["hostname"], stdout=subprocess.PIPE).communicate()[0]
-        env_vars['HOSTNAME'] = hostname.decode('utf-8').replace("\n", '')
-    log.debug("Machine               : " + env_vars['HOSTNAME'])
-
-    proc = subprocess.run(
-        ['git', 'rev-parse', 'HEAD'],
-        stdout=subprocess.PIPE,
-        cwd=script_path
-    )
-    the_commit = proc.stdout.decode('utf-8').split()[0]
-    log.debug("take_test.py location : " + script_path)
-    log.debug("take_test.py commit   : " + the_commit)
-    # TODO(JLM): should we check for uncommitted changes in the script_path? Just
-    # ones in the test dir?
-
-    #is_uncommitted = \
-    #    subprocess.run(['git', 'diff-index', '--quiet', 'HEAD', '--']).returncode
-    #if is_uncommitted != 0:
-    #    log.warning("There are uncommitted changes to the testing repo (" + script_path + ")")
-
-    log.debug("Domain argument       : " + str(candidate_spec['domain']))
-    log.debug("Config argument       : " + str(candidate_spec['config']))
-
-    log.debug("Tests run in          : " + candidate_spec['test_dir'])
-    log.debug("Cloned repos in       : " + candidate_spec['repos_dir'])
-
-    log.debug("Candidate spec file   : " + candidate_spec['candidate_spec_file'])
-
-    log.debug("Machine spec file     : " + candidate_spec['machine_spec_file'])
-    log.debug("Machine spec set by   : " + candidate_spec['machine_spec_setby'])
-
-    log.debug("User spec file        : " + candidate_spec['user_spec_file'])
-    log.debug("User spec set by      : " + candidate_spec['user_spec_setby'])
-
-    #log.debug("Test spec file        : " + candidate_spec['test_spec_file'])
-    #log.debug("Test spec set by      : " + candidate_spec['test_spec_setby'])
-
-    log.debug("Log file              : " + script_path + '/take_test.log')
-    log.debug("Will echo specs to log at end.")
-    return(True)
diff --git a/tests/toolbox/multitool.py b/tests/toolbox/multitool.py
deleted file mode 100644
index 550a98cda..000000000
--- a/tests/toolbox/multitool.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from pathlib import Path
-
-# A pathlib way to 'rm -rf'.
-
-def delete_dir_and_contents(pth):
-    for sub in pth.iterdir():
-        if sub.is_dir():
-            delete_dir_and_contents(sub)
-        else:
-            sub.unlink()
-    pth.rmdir()
diff --git a/tests/toolbox/run_travis_yml_locally.py b/tests/toolbox/run_travis_yml_locally.py
deleted file mode 100644
index 49431b906..000000000
--- a/tests/toolbox/run_travis_yml_locally.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# It's annoyingly hard to run the CI locally, and it's a pain to run/test
-# code which is not actually part of the .travis.yml file.
-# So just parse the .travis.yml as much as possible and run it on docker locally!
-
-import os
-import re
-import shlex
-import subprocess
-import yaml
-
-this_script = __file__
-this_script_path = os.path.dirname(os.path.realpath(this_script))
-this_repo_path = os.path.dirname(os.path.dirname(os.path.realpath(this_script_path)))
-
-docker_repo_path = '/home/docker/the_repo_to_test'
-
-travis_yaml_file = this_repo_path + '/.travis.yml'
-with open(travis_yaml_file) as ff:
-    travis_cmds = yaml.safe_load(ff)
-
-# Drop the docker invocations and 'sudo' from the before_install commands.
-before_cmds = [bb.replace('sudo ', '')
-               for bb in travis_cmds['before_install'] if not re.match('docker', bb)]
-before_cmds = '; '.join(before_cmds)
-script_cmds = travis_cmds['script'][0].split('"')[1]
-the_cmd = before_cmds + '; ' + script_cmds
-
-data_container_name = "croton_NY_for_CI_local"
-mk_data_container_cmd = "docker create --name " + data_container_name + " wrfhydro/domains:croton_NY"
-rm_data_container_cmd = "docker rm -v " + data_container_name
-
-invoke_docker_cmd = "docker run -it"
-invoke_docker_cmd += " -e TRAVIS_BUILD_DIR=" + docker_repo_path
-invoke_docker_cmd += " --volumes-from " + data_container_name
-invoke_docker_cmd += " -v " + this_repo_path + ":" + docker_repo_path
-invoke_docker_cmd += " wrfhydro/dev:conda"
-invoke_docker_cmd += " /bin/bash -c \"" + the_cmd + "\""
-
-subprocess.run(shlex.split(mk_data_container_cmd))
-subprocess.run(shlex.split(invoke_docker_cmd))
-subprocess.run(shlex.split(rm_data_container_cmd))
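# For reference, a short sketch of stdlib equivalents for two of the
# helpers above: shutil.rmtree covers the recursive delete in
# multitool.py, and getpass/socket roughly replace the whoami/hostname
# subprocess calls in log_boilerplate.py.
import getpass
import shutil
import socket
from pathlib import Path

def delete_dir_and_contents(pth: Path) -> None:
    shutil.rmtree(pth)  # recursive delete, same effect as the manual walk

user = getpass.getuser()         # roughly replaces Popen(["whoami"])
hostname = socket.gethostname()  # replaces Popen(["hostname"])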