Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion AUTOTEST/machine-tux.sh
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ mv -f check-case.??? $output_dir

# Basic build and run tests
mo="-j test"
ro="-ams -ij -sstruct -sstructmat -struct -structmat -lobpcg"
ro="-unit -ams -ij -sstruct -sstructmat -struct -structmat -lobpcg"
eo=""

co=""
Expand Down
1 change: 1 addition & 0 deletions AUTOTEST/runtests-unit
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
TEST_unit/*.sh
40 changes: 40 additions & 0 deletions src/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,15 @@ add_custom_target(checkpar
COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/cleantest.sh
COMMAND echo ""

COMMAND ${CMAKE_COMMAND} -E echo_append "Running Unit Tests... "
COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/runtest.sh
-atol ${HYPRE_CHECK_TOL}
-mpi "${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}"
${CMAKE_CURRENT_SOURCE_DIR}/TEST_unit/unit.sh
COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/checktest.sh
COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/cleantest.sh
COMMAND echo ""

WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${TEST_SRCS}
COMMENT "Running parallel tests"
Expand Down Expand Up @@ -172,8 +181,39 @@ if(BUILD_TESTING)
DEPENDS sstruct
FAIL_REGULAR_EXPRESSION "Failed"
)

# Unit tests
# Runs the TEST_unit suite through the standard runtest.sh harness inside a
# single `bash -c` invocation; `&&` chaining ensures checktest.sh/cleantest.sh
# only run if the preceding step succeeded.  The inner escaped quotes keep the
# MPI launcher and its numproc flag together as one -mpi argument.
add_test(NAME test_unit
COMMAND ${BASH_PROGRAM} -c "\
${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/runtest.sh \
-atol ${HYPRE_CHECK_TOL} \
-mpi \"${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}\" \
${CMAKE_CURRENT_SOURCE_DIR}/TEST_unit/unit.sh && \
${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/checktest.sh && \
${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/cleantest.sh"
)
# DEPENDS here is CTest test-level ordering (run after the test_csr_overlap
# test); FAIL_REGULAR_EXPRESSION fails the test whenever "Failed" appears in
# its output — presumably emitted by checktest.sh on mismatch; confirm.
set_tests_properties(test_unit PROPERTIES
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS test_csr_overlap
FAIL_REGULAR_EXPRESSION "Failed"
)
endif()

# Add unit_tests make target for convenience.
# Drives the TEST_unit suite through the runtest.sh/checktest.sh/cleantest.sh
# harness, mirroring the test_unit CTest entry but invocable as `make unit_tests`.
add_custom_target(unit_tests
  COMMAND ${CMAKE_COMMAND} -E echo "Running unit tests..."
  COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/runtest.sh
          -atol ${HYPRE_CHECK_TOL}
          -mpi "${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}"
          ${CMAKE_CURRENT_SOURCE_DIR}/TEST_unit/unit.sh
  COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/checktest.sh
  COMMAND ${BASH_PROGRAM} ${CMAKE_CURRENT_SOURCE_DIR}/cleantest.sh
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  COMMENT "Running unit tests"
  VERBATIM
)
# add_custom_target(DEPENDS ...) only tracks files and custom-command outputs,
# not build targets; a target-level dependency on the test driver executable
# must be declared with add_dependencies() so it is built before the suite runs.
add_dependencies(unit_tests test_csr_overlap)

# Optional Fortran-based drivers
if(HYPRE_ENABLE_FORTRAN)
# Build Fortran wrapper object libraries to be linked into C drivers
Expand Down
13 changes: 13 additions & 0 deletions src/test/TEST_unit/unit.jobs
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash
# Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
# HYPRE Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

#=============================================================================
# Unit Test: CSR Overlap Extraction
# Tests various grid configurations and overlap orders
#=============================================================================

# Run the driver on 8 ranks; output is checked by unit.sh against unit.out.0.
# NOTE(review): the "mpirun -np N" prefix is presumably rewritten by runtest.sh
# to the configured MPI launcher (-mpi option) — confirm this jobs file goes
# through that substitution path.
mpirun -np 8 ./test_csr_overlap > unit.out.0

110 changes: 110 additions & 0 deletions src/test/TEST_unit/unit.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
#!/bin/bash
# Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
# HYPRE Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

# Check the unit-test driver outputs (${TNAME}.out.*) produced by the jobs
# file: report any FAILED cases to stderr and collect a result summary into
# ${TNAME}.out for the test harness to inspect.

TNAME=`basename $0 .sh`
RTOL=$1   # relative tolerance passed by the harness (not used by this checker)
ATOL=$2   # absolute tolerance passed by the harness (not used by this checker)

#=============================================================================
# Check that all tests passed
#=============================================================================

# Find all output files written by the jobs file
OUTFILES=$(ls ${TNAME}.out.* 2>/dev/null | sort)

if [ -z "$OUTFILES" ]; then
    echo "No output files found matching ${TNAME}.out.*" >&2
    exit 1
fi

# Build a mapping from output file to test driver name out of the jobs file,
# so failure reports can say which driver produced each output.
declare -A DRIVER_MAP
if [ -f "${TNAME}.jobs" ]; then
    while IFS= read -r line; do
        # Match lines like: mpirun -np 8 ./test_csr_overlap > unit.out.0
        if echo "$line" | grep -qE ">\s*${TNAME}\.out\.[0-9]+"; then
            DRIVER=$(echo "$line" | grep -oE "\./[a-zA-Z0-9_]+" | sed 's|^\./||')
            OUTFILE=$(echo "$line" | grep -oE "${TNAME}\.out\.[0-9]+" | head -1)
            if [ -n "$DRIVER" ] && [ -n "$OUTFILE" ]; then
                DRIVER_MAP[$OUTFILE]=$DRIVER
            fi
        fi
    done < "${TNAME}.jobs"
fi

# Check each output file for failures
TOTAL_PASSED=0
TOTAL_FAILED=0
FAILED_DETAILS=""

for OUTFILE in $OUTFILES; do
    if [ ! -f "$OUTFILE" ]; then
        continue
    fi

    # Count PASSED and FAILED messages.  Note: grep -c always prints a count
    # (including "0") for a readable file, even when it exits nonzero on no
    # match — an "|| echo 0" fallback here would emit a second "0" line
    # ("0\n0" -> "00" after whitespace stripping), so it is deliberately
    # omitted; the :-0 default below covers a genuinely empty result.
    PASSED_COUNT=$(grep -c "PASSED" "$OUTFILE" 2>/dev/null)
    FAILED_COUNT=$(grep -c "FAILED" "$OUTFILE" 2>/dev/null)

    # Strip whitespace and default to 0
    PASSED_COUNT=$(echo "$PASSED_COUNT" | tr -d '[:space:]')
    FAILED_COUNT=$(echo "$FAILED_COUNT" | tr -d '[:space:]')
    PASSED_COUNT=${PASSED_COUNT:-0}
    FAILED_COUNT=${FAILED_COUNT:-0}

    TOTAL_PASSED=$((TOTAL_PASSED + PASSED_COUNT))
    TOTAL_FAILED=$((TOTAL_FAILED + FAILED_COUNT))

    if [ "$FAILED_COUNT" -gt 0 ]; then
        # Find which test(s) failed in this file: lines starting with "TestN"
        # that do not carry a PASSED marker, with proc counts / trailing
        # detail stripped off.
        FAILED_TESTS=$(grep "^Test[0-9]" "$OUTFILE" | grep -v "PASSED" | sed 's/ (.*procs).*//' | sed 's/:.*//')

        # Get test driver for this output file (falls back to "Unknown")
        TEST_DRIVER="${DRIVER_MAP[$OUTFILE]}"
        if [ -z "$TEST_DRIVER" ]; then
            TEST_DRIVER="Unknown"
        fi

        # Accumulate one "- [driver]: test" line per failed test
        if [ -n "$FAILED_TESTS" ]; then
            while IFS= read -r test; do
                FAILED_DETAILS="${FAILED_DETAILS} - [${TEST_DRIVER}]: ${test}\n"
            done <<< "$FAILED_TESTS"
        fi
    fi
done

# Report failures if any (sed drops blank lines from the \n-joined details)
if [ "$TOTAL_FAILED" -gt 0 ]; then
    echo "Failed test summary:" >&2
    echo -e "$FAILED_DETAILS" | sed '/^$/d' >&2
fi

if [ "$TOTAL_PASSED" -eq 0 ] && [ "$TOTAL_FAILED" -eq 0 ]; then
    echo "No test results found in output files" >&2
fi

# Collect summary output from all output files into ${TNAME}.out
{
    for OUTFILE in $OUTFILES; do
        if [ -f "$OUTFILE" ]; then
            echo -e "# Output file: $OUTFILE\n"
            # Extract test results (lines containing PASSED/FAILED)
            grep -E "(PASSED|FAILED|Test[0-9])" "$OUTFILE" | head -20
            echo ""
        fi
    done
} > ${TNAME}.out

# Verify we got results (same grep -c note as above: no echo fallback needed)
OUTCOUNT=$(grep -c "PASSED\|FAILED" ${TNAME}.out 2>/dev/null)
OUTCOUNT=$(echo "$OUTCOUNT" | tr -d '[:space:]')
OUTCOUNT=${OUTCOUNT:-0}
if [ "$OUTCOUNT" -eq 0 ]; then
    echo "No test results found in output files" >&2
fi



Loading