
Comparing changes

base repository: aws/aws-parallelcluster
base: develop
head repository: aws/aws-parallelcluster
compare: release-2.4
Can’t automatically merge.
1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -0,0 +1 @@
+* @aws/aws-parallelcluster-admins
17 changes: 15 additions & 2 deletions tests/integration-tests/conftest.py
@@ -74,6 +74,7 @@ def pytest_addoption(parser):
     )
     parser.addoption("--benchmarks-target-capacity", help="set the target capacity for benchmarks tests", type=int)
     parser.addoption("--benchmarks-max-time", help="set the max waiting time in minutes for benchmarks tests", type=int)
+    parser.addoption("--stackname-suffix", help="set a suffix in the integration tests stack names")


 def pytest_generate_tests(metafunc):
@@ -189,7 +190,11 @@ def _cluster_factory(cluster_config):
         cluster = Cluster(
             name=request.config.getoption("cluster")
             if request.config.getoption("cluster")
-            else "integ-tests-" + random_alphanumeric(),
+            else "integ-tests-{0}{1}{2}".format(
+                random_alphanumeric(),
+                "-" if request.config.getoption("stackname_suffix") else "",
+                request.config.getoption("stackname_suffix"),
+            ),
             config_file=cluster_config,
             ssh_key=request.config.getoption("key_path"),
         )
@@ -387,7 +392,15 @@ def _create_vpc_stack(request, template, region, cfn_stacks_factory):
         logging.info("Using stack {0} in region {1}".format(request.config.getoption("vpc_stack"), region))
         stack = CfnStack(name=request.config.getoption("vpc_stack"), region=region, template=template.to_json())
     else:
-        stack = CfnStack(name="integ-tests-vpc-" + random_alphanumeric(), region=region, template=template.to_json())
+        stack = CfnStack(
+            name="integ-tests-vpc-{0}{1}{2}".format(
+                random_alphanumeric(),
+                "-" if request.config.getoption("stackname_suffix") else "",
+                request.config.getoption("stackname_suffix"),
+            ),
+            region=region,
+            template=template.to_json(),
+        )
     cfn_stacks_factory.create_stack(stack)
     return stack

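Note on the two hunks above: the new --stackname-suffix value is appended to the randomly generated cluster and VPC stack names, with a "-" separator only when a suffix is present. A minimal sketch of the resulting naming scheme (the _stack_name helper and the example values are illustrative, not part of the patch):

# Illustrative sketch of the "{0}{1}{2}".format(...) naming pattern used above.
def _stack_name(prefix, random_part, suffix=""):
    # The "-" separator is emitted only when a suffix was actually provided.
    return "{0}{1}{2}{3}".format(prefix, random_part, "-" if suffix else "", suffix)

print(_stack_name("integ-tests-", "ab12cd34"))                  # integ-tests-ab12cd34
print(_stack_name("integ-tests-", "ab12cd34", "nightly"))       # integ-tests-ab12cd34-nightly
print(_stack_name("integ-tests-vpc-", "ab12cd34", "nightly"))   # integ-tests-vpc-ab12cd34-nightly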
39 changes: 22 additions & 17 deletions tests/integration-tests/reports_generator.py
@@ -21,7 +21,7 @@
 import untangle


-def generate_cw_report(test_results_dir, namespace, aws_region):
+def generate_cw_report(test_results_dir, namespace, aws_region, timestamp_day_start):
     """
     Publish tests results to CloudWatch
     :param test_results_dir: dir containing the tests outputs.
@@ -33,7 +33,11 @@ def generate_cw_report(test_results_dir, namespace, aws_region):
     generate_junitxml_merged_report(test_results_dir)
     report = generate_json_report(test_results_dir=test_results_dir, save_to_file=False)
     cw_client = boto3.client("cloudwatch", region_name=aws_region)
-    timestamp = datetime.datetime.utcnow()
+    timestamp = (
+        datetime.datetime.combine(datetime.datetime.utcnow(), datetime.time())
+        if timestamp_day_start
+        else datetime.datetime.utcnow()
+    )

     for key, value in report.items():
         if key == "all":
@@ -76,21 +80,22 @@ def generate_json_report(test_results_dir, save_to_file=True):
     result_to_label_mapping = {"skipped": "skipped", "failure": "failures", "error": "errors"}
     results = {"all": _empty_results_dict()}
     xml = untangle.parse(test_report_file)
-    for testcase in xml.testsuite.children:
-        label = "succeeded"
-        for key, value in result_to_label_mapping.items():
-            if hasattr(testcase, key):
-                label = value
-                break
-        results["all"][label] += 1
-        results["all"]["total"] += 1
-
-        if hasattr(testcase, "properties"):
-            for property in testcase.properties.children:
-                _record_result(results, property["name"], property["value"], label)
-
-        feature = re.sub(r"test_|_test|.py", "", os.path.splitext(os.path.basename(testcase["file"]))[0])
-        _record_result(results, "feature", feature, label)
+    for testsuite in xml.testsuites.children:
+        for testcase in testsuite.children:
+            label = "succeeded"
+            for key, value in result_to_label_mapping.items():
+                if hasattr(testcase, key):
+                    label = value
+                    break
+            results["all"][label] += 1
+            results["all"]["total"] += 1
+
+            if hasattr(testcase, "properties"):
+                for property in testcase.properties.children:
+                    _record_result(results, property["name"], property["value"], label)
+
+            feature = re.sub(r"test_|_test|.py", "", os.path.splitext(os.path.basename(testcase["file"]))[0])
+            _record_result(results, "feature", feature, label)

     if save_to_file:
         with open("{0}/test_report.json".format(test_results_dir), "w") as out_f:
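Two things happen in this file: generate_cw_report can now normalize the metric timestamp to the start of the current UTC day, and generate_json_report iterates a <testsuites> root before visiting each <testsuite>, matching the structure of the merged JUnit XML report. A minimal, self-contained sketch of the timestamp behavior (the values in the comments are examples only):

import datetime

# datetime.combine() uses only the date part of its first argument, so pairing
# it with an empty datetime.time() yields midnight of the same UTC day.
now = datetime.datetime.utcnow()
day_start = datetime.datetime.combine(now, datetime.time())
# e.g. now       -> 2019-06-10 14:37:22.123456
#      day_start -> 2019-06-10 00:00:00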
16 changes: 15 additions & 1 deletion tests/integration-tests/test_runner.py
@@ -58,6 +58,7 @@
     "reports": [],
     "cw_region": "us-east-1",
     "cw_namespace": "ParallelCluster/IntegrationTests",
+    "cw_timestamp_day_start": False,
     "sequential": False,
     "output_dir": "tests_outputs",
     "custom_node_url": None,
@@ -72,6 +73,7 @@
     "benchmarks": False,
     "benchmarks_target_capacity": 200,
     "benchmarks_max_time": 30,
+    "stackname_suffix": "",
 }


@@ -148,6 +150,12 @@ def _init_argparser():
         help="CloudWatch namespace where to publish metrics",
         default=TEST_DEFAULTS.get("cw_namespace"),
     )
+    parser.add_argument(
+        "--cw-timestamp-day-start",
+        action="store_true",
+        help="CloudWatch metrics pushed with a timestamp equal to the start of the current day (midnight)",
+        default=TEST_DEFAULTS.get("cw_timestamp_day_start"),
+    )
     parser.add_argument("--key-name", help="Key to use for EC2 instances", required=True)
     parser.add_argument("--key-path", help="Path to the key to use for SSH connections", required=True, type=_is_file)
     parser.add_argument(
@@ -205,6 +213,11 @@ def _init_argparser():
         default=TEST_DEFAULTS.get("benchmarks_max_time"),
         type=int,
     )
+    parser.add_argument(
+        "--stackname-suffix",
+        help="set a suffix in the integration tests stack names",
+        default=TEST_DEFAULTS.get("stackname_suffix"),
+    )

     return parser

@@ -244,6 +257,7 @@ def _get_pytest_args(args, regions, log_file, out_dir):
     pytest_args.extend(["--output-dir", "{0}/{1}".format(args.output_dir, out_dir)])
     pytest_args.extend(["--key-name", args.key_name])
     pytest_args.extend(["--key-path", args.key_path])
+    pytest_args.extend(["--stackname-suffix", args.stackname_suffix])

     if args.credential:
         pytest_args.append("--credential")
@@ -390,7 +404,7 @@ def main():

     if "cw" in args.reports:
         logger.info("Publishing CloudWatch metrics")
-        generate_cw_report(reports_output_dir, args.cw_namespace, args.cw_region)
+        generate_cw_report(reports_output_dir, args.cw_namespace, args.cw_region, args.cw_timestamp_day_start)


 if __name__ == "__main__":
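The two options added to test_runner.py are plain argparse flags: a store_true switch (default False) and a string option (default ""). A minimal, self-contained sketch of how they behave (this is not the project's real parser; the "nightly" value is a made-up example):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--cw-timestamp-day-start", action="store_true", default=False)
parser.add_argument("--stackname-suffix", default="")

args = parser.parse_args(["--stackname-suffix", "nightly", "--cw-timestamp-day-start"])
# args.cw_timestamp_day_start -> True, args.stackname_suffix -> "nightly"
# With no flags, parse_args([]) yields False and "", matching the TEST_DEFAULTS
# entries added in this diff. _get_pytest_args() then forwards the suffix to the
# pytest session, and main() passes cw_timestamp_day_start to generate_cw_report().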
1 change: 1 addition & 0 deletions tests/integration-tests/tests/storage/test_ebs.py
@@ -40,6 +40,7 @@ def test_ebs_single(scheduler, pcluster_config_reader, clusters_factory):
 @pytest.mark.instances(["c5.xlarge"])
 @pytest.mark.schedulers(["sge", "awsbatch"])
 @pytest.mark.usefixtures("region", "os", "instance")
+@pytest.mark.skip_oss("[centos6]")
 def test_ebs_multiple(scheduler, pcluster_config_reader, clusters_factory):
     mount_dirs = ["/ebs_mount_dir_{0}".format(i) for i in range(0, 5)]
     volume_sizes = [15 + 5 * i for i in range(0, 5)]
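The new skip_oss marker above excludes centos6 from test_ebs_multiple. As a hedged illustration only (this is not the project's actual hook, and the marker and option names are assumptions), a marker like this is typically honored from a conftest.py collection hook along these lines:

import pytest

# Hypothetical sketch: skip any test whose skip_oss marker mentions the OS
# selected for the current run. "--os" is an assumed option name.
def pytest_collection_modifyitems(config, items):
    current_os = config.getoption("--os", default=None)
    for item in items:
        marker = item.get_closest_marker("skip_oss")
        if marker and current_os and current_os in str(marker.args[0]):
            item.add_marker(pytest.mark.skip(reason="OS not supported by this test"))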