From 26a4e146e060120a69ecfea3d5eb0de8db9bfcfe Mon Sep 17 00:00:00 2001 From: Nicola Soranzo Date: Sun, 24 Nov 2024 21:35:20 +0000 Subject: [PATCH] Fix UP031 errors reported by ruff 0.8.0 . --- cron/build_chrom_db.py | 2 +- lib/galaxy/app.py | 2 +- lib/galaxy_test/api/test_jobs.py | 7 +++---- .../api/test_workflow_extraction.py | 13 +++++++------ lib/galaxy_test/selenium/framework.py | 8 ++++---- lib/galaxy_test/selenium/test_uploads.py | 10 +++++----- scripts/api/common.py | 4 ++-- scripts/apply_tags.py | 6 +++--- scripts/build_toolbox.py | 6 +++--- .../cleanup_datasets/admin_cleanup_datasets.py | 8 ++++---- scripts/cleanup_datasets/pgcleanup.py | 5 +++-- scripts/drmaa_external_killer.py | 8 ++++---- scripts/drmaa_external_runner.py | 10 +++++----- scripts/metagenomics/convert_title.py | 6 +++--- scripts/runtime_stats.py | 4 ++-- scripts/set_dataset_sizes.py | 6 +++--- scripts/set_user_disk_usage.py | 4 ++-- scripts/tool_shed/api/common.py | 4 ++-- .../deprecate_repositories_without_metadata.py | 4 ++-- scripts/tools/maf/check_loc_file.py | 10 +++++----- test/functional/test_toolbox.py | 4 ++-- test/functional/webhooks/phdcomics/__init__.py | 2 +- test/integration_selenium/test_upload_ftp.py | 2 +- .../gen_history_export_test_artifacts.py | 2 +- test/manual/workflows_scaling.py | 4 ++-- test/unit/app/test_markdown_validate.py | 6 ++++-- .../data/dataset_collections/test_matching.py | 2 +- test/unit/data/test_model_copy.py | 18 +++++++++--------- 28 files changed, 85 insertions(+), 82 deletions(-) diff --git a/cron/build_chrom_db.py b/cron/build_chrom_db.py index de83836aae68..6a95366fe83e 100644 --- a/cron/build_chrom_db.py +++ b/cron/build_chrom_db.py @@ -47,7 +47,7 @@ def getchrominfo(url, db): if len(fields) > 1 and len(fields[0]) > 0 and int(fields[1]) > 0: yield [fields[0], fields[1]] else: - raise Exception("Problem parsing line %d '%s' in page '%s'" % (i, line, page)) + raise Exception(f"Problem parsing line {i} '{line}' in page '{page}'") if __name__ == "__main__": diff --git a/lib/galaxy/app.py b/lib/galaxy/app.py index 285e2eed8a1c..2076ef2599e8 100644 --- a/lib/galaxy/app.py +++ b/lib/galaxy/app.py @@ -551,7 +551,7 @@ def _wait_for_database(self, url): database_exists(url) break except Exception: - log.info("Waiting for database: attempt %d of %d" % (i, attempts)) + log.info("Waiting for database: attempt %d of %d", i, attempts) time.sleep(pause) @property diff --git a/lib/galaxy_test/api/test_jobs.py b/lib/galaxy_test/api/test_jobs.py index 9c16fc18eb72..52f36923269a 100644 --- a/lib/galaxy_test/api/test_jobs.py +++ b/lib/galaxy_test/api/test_jobs.py @@ -1129,10 +1129,9 @@ def _search(self, payload, expected_search_count=1): if search_count == expected_search_count: break time.sleep(1) - assert search_count == expected_search_count, "expected to find %d jobs, got %d jobs" % ( - expected_search_count, - search_count, - ) + assert ( + search_count == expected_search_count + ), f"expected to find {expected_search_count} jobs, got {search_count} jobs" return search_count def _search_count(self, search_payload): diff --git a/lib/galaxy_test/api/test_workflow_extraction.py b/lib/galaxy_test/api/test_workflow_extraction.py index ada641171af6..15e9d08d2835 100644 --- a/lib/galaxy_test/api/test_workflow_extraction.py +++ b/lib/galaxy_test/api/test_workflow_extraction.py @@ -5,6 +5,7 @@ dumps, loads, ) +from typing import Optional from galaxy_test.base.populators import ( skip_without_tool, @@ -611,11 +612,11 @@ def _extract_and_download_workflow(self, history_id: str, 
**extract_payload): downloaded_workflow = download_response.json() return downloaded_workflow - def _get_steps_of_type(self, downloaded_workflow, type, expected_len=None): + def _get_steps_of_type(self, downloaded_workflow, type: str, expected_len: Optional[int] = None): steps = [s for s in downloaded_workflow["steps"].values() if s["type"] == type] if expected_len is not None: n = len(steps) - assert n == expected_len, "Expected %d steps of type %s, found %d" % (expected_len, type, n) + assert n == expected_len, f"Expected {expected_len} steps of type {type}, found {n}" return sorted(steps, key=operator.itemgetter("id")) def __job_id(self, history_id, dataset_id): @@ -645,10 +646,10 @@ def _run_tool_get_collection_and_job_id(self, history_id: str, tool_id, inputs): def __check_workflow( self, workflow, - step_count=None, - verify_connected=False, - data_input_count=None, - data_collection_input_count=None, + step_count: Optional[int] = None, + verify_connected: bool = False, + data_input_count: Optional[int] = None, + data_collection_input_count: Optional[int] = None, tool_ids=None, ): steps = workflow["steps"] diff --git a/lib/galaxy_test/selenium/framework.py b/lib/galaxy_test/selenium/framework.py index 9c300dc7b0de..e5274319e2c0 100644 --- a/lib/galaxy_test/selenium/framework.py +++ b/lib/galaxy_test/selenium/framework.py @@ -233,7 +233,7 @@ def func_wrapper(self, *args, **kwds): class TestSnapshot: __test__ = False # Prevent pytest from discovering this class (issue #12071) - def __init__(self, driver, index, description): + def __init__(self, driver, index: int, description: str): self.screenshot_binary = driver.get_screenshot_as_png() self.description = description self.index = index @@ -241,7 +241,7 @@ def __init__(self, driver, index, description): self.stack = traceback.format_stack() def write_to_error_directory(self, write_file_func): - prefix = "%d-%s" % (self.index, self.description) + prefix = f"{self.index}-{self.description}" write_file_func(f"{prefix}-screenshot.png", self.screenshot_binary, raw=True) write_file_func(f"{prefix}-traceback.txt", self.exc) write_file_func(f"{prefix}-stack.txt", str(self.stack)) @@ -331,7 +331,7 @@ def setup_with_driver(self): def tear_down_selenium(self): self.tear_down_driver() - def snapshot(self, description): + def snapshot(self, description: str): """Create a debug snapshot (DOM, screenshot, etc...) that is written out on tool failure. This information will be automatically written to a per-test directory created for all @@ -371,7 +371,7 @@ def _screenshot_path(self, label, extension=".png"): copy = 1 while os.path.exists(target): # Maybe previously a test re-run - keep the original. 
- target = os.path.join(GALAXY_TEST_SCREENSHOTS_DIRECTORY, "%s-%d%s" % (label, copy, extension)) + target = os.path.join(GALAXY_TEST_SCREENSHOTS_DIRECTORY, f"{label}-{copy}{extension}") copy += 1 return target diff --git a/lib/galaxy_test/selenium/test_uploads.py b/lib/galaxy_test/selenium/test_uploads.py index aa8065af60de..de96875bd3c2 100644 --- a/lib/galaxy_test/selenium/test_uploads.py +++ b/lib/galaxy_test/selenium/test_uploads.py @@ -16,7 +16,7 @@ def test_upload_file(self): self.history_panel_wait_for_hid_ok(1) history_count = len(self.history_contents()) - assert history_count == 1, "Incorrect number of items in history - expected 1, found %d" % history_count + assert history_count == 1, f"Incorrect number of items in history - expected 1, found {history_count}" self.history_panel_click_item_title(hid=1, wait=True) self.assert_item_summary_includes(1, "28 lines") @@ -28,7 +28,7 @@ def test_upload_pasted_content(self): self.history_panel_wait_for_hid_ok(1) history_count = len(self.history_contents()) - assert history_count == 1, "Incorrect number of items in history - expected 1, found %d" % history_count + assert history_count == 1, f"Incorrect number of items in history - expected 1, found {history_count}" @selenium_test def test_upload_pasted_url_content(self): @@ -37,7 +37,7 @@ def test_upload_pasted_url_content(self): self.history_panel_wait_for_hid_ok(1) history_count = len(self.history_contents()) - assert history_count == 1, "Incorrect number of items in history - expected 1, found %d" % history_count + assert history_count == 1, f"Incorrect number of items in history - expected 1, found {history_count}" @selenium_test def test_upload_composite_dataset_pasted_data(self): @@ -46,7 +46,7 @@ def test_upload_composite_dataset_pasted_data(self): self.history_panel_wait_for_hid_ok(1) history_count = len(self.history_contents()) - assert history_count == 1, "Incorrect number of items in history - expected 1, found %d" % history_count + assert history_count == 1, f"Incorrect number of items in history - expected 1, found {history_count}" self.history_panel_click_item_title(hid=1, wait=True) self.history_panel_item_view_dataset_details(1) @@ -62,7 +62,7 @@ def test_upload_simplest(self): self.history_panel_wait_for_hid_ok(1) history_contents = self.history_contents() history_count = len(history_contents) - assert history_count == 1, "Incorrect number of items in history - expected 1, found %d" % history_count + assert history_count == 1, f"Incorrect number of items in history - expected 1, found {history_count}" hda = history_contents[0] assert hda["name"] == "1.sam", hda diff --git a/scripts/api/common.py b/scripts/api/common.py index 7a9cc62b6ec9..ea17005d8e4c 100644 --- a/scripts/api/common.py +++ b/scripts/api/common.py @@ -92,7 +92,7 @@ def display(api_key, url, return_formatted=True): # All collection members should have a name in the response. # url is optional if "url" in i: - print("#%d: %s" % (n + 1, i.pop("url"))) + print("#{}: {}".format(n + 1, i.pop("url"))) if "name" in i: print(f" name: {i.pop('name')}") try: @@ -102,7 +102,7 @@ def display(api_key, url, return_formatted=True): for item in i: print(item) print("") - print("%d element(s) in collection" % len(r)) + print(f"{len(r)} element(s) in collection") elif isinstance(r, dict): # Response is an element as defined in the REST style. 
print("Member Information") diff --git a/scripts/apply_tags.py b/scripts/apply_tags.py index bec742db8ab9..d12db56c1c95 100644 --- a/scripts/apply_tags.py +++ b/scripts/apply_tags.py @@ -48,7 +48,7 @@ def find_dataset_parents_update_tags(self, history, job, history_id): count_datasets_updated = 0 # get all datasets belonging to a history all_datasets = history.show_history(history_id, contents=True) - print("Total datasets: %d. Updating their tags may take a while..." % len(all_datasets)) + print(f"Total datasets: {len(all_datasets)}. Updating their tags may take a while...") for dataset in all_datasets: try: if not dataset["deleted"] and dataset["state"] == "ok": @@ -91,7 +91,7 @@ def find_dataset_parents_update_tags(self, history, job, history_id): ) if is_updated: count_datasets_updated += 1 - print("Tags of %d datasets updated" % count_datasets_updated) + print(f"Tags of {count_datasets_updated} datasets updated") def collect_parent_ids(self, datasets_inheritance_chain): """ @@ -158,4 +158,4 @@ def propagate_tags(self, history, current_history_id, parent_datasets_ids, datas history_tags = ApplyTagsHistory(sys.argv[1], sys.argv[2], history_id) history_tags.read_galaxy_history() end_time = time.time() - print("Program finished in %d seconds" % int(end_time - start_time)) + print(f"Program finished in {int(end_time - start_time)} seconds") diff --git a/scripts/build_toolbox.py b/scripts/build_toolbox.py index f26e571dcb9e..0a7edf362487 100644 --- a/scripts/build_toolbox.py +++ b/scripts/build_toolbox.py @@ -55,7 +55,7 @@ def add(self, toolelement, toolboxpositionelement): sectionorder = self.sectionorders[section] # Sortorder: add intelligent mix to the front - self.tools[("%05d-%s" % (sectionorder, section), label, order, section)].append(toolelement) + self.tools[(f"{sectionorder:05d}-{section}", label, order, section)].append(toolelement) def addElementsTo(self, rootelement): toolkeys = list(self.tools.keys()) @@ -77,7 +77,7 @@ def addElementsTo(self, rootelement): currentlabel = "" if section: sectionnumber += 1 - attrib = {"name": section, "id": "section%d" % sectionnumber} + attrib = {"name": section, "id": f"section{sectionnumber}"} sectionelement = ET.Element("section", attrib) rootelement.append(sectionelement) currentelement = sectionelement @@ -90,7 +90,7 @@ def addElementsTo(self, rootelement): currentlabel = label if label: labelnumber += 1 - attrib = {"text": label, "id": "label%d" % labelnumber} + attrib = {"text": label, "id": f"label{labelnumber}"} labelelement = ET.Element("label", attrib) currentelement.append(labelelement) diff --git a/scripts/cleanup_datasets/admin_cleanup_datasets.py b/scripts/cleanup_datasets/admin_cleanup_datasets.py index 624fb3fac98d..336f8227890f 100755 --- a/scripts/cleanup_datasets/admin_cleanup_datasets.py +++ b/scripts/cleanup_datasets/admin_cleanup_datasets.py @@ -171,7 +171,7 @@ def main(): now = strftime("%Y-%m-%d %H:%M:%S") print("##########################################") - print("\n# %s - Handling stuff older than %i days" % (now, args.days)) + print(f"\n# {now} - Handling stuff older than {args.days} days") if args.info_only: print("# Displaying info only ( --info_only )\n") @@ -260,7 +260,7 @@ def administrative_delete_datasets( # Mark the HistoryDatasetAssociation as deleted hda.deleted = True app.sa_session.add(hda) - print("Marked HistoryDatasetAssociation id %d as deleted" % hda.id) + print(f"Marked HistoryDatasetAssociation id {hda.id} as deleted") session = app.sa_session() with transaction(session): session.commit() @@ 
-268,7 +268,7 @@ def administrative_delete_datasets( emailtemplate = Template(filename=template_file) for email, dataset_list in user_notifications.items(): msgtext = emailtemplate.render(email=email, datasets=dataset_list, cutoff=cutoff_days) - subject = "Galaxy Server Cleanup - %d datasets DELETED" % len(dataset_list) + subject = f"Galaxy Server Cleanup - {len(dataset_list)} datasets DELETED" fromaddr = config.email_from print() print(f"From: {fromaddr}") @@ -281,7 +281,7 @@ def administrative_delete_datasets( stop = time.time() print() - print("Marked %d dataset instances as deleted" % deleted_instance_count) + print(f"Marked {deleted_instance_count} dataset instances as deleted") print("Total elapsed time: ", stop - start) print("##########################################") diff --git a/scripts/cleanup_datasets/pgcleanup.py b/scripts/cleanup_datasets/pgcleanup.py index 325cd6b24b08..a5256542401c 100755 --- a/scripts/cleanup_datasets/pgcleanup.py +++ b/scripts/cleanup_datasets/pgcleanup.py @@ -409,7 +409,7 @@ def recalculate_disk_usage(self): new_args[key] = val self._update(sql, new_args, add_event=False) - self.log.info("recalculate_disk_usage user_id %i" % user_id) + self.log.info("recalculate_disk_usage user_id %i", user_id) class RemovesMetadataFiles(RemovesObjects): @@ -1317,7 +1317,8 @@ def _dry_run_event(self): else: log.info( "Not executing event creation (increments sequence even when rolling back), using an old " - "event ID (%i) for dry run" % max_id + "event ID (%i) for dry run", + max_id, ) return max_id diff --git a/scripts/drmaa_external_killer.py b/scripts/drmaa_external_killer.py index f07d5477c421..86c03bded2d0 100755 --- a/scripts/drmaa_external_killer.py +++ b/scripts/drmaa_external_killer.py @@ -12,7 +12,7 @@ import drmaa -def validate_paramters(): +def validate_parameters(): if len(sys.argv) < 3: sys.stderr.write(f"usage: {sys.argv[0]} [job ID] [user uid]\n") exit(1) @@ -22,7 +22,7 @@ def validate_paramters(): return jobID, uid -def set_user(uid): +def set_user(uid: int): try: gid = pwd.getpwuid(uid).pw_gid os.setgid(gid) @@ -30,7 +30,7 @@ def set_user(uid): except OSError as e: if e.errno == errno.EPERM: sys.stderr.write( - "error: setuid(%d) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" % uid + f"error: setuid({uid}) failed: permission denied. 
Did you setup 'sudo' correctly for this script?\n" ) exit(1) else: @@ -48,7 +48,7 @@ def set_user(uid): def main(): - jobID, uid = validate_paramters() + jobID, uid = validate_parameters() set_user(uid) s = drmaa.Session() s.initialize() diff --git a/scripts/drmaa_external_runner.py b/scripts/drmaa_external_runner.py index c07e164721a9..c074b1fc95e1 100755 --- a/scripts/drmaa_external_runner.py +++ b/scripts/drmaa_external_runner.py @@ -41,7 +41,7 @@ def valid_numeric_userid(userid): try: pwd.getpwuid(uid) except KeyError: - sys.stderr.write("error: User-ID (%d) is not valid.\n" % uid) + sys.stderr.write(f"error: User-ID ({uid}) is not valid.\n") exit(1) return True @@ -63,7 +63,7 @@ def json_file_exists(json_filename): return True -def validate_paramters(): +def validate_parameters(): assign_all_groups = False if "--assign_all_groups" in sys.argv: assign_all_groups = True @@ -88,7 +88,7 @@ def validate_paramters(): return uid, json_filename, assign_all_groups -def set_user(uid, assign_all_groups): +def set_user(uid: int, assign_all_groups: bool): try: # Get user's default group and set it to current process to make sure # file permissions are inherited correctly @@ -108,7 +108,7 @@ def set_user(uid, assign_all_groups): except OSError as e: if e.errno == errno.EPERM: sys.stderr.write( - "error: setuid(%d) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" % uid + f"error: setuid({uid}) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" ) exit(1) else: @@ -128,7 +128,7 @@ def set_user(uid, assign_all_groups): def main(): - userid, json_filename, assign_all_groups = validate_paramters() + userid, json_filename, assign_all_groups = validate_parameters() # load JSON job template data json_file_exists(json_filename) with open(json_filename) as f: diff --git a/scripts/metagenomics/convert_title.py b/scripts/metagenomics/convert_title.py index 4beafbc056d5..767b0b17838e 100644 --- a/scripts/metagenomics/convert_title.py +++ b/scripts/metagenomics/convert_title.py @@ -18,7 +18,7 @@ if len_seq > 0: if gi is None: raise Exception("The first sequence does not have an header.") - print(">%s_%d" % (gi, len_seq)) + print(f">{gi}_{len_seq}") print("\n".join(seq)) title = line fields = title.split("|") @@ -33,10 +33,10 @@ seq.append(line) len_seq += len(line) if len_seq > 0: - print(">%s_%d" % (gi, len_seq)) + print(f">{gi}_{len_seq}") print("\n".join(seq)) print( - "Unable to find gi number for %d sequences, the title is replaced as giunknown" % (invalid_lines), + f"Unable to find gi number for {invalid_lines} sequences, the title is replaced as giunknown", file=sys.stderr, ) diff --git a/scripts/runtime_stats.py b/scripts/runtime_stats.py index 49a08a5d3e87..81d74a120e6f 100755 --- a/scripts/runtime_stats.py +++ b/scripts/runtime_stats.py @@ -205,14 +205,14 @@ def query( if debug: print("Executed:") print(cur.query) - print("Query returned %d rows" % cur.rowcount) + print(f"Query returned {cur.rowcount} rows") if source == "metrics": times = numpy.array([r[0] for r in cur if r[0]]) elif source == "history": times = numpy.array([r[0].total_seconds() for r in cur if r[0]]) - print("Collected %d times" % times.size) + print(f"Collected {times.size} times") if times.size == 0: return diff --git a/scripts/set_dataset_sizes.py b/scripts/set_dataset_sizes.py index 64f010cacda2..d420141ad0c5 100644 --- a/scripts/set_dataset_sizes.py +++ b/scripts/set_dataset_sizes.py @@ -37,9 +37,9 @@ def init(): set = 0 dataset_count = 
sa_session.query(model.Dataset).count() - print("Processing %i datasets..." % dataset_count) + print(f"Processing {dataset_count} datasets...") percent = 0 - print("Completed %i%%" % percent, end=" ") + print(f"Completed {percent}%", end=" ") sys.stdout.flush() for i, dataset in enumerate(sa_session.query(model.Dataset).enable_eagerloads(False).yield_per(1000)): if dataset.total_size is None: @@ -51,7 +51,7 @@ def init(): new_percent = int(float(i) / dataset_count * 100) if new_percent != percent: percent = new_percent - print("\rCompleted %i%%" % percent, end=" ") + print(f"\rCompleted {percent}%", end=" ") sys.stdout.flush() with transaction(session): session.commit() diff --git a/scripts/set_user_disk_usage.py b/scripts/set_user_disk_usage.py index 9c10d3c8f554..da42f7a55902 100755 --- a/scripts/set_user_disk_usage.py +++ b/scripts/set_user_disk_usage.py @@ -74,9 +74,9 @@ def quotacheck(sa_session, users, engine, object_store): if not args.username and not args.email: user_count = sa_session.query(model.User).count() - print("Processing %i users..." % user_count) + print(f"Processing {user_count} users...") for i, user in enumerate(sa_session.query(model.User).enable_eagerloads(False).yield_per(1000)): - print("%3i%%" % int(float(i) / user_count * 100), end=" ") + print(f"{int(float(i) / user_count * 100):3d}%", end=" ") quotacheck(sa_session, user, engine, object_store) print("100% complete") object_store.shutdown() diff --git a/scripts/tool_shed/api/common.py b/scripts/tool_shed/api/common.py index fb1ec34bba2f..0de80913e7bc 100644 --- a/scripts/tool_shed/api/common.py +++ b/scripts/tool_shed/api/common.py @@ -33,13 +33,13 @@ def display(url, api_key=None, return_formatted=True): # All collection members should have a name in the response. # url is optional if "url" in i: - print("#%d: %s" % (n + 1, i.pop("url"))) + print("#{}: {}".format(n + 1, i.pop("url"))) if "name" in i: print(f" name: {i.pop('name')}") for k, v in i.items(): print(f" {k}: {v}") print() - print("%d element(s) in collection" % len(r)) + print(f"{len(r)} element(s) in collection") elif isinstance(r, dict): # Response is an element as defined in the REST style. print("Member Information") diff --git a/scripts/tool_shed/deprecate_repositories_without_metadata.py b/scripts/tool_shed/deprecate_repositories_without_metadata.py index 42a2e840886c..f03284941b0a 100644 --- a/scripts/tool_shed/deprecate_repositories_without_metadata.py +++ b/scripts/tool_shed/deprecate_repositories_without_metadata.py @@ -80,7 +80,7 @@ def main(): cutoff_time = datetime.utcnow() - timedelta(days=options.days) now = strftime("%Y-%m-%d %H:%M:%S") print("\n####################################################################################") - print("# %s - Handling stuff older than %i days" % (now, options.days)) + print(f"# {now} - Handling stuff older than {options.days} days") if options.info_only: print("# Displaying info only ( --info_only )") @@ -183,7 +183,7 @@ def deprecate_repositories(app, cutoff_time, days=14, info_only=False, verbose=F app, owner.username, owner.email, repositories_by_owner[repository_owner]["repositories"], days ) stop = time.time() - print("# Deprecated %d repositories." 
% len(repositories)) + print(f"# Deprecated {len(repositories)} repositories.") print("# Elapsed time: ", stop - start) print("####################################################################################") diff --git a/scripts/tools/maf/check_loc_file.py b/scripts/tools/maf/check_loc_file.py index ad66b8cc7adb..fd2a039c003a 100644 --- a/scripts/tools/maf/check_loc_file.py +++ b/scripts/tools/maf/check_loc_file.py @@ -40,19 +40,19 @@ def __main__(): # indexed species for spec in indexed_for_species: if spec not in species_indexed_in_maf: - print("Line %i, %s claims to be indexed for %s, but indexes do not exist." % (i, uid, spec)) + print(f"Line {i}, {uid} claims to be indexed for {spec}, but indexes do not exist.") for spec in species_indexed_in_maf: if spec not in indexed_for_species: - print("Line %i, %s is indexed for %s, but is not listed in loc file." % (i, uid, spec)) + print(f"Line {i}, {uid} is indexed for {spec}, but is not listed in loc file.") # existing species for spec in species_exist: if spec not in species_found_in_maf: - print("Line %i, %s claims to have blocks for %s, but was not found in MAF files." % (i, uid, spec)) + print(f"Line {i}, {uid} claims to have blocks for {spec}, but was not found in MAF files.") for spec in species_found_in_maf: if spec not in species_exist: - print("Line %i, %s contains %s, but is not listed in loc file." % (i, uid, spec)) + print(f"Line {i}, {uid} contains {spec}, but is not listed in loc file.") except Exception as e: - print("Line %i is invalid: %s" % (i, e)) + print(f"Line {i} is invalid: {e}") if __name__ == "__main__": diff --git a/test/functional/test_toolbox.py b/test/functional/test_toolbox.py index e8a357637d5e..344ce2ac2916 100644 --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -105,9 +105,9 @@ def test_tool(self): for tool_version, version_summary in tool_summary.items(): count = version_summary["count"] for i in range(count): - test_function_name = "test_tool_%06d" % all_versions_test_count + test_function_name = f"test_tool_{all_versions_test_count:06d}" test_method = make_test_method(tool_version, i, test_function_name) - test_method.__doc__ = "( %s ) > Test-%d" % (tool_id, all_versions_test_count + 1) + test_method.__doc__ = f"( {tool_id} ) > Test-{all_versions_test_count + 1}" namespace[test_function_name] = test_method namespace["tool_id"] = tool_id namespace["galaxy_interactor"] = galaxy_interactor diff --git a/test/functional/webhooks/phdcomics/__init__.py b/test/functional/webhooks/phdcomics/__init__.py index 275a03b0eb13..11ddd452ad29 100644 --- a/test/functional/webhooks/phdcomics/__init__.py +++ b/test/functional/webhooks/phdcomics/__init__.py @@ -29,7 +29,7 @@ def main(trans, webhook, params): ) random_id = random.randint(1, webhook.config["latest_id"]) - url = "http://www.phdcomics.com/comics/archive.php?comicid=%d" % random_id + url = f"http://www.phdcomics.com/comics/archive.php?comicid={random_id}" content = urlopen(url).read() soup = BeautifulSoup(content, "html.parser") comic_img = soup.find_all("img", id="comic2") diff --git a/test/integration_selenium/test_upload_ftp.py b/test/integration_selenium/test_upload_ftp.py index c851783116a7..5e7f68e144e7 100644 --- a/test/integration_selenium/test_upload_ftp.py +++ b/test/integration_selenium/test_upload_ftp.py @@ -27,7 +27,7 @@ def _upload_all(self, n): self.components.upload.file_dialog.wait_for_and_click() self.components.upload.file_source_selector(path="gxftp://").wait_for_and_click() for i in range(n): - 
self.components.upload.file_source_selector(path="gxftp://%i.txt" % i).wait_for_and_click() + self.components.upload.file_source_selector(path=f"gxftp://{i}.txt").wait_for_and_click() self.components.upload.file_dialog_ok.wait_for_and_click() for i in range(n): self.components.upload.row(n=i).wait_for_visible() diff --git a/test/manual/gen_history_export_test_artifacts.py b/test/manual/gen_history_export_test_artifacts.py index 6840f2615676..88acbe261b0d 100644 --- a/test/manual/gen_history_export_test_artifacts.py +++ b/test/manual/gen_history_export_test_artifacts.py @@ -47,7 +47,7 @@ def _run(args, gi): def _gi(args): gi = galaxy.GalaxyInstance(args.host, key=args.api_key) - name = "histexport-user-%d" % random.randint(0, 1000000) + name = f"histexport-user-{random.randint(0, 1000000)}" user = gi.users.create_local_user(name, f"{name}@galaxytesting.dev", "pass123") user_id = user["id"] diff --git a/test/manual/workflows_scaling.py b/test/manual/workflows_scaling.py index 2a1e39413342..20c1f73a8bd0 100644 --- a/test/manual/workflows_scaling.py +++ b/test/manual/workflows_scaling.py @@ -81,7 +81,7 @@ def _run(args, gi, workflow_id, uuid): if uuid is not None: contents = [] for i in range(args.collection_size): - contents.append("random dataset number #%d" % i) + contents.append(f"random dataset number #{i}") hdca = dataset_collection_populator.create_list_in_history(history_id, contents=contents).json() label_map = { uuid: {"src": "hdca", "id": hdca["id"]}, @@ -172,7 +172,7 @@ def _link(link, output_name=None): def _gi(args): gi = galaxy.GalaxyInstance(args.host, key=args.api_key) - name = "wftest-user-%d" % random.randint(0, 1000000) + name = f"wftest-user-{random.randint(0, 1000000)}" user = gi.users.create_local_user(name, f"{name}@galaxytesting.dev", "pass123") user_id = user["id"] diff --git a/test/unit/app/test_markdown_validate.py b/test/unit/app/test_markdown_validate.py index 0c99276ff915..b8dcae19ad86 100644 --- a/test/unit/app/test_markdown_validate.py +++ b/test/unit/app/test_markdown_validate.py @@ -1,3 +1,5 @@ +from typing import Optional + from galaxy.managers.markdown_parse import validate_galaxy_markdown @@ -5,14 +7,14 @@ def assert_markdown_valid(markdown): validate_galaxy_markdown(markdown) -def assert_markdown_invalid(markdown, at_line=None): +def assert_markdown_invalid(markdown, at_line: Optional[int] = None): failed = False try: validate_galaxy_markdown(markdown) except ValueError as e: failed = True if at_line is not None: - assert "Invalid line %d" % (at_line + 1) in str(e) + assert f"Invalid line {at_line + 1}" in str(e) assert failed, f"Expected markdown [{markdown}] to fail validation but it did not." 
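The hunk above shows the two smaller habits this patch applies alongside the UP031 fixes: whole expressions such as `at_line + 1` move inside the f-string braces, and touched signatures gain `Optional` hints. A minimal runnable sketch of both, using a hypothetical helper (not from the Galaxy code) that mirrors the `assert_markdown_invalid` check:

    from typing import Optional

    def describe_failure(at_line: Optional[int] = None) -> str:
        # Full expressions, not just names, are evaluated inside f-string braces.
        if at_line is not None:
            return f"Invalid line {at_line + 1}"
        return "Invalid markdown"

    assert describe_failure(4) == "Invalid line 5"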
diff --git a/test/unit/data/dataset_collections/test_matching.py b/test/unit/data/dataset_collections/test_matching.py index 8637304e9a10..a7bb1985569c 100644 --- a/test/unit/data/dataset_collections/test_matching.py +++ b/test/unit/data/dataset_collections/test_matching.py @@ -78,7 +78,7 @@ def build_collections_to_match(*items): collection_instance, subcollection_type = item else: collection_instance, subcollection_type = item, None - to_match.add("input_%d" % i, collection_instance, subcollection_type) + to_match.add(f"input_{i}", collection_instance, subcollection_type) return to_match diff --git a/test/unit/data/test_model_copy.py b/test/unit/data/test_model_copy.py index 117ce7ab0c20..4b827a972089 100644 --- a/test/unit/data/test_model_copy.py +++ b/test/unit/data/test_model_copy.py @@ -36,7 +36,7 @@ def test_history_dataset_copy(num_datasets=NUM_DATASETS, include_metadata_file=INCLUDE_METADATA_FILE): with _setup_mapping_and_user() as (test_config, object_store, model, old_history): for i in range(num_datasets): - hda_path = test_config.write("moo", "test_metadata_original_%d" % i) + hda_path = test_config.write("moo", f"test_metadata_original_{i}") _create_hda(model, object_store, old_history, hda_path, include_metadata_file=include_metadata_file) session = model.context @@ -62,7 +62,7 @@ def test_history_dataset_copy(num_datasets=NUM_DATASETS, include_metadata_file=I if include_metadata_file: _check_metadata_file(hda) annotation_str = hda.get_item_annotation_str(model.context, old_history.user, hda) - assert annotation_str == "annotation #%d" % hda.hid, annotation_str + assert annotation_str == f"annotation #{hda.hid}", annotation_str def test_history_collection_copy(list_size=NUM_DATASETS): @@ -70,7 +70,7 @@ def test_history_collection_copy(list_size=NUM_DATASETS): for i in range(NUM_COLLECTIONS): hdas = [] for i in range(list_size * 2): - hda_path = test_config.write("moo", "test_metadata_original_%d" % i) + hda_path = test_config.write("moo", f"test_metadata_original_{i}") hda = _create_hda( model, object_store, old_history, hda_path, visible=False, include_metadata_file=False ) @@ -100,7 +100,7 @@ def test_history_collection_copy(list_size=NUM_DATASETS): model.context, old_history.user, history_dataset_collection, - "annotation #%d" % history_dataset_collection.hid, + f"annotation #{history_dataset_collection.hid}", ) session = model.context @@ -118,7 +118,7 @@ def test_history_collection_copy(list_size=NUM_DATASETS): # if not instances: # print("FULL FLUSH...") # else: - # print("Flushing just %s" % instances) + # print(f"Flushing just {instances}") history_copy_timer = ExecutionTimer() new_history = old_history.copy(target_user=old_history.user) @@ -127,12 +127,12 @@ def test_history_collection_copy(list_size=NUM_DATASETS): for hda in new_history.active_datasets: assert hda.get_size() == 3 annotation_str = hda.get_item_annotation_str(model.context, old_history.user, hda) - assert annotation_str == "annotation #%d" % hda.hid, annotation_str + assert annotation_str == f"annotation #{hda.hid}", annotation_str assert len(new_history.active_dataset_collections) == NUM_COLLECTIONS for hdca in new_history.active_dataset_collections: annotation_str = hdca.get_item_annotation_str(model.context, old_history.user, hdca) - assert annotation_str == "annotation #%d" % hdca.hid, annotation_str + assert annotation_str == f"annotation #{hdca.hid}", annotation_str @contextlib.contextmanager @@ -180,7 +180,7 @@ def _create_hda( _check_metadata_file(hda) hda.set_size() history.add_dataset(hda) - 
hda.add_item_annotation(sa_session, history.user, hda, "annotation #%d" % hda.hid) + hda.add_item_annotation(sa_session, history.user, hda, f"annotation #{hda.hid}") return hda @@ -190,4 +190,4 @@ def _check_metadata_file(hda): assert os.path.exists(copied_index) with open(copied_index) as f: assert f.read() == "moo" - assert copied_index.endswith("metadata_%d.dat" % hda.id) + assert copied_index.endswith(f"metadata_{hda.id}.dat")
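For reference, UP031 is ruff's printf-string-formatting rule. The conversions above follow two patterns; here is a minimal runnable sketch of each (variable names and values are illustrative, not taken from the Galaxy code):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    label, copy, extension = "screenshot", 1, ".png"
    sectionorder, section = 3, "tools"

    # Pattern 1: %-formatting becomes an f-string; printf format specs
    # such as %05d carry over as {value:05d}.
    assert "%s-%d%s" % (label, copy, extension) == f"{label}-{copy}{extension}"
    assert "%05d-%s" % (sectionorder, section) == f"{sectionorder:05d}-{section}"

    # Pattern 2: logging calls keep %-style placeholders and pass the
    # arguments separately, so formatting is deferred until (and unless)
    # the record is actually emitted -- hence no f-strings in the
    # lib/galaxy/app.py and scripts/cleanup_datasets/pgcleanup.py hunks.
    attempt, attempts = 2, 10
    log.info("Waiting for database: attempt %d of %d", attempt, attempts)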