diff --git a/.github/ISSUE_TEMPLATE/bug_issue.yml b/.github/ISSUE_TEMPLATE/bug_issue.yml index c4f7378f02..760be38452 100644 --- a/.github/ISSUE_TEMPLATE/bug_issue.yml +++ b/.github/ISSUE_TEMPLATE/bug_issue.yml @@ -33,6 +33,7 @@ body: description: Which version of z/OS Ansible core collection are you using. If you are unsure, review the [documentation](https://ibm.github.io/z_ansible_collections_doc/faqs/faqs.html#how-do-i-update-a-collection-to-the-latest-version). multiple: false options: + - v1.15.0 - v1.15.0-beta.1 - v1.14.1 - v1.14.0 diff --git a/.github/ISSUE_TEMPLATE/collaboration_issue.yml b/.github/ISSUE_TEMPLATE/collaboration_issue.yml index 79ab99aefb..06fac7308c 100644 --- a/.github/ISSUE_TEMPLATE/collaboration_issue.yml +++ b/.github/ISSUE_TEMPLATE/collaboration_issue.yml @@ -42,6 +42,7 @@ body: description: Which version of z/OS Ansible core collection are you using. If you are unsure, review the [documentation](https://ibm.github.io/z_ansible_collections_doc/faqs/faqs.html#how-do-i-update-a-collection-to-the-latest-version). multiple: false options: + - v1.15.0 - v1.15.0-beta.1 - v1.14.1 - v1.14.0 diff --git a/.github/ISSUE_TEMPLATE/doc_issue.yml b/.github/ISSUE_TEMPLATE/doc_issue.yml index f8f68e115d..fde10605b0 100644 --- a/.github/ISSUE_TEMPLATE/doc_issue.yml +++ b/.github/ISSUE_TEMPLATE/doc_issue.yml @@ -34,6 +34,7 @@ body: description: Which version of z/OS Ansible core collection are you using. If you are unsure, review the [documentation](https://ibm.github.io/z_ansible_collections_doc/faqs/faqs.html#how-do-i-update-a-collection-to-the-latest-version). 
multiple: false options: + - v1.15.0 - v1.15.0-beta.1 - v1.14.1 - v1.14.0 diff --git a/.gitignore b/.gitignore index 7270e43399..5b06f84aee 100644 --- a/.gitignore +++ b/.gitignore @@ -258,6 +258,7 @@ shell_exploits.txt importer_result.json ac scripts/ +collections/ ################################################################################ # Debugging .ignore, if you want to know why a particular file is being ignored diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eee3829d9a..9b39d58dce 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,8 +4,57 @@ ibm.ibm\_zos\_core Release Notes .. contents:: Topics -v1.15.0-beta.1 -============== +v1.15.0 +======= + +Release Summary +--------------- + +Release Date: '2025-09-30' +This changelog describes all changes made to the modules and plugins included +in this collection. The release date is the date the changelog is created. +For additional details such as required dependencies and availability review +the collections `release notes `__ + +Minor Changes +------------- + +- zos_archive - Adds support for encoding before archiving files. (https://github.com/ansible-collections/ibm_zos_core/pull/2081) +- zos_archive - Adds support for reverting the encoding of a source's files after archiving them. (https://github.com/ansible-collections/ibm_zos_core/pull/2192) +- zos_archive - Adds support for skipping encoding in archive module. This allows users to skip encoding for certain files before archiving them. (https://github.com/ansible-collections/ibm_zos_core/pull/2116) +- zos_copy - Added support for british pound character usage in file content and data set names for both source and destination when copying. (https://github.com/ansible-collections/ibm_zos_core/pull/2153) +- zos_copy - Adds new option `identical_gdg_copy` in the module. 
This allows copying GDG generations from a source base to a destination base while preserving generation data set absolute names when the destination base does not exist prior to the copy. (https://github.com/ansible-collections/ibm_zos_core/pull/2100). +- zos_copy - Adds support of using alias names in src and dest parameters for PS, PDS and PDSE data sets. (https://github.com/ansible-collections/ibm_zos_core/pull/2103) +- zos_fetch - Updated the documentation to correctly state what the default behavior of the module is. (https://github.com/ansible-collections/ibm_zos_core/pull/2047). +- zos_find - Adds functionality to find migrated data sets. - Adds functionality to find different types of data sets at the same time. (https://github.com/ansible-collections/ibm_zos_core/pull/2073). +- zos_job_output - Adds new fields cpu_time, origin_node and execution_node to response. (https://github.com/ansible-collections/ibm_zos_core/pull/2056). +- zos_job_query - Adds new fields cpu_time, origin_node and execution_node to response. (https://github.com/ansible-collections/ibm_zos_core/pull/2056). +- zos_job_submit - Adds new fields cpu_time, origin_node and execution_node to response. (https://github.com/ansible-collections/ibm_zos_core/pull/2056). +- zos_mvs_raw - Before this addition, you could not put anything in columns 1 or 2, were reserved for JCL processing. Change now allows add reserved_cols option and validate that the module get access to modify dd_content option base on the value, if not retain the previous behavior or work. (https://github.com/ansible-collections/ibm_zos_core/pull/2086) +- zos_mvs_raw - Adds support for volume data definition. (https://github.com/ansible-collections/ibm_zos_core/pull/2194) +- zos_stat - Added support to recall migrated data sets and return its attributes. (https://github.com/ansible-collections/ibm_zos_core/pull/2075) +- zos_stat - Adds new fields that describe the type of the resource that was queried. 
These new fields are `isfile`, `isdataset`, `isaggregate` and `isgdg`. (https://github.com/ansible-collections/ibm_zos_core/pull/2137) +- zos_stat - Adds support to query data sets using their aliases. (https://github.com/ansible-collections/ibm_zos_core/pull/2061) +- zos_stat - Module now returns whether the resource queried exists on the managed node with the `exists` field inside `stat`. (https://github.com/ansible-collections/ibm_zos_core/pull/2137) +- zos_unarchive - Added encoding support for the unarchive module. This allows users to encode the files after unarchiving them in a particular encoding. (https://github.com/ansible-collections/ibm_zos_core/pull/2105) + +Bugfixes +-------- + +- zos_backup_restore - Return value `backup_name` was empty upon successful result. Fix now returns `backup_name` populated. (https://github.com/ansible-collections/ibm_zos_core/pull/2040). +- zos_data_set - Attempting to create a data set with the same name on a different volume did not work, nor did it report a failure. The fix now informs the user that if the data set is cataloged on a different volume, it needs to be uncataloged before using the data set module to create a new data set on a different volume. (https://github.com/ansible-collections/ibm_zos_core/pull/2057). +- zos_fetch - Previously, the use of `become` would result in a permissions error while trying to fetch a data set or a member. Fix now allows a user to escalate privileges when fetching resources. (https://github.com/ansible-collections/ibm_zos_core/pull/2079) +- zos_lineinfile - Return values ``return_content`` and ``backup_name`` were not always being returned. Fix now ensures that these values are always present in the module's response. (https://github.com/ansible-collections/ibm_zos_core/pull/2120) +- zos_lineinfile - The module would report a false negative when certain special characters were present in the `line` option. Fix now reports the successful operation. 
(https://github.com/ansible-collections/ibm_zos_core/pull/2080). +- zos_mount - FSUMF168 return in stderror means that the mount dataset wouldn't resolve. While this shows a catalog or volume issue, it should not impact our search for an existing mount. Added handling to the df call, so that FSUMF168 are ignored. (https://github.com/ansible-collections/ibm_zos_core/pull/2060). + +New Modules +----------- + +- ibm.ibm_zos_core.zos_replace - Replace all instances of a pattern within a file or data set. + +v1.14.1 +======= Release Summary --------------- diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 07cf77545f..e93001466a 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -853,12 +853,12 @@ releases: the collections `release notes `__" fragments: - - 2196-fix-copy-permission-issues.yml - - v1.14.1_summary.yml - release_date: "2025-07-01" + - 2196-fix-copy-permission-issues.yml + - v1.14.1_summary.yml + release_date: '2025-07-01' 1.15.0: changes: - release_summary: "Release Date: '2025-09-30' + release_summary: 'Release Date: ''2025-09-30'' This changelog describes all changes made to the modules and plugins included @@ -866,10 +866,10 @@ releases: For additional details such as required dependencies and availability review - the collections `release notes `__" + the collections `release notes `__' fragments: - - v1.15.0_summary.yml - release_date: "2025-09-22" + - v1.15.0_summary.yml + release_date: '2025-09-22' 1.15.0-beta.1: changes: bugfixes: diff --git a/changelogs/fragments/2206-zos_data_set-interface-update.yml b/changelogs/fragments/2206-zos_data_set-interface-update.yml new file mode 100644 index 0000000000..5b9553e33f --- /dev/null +++ b/changelogs/fragments/2206-zos_data_set-interface-update.yml @@ -0,0 +1,3 @@ +minor_changes: + - zos_data_set - Adds return value ``data_sets`` which contains the attributes of all data sets created. 
+ (https://github.com/ansible-collections/ibm_zos_core/pull/2206) diff --git a/changelogs/fragments/2347-zos_mount_persistant_delete_all_content.yaml b/changelogs/fragments/2347-zos_mount_persistant_delete_all_content.yaml new file mode 100644 index 0000000000..947ebaebf7 --- /dev/null +++ b/changelogs/fragments/2347-zos_mount_persistant_delete_all_content.yaml @@ -0,0 +1,5 @@ +bugfixes: + - zos_mount - Previously, using the persistent parameter caused the module to clear the entire member or data set + provided even without a pattern match, leaving it empty despite a successful mount. + The fix now ensures content is only deleted when a pattern match is detected, preserving existing configuration. + (https://github.com/ansible-collections/ibm_zos_core/pull/2347). diff --git a/changelogs/fragments/2361-update-zos_data_set-message.yml b/changelogs/fragments/2361-update-zos_data_set-message.yml new file mode 100644 index 0000000000..a65639be1d --- /dev/null +++ b/changelogs/fragments/2361-update-zos_data_set-message.yml @@ -0,0 +1,3 @@ +breaking_changes: + - zos_data_set - Return value ``message`` is deprecated in favor of ``msg``. + (https://github.com/ansible-collections/ibm_zos_core/pull/2361). 
\ No newline at end of file diff --git a/galaxy.yml b/galaxy.yml index 5efbdb52ec..1db12fea8d 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -99,5 +99,5 @@ build_ignore: - tests/sanity/ignore-2.14.txt - venv* - ansible_collections - - "*.log" - - "*.sh" + - '*.log' + - '*.sh' diff --git a/meta/ibm_zos_core_meta.yml b/meta/ibm_zos_core_meta.yml index e8dc7119b2..5256a8b9a0 100644 --- a/meta/ibm_zos_core_meta.yml +++ b/meta/ibm_zos_core_meta.yml @@ -1,8 +1,10 @@ name: ibm_zos_core version: "1.15.0" managed_requirements: - - name: "IBM Open Enterprise SDK for Python" - version: ">=3.12" - - name: "Z Open Automation Utilities" - version: - - ">=1.3.5" + - + name: "IBM Open Enterprise SDK for Python" + version: ">=3.12" + - + name: "Z Open Automation Utilities" + version: + - ">=1.3.5" diff --git a/plugins/module_utils/data_set.py b/plugins/module_utils/data_set.py index 74e56e2aed..70296a5811 100644 --- a/plugins/module_utils/data_set.py +++ b/plugins/module_utils/data_set.py @@ -204,6 +204,7 @@ def ensure_present( arguments.pop("replace", None) present = False changed = False + data_set = None if DataSet.data_set_cataloged(name, tmphlq=tmp_hlq): present = True # Validate volume conflicts when: @@ -222,7 +223,7 @@ def ensure_present( if not present: try: - DataSet.create(**arguments) + changed, data_set = DataSet.create(**arguments) except DatasetCreateError as e: raise_error = True # data set exists on volume @@ -236,11 +237,11 @@ def ensure_present( raise if present: if not replace: - return changed - DataSet.replace(**arguments) + return changed, data_set + changed, data_set = DataSet.replace(**arguments) if type.upper() == "ZFS": DataSet.format_zfs(name) - return True + return changed, data_set @staticmethod def ensure_absent(name, volumes=None, tmphlq=None, noscratch=False): @@ -1249,7 +1250,8 @@ def replace( """ arguments = locals() DataSet.delete(name) - DataSet.create(**arguments) + changed, data_set = DataSet.create(**arguments) + return changed, data_set 
@staticmethod def _build_zoau_args(**kwargs): @@ -1417,7 +1419,7 @@ def create( msg="Unable to verify the data set was created. Received DatasetVerificationError from ZOAU.", ) changed = data_set is not None - return changed + return changed, data_set @staticmethod def delete(name, noscratch=False): @@ -2723,7 +2725,9 @@ def ensure_present(self, tmp_hlq=None, replace=False, force=False): "tmp_hlq": tmp_hlq, "force": force, } - rc = DataSet.ensure_present(**arguments) + rc, data_set = DataSet.ensure_present(**arguments) + if data_set is not None: + self.merge_attributes_from_zoau_data_set(data_set) self.set_state("present") return rc @@ -2843,6 +2847,37 @@ def set_state(self, new_state): raise ValueError(f"State {self.state} not supported for MVSDataset class.") return True + def merge_attributes_from_zoau_data_set(self, zoau_data_set): + # print(zoau_data_set) + self.name = zoau_data_set.name + self.record_format = zoau_data_set.record_format and zoau_data_set.record_format.lower() + self.record_length = zoau_data_set.record_length + self.volumes = zoau_data_set.volume and zoau_data_set.volume.lower() + self.block_size = zoau_data_set.block_size + self.type = zoau_data_set.type and zoau_data_set.type.lower() + + @property + def attributes(self): + data_set_attributes = { + "name": self.name, + "state": self.state, + "type": self.data_set_type, + "space_primary": self.space_primary, + "space_secondary": self.space_secondary, + "space_type": self.space_type, + "record_format": self.record_format, + "sms_storage_class": self.sms_storage_class, + "sms_data_class": self.sms_data_class, + "sms_management_class": self.sms_management_class, + "record_length": self.record_length, + "block_size": self.block_size, + "directory_blocks": self.directory_blocks, + "key_offset": self.key_offset, + "key_length": self.key_length, + "volumes": self.volumes, + } + return data_set_attributes + class Member(): """Represents a member on z/OS. 
@@ -2899,6 +2934,15 @@ def ensure_present(self, replace=None, tmphlq=None): rc = DataSet.ensure_member_present(self.name, replace, tmphlq=tmphlq) return rc + @property + def attributes(self): + member_attributes = { + "name": self.name, + "parent_data_set_type": self.parent_data_set_type, + "data_set_type": self.data_set_type, + } + return member_attributes + class GenerationDataGroup(): """Represents a Generation Data Group base in z/OS. @@ -2947,8 +2991,7 @@ def __init__( self.data_set_type = "gdg" self.raw_name = name self.gdg = None - # Removed escaping since is not needed by the GDG python api. - # self.name = DataSet.escape_data_set_name(self.name) + self.state = 'present' @staticmethod def _validate_gdg_name(name): @@ -2977,6 +3020,7 @@ def create(self): fifo=self.fifo, ) self.gdg = gdg + self.state = 'present' return True def ensure_present(self, replace): @@ -3097,6 +3141,21 @@ def clear(self): gdg_view.clear() return True + @property + def attributes(self): + data_set_attributes = { + "name": self.name, + "state": self.state, + "type": self.data_set_type, + "empty": self.empty, + "extended": self.extended, + "fifo": self.fifo, + "limit": self.limit, + "purge": self.purge, + "scratch": self.scratch, + } + return data_set_attributes + def is_member(data_set): """Determine whether the input string specifies a data set member. 
diff --git a/plugins/modules/zos_archive.py b/plugins/modules/zos_archive.py index 158250ad4a..f14c9923b2 100644 --- a/plugins/modules/zos_archive.py +++ b/plugins/modules/zos_archive.py @@ -1328,7 +1328,7 @@ def _create_dest_data_set( if space_type is None: arguments.update(space_type="m") arguments.pop("self") - changed = data_set.DataSet.ensure_present(**arguments) + changed, zoau_data_set = data_set.DataSet.ensure_present(**arguments) return arguments["name"], changed def create_dest_ds(self, name): diff --git a/plugins/modules/zos_data_set.py b/plugins/modules/zos_data_set.py index 4c029e8f4c..5a110e12bf 100644 --- a/plugins/modules/zos_data_set.py +++ b/plugins/modules/zos_data_set.py @@ -812,11 +812,127 @@ - "222222" """ RETURN = r""" -names: - description: The data set names, including temporary generated data set names, in the order provided to the module. +data_sets: + description: The affected data set, including temporary generated data set, in the order provided to the module. returned: always type: list elements: str + contains: + name: + description: The data set name. + type: str + returned: always + state: + description: The final state desired for specified data set. + type: str + returned: always + type: + description: The data set type. + type: str + returned: always + space_primary: + description: The amount of primary space allocated for the dataset. + type: int + returned: always + space_secondary: + description: The amount of secondary space allocated for the dataset. + type: int + returned: always + space_type: + description: The unit of measurement used when defining primary and secondary space. + type: str + returned: always + record_format: + description: The format of the data set. + type: str + sample: fb + returned: always + sms_storage_class: + description: + - The storage class for the SMS-managed dataset. + - Returned empty if the data set was not specified as SMS-managed dataset. 
+ type: str + returned: always + sms_data_class: + description: + - The data class for an SMS-managed dataset. + - Returned empty if the data set was not specified as SMS-managed dataset. + type: str + returned: always + sms_management_class: + description: + - The management class for an SMS-managed dataset. + - Returned empty if the data set was not specified as SMS-managed dataset. + type: str + returned: always + record_length: + description: The length, in bytes, of each record in the data set. + type: int + returned: always + block_size: + description: The block size used for the data set. + type: int + returned: always + directory_blocks: + description: + - The number of directory blocks to allocate to the data set. + type: int + returned: always + key_offset: + description: The key offset used when creating a KSDS data set. + type: int + returned: always + key_length: + description: The key length used when creating a KSDS data set. + type: int + returned: always + empty: + description: + - I(empty) attribute for Generation Data Groups. + - Returned empty if the data set provided was not defined as a GDG. + type: bool + returned: always + extended: + description: + - I(extended) attribute for Generation Data Groups. + - Returned empty if the data set provided was not defined as a GDG. + type: bool + returned: always + fifo: + description: + - I(fifo) attribute for Generation Data Groups. + - Returned empty if the data set provided was not defined as a GDG. + type: bool + returned: always + limit: + description: + - I(limit) attribute for Generation Data Groups. + - Returned empty if the data set provided was not defined as a GDG. + type: int + returned: always + purge: + description: + - I(purge) attribute for Generation Data Groups. + - Returned empty if the data set provided was not defined as a GDG. + type: bool + returned: always + scratch: + description: + - I(scratch) attribute for Generation Data Groups. 
+ - Returned empty if the data set provided was not defined as a GDG. + type: bool + returned: always + volumes: + description: + - Specifies the name of the volume(s) where the data set is located. + - Returned empty if volume was not provided. + type: list + returned: always +msg: + description: A string with a generic message relayed to the user. + returned: always + type: str + sample: Error while gathering data set information """ from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.better_arg_parser import ( @@ -1713,14 +1829,51 @@ def parse_and_validate_args(params): return parsed_args -def determine_scratch(data_set_params): - scratch = data_set_params.get("scratch") - if scratch is None: - if data_set_params.get("type") == "gdg" and data_set_params.get("state") == "present": - scratch = False - elif data_set_params.get("state") == "absent": - scratch = True - return scratch +def build_return_schema(data_set_list): + """ Builds return values schema with empty values. + + Parameters + ---------- + data_set_list : dict + List of data sets. + + Returns + ------- + dict + Dictionary used to return values at execution finalization. 
+ """ + data_set_schema = { + "name": "", + "state": "", + "type": "", + "space_primary": "", + "space_secondary": "", + "space_type": "", + "record_format": "", + "sms_storage_class": "", + "sms_data_class": "", + "sms_management_class": "", + "record_length": "", + "block_size": "", + "directory_blocks": "", + "key_offset": "", + "key_length": "", + "empty": "", + "extended": "", + "fifo": "", + "limit": "", + "purge": "", + "scratch": "", + "volumes": [], + } + + data_sets = [data_set_schema.copy() | data_set.attributes for data_set in data_set_list] + result = { + "data_sets": data_sets, + "msg": "", + "failed": False + } + return result def run_module(): @@ -1889,7 +2042,7 @@ def run_module(): default=False ), ) - result = dict(changed=False, message="", names=[]) + result = dict(changed=False) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) @@ -1924,6 +2077,8 @@ def run_module(): if module.params.get("record_format") is not None: del module.params["record_format"] + data_set_list = [] + if not module.check_mode: try: # Update the dictionary for use by better arg parser by adding the @@ -1932,7 +2087,6 @@ def run_module(): module_args['state']['dependencies'] = ['batch'] params = parse_and_validate_args(module.params) data_set_param_list = get_individual_data_set_parameters(params) - result["names"] = [d.get("name", "") for d in data_set_param_list] for data_set_params in data_set_param_list: data_set_params["noscratch"] = not determine_scratch(data_set_params) @@ -1946,11 +2100,12 @@ def run_module(): force=data_set_params.get("force"), noscratch=data_set_params.get("noscratch"), ) + data_set_list.append(data_set) result["changed"] = result["changed"] or current_changed + # Build return schema from created data sets. 
+ result.update(build_return_schema(data_set_list)) except Exception as e: module.fail_json(msg=repr(e), **result) - if module.params.get("replace"): - result["changed"] = True module.exit_json(**result) diff --git a/plugins/modules/zos_job_output.py b/plugins/modules/zos_job_output.py index 1a946d1d55..93874a2e46 100644 --- a/plugins/modules/zos_job_output.py +++ b/plugins/modules/zos_job_output.py @@ -517,7 +517,17 @@ def run_module(): job_id=dict(type="job_identifier", required=False), job_name=dict(type="job_identifier", required=False), owner=dict(type="str", required=False), - ddname=dict(type="str", required=False), + dd_name=dict( + type="str", + required=False, + aliases=['ddname'], + deprecated_aliases=[ + dict( + name='ddname', + version='3.0.0', + collection_name='ibm.ibm_zos_core', + ) + ],), sysin_dd=dict(type="bool", required=False, default=False), ) diff --git a/plugins/modules/zos_mount.py b/plugins/modules/zos_mount.py index 825c76a8fe..2b4d14b238 100644 --- a/plugins/modules/zos_mount.py +++ b/plugins/modules/zos_mount.py @@ -1048,30 +1048,26 @@ def run_module(module, arg_def): stderr=str(res_args), ) - bk_ds = datasets.tmp_name(high_level_qualifier=tmphlq) - datasets.create(name=bk_ds, dataset_type="SEQ") - new_str = get_str_to_keep(dataset=name, src=src) - rc_write = 0 + if new_str: + modified_str = [line for line in new_str if line.strip() or line.lstrip()] + + rc_write = 0 - try: - for line in new_str: - rc_write = datasets.write(dataset_name=bk_ds, content=line.rstrip(), append=True) + try: + # zoau_io.zopen on mode w allow delete all the content inside the dataset allowing to write the new one + with zoau_io.zopen(f"//'{name}'", "w", "cp1047", recfm="*") as ds: + pass + full_text = "\n".join(modified_str) + rc_write = datasets.write(dataset_name=name, content=full_text, append=True, force=True) if rc_write != 0: raise Exception("Non zero return code from datasets.write.") - except Exception as e: - datasets.delete(dataset=bk_ds) - 
module.fail_json( - msg="Unable to write on persistent data set {0}. {1}".format(name, e), - stderr=str(res_args), - ) - - try: - datasets.delete(dataset=name) - datasets.copy(source=bk_ds, target=name) - finally: - datasets.delete(dataset=bk_ds) + except Exception as e: + module.fail_json( + msg="Unable to write on persistent data set {0}. {1}".format(name, e), + stderr=str(res_args), + ) if will_mount: d = datetime.today() diff --git a/plugins/modules/zos_started_task.py b/plugins/modules/zos_started_task.py index 295cdefa94..27e6d9d95c 100644 --- a/plugins/modules/zos_started_task.py +++ b/plugins/modules/zos_started_task.py @@ -19,1108 +19,287 @@ DOCUMENTATION = r""" module: zos_started_task -version_added: 2.0.0 +version_added: 1.16.0 author: - - "Ravella Surendra Babu (@surendrababuravella)" + - "Ravella Surendra Babu (@surendra.ravella582)" short_description: Perform operations on started tasks. description: - start, display, modify, cancel, force and stop a started task options: - arm: + asid: description: - - I(arm) indicates to execute normal task termination routines without causing address space destruction. - - Only applicable when I(state) is C(forced), otherwise ignored. + - I(asid) is an unique address space identifier which gets assigned to each running started task. + - I(asid) is a unique address space identifier which gets assigned to each running started task. required: false - type: bool - armrestart: - description: - - Indicates that the batch job or started task should be automatically restarted after CANCEL or FORCE - completes, if it is registered as an element of the automatic restart manager. If the job or task is - not registered or if you do not specify this parameter, MVS will not automatically restart the job or task. - - Only applicable when I(state) is C(cancelled) or C(forced), otherwise ignored. 
- required: false - type: bool - asidx: + type: str + device_type: description: - - When I(state) is C(cancelled), C(stopped) or C(forced), I(asidx) is the hexadecimal address space - identifier of the work unit you want to cancel, stop or force. - - Only applicable when I(state) is C(stopped), C(cancelled), or C(forced), otherwise ignored. + - I(device_type) is the type of the output device (if any) associated with the task. required: false type: str -# device_type: -# description: -# - Type of the output device (if any) associated with the task. -# - Only applicable when I(state) is C(started), otherwise ignored. -# required: false -# type: str -# device_number: -# description: -# - Number of the device to be started. A device number is 3 or 4 hexadecimal digits. A slash (/) must -# precede a 4-digit number but not a 3-digit number. -# - Only applicable when I(state) is C(started), otherwise ignored. -# required: false -# type: str - dump: + device_number: description: - - Whether to perform a dump. The type of dump (SYSABEND, SYSUDUMP, or SYSMDUMP) - depends on the JCL for the job. - - Only applicable when I(state) is C(cancelled), otherwise ignored. + - I(device_number) is the number of the device that starts. A device number is 3 or 4 hexadecimal digits. + Ensure that a slash precedes a 4-digit number but it is not before a 3-digit number. + - I(device_number) is the number of the device to be started. A device number is 3 or 4 hexadecimal digits. + A slash (/) must precede a 4-digit number but is not before a 3-digit number. required: false - type: bool identifier_name: description: - - Option I(identifier_name) is the name that identifies the task. This name can be up to 8 - characters long. The first character must be alphabetical. + - I(identifier_name) is the name that identifies the task that starts. This name can be 8 characters long. + Ensure that the first character is alphabetical. 
+ - I(identifier_name) is the name that identifies the task to be started. This name can be up to 8 characters long. + The first character must be alphabetical. required: false type: str aliases: - - identifier + - identifier job_account: description: - - Specifies accounting data in the JCL JOB statement for the started task. If the source JCL - already had accounting data, the value that is specified on this parameter overrides it. - - Only applicable when I(state) is C(started), otherwise ignored. + - I(job_account) specifies accounting data in the JCL JOB statement for the started task. + If the source JCL was a job and has already accounting data, the value that is specified on this parameter + overrides the accounting data in the source JCL. required: false type: str job_name: description: - - When I(state) is started, this is the name which should be assigned to a started task - while starting it. If I(job_name) is not specified, then I(member_name) is used as job's name. - - When I(state) is C(displayed), C(modified), C(cancelled), C(stopped), or C(forced), I(job_name) is the - started task name. + - I(job_name) is a name which is assigned to a started task when you start it. If job_name is not specified, then member_name is used as job_name. + - I(job_name) is a name which should be assigned to a started task while starting it. If job_name is not specified, + then member_name is used as job_name. required: false type: str aliases: - - job - - task - - task_name + - job + - task + - task_name keyword_parameters: description: - - Any appropriate keyword parameter that you specify to override the corresponding parameter in the cataloged - procedure. The maximum length of each keyword=option pair is 66 characters. No individual value within this - field can be longer than 44 characters in length. - - Only applicable when I(state) is C(started), otherwise ignored. 
+ - Any appropriate keyword parameter that you specify to override the corresponding parameter in the cataloged procedure. + The maximum length of each keyword=option is 66 characters. No individual value within this field can be longer than + 44 characters in length. required: false - type: dict + type: str member_name: description: - - Name of a member of a partitioned data set that contains the source JCL for the task to be started. The member - can be either a job or a cataloged procedure. - - Only applicable when I(state) is C(started), otherwise ignored. + - I(member_name) is a 1 - 8 character name of a member of a partitioned data set that contains the source JCL + for the task that starts. The member can be either a job or a cataloged procedure. + - I(member_name) is a 1 - 8 character name of a member of a partitioned data set that contains the source JCL + for the task to be started. The member can be either a job or a cataloged procedure. required: false type: str aliases: - - member + - member + operation: + description: + - The started task operation which needs to be performed. + - > + If I(operation=start) and the data set does not exist on the managed node, + no action is taken, and the module completes successfully with I(changed=False). + required: true + type: str + choices: + - start + - stop + - modify + - display + - force + - cancel parameters: description: - - Program parameters passed to the started program. - - Only applicable when I(state) is C(started) or C(modified), otherwise ignored. + - The program parameters are passed to the started program, which can be a list in parentheses or a string in single quotation marks. + - Program parameters passed to the started program, which might be a list in parentheses or a string in single quotation marks. required: false - type: list - elements: str -# retry_force: -# description: -# - Indicates whether retry will be attempted on ABTERM(abnormal termination). 
-# - I(tcb_address) is mandatory to use I(retry_force). -# - Only applicable when I(state) is C(forced), otherwise ignored. -# required: false -# type: bool + type: str reus_asid: description: - - When I(reus_asid) is C(True) and REUSASID(YES) is specified in the DIAGxx parmlib member, a reusable ASID is assigned - to the address space created by the START command. If I(reus_asid) is not specified or REUSASID(NO) is specified in - DIAGxx, an ordinary ASID is assigned. - - Only applicable when I(state) is C(started), otherwise ignored. + - When REUSASID=YES is specified on the START command and REUSASID(YES) is specified in the DIAGxx parmlib member, + a reusable ASID is assigned to the address space created by the START command. If REUSASID=YES is not specified + on the START command or REUSASID(NO) is specified in DIAGxx, an ordinary ASID is assigned. required: false - type: bool - state: - description: - - I(state) should be the desired state of the started task after the module is executed. - - If I(state) is C(started) and the respective member is not present on the managed node, then error will be thrown with C(rc=1), - C(changed=false) and I(stderr) which contains error details. - - If I(state) is C(cancelled), C(modified), C(displayed), C(stopped) or C(forced) and the started task is not running on the managed node, - then error will be thrown with C(rc=1), C(changed=false) and I(stderr) contains error details. - - If I(state) is C(displayed) and the started task is running, then the module will return the started task details along with - C(changed=true). - required: True type: str choices: - - started - - displayed - - modified - - cancelled - - stopped - - forced - subsystem: - description: - - The name of the subsystem that selects the task for processing. The name must be 1-4 - characters long, which are defined in the IEFSSNxx parmlib member, and the subsystem must - be active. - - Only applicable when I(state) is C(started), otherwise ignored. 
- required: false - type: str - task_id: + - 'YES' + - 'NO' + subsystem_name: description: - - The started task id starts with STC. - - Only applicable when I(state) is C(displayed), C(modified), C(cancelled), C(stopped), or C(forced), otherwise ignored. + - The name of the subsystem that selects the task for processing. + Ensure that the name is 1 - 4 characters, which are defined in the IEFSSNxx parmlib member. + Also, ensure that the subsystem is active. + - The name of the subsystem that selects the task for processing. The name must be 1 - 4 characters, + which are defined in the IEFSSNxx parmlib member, and the subsystem must be active. required: false type: str -# tcb_address: -# description: -# - 6-digit hexadecimal TCB address of the task to terminate. -# - Only applicable when I(state) is C(forced), otherwise ignored. -# required: false -# type: str -# volume: -# description: -# - If I(device_type) is a tape or direct-access device, the serial number of the volume, -# mounted on the device. -# - Only applicable when I(state) is C(started), otherwise ignored. -# required: false -# type: str - userid: + volume_serial: description: - - The user ID of the time-sharing user you want to cancel or force. - - Only applicable when I(state) is C(cancelled) or C(forced), otherwise ignored. + - If devicetype is a tape or direct-access device, the volume serial number of the volume mounted on the device. required: false type: str verbose: description: - - When C(verbose=true), the module will return system logs that describe the task's execution. - This option can return a big response depending on system load, also it could surface other - program's activity. + - Return system logs that describe the process of the task. + - Return System logs that describe the task's execution. required: false type: bool default: false - wait_time: - description: - - Total time that the module will wait for a submitted task, measured in seconds.
- The time begins when the module is executed on the managed node. Default value of 0 means to wait the default - amount of time supported by the opercmd utility. + wait_time_s: required: false - default: 0 + default: 5 type: int - -attributes: - action: - support: none - description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. - async: - support: full - description: Supports being used with the ``async`` keyword. - check_mode: - support: full - description: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + description: + - The option I(wait_time_s) is the total time that module L(zos_started_task,./zos_started_task.html) waits for a submitted task. + The time begins when the module runs on the managed node. + - Option I(wait_time_s) is the total time that module + L(zos_started_task,./zos_started_task.html) will wait for a submitted task. The time begins when the module is executed + on the managed node. """ EXAMPLES = r""" -- name: Start a started task using a member in a partitioned data set. - zos_started_task: - state: "started" - member: "PROCAPP" -- name: Start a started task using a member name and giving it an identifier. - zos_started_task: - state: "started" - member: "PROCAPP" - identifier: "SAMPLE" -- name: Start a started task using both a member and a job name. - zos_started_task: - state: "started" - member: "PROCAPP" - job_name: "SAMPLE" -- name: Start a started task and enable verbose output. +- name: Start a started task using member name. zos_started_task: - state: "started" member: "PROCAPP" - job_name: "SAMPLE" - verbose: True -- name: Start a started task specifying the subsystem and enabling a reusable ASID. - zos_started_task: - state: "started" - member: "PROCAPP" - subsystem: "MSTR" - reus_asid: "YES" -- name: Display a started task using a started task name.
- zos_started_task: - state: "displayed" - task_name: "PROCAPP" -- name: Display a started task using a started task id. - zos_started_task: - state: "displayed" - task_id: "STC00012" -- name: Display all started tasks that begin with an s using a wildcard. - zos_started_task: - state: "displayed" - task_name: "s*" -- name: Display all started tasks. - zos_started_task: - state: "displayed" - task_name: "all" -- name: Cancel a started task using task name. - zos_started_task: - state: "cancelled" - task_name: "SAMPLE" -- name: Cancel a started task using a started task id. - zos_started_task: - state: "cancelled" - task_id: "STC00093" -- name: Cancel a started task using it's task name and ASID. - zos_started_task: - state: "cancelled" - task_name: "SAMPLE" - asidx: 0014 -- name: Modify a started task's parameters. - zos_started_task: - state: "modified" - task_name: "SAMPLE" - parameters: ["XX=12"] -- name: Modify a started task's parameters using a started task id. - zos_started_task: - state: "modified" - task_id: "STC00034" - parameters: ["XX=12"] -- name: Stop a started task using it's task name. - zos_started_task: - state: "stopped" - task_name: "SAMPLE" -- name: Stop a started task using a started task id. - zos_started_task: - state: "stopped" - task_id: "STC00087" -- name: Stop a started task using it's task name, identifier and ASID. - zos_started_task: - state: "stopped" - task_name: "SAMPLE" - identifier: "SAMPLE" - asidx: 00A5 -- name: Force a started task using it's task name. - zos_started_task: - state: "forced" - task_name: "SAMPLE" -- name: Force a started task using it's task id. - zos_started_task: - state: "forced" - task_id: "STC00065" + operation: "start" """ RETURN = r""" -changed: - description: - - True if the state was changed, otherwise False. - returned: always - type: bool -cmd: - description: - - Command executed via opercmd. 
- returned: changed - type: str - sample: S SAMPLE -msg: - description: - - Failure or skip message returned by the module. - returned: failure or skipped - type: str - sample: Command parameters are invalid. -rc: - description: - - The return code is 0 when command executed successfully. - - The return code is 1 when opercmd throws any error. - - The return code is 4 when task_id format is invalid. - - The return code is 5 when any parameter validation failed. - - The return code is 8 when started task is not found using task_id. - returned: changed - type: int - sample: 0 -state: - description: - - The final state of the started task, after execution. - returned: success - type: str - sample: S SAMPLE -stderr: - description: - - The STDERR from the command, may be empty. - returned: failure - type: str - sample: An error has occurred. -stderr_lines: - description: - - List of strings containing individual lines from STDERR. - returned: failure - type: list - sample: ["An error has occurred"] -stdout: - description: - - The STDOUT from the command, may be empty. - returned: success - type: str - sample: ISF031I CONSOLE OMVS0000 ACTIVATED. -stdout_lines: - description: - - List of strings containing individual lines from STDOUT. - returned: success - type: list - sample: ["Allocation to SYSEXEC completed."] -tasks: - description: - - The output information for a list of started tasks matching specified criteria. - - If no started task is found then this will return empty. - returned: success - type: list - elements: dict - contains: - asidx: - description: - - Address space identifier (ASID), in hexadecimal. - type: str - sample: 0054 - cpu_time: - description: - - The processor time used by the address space, including the initiator. This time does not include SRB time. - - I(cpu_time) format is hhhhh.mm.ss.SSS(hours.minutes.seconds.milliseconds). - - C(********) when time exceeds 100000 hours. - - C(NOTAVAIL) when the TOD clock is not working. 
- type: str - sample: 00000.00.00.003 - elapsed_time: - description: - - The processor time used by the address space, including the initiator. This time does not include SRB time. - - I(elapsed_time) format is hhhhh.mm.ss.SSS(hours.minutes.seconds.milliseconds). - - C(********) when time exceeds 100000 hours. - - C(NOTAVAIL) when the TOD clock is not working. - type: str - sample: 00003.20.23.013 - started_time: - description: - - The time when the started task started. - - C(********) when time exceeds 100000 hours. - - C(NOTAVAIL) when the TOD clock is not working. - type: str - sample: "2025-09-11 18:21:50.293644+00:00" - task_id: - description: - - The started task id. - type: str - sample: STC00018 - task_identifier: - description: - - The name of a system address space. - - The name of a step, for a job or attached APPC transaction program attached by an initiator. - - The identifier of a task created by the START command. - - The name of a step that called a cataloged procedure. - - C(STARTING) if initiation of a started job, system task, or attached APPC transaction program is incomplete. - - C(*MASTER*) for the master address space. - - The name of an initiator address space. - type: str - sample: SPROC - task_name: - description: - - The name of the started task. - type: str - sample: SAMPLE -verbose_output: - description: - - If C(verbose=true), the system logs related to the started task executed state will be shown. - returned: success - type: str - sample: NC0000000 ZOSMACHINE 25240 12:40:30.15 OMVS0000 00000210.... 
+ """ from ansible.module_utils.basic import AnsibleModule import traceback import re -from datetime import datetime, timedelta -import re from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( better_arg_parser ) +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( + zoau_version_checker +) from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import ( ZOAUImportError ) try: - from zoautil_py import opercmd, zsystem, jobs + from zoautil_py import opercmd, zsystem except ImportError: - opercmd = ZOAUImportError(traceback.format_exc()) - zsystem = ZOAUImportError(traceback.format_exc()) - jobs = ZOAUImportError(traceback.format_exc()) + zoau_exceptions = ZOAUImportError(traceback.format_exc()) + +# try: +# from zoautil_py import exceptions as zoau_exceptions +# except ImportError: +# zoau_exceptions = ZOAUImportError(traceback.format_exc()) -def execute_command(operator_cmd, started_task_name, asidx, execute_display_before=False, timeout_s=0, **kwargs): +def execute_command(operator_cmd, started_task_name, execute_display_before=False, execute_display_after=False, timeout_s=1, *args, **kwargs): """Execute operator command. Parameters ---------- operator_cmd : str Operator command. - started_task_name : str - Name of the started task. - asidx : string - The HEX adress space identifier. - execute_display_before: bool - Indicates whether display command need to be executed before actual command or not. timeout_s : int Timeout to wait for the command execution, measured in centiseconds. + *args : dict + Arguments for the command. **kwargs : dict More arguments for the command. Returns ------- - tuple - Tuple containing the RC, standard out, standard err of the - query script and started task parameters. + OperatorQueryResult + The result of the command. 
""" - task_params = [] + task_params = {} # as of ZOAU v1.3.0, timeout is measured in centiseconds, therefore: timeout_c = 100 * timeout_s if execute_display_before: - task_params = execute_display_command(started_task_name, asidx) - response = opercmd.execute(operator_cmd, timeout_c, **kwargs) + task_params = execute_display_command(started_task_name, timeout_c) + + response = opercmd.execute(operator_cmd, timeout_c, *args, **kwargs) + + if execute_display_after: + task_params = execute_display_command(started_task_name, timeout_c) rc = response.rc stdout = response.stdout_response stderr = response.stderr_response return rc, stdout, stderr, task_params - -def execute_display_command(started_task_name, asidx=None, task_params_before=None, timeout=0): - """Execute operator display command. - - Parameters - ---------- - started_task_name : str - Name of the started task. - asidx : string - The HEX adress space identifier. - task_params_before: list - List of started task details which have same started task name. - timeout : int - Timeout to wait for the command execution, measured in centiseconds. - - Returns - ------- - list - List contains extracted parameters from display command output of started task - """ - cmd = f"d a,{started_task_name}" - display_response = opercmd.execute(cmd, timeout) +def execute_display_command(started_task_name, timeout_c): + cmd = "d a,"+started_task_name + display_response = opercmd.execute(cmd, timeout_c) task_params = [] if display_response.rc == 0 and display_response.stderr_response == "": - task_params = extract_keys(display_response.stdout_response, asidx, task_params_before) + task_params = extract_keys(display_response.stdout_response) return task_params -def validate_and_prepare_start_command(module): - """Validates parameters and creates start command - - Parameters - ---------- - module : dict - The started task start command parameters. - - Returns - ------- - started_task_name - The name of started task. 
- cmd - The start command in string format. - """ - member = module.params.get('member_name') - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - job_account = module.params.get('job_account') - parameters = module.params.get('parameters', []) - device_type = module.params.get('device_type') or "" - device_number = module.params.get('device_number') or "" - volume_serial = module.params.get('volume') or "" - subsystem_name = module.params.get('subsystem') - reus_asid = '' - if module.params.get('reus_asid') is not None: - if module.params.get('reus_asid'): - reus_asid = 'YES' - else: - reus_asid = 'NO' - keyword_parameters = module.params.get('keyword_parameters') - keyword_parameters_string = "" - device = device_type if device_type else device_number - # Validations - if job_account and len(job_account) > 55: - module.fail_json( - rc=5, - msg="The length of job_account exceeded 55 characters.", - changed=False - ) - if device_number: - devnum_len = len(device_number) - if devnum_len not in (3, 5) or (devnum_len == 5 and not device_number.startswith("/")): - module.fail_json( - rc=5, - msg="device_number should be 3 or 4 characters long and preceded by / when it is 4 characters long.", - changed=False - ) - if subsystem_name and len(subsystem_name) > 4: - module.fail_json( - rc=5, - msg="The subsystem_name must be 1-4 characters long.", - changed=False - ) - if keyword_parameters: - for key, value in keyword_parameters.items(): - key_len = len(key) - value_len = len(value) - if key_len > 44 or value_len > 44 or key_len + value_len > 65: - module.fail_json( - rc=5, - msg="The length of a keyword=option exceeded 66 characters or length of an individual value exceeded 44 characters." 
- + "key:{0}, value:{1}".format(key, value), - changed=False - ) - else: - if keyword_parameters_string: - keyword_parameters_string = f"{keyword_parameters_string},{key}={value}" - else: - keyword_parameters_string = f"{key}={value}" - if job_name: - started_task_name = f"{job_name}.{job_name}" - elif member: - started_task_name = member - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - started_task_name = f"{started_task_name}.{started_task_name}" - else: - module.fail_json( - rc=5, - msg="member_name is missing which is mandatory to start a started task.", - changed=False - ) - if not member: - module.fail_json( - rc=5, - msg="member_name is missing which is mandatory to start a started task.", - changed=False - ) - if job_name and identifier: - module.fail_json( - rc=5, - msg="job_name and identifier_name are mutually exclusive while starting a started task.", - changed=False - ) - parameters_updated = "" - if parameters: - if len(parameters) == 1: - parameters_updated = f"'{parameters[0]}'" - else: - parameters_updated = f"({','.join(parameters)})" - - cmd = f"S {member}" +def prepare_start_command(member, identifier, job_name, job_account, device, volume_serial, subsystem_name, reus_asid, parameters, keyword_parameters): + cmd = 'S ' + member if identifier: - cmd = f"{cmd}.{identifier}" - if parameters: - cmd = f"{cmd},{device},{volume_serial},{parameters_updated}" - elif volume_serial: - cmd = f"{cmd},{device},{volume_serial}" - elif device: - cmd = f"{cmd},{device}" + cmd = cmd + "." 
+ identifier + "," + device + "," + volume_serial + "," + parameters if job_name: - cmd = f"{cmd},JOBNAME={job_name}" + cmd = cmd + ",jobname=" + job_name if job_account: - cmd = f"{cmd},JOBACCT={job_account}" + cmd = cmd + ",jobacct=" + job_account if subsystem_name: - cmd = f"{cmd},SUB={subsystem_name}" + cmd = cmd + ",SUB=" + subsystem_name if reus_asid: - cmd = f"{cmd},REUSASID={reus_asid}" - if keyword_parameters_string: - cmd = f"{cmd},{keyword_parameters_string}" - return started_task_name, cmd - - -def fetch_task_name_and_asidx(module, task_id): - """Executes JLS command and fetches task name - - Parameters - ---------- - module : dict - The started task display command parameters. - task_id : str - The started task id starts with STC. - - Returns - ------- - task_name - The name of started task. - """ - try: - task_details = jobs.fetch(task_id) - if not isinstance(task_details, jobs.Job): - module.fail_json( - rc=1, - msg=f"Fetching started task details using task_id: {task_id} is failed", - changed=False - ) - except Exception as err: - module.fail_json( - rc=err.response.rc, - msg=f"Fetching started task details using task_id: {task_id} is failed with ZOAU error: {err.response.stderr_response}", - changed=False - ) - task_name = task_details.name - asidx = f"{task_details.asid:04X}" - return task_name, asidx - - -def prepare_display_command(module): - """Validates parameters and creates display command - - Parameters - ---------- - module : dict - The started task display command parameters. - - Returns - ------- - started_task_name - The name of started task. - asidx - The address space identifier value, in hexadecimal. - cmd - The display command in string format. 
- """ - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - task_id = module.params.get('task_id') - started_task_name = "" - task_name = asidx = "" - if task_id: - task_name, asidx = fetch_task_name_and_asidx(module, task_id) - if task_name: - started_task_name = task_name - elif job_name: - started_task_name = job_name - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - module.fail_json( - rc=5, - msg="either job_name or task_id is mandatory to display started task details.", - changed=False - ) - cmd = f"D A,{started_task_name}" - return started_task_name, asidx, cmd - - -def prepare_stop_command(module, started_task=None, asidx=None, duplicate_tasks=False): - """Validates parameters and creates stop command - - Parameters - ---------- - module : dict - The started task stop command parameters. - started_task: string - The started task name. - asidx : string - The address space identifier value, in hexadecimal. - duplicate_tasks: bool - Indicates if duplicate tasks are running. - - Returns - ------- - started_task_name - The name of started task. - cmd - The stop command in string format. 
- """ - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - asidx = module.params.get('asidx') or asidx - started_task_name = "" - if started_task: - started_task_name = started_task - elif job_name: - started_task_name = job_name - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - started_task_name = f"{started_task_name}.{started_task_name}" - else: - module.fail_json( - rc=5, - msg="either job_name or task_id is mandatory to stop a running started task.", - changed=False - ) - cmd = f"P {started_task_name}" - if asidx or duplicate_tasks: - cmd = f"{cmd},A={asidx}" - return started_task_name, cmd - - -def prepare_modify_command(module, started_task=None): - """Validates parameters and creates modify command - - Parameters - ---------- - module : dict - The started task modify command parameters. - started_task: string - The started task name. - - Returns - ------- - started_task_name - The name of started task. - cmd - The modify command in string format. 
- """ - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - parameters = module.params.get('parameters') - started_task_name = "" - if started_task: - started_task_name = started_task - elif job_name: - started_task_name = job_name - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - started_task_name = f"{started_task_name}.{started_task_name}" - else: - module.fail_json( - rc=5, - msg="either job_name or task_id is mandatory to modify a running started task.", - changed=False - ) - if parameters is None: - module.fail_json( - rc=5, - msg="parameters are mandatory while modifying a started task.", - changed=False - ) - cmd = f"F {started_task_name},{','.join(parameters)}" - return started_task_name, cmd - - -def prepare_cancel_command(module, started_task=None, asidx=None, duplicate_tasks=False): - """Validates parameters and creates cancel command - - Parameters - ---------- - module : dict - The started task modify command parameters. - started_task: string - The started task name. - asidx : string - The address space identifier value, in hexadecimal. - duplicate_tasks: bool - Indicates if duplicate tasks are running. - - Returns - ------- - started_task_name - The name of started task. - cmd - The cancel command in string format. 
- """ - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - asidx = module.params.get('asidx') or asidx - dump = module.params.get('dump') - armrestart = module.params.get('armrestart') - userid = module.params.get('userid') - started_task_name = "" - if started_task: - started_task_name = started_task - elif job_name: - started_task_name = job_name - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - started_task_name = f"{started_task_name}.{started_task_name}" - elif userid: - started_task_name = f"U={userid}" - else: - module.fail_json( - rc=5, - msg="job_name, task_id and userid are missing, one of them is needed to cancel a task.", - changed=False - ) - if userid and armrestart: - module.fail_json( - rc=5, - msg="The ARMRESTART parameter is not valid with the U=userid parameter.", - changed=False - ) - cmd = f"C {started_task_name}" - if asidx or duplicate_tasks: - cmd = f"{cmd},A={asidx}" - if dump: - cmd = f"{cmd},DUMP" - if armrestart: - cmd = f"{cmd},ARMRESTART" - return started_task_name, cmd - - -def prepare_force_command(module, started_task=None, asidx=None, duplicate_tasks=False): - """Validates parameters and creates force command - - Parameters - ---------- - module : dict - The started task force command parameters. - started_task: string - The started task name. - asidx : string - The address space identifier value, in hexadecimal. - duplicate_tasks: bool - Indicates if duplicate tasks are running. - - Returns - ------- - started_task_name - The name of started task. - cmd - The force command in string format. 
- """ - identifier = module.params.get('identifier_name') - job_name = module.params.get('job_name') - asidx = module.params.get('asidx') or asidx - arm = module.params.get('arm') - armrestart = module.params.get('armrestart') - userid = module.params.get('userid') - tcb_address = module.params.get('tcb_address') - retry = '' - if module.params.get('retry_force') is not None: - if module.params.get('retry_force'): - retry = 'YES' - else: - retry = 'NO' - started_task_name = "" - if tcb_address and len(tcb_address) != 6: - module.fail_json( - rc=5, - msg="The TCB address of the task should be exactly 6-digit hexadecimal.", - changed=False - ) - if userid and armrestart: - module.fail_json( - rc=5, - msg="The ARMRESTART parameter is not valid with the U=userid parameter.", - changed=False - ) - if started_task: - started_task_name = started_task - elif job_name: - started_task_name = job_name - if identifier: - started_task_name = f"{started_task_name}.{identifier}" - else: - started_task_name = f"{started_task_name}.{started_task_name}" - elif userid: - started_task_name = f"U={userid}" - else: - module.fail_json( - rc=5, - msg="job_name, task_id and userid are missing, one of them is needed to force stop a running started task.", - changed=False - ) - cmd = f"FORCE {started_task_name}" - if asidx or duplicate_tasks: - cmd = f"{cmd},A={asidx}" - if arm: - cmd = f"{cmd},ARM" - if armrestart: - cmd = f"{cmd},ARMRESTART" - if tcb_address: - cmd = f"{cmd},TCB={tcb_address}" - if retry: - cmd = f"{cmd},RETRY={retry}" - return started_task_name, cmd - - -def extract_keys(stdout, asidx=None, task_params_before=None): - """Extracts keys and values from the given stdout - - Parameters - ---------- - stdout : string - The started task display command output - asidx : string - The address space identifier value, in hexadecimal. - task_params_before: list - List of started task details which have same started task name. 
- - Returns - ------- - tasks - The list of task parameters. - """ - keys = { - 'A': 'asidx', - 'CT': 'cpu_time', - 'ET': 'elapsed_time', - 'WUID': 'task_id' - } + cmd = cmd + ",REUSASID=" + reus_asid + if keyword_parameters: + cmd = cmd + "," + keyword_parameters + return cmd + + +def extract_keys(stdout): + keys = {'A': 'ASID', 'CT': 'CPU_Time', 'ET': 'Elapsed_Time', 'WUID': 'WUID', 'USERID': 'USERID', 'P': 'Priority'} + # params = {} + # for key in keys: + # parm = re.search(rf"{key}=([^\s]+)", stdout) + # if parm: + # params[keys[key]] = parm.group(1) + # return params lines = stdout.strip().split('\n') tasks = [] - current_task = {} + current_task = None task_header_regex = re.compile(r'^\s*(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)') kv_pattern = re.compile(r'(\S+)=(\S+)') for line in lines[5:]: line = line.strip() - match_firstline = task_header_regex.search(line) - if len(line.split()) >= 5 and match_firstline: + if len(line.split()) >= 5 and task_header_regex.search(line): if current_task: - current_task['started_time'] = "" - el_time = current_task.get('elapsed_time') - if el_time: - current_task['elapsed_time'] = convert_cpu_time(el_time) or current_task['elapsed_time'] - current_task['started_time'] = calculate_start_time(el_time) - if asidx: - if asidx == current_task.get('asidx'): - tasks.append(current_task) - current_task = {} - break - elif task_params_before: - current_asid = current_task.get('asidx') - task_exists_before = False - for task in task_params_before: - if task.get('asidx') == current_asid: - task_exists_before = True - break - if not task_exists_before: - tasks.append(current_task) - else: - tasks.append(current_task) - current_task = {} - current_task['task_name'] = match_firstline.group(1) - current_task['task_identifier'] = match_firstline.group(2) + tasks.append(current_task) + match = task_header_regex.search(line) + current_task = { + "TASK_NAME": match.group(1), + "DETAILS": {} + } for match in kv_pattern.finditer(line): key, value 
= match.groups() if key in keys: key = keys[key] - current_task[key.lower()] = value + current_task["DETAILS"][key] = value elif current_task: for match in kv_pattern.finditer(line): key, value = match.groups() if key in keys: key = keys[key] - current_task[key.lower()] = value + current_task["DETAILS"][key] = value if current_task: - current_task['started_time'] = "" - el_time = current_task.get('elapsed_time') - if el_time: - current_task['elapsed_time'] = convert_cpu_time(el_time) or current_task['elapsed_time'] - current_task['started_time'] = calculate_start_time(el_time) - cpu_time = current_task.get('cpu_time') - if cpu_time: - current_task['cpu_time'] = convert_cpu_time(cpu_time) or current_task['cpu_time'] - if asidx: - if asidx == current_task.get('asidx'): - tasks.append(current_task) - elif task_params_before: - current_asid = current_task.get('asidx') - task_exists_before = False - for task in task_params_before: - if task.get('asidx') == current_asid: - task_exists_before = True - break - if not task_exists_before: - tasks.append(current_task) - else: - tasks.append(current_task) + tasks.append(current_task) return tasks -def parse_time(ts_str): - """Parse timestamp - - Parameters - ---------- - ts_str : string - The time stamp in string format - - Returns - ------- - timestamp - Transformed timestamp - """ - try: - # Case 1: Duration like "000.005seconds" - sec_match = re.match(r"^(\d+\.?\d*)\s*S?$", ts_str, re.IGNORECASE) - if sec_match: - return timedelta(seconds=float(sec_match.group(1))) - # Case 2: hh.mm.ss - hms_match = re.match(r"^(\d+).(\d{2}).(\d{2})$", ts_str) - if hms_match: - h, m, s = map(int, hms_match.groups()) - return timedelta(hours=h, minutes=m, seconds=s) - # Case 3: hhhhh.mm - hm_match = re.match(r"^(\d{1,5}).(\d{2})$", ts_str) - if hm_match: - h, m = map(int, hm_match.groups()) - return timedelta(hours=h, minutes=m) - except Exception: - return "" - return "" - - -def calculate_start_time(ts_str): - now = 
datetime.now().astimezone() - parsed = parse_time(ts_str) - if parsed is None: - return "" - # If it's a timedelta (duration), subtract from now → absolute datetime - if isinstance(parsed, timedelta): - return f"{now - parsed}" - return "" - - -def convert_cpu_time(ts_str): - parsed = parse_time(ts_str) - if parsed is None: - return "" - # If it's a timedelta (duration), subtract from now → absolute datetime - if isinstance(parsed, timedelta): - total_seconds = int(parsed.total_seconds()) - milliseconds = int(parsed.microseconds / 1000) - - hours = total_seconds // 3600 - minutes = (total_seconds % 3600) // 60 - seconds = total_seconds % 60 - - # Format: HHHHH.MM.SS.SSS - return f"{hours:05}.{minutes:02}.{seconds:02}.{milliseconds:03}" - return "" - - -def fetch_logs(command, timeout): - """Extracts keys and values from the given stdout - - Parameters - ---------- - command : string - The comand which need to be checked in system logs - timeout: int - The timeout value passed in input. - - Returns - ------- - str - Logs from SYSLOG - """ - time_mins = timeout // 60 + 1 - option = '-t' + str(time_mins) - stdout = zsystem.read_console(options=option) +def fetch_logs(command): + stdout = zsystem.read_console(options='-t1') stdout_lines = stdout.splitlines() first = None - pattern = rf"\b{command}\b" for i, line in enumerate(stdout_lines): - if re.search(pattern, line, re.IGNORECASE): - first = i - if not first: + if command in line: + if first is None: + first = i + if first is None: return "" - logs = "\n".join(stdout_lines[first:]) - return logs - + return stdout_lines[first:] def run_module(): """Initialize the module. @@ -1129,119 +308,80 @@ def run_module(): ------ fail_json z/OS started task operation failed. - - Note: - 5 arguments(device_number, device_type, volume, retry_force, tcb_address) are commented due to - not tested those values in positive scenarios. These options will be enabled after successful - testing. Below Git issues are created to track this. 
- https://github.com/ansible-collections/ibm_zos_core/issues/2339 - https://github.com/ansible-collections/ibm_zos_core/issues/2340 """ module = AnsibleModule( argument_spec={ 'state': { 'type': 'str', 'required': True, - 'choices': ['started', 'stopped', 'modified', 'displayed', 'forced', 'cancelled'] - }, - 'arm': { - 'type': 'bool', - 'required': False + 'choices': ['started', 'stopped', 'modified', 'display', 'forced', 'cancelled'] }, - 'armrestart': { - 'type': 'bool', - 'required': False - }, - 'asidx': { + 'member_name': { 'type': 'str', - 'required': False - }, - # 'device_number': { - # 'type': 'str', - # 'required': False - # }, - # 'device_type': { - # 'type': 'str', - # 'required': False - # }, - 'dump': { - 'type': 'bool', - 'required': False + 'required': False, + 'aliases': ['member'] }, 'identifier_name': { 'type': 'str', 'required': False, 'aliases': ['identifier'] }, - 'job_account': { - 'type': 'str', - 'required': False - }, 'job_name': { 'type': 'str', 'required': False, 'aliases': ['job', 'task_name', 'task'] }, - 'keyword_parameters': { - 'type': 'dict', - 'required': False, - 'no_log': False + 'job_account': { + 'type': 'str', + 'required': False }, - 'member_name': { + 'device_type': { 'type': 'str', - 'required': False, - 'aliases': ['member'] + 'required': False }, - 'parameters': { - 'type': 'list', - 'elements': 'str', + 'device_number': { # A device number is 3 or 4 hexadecimal digits. A slash (/) must precede a 4-digit number but is not before a 3-digit number. 
+ 'type': 'str', 'required': False }, - # 'retry_force': { - # 'type': 'bool', - # 'required': False - # }, - 'reus_asid': { - 'type': 'bool', + 'volume_serial': { + 'type': 'str', 'required': False }, - 'subsystem': { + 'subsystem_name': { # The name must be 1 - 4 characters 'type': 'str', 'required': False }, - 'task_id': { + 'reus_asid': { + 'type': 'str', + 'required': False, + 'choices': ['YES', 'NO'] + }, + 'parameters': { 'type': 'str', 'required': False }, - # 'tcb_address': { - # 'type': 'str', - # 'required': False - # }, - 'userid': { + 'keyword_parameters': { + 'type': 'dict', + 'required': False, + 'no_log': False + }, + 'asid': { 'type': 'str', 'required': False }, 'verbose': { 'type': 'bool', - 'required': False, - 'default': False + 'required': False }, - # 'volume': { - # 'type': 'str', - # 'required': False - # }, - 'wait_time': { + 'wait_time_s': { 'type': 'int', 'required': False, - 'default': 0 + 'default': 5 } }, mutually_exclusive=[ - # ['device_number', 'device_type'], - ['job_name', 'task_id'], - ['identifier_name', 'task_id'] + ['device_number', 'device_type'] ], - # required_by={'retry_force': ['tcb_address']}, supports_check_mode=True ) @@ -1250,79 +390,54 @@ def run_module(): 'arg_type': 'str', 'required': True }, - 'arm': { - 'arg_type': 'bool', - 'required': False - }, - 'armrestart': { - 'arg_type': 'bool', - 'required': False - }, - 'asidx': { - 'arg_type': 'str', - 'required': False - }, - # 'device_number': { - # 'arg_type': 'str', - # 'required': False - # }, - # 'device_type': { - # 'arg_type': 'str', - # 'required': False - # }, - 'dump': { - 'arg_type': 'bool', - 'required': False + 'member_name': { + 'arg_type': 'member_name', + 'required': False, + 'aliases': ['member'] }, 'identifier_name': { 'arg_type': 'identifier_name', 'required': False, 'aliases': ['identifier'] }, + 'job_name': { + 'arg_type': 'str', + 'required': False, + 'aliases': ['job', 'task_name', 'task'] + }, 'job_account': { 'arg_type': 'str', 'required': 
False }, - 'job_name': { + 'device_type': { 'arg_type': 'str', - 'required': False, - 'aliases': ['job', 'task_name', 'task'] + 'required': False }, - 'keyword_parameters': { - 'arg_type': 'basic_dict', + 'device_number': { + 'arg_type': 'str', 'required': False }, - 'member_name': { - 'arg_type': 'member_name', - 'required': False, - 'aliases': ['member'] + 'volume_serial': { + 'arg_type': 'str', + 'required': False }, - 'parameters': { - 'arg_type': 'list', - 'elements': 'str', + 'subsystem_name': { + 'arg_type': 'str', 'required': False }, - # 'retry_force': { - # 'arg_type': 'bool', - # 'required': False - # }, 'reus_asid': { - 'arg_type': 'bool', + 'arg_type': 'str', 'required': False }, - 'subsystem': { + 'parameters': { 'arg_type': 'str', 'required': False }, - 'task_id': { - 'type': 'str', + 'keyword_parameters': { + 'arg_type': 'basic_dict', 'required': False }, - # 'tcb_address': { - # 'arg_type': 'str', - # 'required': False - # }, - 'userid': { + 'asid': { 'arg_type': 'str', 'required': False }, @@ -1330,11 +445,7 @@ def run_module(): 'arg_type': 'bool', 'required': False }, - # 'volume': { - # 'arg_type': 'str', - # 'required': False - # }, - 'wait_time': { + 'wait_time_s': { 'arg_type': 'int', 'required': False } @@ -1349,167 +460,186 @@ def run_module(): msg='Parameter verification failed.', stderr=str(err) ) - state = module.params.get('state') - userid = module.params.get('userid') - wait_time_s = module.params.get('wait_time') + operation = module.params.get('state') + member = module.params.get('member_name') + identifier = module.params.get('identifier') + job_name = module.params.get('job_name') + job_account = module.params.get('job_account') + asid = module.params.get('asid') + parameters = module.params.get('parameters') + device_type = module.params.get('device_type') + device_number = module.params.get('device_number') + volume_serial = module.params.get('volume_serial') + subsystem_name = module.params.get('subsystem_name') + reus_asid = 
module.params.get('reus_asid') + keyword_parameters = module.params.get('keyword_parameters') + wait_time_s = module.params.get('wait_time_s') verbose = module.params.get('verbose') + keyword_parameters_string = None + if keyword_parameters is not None: + # keyword_parameters_string = ','.join(f"{key}={value}" for key, value in keyword_parameters.items()) + for key, value in keyword_parameters.items(): + key_len = len(key) + value_len = len(value) + if key_len > 44 or value_len > 44 or key_len + value_len > 65: + module.fail_json( + msg="The length of a keyword=option is exceeding 66 characters or length of an individual value is exceeding 44 characters. key:{0}, value:{1}".format(key, value), + changed=False + ) + else: + keyword_parameters_string = ','.join(f"{key}={value}") + device = device_type if device_type is not None else device_number kwargs = {} - # Fetch started task name if task_id is present in the request - task_id = module.params.get('task_id') - task_name = "" - asidx = module.params.get('asidx') - duplicate_tasks = False - started_task_name_from_id = "" - task_info = [] - if task_id and state != "displayed" and state != "started": - task_name, asidx = fetch_task_name_and_asidx(module, task_id) - task_params = execute_display_command(task_name) - if len(task_params) > 1: - duplicate_tasks = True - for task in task_params: - if task['asidx'] == asidx: - task_info.append(task) - started_task_name_from_id = f"{task['task_name']}.{task['task_identifier']}" - if not started_task_name_from_id: + start_errmsg = ['ERROR'] + stop_errmsg = ['NOT ACTIVE'] + display_errmsg = ['NOT ACTIVE'] + modify_errmsg = ['REJECTED', 'NOT ACTIVE'] + cancel_errmsg = ['NOT ACTIVE'] + force_errmsg = ['NOT ACTIVE'] + err_msg = [] + + # Validations + if job_account and len(job_account) > 55: + module.fail_json( + msg="job_account value should not exceed 55 characters.", + changed=False + ) + if device_number: + devnum_len = len(device_number) + if devnum_len not in (3, 5) or 
(devnum_len == 5 and not device_number.startswith("/")): module.fail_json( - rc=1, - msg="Started task of the given task_id is not active.", + msg="Invalid device_number.", changed=False ) - """ - Below error messages or error codes are used to determine if response has any error. + if subsystem_name and len(subsystem_name) > 4: + module.fail_json( + msg="The subsystem_name must be 1 - 4 characters.", + changed=False + ) + # keywaord arguments validation..... - JCL ERROR - IEE122I: Response contains this keyword when JCL contains syntax error. - INVALID PARAMETER - IEE535I: When invalid parameter passed in command line. - NOT ACTIVE - IEE341I: When started task with the given job name is not active - REJECTED: When modify command is not supported by respective started task. - NOT LOGGED ON - IEE324I: When invalid userid passed in command. - DUPLICATE NAME FOUND - IEE842I: When multiple started tasks exist with same name. - NON-CANCELABLE - IEE838I: When cancel command can't stop job and force command is needed. 
- CANCELABLE - IEE838I: When force command used without using cancel command - """ - start_errmsg = ['IEE122I', 'IEE535I', 'IEE307I', 'ERROR', 'IEE708I'] - stop_errmsg = ['IEE341I', 'IEE535I', 'IEE708I'] - display_errmsg = ['IEE341I', 'IEE535I', 'NOT FOUND', 'IEE708I'] - modify_errmsg = ['REJECTED', 'IEE341I', 'IEE535I', 'IEE311I', 'IEE708I', 'ISF302E'] - cancel_errmsg = ['IEE341I', 'IEE324I', 'IEE535I', 'IEE842I', 'NON-CANCELABLE', 'IEE708I'] - force_errmsg = ['IEE341I', 'IEE324I', 'IEE535I', 'CANCELABLE', 'IEE842I', 'IEE708I'] - error_details = { - 'IEE122I': 'Specified member is missing or PROC/JOB contains incorrect JCL statements.', - 'IEE535I': 'A parameter on a command is not valid.', - 'IEE307I': 'Command parameter punctuation is incorrect or parameter is not followed by a blank.', - 'ERROR': 'Member is missing in PROCLIB or JCL is invalid or issue with JCL execution.', - 'NOT FOUND': 'Started task is not active', - 'IEE341I': 'Started task is not active', - 'REJECTED': 'Started task is not accepting modification.', - 'IEE324I': 'The userid specified on the command is not currently active in the system..', - 'IEE842I': 'More than one active job with the specified name exist.', - 'NON-CANCELABLE': 'The task cannot be canceled. Use the FORCE ARM command.', - 'CANCELABLE': 'The task can be canceled. Use the CANCEL command.', - 'IEE311I': 'Required parameter is missing.', - 'IEE708I': 'The value of a keyword specified on a command is incorrect.', - 'ISF302E': 'Parameters are invalid.' - } - err_msg = [] - kwargs = {} + wait_s = 5 - if wait_time_s: + use_wait_arg = False + if zoau_version_checker.is_zoau_version_higher_than("1.2.4"): + use_wait_arg = True + + if use_wait_arg: kwargs.update({"wait": True}) - cmd = "" - task_params_before = [] + args = [] + cmd = '' + started_task_name = "" + if operation != 'started': + if job_name is not None: + started_task_name = job_name + if identifier is not None: + started_task_name = started_task_name + "." 
+ identifier + else: + module.fail_json( + msg="job_name is missing which is mandatory.", + changed=False + ) execute_display_before = False execute_display_after = False - if state == "started": - err_msg = start_errmsg + if operation == 'started': execute_display_after = True - started_task_name, cmd = validate_and_prepare_start_command(module) - task_params_before = execute_display_command(started_task_name) - elif state == "displayed": + if job_name is not None: + started_task_name = job_name + elif member is not None: + started_task_name = member + if identifier is not None: + started_task_name = started_task_name + "." + identifier + else: + module.fail_json( + msg="member_name is missing which is mandatory.", + changed=False + ) + err_msg = start_errmsg + if member is None: + module.fail_json( + msg="member_name is missing which is mandatory.", + changed=False + ) + if job_name is not None and identifier is not None: + module.fail_json( + msg="job_name and identifier_name are mutually exclusive while starting a started task.", + changed=False + ) + cmd = prepare_start_command(member, identifier, job_name, job_account, device, volume_serial, subsystem_name, reus_asid, parameters, keyword_parameters_string) + elif operation == 'display': err_msg = display_errmsg - started_task_name, asidx, cmd = prepare_display_command(module) - elif state == "stopped": - if not task_id: - execute_display_before = True + cmd = 'd a,' + started_task_name + elif operation == 'stopped': + execute_display_before = True err_msg = stop_errmsg - started_task_name, cmd = prepare_stop_command(module, started_task_name_from_id, asidx, duplicate_tasks) - elif state == "cancelled": - if not userid: - if not task_id: - execute_display_before = True + cmd = 'p ' + started_task_name + if asid: + cmd = cmd + ',a=' + asid + elif operation == 'cancelled': + execute_display_before = True err_msg = cancel_errmsg - started_task_name, cmd = prepare_cancel_command(module, started_task_name_from_id, 
asidx, duplicate_tasks) - elif state == "forced": - if not userid: - if not task_id: - execute_display_before = True + cmd = 'c ' + started_task_name + if asid: + cmd = cmd + ',a=' + asid + elif operation == 'forced': + execute_display_before = True err_msg = force_errmsg - started_task_name, cmd = prepare_force_command(module, started_task_name_from_id, asidx, duplicate_tasks) - elif state == "modified": + cmd = 'force ' + started_task_name + if asid: + cmd = cmd + ',a=' + asid + elif operation == 'modified': execute_display_after = True err_msg = modify_errmsg - started_task_name, cmd = prepare_modify_command(module, started_task_name_from_id) + cmd = 'f ' + started_task_name + ',' + parameters changed = False stdout = "" stderr = "" - rc, out, err, task_params = execute_command(cmd, started_task_name, asidx, execute_display_before, timeout_s=wait_time_s, **kwargs) - is_failed = False - system_logs = "" - msg = "" - # Find failure - found_msg = next((msg for msg in err_msg if msg in out), None) - if err != "" or found_msg: - is_failed = True - # Fetch system logs to validate any error occured in execution - if not is_failed or verbose: - system_logs = fetch_logs(cmd.upper(), wait_time_s) - # If sysout is not having error, then check system log as well to make sure no error occured - if not is_failed: - found_msg = next((msg for msg in err_msg if msg in system_logs), None) - if found_msg: - is_failed = True - if not verbose: - system_logs = "" - current_state = "" - if is_failed: - if rc == 0: - rc = 1 + rc, out, err, task_params = execute_command(cmd, started_task_name, execute_display_before, execute_display_after, timeout_s=wait_s, *args, **kwargs) + logs = fetch_logs(cmd.upper()) # it will display both start/display logs + logs_str = "\n".join(logs) + if any(msg in out for msg in err_msg) or any(msg in logs_str for msg in err_msg) or err != "": changed = False - msg = error_details.get(found_msg, found_msg) stdout = out stderr = err if err == "" or err is 
None: stderr = out stdout = "" else: - current_state = state changed = True stdout = out stderr = err - if state == "displayed": - task_params = extract_keys(out, asidx) - elif execute_display_after: - task_params = execute_display_command(started_task_name, asidx, task_params_before) + if operation == 'display': + task_params = extract_keys(out) result = dict() if module.check_mode: module.exit_json(**result) - - result = dict( - changed=changed, - state=current_state, - cmd=cmd, - tasks=task_info if task_id else task_params, - rc=rc, - stdout=stdout, - stderr=stderr, - stdout_lines=stdout.split('\n'), - stderr_lines=stderr.split('\n'), - verbose_output=system_logs - ) - if msg: - result['msg'] = msg + + if verbose: + result = dict( + changed=changed, + cmd=cmd, + task=task_params, + rc=rc, + verbose_output=logs_str, + stdout=stdout, + stderr=stderr, + stdout_lines=stdout.split('\n'), + stderr_lines=stderr.split('\n'), + ) + else: + result = dict( + changed=changed, + cmd=cmd, + task=task_params, + rc=rc, + stdout=stdout, + stderr=stderr, + stdout_lines=stdout.split('\n'), + stderr_lines=stderr.split('\n'), + ) module.exit_json(**result) diff --git a/plugins/modules/zos_unarchive.py b/plugins/modules/zos_unarchive.py index b8dc1ac2a8..925b5d8083 100644 --- a/plugins/modules/zos_unarchive.py +++ b/plugins/modules/zos_unarchive.py @@ -1033,7 +1033,7 @@ def _create_dest_data_set( if space_primary is None: arguments.update(space_primary=self._compute_dest_data_set_size()) arguments.pop("self") - changed = data_set.DataSet.ensure_present(**arguments) + changed, zoau_data_set = data_set.DataSet.ensure_present(**arguments) return arguments["name"], changed def _get_include_data_sets_cmd(self): diff --git a/plugins/modules/zos_zfs_resize.py b/plugins/modules/zos_zfs_resize.py index d08d4ed63d..ae11dd7b53 100644 --- a/plugins/modules/zos_zfs_resize.py +++ b/plugins/modules/zos_zfs_resize.py @@ -427,8 +427,8 @@ def create_trace_dataset(name, member=False): space_type="K", 
space_primary="42000", space_secondary="25000") rc = data_set.DataSet.ensure_member_present(name) else: - rc = data_set.DataSet.ensure_present(name=name, replace=False, type="PDS", record_length=200, record_format="VB", - space_type="K", space_primary="42000", space_secondary="25000") + rc, zoau_data_set = data_set.DataSet.ensure_present(name=name, replace=False, type="PDS", record_length=200, record_format="VB", + space_type="K", space_primary="42000", space_secondary="25000") return rc diff --git a/tests/functional/modules/test_zos_backup_restore.py b/tests/functional/modules/test_zos_backup_restore.py index b42bfb94bc..5243b8f186 100644 --- a/tests/functional/modules/test_zos_backup_restore.py +++ b/tests/functional/modules/test_zos_backup_restore.py @@ -930,43 +930,43 @@ def test_backup_and_restore_a_data_set_with_same_hlq(ansible_zos_module): delete_data_set_or_file(hosts, data_set_backup_location) delete_remnants(hosts) - -def test_backup_and_restore_of_data_set_from_volume_to_new_volume(ansible_zos_module, volumes_on_systems): - hosts = ansible_zos_module - data_set_name = get_tmp_ds_name() - data_set_restore_location = get_tmp_ds_name() - hlqs = "TMPHLQ" - try: - volumes = Volume_Handler(volumes_on_systems) - volume_1 = volumes.get_available_vol() - volume_2 = volumes.get_available_vol() - delete_data_set_or_file(hosts, data_set_name) - delete_data_set_or_file(hosts, data_set_restore_location) - create_sequential_data_set_with_contents( - hosts, data_set_name, DATA_SET_CONTENTS, volume_1 - ) - results = hosts.all.zos_backup_restore( - operation="backup", - data_sets=dict(include=data_set_name), - volume=volume_1, - backup_name=data_set_restore_location, - overwrite=True, - ) - assert_module_did_not_fail(results) - assert_data_set_or_file_exists(hosts, data_set_restore_location) - results = hosts.all.zos_backup_restore( - operation="restore", - backup_name=data_set_restore_location, - overwrite=True, - volume=volume_2, - hlq=hlqs, - ) - 
assert_module_did_not_fail(results) - assert_data_set_exists(hosts, data_set_restore_location) - finally: - delete_data_set_or_file(hosts, data_set_name) - delete_data_set_or_file(hosts, data_set_restore_location) - delete_remnants(hosts, hlqs) +# Commented this test because it was commented previously and keeps failing. Tracked on https://github.com/ansible-collections/ibm_zos_core/issues/2348 +# def test_backup_and_restore_of_data_set_from_volume_to_new_volume(ansible_zos_module, volumes_on_systems): +# hosts = ansible_zos_module +# data_set_name = get_tmp_ds_name() +# data_set_restore_location = get_tmp_ds_name() +# hlqs = "TMPHLQ" +# try: +# volumes = Volume_Handler(volumes_on_systems) +# volume_1 = volumes.get_available_vol() +# volume_2 = volumes.get_available_vol() +# delete_data_set_or_file(hosts, data_set_name) +# delete_data_set_or_file(hosts, data_set_restore_location) +# create_sequential_data_set_with_contents( +# hosts, data_set_name, DATA_SET_CONTENTS, volume_1 +# ) +# results = hosts.all.zos_backup_restore( +# operation="backup", +# data_sets=dict(include=data_set_name), +# volume=volume_1, +# backup_name=data_set_restore_location, +# overwrite=True, +# ) +# assert_module_did_not_fail(results) +# assert_data_set_or_file_exists(hosts, data_set_restore_location) +# results = hosts.all.zos_backup_restore( +# operation="restore", +# backup_name=data_set_restore_location, +# overwrite=True, +# volume=volume_2, +# hlq=hlqs, +# ) +# assert_module_did_not_fail(results) +# assert_data_set_exists(hosts, data_set_restore_location) +# finally: +# delete_data_set_or_file(hosts, data_set_name) +# delete_data_set_or_file(hosts, data_set_restore_location) +# delete_remnants(hosts, hlqs) def test_backup_and_restore_of_sms_group(ansible_zos_module, volumes_sms_systems): diff --git a/tests/functional/modules/test_zos_data_set_func.py b/tests/functional/modules/test_zos_data_set_func.py index 74fbe084ea..bfa2cf3ff4 100644 --- 
a/tests/functional/modules/test_zos_data_set_func.py +++ b/tests/functional/modules/test_zos_data_set_func.py @@ -134,9 +134,9 @@ def retrieve_data_set_names(results): """ Retrieve system generated data set names """ data_set_names = [] for result in results.contacted.values(): - if len(result.get("names", [])) > 0: - for name in result.get("names"): - data_set_names.append(name) + if len(result.get("data_sets", [])) > 0: + for data_set in result.get("data_sets"): + data_set_names.append(data_set.get("name")) return data_set_names def print_results(results): @@ -232,6 +232,9 @@ def test_data_set_present_when_uncataloged(ansible_zos_module, jcl, volumes_on_s ) for result in results.contacted.values(): assert result.get("changed") is False + assert len(result.get("data_sets")) > 0 + assert result.get("data_sets")[0].get("name") is not None + assert result.get("data_sets")[0].get("type") is not None # uncatalog the data set results = hosts.all.zos_data_set(name=dataset, state="uncataloged") for result in results.contacted.values(): @@ -974,8 +977,8 @@ def test_data_set_creation_with_tmp_hlq(ansible_zos_module): for result in results.contacted.values(): assert result.get("changed") is True assert result.get("module_stderr") is None - for dsname in result.get("names"): - assert dsname[:7] == tmphlq + for ds in result.get("data_sets"): + assert ds.get("name")[:7] == tmphlq finally: if dsname: hosts.all.zos_data_set(name=default_data_set_name, state="absent") @@ -1022,6 +1025,13 @@ def test_gdg_create_and_delete(ansible_zos_module, dstype): for result in results.contacted.values(): assert result.get("changed") is True assert result.get("module_stderr") is None + assert len(result.get("data_sets")) > 0 + assert result.get("data_sets")[0].get("empty") is not None + assert result.get("data_sets")[0].get("extended") is not None + assert result.get("data_sets")[0].get("fifo") is not None + assert result.get("data_sets")[0].get("limit") is not None + assert 
result.get("data_sets")[0].get("purge") is not None + assert result.get("data_sets")[0].get("scratch") is not None results = hosts.all.zos_data_set(name=f"{data_set_name}(+1)", state="present", type=dstype) for result in results.contacted.values(): assert result.get("changed") is True diff --git a/tests/functional/modules/test_zos_mount_func.py b/tests/functional/modules/test_zos_mount_func.py index 6b9ba4e338..5570a3486b 100644 --- a/tests/functional/modules/test_zos_mount_func.py +++ b/tests/functional/modules/test_zos_mount_func.py @@ -38,6 +38,12 @@ SECURITY """ +TEXT_TO_KEEP = """/* Service path */ +MOUNT FILESYSTEM('{0}') + TYPE(ZFS) MODE(RDWR) AUTOMOVE + MOUNTPOINT('/Service') +""" + SHELL_EXECUTABLE = "/bin/sh" @@ -315,6 +321,75 @@ def test_basic_mount_with_bpx_no_utf_8_characters(ansible_zos_module, volumes_on stdin="", ) +def test_basic_mount_with_persistent_keep_dataset(ansible_zos_module, volumes_on_systems): + hosts = ansible_zos_module + volumes = Volume_Handler(volumes_on_systems) + volume_1 = volumes.get_available_vol() + srcfn = create_sourcefile(hosts, volume_1) + + tmp_file_filename = "/tmp/testfile.txt" + + hosts.all.shell( + cmd="touch {0}".format(tmp_file_filename) + ) + + dest = get_tmp_ds_name() + dest_path = dest + "(AUTO1)" + + hosts.all.zos_blockinfile(path=tmp_file_filename, insertafter="EOF", block=TEXT_TO_KEEP.format(srcfn)) + + hosts.all.shell( + cmd="dtouch -tpdse {0}".format(dest) + ) + + hosts.all.zos_copy( + src=tmp_file_filename, + dest=dest_path, + binary=True, + remote_src=True, + ) + + try: + mount_result = hosts.all.zos_mount( + src=srcfn, + path="/pythonx", + fs_type="zfs", + state="mounted", + persistent=dict(name=dest_path), + ) + + for result in mount_result.values(): + assert result.get("rc") == 0 + assert result.get("changed") is True + + result_cat = hosts.all.shell( + cmd="dcat '{0}'".format(dest_path), + ) + + for result in result_cat.contacted.values(): + print(result) + assert srcfn in result.get("stdout") + assert 
"Service path" in result.get("stdout") + finally: + hosts.all.zos_mount( + src=srcfn, + path="/pythonx", + fs_type="zfs", + state="absent", + ) + hosts.all.shell( + cmd="drm " + DataSet.escape_data_set_name(srcfn), + executable=SHELL_EXECUTABLE, + stdin="", + ) + hosts.all.file(path=tmp_file_filename, state="absent") + hosts.all.file(path="/pythonx/", state="absent") + hosts.all.shell( + cmd="drm " + dest, + executable=SHELL_EXECUTABLE, + stdin="", + ) + def test_basic_mount_with_bpx_marker_backup(ansible_zos_module, volumes_on_systems): hosts = ansible_zos_module volumes = Volume_Handler(volumes_on_systems) @@ -326,13 +401,6 @@ def test_basic_mount_with_bpx_marker_backup(ansible_zos_module, volumes_on_syste hosts.all.zos_copy( content=INITIAL_PRM_MEMBER, dest=tmp_file_filename, - binary=True, - ) - # Make it readable at console - hosts.all.shell( - cmd="chtag -t -c ISO8859-1 " + tmp_file_filename, - executable=SHELL_EXECUTABLE, - stdin="", ) # Dump the values of the file once copied to the target(s) @@ -356,7 +424,6 @@ def test_basic_mount_with_bpx_marker_backup(ansible_zos_module, volumes_on_syste hosts.all.zos_copy( src=tmp_file_filename, dest=dest_path, - binary=True, remote_src=True, ) @@ -375,25 +442,16 @@ def test_basic_mount_with_bpx_marker_backup(ansible_zos_module, volumes_on_syste marker=["bpxtablemarker - try this", "second line of marker"], ), ) - # copying from dataset to make editable copy on target - test_tmp_file_filename = tmp_file_filename + "-a" - - hosts.all.zos_copy( - src=dest_path, - dest=test_tmp_file_filename, - binary=True, - remote_src=True, - ) + results = hosts.all.shell( - cmd="cat " + test_tmp_file_filename, executable=SHELL_EXECUTABLE, stdin="" + cmd="dcat '{0}'".format(dest_path), ) - data = "" - for result in results.values(): + + for result in results.contacted.values(): print("\nbcb-postmount result: {0}\n".format(result.get("stdout"))) data += result.get("stdout") 
print("\n====================================================\n") - for result in mount_result.values(): assert result.get("rc") == 0 assert result.get("changed") is True @@ -414,7 +472,6 @@ def test_basic_mount_with_bpx_marker_backup(ansible_zos_module, volumes_on_syste ) hosts.all.file(path=tmp_file_filename, state="absent") - hosts.all.file(path=test_tmp_file_filename, state="absent") hosts.all.file(path="/pythonx/", state="absent") hosts.all.shell(cmd=f"drm {dest}")