diff --git a/CHANGES.rst b/CHANGES.rst index 22182ff88..0739dd2b8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,12 @@ Changelog ========= +-------------------- +3.23 (2025-04-18) +-------------------- +- Declared dependency on blessed for Windows, Linux and macOS +- Added `eb migrate` command to support migrations of IIS servers to Elastic Beanstalk: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/dotnet-migrating-applications.html + -------------------- 3.22.1 (2025-04-17) -------------------- diff --git a/ebcli/__init__.py b/ebcli/__init__.py index 7cef76724..4a882368d 100644 --- a/ebcli/__init__.py +++ b/ebcli/__init__.py @@ -11,4 +11,4 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -__version__ = '3.22.1' +__version__ = '3.23' diff --git a/ebcli/controllers/migrate.py b/ebcli/controllers/migrate.py new file mode 100644 index 000000000..b8e0ff353 --- /dev/null +++ b/ebcli/controllers/migrate.py @@ -0,0 +1,3316 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import datetime +import shutil +import os +import string +import sys +import re +import xml.etree.ElementTree as ET +from dataclasses import dataclass +import zipfile +from typing import Dict, List, Any, Union, Optional, Tuple, Set +import collections +import json +import argparse + +if sys.platform.startswith("win"): + import winreg + import clr + import win32com.client + + clr.AddReference("System.Reflection") + clr.AddReference(r"C:\Windows\System32\inetsrv\Microsoft.Web.Administration.dll") + clr.AddReference("System") + clr.AddReference("System.Core") + clr.AddReference("System.DirectoryServices.AccountManagement") + from System.DirectoryServices.AccountManagement import ( + PrincipalContext, + ContextType, + UserPrincipal, + PrincipalSearcher, + ) + from System.Collections.Generic import HashSet, Queue + from System.Reflection import Assembly + from Microsoft.Web.Administration import ( + ServerManager, + Binding, + Site, + Application, + ObjectState, + ) + from System.Diagnostics import Process, ProcessStartInfo + from System.Runtime.InteropServices import COMException + +from cement.utils.misc import minimal_logger + +LOG = minimal_logger(__name__) + +from ebcli.core.abstractcontroller import AbstractBaseController +from ebcli.core import io, fileoperations +from ebcli.lib import utils, ec2, elasticbeanstalk, aws +from ebcli.objects import requests +from ebcli.objects.platform import PlatformVersion +from ebcli.objects.exceptions import ( + NotFoundError, + NotAnEC2Instance, +) +from ebcli.resources.strings import prompts, flag_text +from ebcli.operations import commonops, createops, platformops, statusops +from ebcli.operations.tagops import tagops +from ebcli.resources.statics import namespaces + + +class MigrateExploreController(AbstractBaseController): + class Meta: + argument_formatter = argparse.RawTextHelpFormatter + 
label = "explore" + description = flag_text["migrate.explore"] + usage = "eb migrate explore" + stacked_on = "migrate" + stacked_type = "nested" + + def do_command(self): + verbose = self.app.pargs.verbose + + if verbose: + list_sites_verbosely() + else: + io.echo("\n".join([s.Name for s in ServerManager().Sites])) + + +class MigrateCleanupController(AbstractBaseController): + class Meta: + argument_formatter = argparse.RawTextHelpFormatter + label = "cleanup" + description = flag_text["migrate.cleanup"] + usage = "eb migrate cleanup" + stacked_on = "migrate" + stacked_type = "nested" + arguments = [ + (["--force"], dict(action="store_true", help=flag_text["migrate.force"])), + ] + + def do_command(self): + force = self.app.pargs.force + cleanup_previous_migration_artifacts(force, self.app.pargs.verbose) + + +# TODO: error when a physical path is in incidental to the migration execution path +class MigrateController(AbstractBaseController): + class Meta: + argument_formatter = argparse.RawTextHelpFormatter + label = "migrate" + description = "This is an experimental command that enables you to migrate an IIS site from a source machine to Elastic Beanstalk" + usage = "eb migrate [options ...]" + arguments = [ + (["-s", "--sites"], dict(help=flag_text["migrate.sites"])), + ( + ["-e", "--environment-name"], + dict(help=flag_text["migrate.environment_name"]), + ), + ( + ["-a", "--application-name"], + dict(help=flag_text["migrate.application_name"]), + ), + (["-p", "--platform"], dict(help=flag_text["migrate.platform"])), + (["-i", "--instance-type"], dict(help=flag_text["migrate.instance_type"])), + (["-c", "--cname"], dict(help=flag_text["migrate.cname"])), + ( + ["-ip", "--instance-profile"], + dict(help=flag_text["migrate.instance_profile"]), + ), + (["-sr", "--service-role"], dict(help=flag_text["migrate.service_role"])), + ( + ["-es", "--ebs-snapshots"], + dict(nargs="*", help=flag_text["migrate.ebs_snapshots"]), + ), + ( + ["-st", "--stream-to-cloudwatch"], + dict(action="store_true", help=argparse.SUPPRESS), + ), + ( + ["-hc", "--use-host-ebs-configuration"], + dict(action="store_true", help=argparse.SUPPRESS), + ), + (["-k", "--keyname"], dict(help=flag_text["migrate.keyname"])), + ( + ["-in", "--interactive"], + dict(action="store_true", help=flag_text["migrate.interactive"]), + ), + (["-t", "--tags"], dict(help=flag_text["migrate.tags"])), + (["-d", "--copy-deps"], dict(action="store_true", help=argparse.SUPPRESS)), + ( + ["-ao", "--archive-only"], + dict(action="store_true", help=flag_text["migrate.archive_only"]), + ), + ( + ["-op", "--on-prem-mode"], + dict(action="store_true", help=argparse.SUPPRESS), + ), + ( + ["-cf", "--copy-firewall-config"], + dict( + action="store_true", help=flag_text["migrate.copy_firewall_config"] + ), + ), + ( + ["--encrypt-ebs-volumes"], + dict( + action="store_true", help=flag_text["migrate.encrypt_ebs_volumes"] + ), + ), + ( + ["--ssl-certificates"], + dict(help=flag_text["migrate.ssl_certificate_arns"]), + ), + (["--archive"], dict(help=flag_text["migrate.archive"])), + (["-vpc", "--vpc-config"], dict(help=flag_text["migrate.vpc_config"])), + # TODO: support userdata copy using robocopy + ] + + def generate_ms_deploy_source_bundle( + self, + site: "Site", + destination: str, + verbose: bool, + additional_virtual_dir_physical_paths: List[str] = [], + ) -> None: + """ + Generate deployment bundle and manifest for an IIS site and its components. 
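        For a site named "Default Web Site" with a single root application, the
        resulting aws-windows-deployment-manifest.json would look roughly like the
        following (names are illustrative; the entry shape mirrors the msDeploy
        section built in ms_deploy_sync_application):

            {
                "manifestVersion": 1,
                "deployments": {
                    "msDeploy": [
                        {
                            "name": "Default Web Site",
                            "parameters": {
                                "appBundle": "DefaultWebSite.zip",
                                "iisPath": "/",
                                "iisWebSite": "Default Web Site"
                            }
                        }
                    ],
                    "custom": []
                }
            }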
+ + Creates a structured directory containing deployment artifacts for an IIS site, + including all its applications and virtual directories. Maintains a deployment + manifest that describes how to deploy these components. + + Args: + site: IIS Site object to package + destination: Base directory for deployment artifacts + verbose: If True, provides detailed output during generation + additional_virtual_dir_physical_paths: List to collect physical paths of + virtual directories for permission configuration + + Directory Structure: + destination/ + ├── upload_target/ + │ ├── source1.zip # Application bundles + │ ├── source2.zip + │ ├── aws-windows-deployment-manifest.json + │ └── ebmigrateScripts/ # Helper PowerShell scripts + │ ├── site_installer.ps1 + │ ├── permission_handler.ps1 + │ └── other helper scripts + └── upload_target.zip # Final package + + Process Flow: + 1. Creates upload_target and ebmigrateScripts directories + 2. Creates or updates deployment manifest + 3. For each application in site: + - Checks for password protection + - Generates MS Deploy package + - Collects non-root virtual directory paths + 4. Updates manifest with final configuration + + Manifest Structure: + { + "manifestVersion": 1, + "deployments": { + "msDeploy": [], # Default Web Site deployments + "custom": [] # Custom site deployments + } + } + + Notes: + - Creates directories with exist_ok=True + - Collects virtual directory paths for later permission setup + - Updates existing manifest if found, creates new if not + - Uses indented JSON format for manifest readability + """ + if verbose: + io.echo(f"Generating source bundle for {site.Name}") + upload_target_dir = os.path.join(destination, "upload_target") + os.makedirs(upload_target_dir, exist_ok=True) + os.makedirs(os.path.join(upload_target_dir, "ebmigrateScripts"), exist_ok=True) + + manifest_file_path = os.path.join( + upload_target_dir, "aws-windows-deployment-manifest.json" + ) + relative_normalized_manifest_path = absolute_to_relative_normalized_path( + manifest_file_path + ) + if os.path.exists(manifest_file_path): + if verbose: + io.echo(f" Updating {relative_normalized_manifest_path}") + with open(manifest_file_path) as file: + manifest_contents = json.load(file) + else: + manifest_contents = { + "manifestVersion": 1, + "deployments": {"msDeploy": [], "custom": []}, + } + for application in site.Applications: + warn_about_password_protection(site, application) + ms_deploy_sync_application( + site, application, destination, upload_target_dir, manifest_contents + ) + for vdir in application.VirtualDirectories: + if vdir.Path != "/": + additional_virtual_dir_physical_paths.append(vdir.PhysicalPath) + if verbose: + io.echo( + f"Updating manifest file for archive at {relative_normalized_manifest_path}" + ) + with open(manifest_file_path, "w") as file: + json.dump(manifest_contents, file, indent=4) + + def do_command(self): + validate_iis_version_greater_than_7_0() + verbose = self.app.pargs.verbose + + site_names = self.app.pargs.sites + env_name = self.app.pargs.environment_name + app_name = self.app.pargs.application_name + platform = self.app.pargs.platform + instance_type = self.app.pargs.instance_type + instance_profile = self.app.pargs.instance_profile + service_role = self.app.pargs.service_role + ebs_snapshots = self.app.pargs.ebs_snapshots + keyname = self.app.pargs.keyname + interactive = self.app.pargs.interactive + cname = self.app.pargs.cname + region = self.app.pargs.region + archive_only = self.app.pargs.archive_only + on_prem_mode = 
self.app.pargs.on_prem_mode
+        tags = self.app.pargs.tags
+        tags = tagops.get_and_validate_tags(tags)
+        copy_firewall_config = self.app.pargs.copy_firewall_config
+        encrypt_ebs_volumes = self.app.pargs.encrypt_ebs_volumes
+        ssl_certificate = self.app.pargs.ssl_certificates
+        archive = self.app.pargs.archive
+        if archive and archive_only:
+            raise ValueError("Cannot use --archive and --archive-only together.")
+        vpc_config = self.app.pargs.vpc_config
+
+        sites = establish_candidate_sites(site_names, interactive)
+        on_an_ec2_instance = True
+        try:
+            environment_vpc, _region, instance_id, instance_tags = (
+                construct_environment_vpc_config(on_prem_mode, verbose)
+            )
+            on_an_ec2_instance = bool(instance_id)
+        except NotAnEC2Instance:
+            environment_vpc, _region, instance_id, instance_tags = (
+                dict(),
+                None,
+                list(),
+                None,
+            )
+            on_an_ec2_instance = False
+        if vpc_config:
+            environment_vpc = load_environment_vpc_from_vpc_config(vpc_config)
+        region = _region or establish_region(region, interactive, app_name, platform)
+        LOG.debug("Writing region_name to .elasticbeanstalk/config")
+        fileoperations.write_config_setting("global", "region_name", region)
+        tags = tags or instance_tags
+        snapshots_string = generate_snapshots_string(ebs_snapshots)
+
+        app_name = establish_app_name(app_name, interactive, sites)
+        env_name = establish_env_name(env_name, app_name, interactive, sites)
+        platform = establish_platform(platform, interactive)
+        process_keyname(keyname)
+
+        listener_configs = []
+        if not _arr_enabled():
+            listener_configs = get_listener_configs(sites, ssl_certificate)
+        all_ports = get_all_ports(sites)
+        ec2_security_group = None
+        load_balancer_security_group = None
+        if on_an_ec2_instance and copy_firewall_config:
+            load_balancer_security_group, ec2_security_group = (
+                ec2.establish_security_group(all_ports, env_name, environment_vpc["id"])
+            )
+
+        source_bundle_zip = None
+        upload_target_dir = None
+        latest_migration_run_path = None
+        if not archive:
+            latest_migration_run_path = setup_migrations_dir(verbose)
+            upload_target_dir = os.path.join(latest_migration_run_path, "upload_target")
+            os.makedirs(upload_target_dir, exist_ok=True)
+            self.package_sites(
+                sites, latest_migration_run_path, upload_target_dir, verbose
+            )
+            write_ebdeploy_utility_script(upload_target_dir)
+            if _arr_enabled():
+                export_arr_config(upload_target_dir, verbose)
+            if copy_firewall_config:
+                write_copy_firewall_config_script(upload_target_dir, sites)
+            fileoperations.zip_up_folder(upload_target_dir, upload_target_zip_path())
+        else:
+            if zipfile.is_zipfile(archive):
+                source_bundle_zip = archive
+            else:
+                upload_target_dir = archive
+                latest_migration_run_path = os.path.dirname(upload_target_dir)
+                self.package_sites(
+                    sites, latest_migration_run_path, upload_target_dir, verbose
+                )
+                fileoperations.zip_up_folder(
+                    upload_target_dir, upload_target_zip_path()
+                )
+        if listener_configs and latest_migration_run_path:
+            with open(
+                os.path.join(latest_migration_run_path, "listener_configs.json"), "w"
+            ) as file:
+                listener_configs_json = {"listener_configs": listener_configs}
+                json.dump(listener_configs_json, file, indent=2)
+        if archive_only and upload_target_dir:
+            generate_upload_target_archive(upload_target_dir, env_name)
+            return
+
+        self.create_app_version_and_environment(
+            app_name=app_name,
+            source_bundle_zip=source_bundle_zip,
+            instance_profile=instance_profile,
+            service_role=service_role,
+            instance_type=instance_type,
+            cname=cname,
+            env_name=env_name,
encrypt_ebs_volumes=encrypt_ebs_volumes, + environment_vpc=environment_vpc, + ec2_security_group=ec2_security_group, + platform=platform, + keyname=keyname, + tags=tags, + snapshots_string=snapshots_string, + listener_configs=listener_configs, + load_balancer_security_group=load_balancer_security_group, + interactive=interactive, + ) + # ------------------------------------------------------------------------------- + # proceed to create application version and the EB environment beyond this point + # ------------------------------------------------------------------------------- + + def create_app_version_and_environment( + self, + app_name, + source_bundle_zip, + instance_profile, + service_role, + instance_type, + cname, + env_name, + encrypt_ebs_volumes, + environment_vpc, + ec2_security_group, + platform, + keyname, + tags, + snapshots_string, + listener_configs, + load_balancer_security_group, + interactive, + ): + version_label = commonops.create_app_version( + app_name, source_bundle=source_bundle_zip or upload_target_zip_path() + ) + instance_profile = establish_instance_profile(instance_profile) + if not service_role: + service_role = createops.create_default_service_role() + instance_type = instance_type or "c5.2xlarge" + cname = cname or get_unique_cname(env_name) + if encrypt_ebs_volumes: + do_encrypt_ebs_volumes() + + if environment_vpc and environment_vpc.get("securitygroups"): + vpc_security_groups = set(environment_vpc["securitygroups"].split(",")) + if ec2_security_group: + vpc_security_groups.add(ec2_security_group.get("Value", set())) + environment_vpc["securitygroups"] = ",".join(list(vpc_security_groups)) + ec2_security_group = None + + root_volume = [ + { + "Namespace": namespaces.LAUNCH_CONFIGURATION, + "OptionName": "RootVolumeSize", + "Value": "60", + } + ] + + env_request = requests.CreateEnvironmentRequest( + app_name=app_name, + env_name=env_name, + platform=platform, + version_label=version_label, + instance_profile=instance_profile, + service_role=service_role, + key_name=keyname, + tags=tags, + vpc=environment_vpc, + elb_type="application", + instance_types=instance_type, + min_instances="1", + max_instances="4", + block_device_mappings=snapshots_string, + listener_configs=listener_configs, + cname=cname, + description="Environment created by `eb migrate`", + load_balancer_security_group=load_balancer_security_group, + ec2_security_group=ec2_security_group, + root_volume=root_volume, + ) + + createops.make_new_env(env_request, interactive=interactive, timeout=15) + + def package_sites( + self, + sites: List["Site"], + latest_migration_run_path: str, + upload_target_dir: str, + verbose: bool, + ) -> None: + """ + Package IIS sites and their components for deployment. + + Creates deployment bundles for specified IIS sites, including their applications + and virtual directories. Generates necessary PowerShell scripts for deployment + and permission management. + + Args: + sites: List of IIS Site objects to package + latest_migration_run_path: Path to store migration artifacts + upload_target_dir: Directory for deployment scripts and bundles + verbose: If True, provides detailed output during packaging + + Process Flow: + 1. Announces sites being packaged (if not verbose) + 2. Generates MS Deploy bundles for each site + 3. Creates necessary PowerShell scripts: + - noop.ps1 for placeholder operations + - Virtual directory permission script (if needed) + 4. 
Updates manifest with virtual directory configurations + + Notes: + - Tracks additional virtual directory paths across all sites + - Creates permission management scripts only if virtual directories exist + - Uses MS Deploy for package generation + - Maintains list of physical paths requiring special permissions + + Example Output (non-verbose): + Generating source bundle for sites, applications, and virtual directories: [Site1, Site2] + """ + additional_virtual_dir_physical_paths = [] + if not verbose: + command_separated_sites_list = ", ".join([s.Name for s in sites]) + io.echo( + f"Generating source bundle for sites, applications, and virtual directories: [{command_separated_sites_list}]" + ) + for site in sites: + self.generate_ms_deploy_source_bundle( + site, + destination=latest_migration_run_path, + verbose=verbose, + additional_virtual_dir_physical_paths=additional_virtual_dir_physical_paths, + ) + create_noop_ps1_script(upload_target_dir) + if additional_virtual_dir_physical_paths: + create_virtualdir_path_permission_script( + additional_virtual_dir_physical_paths, upload_target_dir + ) + add_virtual_directory_custom_script_to_manifest(upload_target_dir) + + +def get_all_ports(sites): + all_ports = set() + for site in sites: + for binding in site.Bindings: + all_ports.add(int(binding.get_BindingInformation().split(":")[1])) + return all_ports + + +def get_unique_non_80_ports(sites): + all_ports = set() + for site in sites: + bindings = site.Bindings + for binding in bindings: + port = binding.BindingInformation.split(":")[1] + if port != "80": + all_ports.add(port) + return all_ports + + +def absolute_to_relative_normalized_path(abs_path): + relative_path = os.path.relpath(abs_path, os.getcwd()) + path_parts = relative_path.split(os.sep) # Split path into components + for i, part in enumerate(path_parts): + if part.startswith("migration_"): + path_parts[i] = "latest" + return os.path.join(*path_parts) + + +def do_encrypt_ebs_volumes(): + try: + ec2.enable_ebs_volume_encryption() + except Exception as e: + io.log_error(f"Failed to enable EBS volume encryption: {e}") + raise e + + +def establish_instance_profile(instance_profile): + instance_profile = instance_profile or commonops.create_default_instance_profile() + fileoperations.write_config_setting("global", "instance_profile", instance_profile) + return instance_profile + + +def generate_upload_target_archive(upload_target_dir, env_name): + fileoperations.zip_up_folder(upload_target_dir, upload_target_zip_path()) + relative_normalized_upload_target_dir_path = absolute_to_relative_normalized_path( + upload_target_dir + ) + + try: + test_environment_exists(env_name) + io.echo( + f"\nGenerated destination archive ZIP at .\\{relative_normalized_upload_target_dir_path}.zip. " + "You can now upload the zip using:\n\n" + " eb deploy --zip .\\migrations\\latest\\upload_target.zip\n" + ) + except NotFoundError: + io.echo( + f"\nGenerated destination archive directory at .\\{relative_normalized_upload_target_dir_path}.zip. 
" + "You can create en environment with the zip using:\n\n" + " eb migrate --archive .\\migrations\\latest\\upload_target.zip\n" + ) + + +def test_environment_exists(env_name): + elasticbeanstalk.get_environment(env_name=env_name) + + +def upload_target_zip_path(): + return os.path.join(os.getcwd(), "migrations", "latest", "upload_target.zip") + + +def add_virtual_directory_custom_script_to_manifest(upload_target_dir): + manifest_file_path = os.path.join( + upload_target_dir, "aws-windows-deployment-manifest.json" + ) + if os.path.exists(manifest_file_path): + with open(manifest_file_path) as file: + manifest_contents = json.load(file) + else: + manifest_contents = { + "manifestVersion": 1, + "deployments": {"msDeploy": [], "custom": []}, + } + manifest_contents["deployments"]["custom"].append( + create_custom_manifest_section( + "FixVirtualDirPermissions", + "add_virtual_dir_read_access.ps1", + "noop.ps1", + "noop.ps1", + ) + ) + with open(manifest_file_path, "w") as file: + json.dump(manifest_contents, file, indent=4) + + +def process_keyname(keyname): + if keyname: + commonops.upload_keypair_if_needed(keyname) + LOG.debug("Writing default_ec2_keyname to .elasticbeanstalk/config") + fileoperations.write_config_setting("global", "default_ec2_keyname", keyname) + + +def establish_platform(platform, interactive): + if not platform and interactive: + platform = platformops.prompt_for_platform() + elif not platform: + io.echo("Determining EB platform based on host machine properties") + platform = _determine_platform(platform_string=get_windows_server_version()) + else: + io.echo(f"Determining EB platform based on input, {platform}") + platform = _determine_platform(platform) + LOG.debug("Writing platform_name to .elasticbeanstalk/config") + fileoperations.write_config_setting("global", "platform_name", platform.name) + return platform + + +def establish_env_name(env_name, app_name, interactive, sites): + if not env_name and interactive: + env_name = get_environment_name(app_name) + elif not env_name: + LOG.debug("Setting env_name to site_name with whitespaces removed") + if len(sites) == 1: + candidate_env_name = sites[0].Name.replace(" ", "") + else: + candidate_env_name = "EBMigratedEnv" + env_name = get_unique_environment_name(candidate_env_name) + return env_name + + +def establish_app_name(app_name, interactive, sites): + if not app_name and interactive: + app_name = _get_application_name_interactive() + elif not app_name: + LOG.debug("Setting app_name to site_name with whitespaces removed") + if len(sites) == 1: + app_name = sites[0].Name.replace(" ", "") + else: + app_name = "EBMigratedApp" + LOG.debug("Writing application_name to .elasticbeanstalk/config") + fileoperations.write_config_setting("global", "application_name", app_name) + return app_name + + +def generate_snapshots_string(ebs_snapshots): + snapshots_string = [] + if ebs_snapshots: + char_iter = iter(string.ascii_lowercase) + io.echo(f"Using input EBS snapshot configuration: {snapshots_string}") + snapshots_string = ",".join( + [f"/dev/sd{next(char_iter)}={snapshot}" for snapshot in ebs_snapshots] + ) + return snapshots_string + + +def establish_region(region, interactive, app_name, platform): + if not region and interactive: + region = commonops.get_region(None, True) + elif not region: + region = commonops.get_region_force_non_interactive(platform) + aws.set_region(region) + fileoperations.create_config_file( + app_name=app_name, + region=region, + solution_stack=platform, + workspace_type="Application", + ) + return 
region + + +def establish_candidate_sites( + site_names: Optional[str], interactive: bool +) -> List["Site"]: + """ + Determine which IIS sites to include in the migration process. + + Resolves the list of IIS sites to migrate based on input parameters and + available sites. Sites can be specified explicitly, chosen interactively, + or determined automatically based on the presence of Default Web Site. + + Args: + site_names: Comma-separated string of site names to migrate. + If None, uses interactive or default behavior. + interactive: If True and site_names is None, prompts user to select + a site from available options. + + Returns: + List of IIS Site objects to be migrated + + Selection Logic: + 1. If site_names provided: + - Validates all specified sites exist + - Returns corresponding Site objects + 2. If interactive and no site_names: + - Prompts user to select one site + - Returns list with selected site + 3. If non-interactive and no site_names: + - If Default Web Site exists, returns all sites + - Otherwise, raises error + + Raises: + ValueError: If specified site name doesn't exist + EnvironmentError: If no sites specified in non-interactive mode and + Default Web Site doesn't exist + + Example: + >>> # Explicit selection + >>> sites = establish_candidate_sites("Site1,Site2", False) + >>> # Interactive selection + >>> sites = establish_candidate_sites(None, True) + >>> # Default behavior + >>> sites = establish_candidate_sites(None, False) + """ + server_manager = ServerManager() + if not server_manager.Sites: + raise ValueError( + "`eb migrate` failed because there are no sites on this IIS server." + ) + available_sites = [s.Name for s in server_manager.Sites] + if site_names: + site_names = site_names.split(",") + + if site_names: + for site_name in site_names: + if site_name not in available_sites: + raise ValueError( + f"Specified site, '{site_name}', does not exist. Available sites: [{', '.join(available_sites)}]" + ) + sites = server_manager.Sites + elif not site_names and interactive: + io.echo("Select an IIS site to migrate:") + site_name = utils.prompt_for_item_in_list( + [s.Name for s in server_manager.Sites], default="1" + ) + site = [s for s in server_manager.Sites if s.Name == site_name][0] + sites = [site] + else: + sites = server_manager.Sites + if not sites: + raise EnvironmentError( + "`eb migrate` failed because there are no sites on this IIS server." 
+            )
+    return sites
+
+
+def list_sites_verbosely():
+    # TODO: Show URL rewrites and proxy information
+    for i, site in enumerate(ServerManager().Sites, 1):
+        io.echo(f"{i}: {site.Name}:")
+        io.echo(f" - Bindings:")
+        for binding in site.Bindings:
+            io.echo(f" - {binding.BindingInformation}")
+        for application in site.Applications:
+            io.echo(f" - Application '{application.Path}':")
+            io.echo(f" - Application Pool: {application.ApplicationPoolName}")
+            io.echo(f" - Enabled Protocols: {application.EnabledProtocols}")
+            io.echo(f" - Virtual Directories:")
+            virdirs = application.VirtualDirectories
+            for vdir in virdirs:
+                io.echo(f" - {vdir.Path}:")
+                io.echo(f" - Physical Path: {vdir.PhysicalPath}")
+                io.echo(f" - Logon Method: {vdir.LogonMethod}")
+                if vdir.UserName:
+                    io.echo(f" - Username: {vdir.UserName}")
+                if vdir.Password:
+                    io.echo(" - Password: ")
+    try:
+        users = get_local_users()
+    except Exception:
+        return
+    io.echo("----------------------------------------------------")
+    io.echo("Users:")
+    for username, homedir in users:
+        io.echo(f" - {username}")
+        io.echo(f" - Home: {homedir}")
+
+
+def get_local_users():
+    ctx = PrincipalContext(ContextType.Machine)
+    user_principal = UserPrincipal(ctx)
+    searcher = PrincipalSearcher(user_principal)
+
+    users = searcher.FindAll()
+    user_list = [
+        (user.SamAccountName, user.HomeDirectory) for user in users if user.Enabled
+    ]
+
+    return user_list
+
+
+def load_environment_vpc_from_vpc_config(vpc_config: str) -> Dict[str, Any]:
+    """
+    Load and validate VPC configuration from either a JSON file or JSON string.
+
+    Parses VPC configuration from either a .json file or a JSON-formatted string,
+    validates required fields, and provides defaults for optional parameters.
+
+    Args:
+        vpc_config: Either:
+            - Path to a JSON file (must end in .json)
+            - JSON-formatted string containing VPC configuration
+
+    Returns:
+        Dictionary containing VPC configuration with keys:
+            - id: (required) VPC ID
+            - publicip: (optional) Whether to assign public IPs, default True
+            - elbscheme: (optional) ELB scheme, default "public"
+            - ec2subnets: (optional) List of EC2 subnet IDs, default []
+            - securitygroups: (optional) Comma-separated security group IDs, default ""
+            - elbsubnets: (optional) List of ELB subnet IDs, default []
+
+    Raises:
+        FileNotFoundError: If vpc_config is a file path and:
+            - File doesn't exist
+            - File can't be opened
+            - File is a directory
+            - Permission denied
+        ValueError: If:
+            - JSON parsing fails
+            - Required 'id' field is missing
+            - Invalid JSON format in file or string
+
+    Example JSON Format:
+        {
+            "id": "vpc-1234567890abcdef0",
+            "publicip": true,
+            "elbscheme": "public",
+            "ec2subnets": ["subnet-123", "subnet-456"],
+            "securitygroups": ["sg-123", "sg-456"],
+            "elbsubnets": ["subnet-789", "subnet-abc"]
+        }
+    """
+    if vpc_config.endswith(".json"):
+        try:
+            with open(os.path.join(vpc_config)) as file:
+                vpc_config_dict = json.load(file)
+        except (FileNotFoundError, PermissionError, IsADirectoryError, OSError):
+            raise FileNotFoundError(
+                f"Cannot open file {vpc_config} to parse VPC config. Verify that it exists and contains valid JSON."
+            )
+        except json.JSONDecodeError:
+            raise ValueError(
+                f"Cannot parse {vpc_config}. Verify that it is a valid JSON file."
+            )
+    else:
+        try:
+            vpc_config_dict = json.loads(vpc_config)
+        except json.JSONDecodeError:
+            raise ValueError(
+                f"Cannot parse VPC config: {vpc_config}. Verify that it is a valid JSON string."
+            )
+
+    try:
+        vpc_config_dict["id"]
+    except KeyError:
+        raise ValueError(f"Must specify a VPC ID in VPC config file '{vpc_config}'")
+
+    return {
+        "id": vpc_config_dict["id"],
+        "publicip": vpc_config_dict.get("publicip", True),
+        "elbscheme": vpc_config_dict.get("elbscheme", "public"),
+        "ec2subnets": vpc_config_dict.get("ec2subnets", []),
+        "securitygroups": ",".join(vpc_config_dict.get("securitygroups", [])),
+        "elbsubnets": vpc_config_dict.get("elbsubnets", []),
+    }
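# A minimal usage sketch (the VPC, subnet, and security-group IDs below are
# placeholders, not real resources) showing how the two accepted --vpc-config
# inputs are normalized by load_environment_vpc_from_vpc_config above:
def _vpc_config_usage_sketch():
    inline_json = (
        '{"id": "vpc-0abc1234", '
        '"ec2subnets": ["subnet-1", "subnet-2"], '
        '"securitygroups": ["sg-1", "sg-2"]}'
    )
    normalized = load_environment_vpc_from_vpc_config(inline_json)
    # Unspecified keys fall back to defaults: publicip=True, elbscheme="public",
    # elbsubnets=[]; the security-group list is flattened to "sg-1,sg-2".
    # A path ending in .json is also accepted and read from disk instead:
    #     load_environment_vpc_from_vpc_config("vpc_config.json")
    return normalized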
+
+
+def construct_environment_vpc_config(
+    on_prem_mode: bool, verbose: bool
+) -> Tuple[Dict[str, str], Optional[str], Optional[str], List[Dict[str, str]]]:
+    """
+    Detect and construct VPC configuration from current EC2 instance or handle on-premises scenario.
+
+    Attempts to gather VPC configuration from the current EC2 instance, including
+    subnets, security groups, and tags. Falls back to empty configuration if running
+    on-premises or if EC2 detection fails.
+
+    Args:
+        on_prem_mode: If True, skip EC2 detection and return empty configuration
+        verbose: If True, print detailed VPC configuration information
+
+    Returns:
+        Tuple containing:
+            - Dict[str, str]: VPC configuration with keys:
+                * id: VPC ID
+                * publicip: Always 'true'
+                * elbscheme: Always 'public'
+                * ec2subnets: Comma-separated list of first 3 subnet IDs
+                * securitygroups: Comma-separated list of security group IDs
+                * elbsubnets: Same as ec2subnets
+            - Optional[str]: AWS region of instance, or None if not on EC2
+            - Optional[str]: Instance ID, or None if not on EC2
+            - List[Dict[str, str]]: Instance tags, excluding AWS system tags
+
+    Notes:
+        - Uses interleaved AZ subnet selection for high availability
+        - Only includes first 3 subnets for EC2 and ELB
+        - Filters out system tags (elasticbeanstalk:*, aws:*, Name)
+        - Returns empty VPC config if:
+            * on_prem_mode is True
+            * Not running on EC2
+            * EC2 metadata access fails
+
+    Example Output (verbose=True):
+        Identifying VPC configuration of this EC2 instance (i-1234567890abcdef0):
+            id: vpc-1234567890abcdef0
+            publicip: true
+            elbscheme: public
+            ec2subnets: subnet-123,subnet-456,subnet-789
+            securitygroups: sg-123,sg-456
+            elbsubnets: subnet-123,subnet-456,subnet-789
+    """
+    environment_vpc = dict()
+    region = None
+    tags = []
+    instance_id = None
+
+    current_instance_details = ec2.get_current_instance_details()
+
+    try:
+        if on_prem_mode:
+            raise NotAnEC2Instance("Pretend this is an on-prem instance")
+
+        instance_id = current_instance_details["InstanceId"]
+        _vpc = current_instance_details["VpcId"]
+        security_groups = current_instance_details["SecurityGroupIds"]
+        subnets = ",".join(ec2.list_subnets_azs_interleaved(_vpc)[:3])
+        region = current_instance_details["Region"]
+        environment_vpc = {
+            "id": _vpc,
+            "publicip": "true",
+            "elbscheme": "public",
+            "ec2subnets": subnets,
+            "securitygroups": ",".join(security_groups),
+            "elbsubnets": subnets,
+        }
+        io.echo(f"Identifying VPC configuration of this EC2 instance ({instance_id}):")
+        if verbose:
+            for key, value in environment_vpc.items():
+                io.echo(f" {key}: {value}")
+        tags = [
+            tag
+            for tag in current_instance_details["Tags"]
+            if not (
+                tag["Key"].startswith("elasticbeanstalk:")
+                or tag["Key"].startswith("aws:")
+                or tag["Key"] == "Name"
+            )
+        ]
+    except NotAnEC2Instance:
+        raise
+    except Exception:
+        io.echo(
+            f"Unable to detect EC2 configuration. 
Possibly executing on a non-EC2 instance" + ) + pass + return environment_vpc, region, instance_id, tags + + +def _determine_platform(platform_string=None): + if not platform_string: + platform_string = platformops.get_configured_default_platform() + + if platform_string: + platform = platformops.get_platform_for_platform_string(platform_string) + else: + raise ValueError( + f"Couldn't identify a platform based on hint: {platform_string}" + ) + + if isinstance(platform, PlatformVersion): + platform.hydrate(elasticbeanstalk.describe_platform_version) + statusops.alert_platform_status(platform) + + return platform + + +def setup_migrations_dir(verbose: bool) -> str: + """ + Create and configure a timestamped migration directory structure. + + Creates a migrations directory with a timestamped subdirectory for the current + migration run, and sets up a 'latest' symlink pointing to it. The directory + structure is used to store migration artifacts, logs, and deployment files. + + Args: + verbose: If True, prints detailed information about log file locations + and directory purposes + + Returns: + str: Absolute path to the newly created migration directory + + Directory Structure: + migrations/ + ├── latest -> migration_[timestamp]/ (symlink) + └── migration_[timestamp]/ + ├── application.log (msbuild.exe logs) + ├── error.log (msbuild.exe errors) + └── upload_target/ (deployment artifacts) + + Notes: + - Creates 'migrations' directory in current working directory if it doesn't exist + - Generates unique directory name using UTC timestamp + - Updates 'latest' symlink to point to new directory + - Preserves original working directory + - Creates directories with exist_ok=True to handle race conditions + + Example Output: + Using .\\migrations\\migration_1708445678.123456 to contain artifacts for this migration run. + If verbose: + .\\migrations\\migration_1708445678.123456\\application.log -> msbuild.exe application logs + .\\migrations\\migration_1708445678.123456\\error.log -> msbuild.exe error logs + .\\migrations\\migration_1708445678.123456\\upload_target\\ -> destination archive dir + """ + migrations_dir = "migrations" + migrations_dir_path = os.path.join(os.getcwd(), migrations_dir) + cwd = os.getcwd() + os.makedirs(migrations_dir_path, exist_ok=True) + os.chdir(migrations_dir_path) + latest_migration_dir_name = "migration_" + str( + datetime.datetime.utcnow().timestamp() + ) + os.makedirs(latest_migration_dir_name) + if os.path.exists("latest"): + os.unlink("latest") + os.symlink(latest_migration_dir_name, "latest", target_is_directory=True) + os.chdir(cwd) + latest_migration_run_path = os.path.join( + migrations_dir_path, latest_migration_dir_name + ) + relative_path = os.path.relpath(latest_migration_run_path, os.curdir) + relative_normalized_path = absolute_to_relative_normalized_path(relative_path) + io.echo( + f"Using .\\{relative_normalized_path} to contain artifacts for this migration run." 
+ ) + if verbose: + io.echo( + f" .\\{relative_normalized_path}\\application.log -> msbuild.exe application logs" + ) + io.echo(f" .\\{relative_normalized_path}\\error.log -> msbuild.exe error logs") + io.echo( + f" .\\{relative_normalized_path}\\upload_target\\ -> destination archive dir" + ) + return latest_migration_run_path + + +def get_environment_name(app_name): + return io.prompt_for_environment_name(get_unique_environment_name(app_name)) + + +def get_unique_environment_name(app_name): + default_name = app_name + "-dev" + current_environments = elasticbeanstalk.get_all_environment_names() + + return utils.get_unique_name(default_name, current_environments) + + +def _get_application_name_interactive(): + app_list = elasticbeanstalk.get_application_names() + file_name = fileoperations.get_current_directory_name() + new_app = False + if len(app_list) > 0: + io.echo() + io.echo("Select an application to use") + new_app_option = "[ Create new Application ]" + app_list.append(new_app_option) + try: + default_option = app_list.index(file_name) + 1 + except ValueError: + default_option = len(app_list) + app_name = utils.prompt_for_item_in_list(app_list, default=default_option) + if app_name == new_app_option: + new_app = True + + if len(app_list) == 0 or new_app: + io.echo() + io.echo("Enter Application Name") + unique_name = utils.get_unique_name(file_name, app_list) + app_name = io.prompt_for_unique_name(unique_name, app_list) + + return app_name + + +def get_registry_value(key, subkey, value): + try: + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) + return winreg.QueryValueEx(key, value)[0] + except WindowsError: + return None, None + + +def get_windows_product_name(): + product_name = get_registry_value( + winreg.HKEY_LOCAL_MACHINE, + r"SOFTWARE\Microsoft\Windows NT\CurrentVersion", + "ProductName", + ) + + return product_name + + +def get_windows_server_version(): + product_name = get_windows_product_name() + return product_name.replace(" Datacenter", "") + + +def get_unique_cname(env_name): + """ + Derive a unique CNAME for a new environment based on the environment name + :param env_name: name of the environment + directory + :return: A unique CNAME for a new environment + """ + cname = env_name + tried_cnames = [] + while not elasticbeanstalk.is_cname_available(cname): + tried_cnames.append(cname) + utils.sleep(0.5) + cname = utils.get_unique_name(cname, tried_cnames) + return cname + + +def get_iis_version_from_registry() -> str: + """ + Retrieve the IIS version from the Windows registry. + + Returns: + str: The IIS version number. + + Raises: + OSError: If unable to access the registry or retrieve the IIS version. + """ + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\InetStp" + ) as key: + version = winreg.QueryValueEx(key, "VersionString")[0] + return version.split()[-1] + except (OSError, IndexError) as e: + raise OSError( + "Unable to retrieve IIS version from Windows registry. " + "Please ensure that IIS (version 7.0 or later) is installed " + "and that you have sufficient permissions to access the registry." + ) from e + + +def validate_iis_version_greater_than_7_0() -> None: + """ + Validate that the installed IIS version is 7.0 or later. + + Raises: + EnvironmentError: If the IIS version is less than 7.0 or cannot be determined. + """ + try: + iis_version = float(get_iis_version_from_registry()) + if iis_version < 7.0: + raise EnvironmentError( + f"IIS version {iis_version} is not supported. 
" + "Please upgrade to IIS version 7.0 or later." + ) + except ValueError as e: + raise EnvironmentError( + "Unable to determine IIS version. " + "Please ensure that IIS (version 7.0 or later) is properly installed." + ) from e + + +def get_all_assemblies(root_assembly): + visited = HashSet[str]() + queue = Queue[Assembly]() + queue.Enqueue(root_assembly) + + assemblies = [] + + while queue.Count > 0: + current_asm = queue.Dequeue() + + if not visited.Contains(current_asm.FullName): + visited.Add(current_asm.FullName) + assemblies.append(current_asm) + + # Enumerate references of the current assembly + for ref_name in current_asm.GetReferencedAssemblies(): + try: + ref_asm = Assembly.Load(ref_name) + if ref_asm is not None and not visited.Contains(ref_asm.FullName): + queue.Enqueue(ref_asm) + except Exception as e: + # Handle cases where an assembly fails to load + LOG.debug( + f"Could not load referenced assembly {ref_name.Name}: {e}" + ) + io.log_warning(f"Could not load {ref_name.Name}.") + + return assemblies + + +def copy_assemblies_into_bin(bin_path: str, site_name: str) -> None: + root_assembly = Assembly.LoadFrom(os.path.join(bin_path, f"{site_name}.dll")) + LOG.debug(f"Transitive dependencies: ") + for asm in get_all_assemblies(root_assembly): + if asm.Location == os.path.join(bin_path, f"{site_name}.dll"): + continue + LOG.debug(f"Copying {asm.FullName}: {asm.Location} into {bin_path}") + + +def hsts_disablement_arg(site): + try: + if not site.HSTS.Enabled: + return f'-skip:objectName=hsts,absolutePath="{site.Name}"' + except AttributeError as e: + if not "'Site' object has no attribute 'HSTS'" in str(e): + raise e + return "" + + +def ms_deploy_sync_application( + site: "Site", + application: "Application", + destination: str, + upload_target_dir: str, + manifest_contents: Dict[str, Any], +) -> None: + """ + Synchronize an IIS application to deployment artifacts and update the manifest. + + Creates deployment artifacts and manifest entries for an IIS application, + handling different deployment scenarios based on whether the application + belongs to the Default Web Site or a custom site. + + Args: + site: IIS Site object containing the application + application: IIS Application object to be synchronized + destination: Directory for deployment artifacts and logs + upload_target_dir: Target directory for generated files + manifest_contents: Elastic Beanstalk deployment manifest to update + + Manifest Handling: + - Default Web Site applications: + * Added to 'msDeploy' section + * Uses Web Deploy for deployment + * May include port reassignment if not on port 80 + - Custom site applications: + * Added to 'custom' section + * Includes installation, restart, and removal scripts + * May include ARR configuration if needed + + Special Cases: + 1. ARR Configuration: + - Generates ARR configuration scripts if proxy is enabled + - Adds Windows Proxy Feature enablement scripts + 2. Port Reassignment: + - Handles Default Web Site running on non-standard ports + 3. 
IIS Start Page: + - Adds configuration for iisstart.htm if present + + Generated Files: + - Application bundle (.zip) + - PowerShell scripts for site management + - ARR configuration scripts (if needed) + - Port reassignment scripts (if needed) + - Default document configuration (if needed) + + Notes: + - File paths in manifest are normalized and made relative to CWD + - Site names are normalized (spaces removed) for file naming + - Manifest sections are added only if they don't already exist + - Uses 'noop.ps1' for optional restart/uninstall operations + """ + _normalized_application_name = normalized_application_name(site, application) + destination_archive_path = os.path.join( + upload_target_dir, _normalized_application_name + ) + relative_path = os.path.relpath(upload_target_dir, os.curdir) + relative_normalized_path = absolute_to_relative_normalized_path(relative_path) + io.echo( + f" {site.Name}{application.Path} -> .\\{relative_normalized_path}\\{_normalized_application_name}.zip" + ) + _iis_application_name_value = iis_application_name_value(site, application) + ms_deploy_args_str = construct_ms_deploy_command_for_application( + site, application, _iis_application_name_value, destination_archive_path + ) + LOG.debug(" Executing the following script to create destination application:") + LOG.debug(f"\n {ms_deploy_args_str}\n") + do_ms_deploy_sync_application( + ms_deploy_args_str, + destination, + destination_archive_path, + upload_target_dir, + _normalized_application_name, + ) + manifest_section_name = _iis_application_name_value.strip("/") + + application_pool_name = application.ApplicationPoolName + virts = [virt for virt in application.VirtualDirectories if virt.Path == "/"] + if not virts: + return + physical_path = virts[0].PhysicalPath + contains_iistart_htm = os.path.exists(os.path.join(physical_path, "iisstart.htm")) + + if site.Name != "Default Web Site": + installation_script_name = f"install_site_{site.Name.replace(' ', '')}.ps1" + removal_script_name = f"remove_site_{site.Name.replace(' ', '')}.ps1" + restart_script_name = f"restart_site_{site.Name.replace(' ', '')}.ps1" + + write_custom_site_installer_script( + upload_target_dir, + site.Name, + site.Bindings, + physical_path, + installation_script_name, + ) + write_custom_site_removal_script( + upload_target_dir, site.Name, removal_script_name + ) + write_custom_site_restarter_script( + upload_target_dir, site.Name, restart_script_name + ) + manifest_section = create_custom_manifest_section( + manifest_section_name, + installation_script_name, + restart_script_name, + removal_script_name, + f"Custom script to install {site.Name}", + ) + manifest_contents["deployments"]["custom"].append(manifest_section) + if _arr_enabled(): + write_windows_proxy_feature_enabler_script(upload_target_dir) + manifest_section = create_custom_manifest_section( + "WindowsProxyFeatureEnabler", + "windows_proxy_feature_enabler.ps1", + "noop.ps1", + "noop.ps1", + f"Custom script to execute Install-WindowsFeature Web Proxy", + ) + add_unique_manifest_section( + "WindowsProxyFeatureEnabler", manifest_contents, manifest_section + ) + + write_arr_configuration_importer_script(upload_target_dir) + manifest_section = create_custom_manifest_section( + "ArrConfigurationImporterScript", + "arr_configuration_importer_script.ps1", + "noop.ps1", + "noop.ps1", + f"Custom script to enable ARR proxy", + ) + add_unique_manifest_section( + "ArrConfigurationImporterScript", manifest_contents, manifest_section + ) + # 
manifest_contents['deployments']['custom'].append(manifest_section) + else: + manifest_section = { + "name": manifest_section_name, + "parameters": { + "appBundle": f"{_normalized_application_name}.zip", + "iisPath": application.Path, + "iisWebSite": site.Name, + }, + } + post_install_custom_script_section = None + for binding in site.Bindings: + host, port, domain = binding.get_BindingInformation().split(":") + if port != "80": + port_reassignment_script_name = ( + "default_web_site_port_reassignment_script.ps1" + ) + write_default_web_site_port_reassignment_script( + upload_target_dir, binding, port_reassignment_script_name + ) + post_install_custom_script_section = create_custom_manifest_section( + "ExecuteDefaultWebSitePortReassignment", + port_reassignment_script_name, + port_reassignment_script_name, + port_reassignment_script_name, + f"Perform port-reassignment for {site.Name} away from port 80", + ) + break + manifest_contents["deployments"]["msDeploy"].append(manifest_section) + if post_install_custom_script_section: + add_unique_manifest_section( + "ExecuteDefaultWebSitePortReassignment", + manifest_contents, + post_install_custom_script_section, + ) + # TODO: identify all DefaultDocuments for a given site and determine + # whether there are any extra ones to account for + if contains_iistart_htm: + write_reinstate_iisstart_htm_default_document_script(upload_target_dir) + manifest_section = create_custom_manifest_section( + "ReinstateIISStartHTMDefaultDocumentScript", + "reinstate_iisstart_htm_default_document.ps1", + "noop.ps1", + "noop.ps1", + f"Custom script to enable iisstart.htm default document", + ) + add_unique_manifest_section( + "ExecuteDefaultWebSitePortReassignment", manifest_contents, manifest_section + ) + + +def add_unique_manifest_section( + section_name: str, + manifest_contents: Dict[str, Any], + section_contents: Dict[str, Any], +) -> None: + """ + Add a section to the deployment manifest only if it doesn't already exist. + + Checks the custom deployments section of an Elastic Beanstalk manifest for + a section with the specified name, and adds the new section only if no + section with that name exists. + + Args: + section_name: Name of the section to check for and potentially add + manifest_contents: Complete manifest dictionary containing the deployments + structure with a 'custom' list + section_contents: New section configuration to add if section_name + doesn't exist + + Notes: + - Operates on manifest_contents in-place + - Only checks sections under ['deployments']['custom'] + - Preserves existing section if name match is found + - Appends new section if no matching name is found + + Example: + >>> manifest = { + ... 'deployments': { + ... 'custom': [ + ... {'name': 'existing_section', 'scripts': {...}} + ... ] + ... } + ... } + >>> new_section = {'name': 'new_section', 'scripts': {...}} + >>> add_unique_manifest_section('new_section', manifest, new_section) + """ + found = False + for manifest_section in manifest_contents["deployments"]["custom"]: + if manifest_section["name"] == section_name: + found = True + break + if not found: + manifest_contents["deployments"]["custom"].append(section_contents) + + +def cleanup_previous_migration_artifacts(force: bool, verbose: bool) -> None: + """ + Clean up old migration directories while preserving the 'latest' migration. + + Removes previous migration artifacts from the 'migrations' directory, keeping + only the 'latest' symbolic link and its target. Can operate in interactive + or force mode. 
+ + Args: + force: If True, skip confirmation prompt and delete automatically. + If False, prompt user for confirmation. + verbose: If True, print detailed information about deleted directories. + If False, log deletions at debug level only. + + Directory Structure: + migrations/ + ├── latest -> timestamp_directory/ (symlink preserved) + ├── timestamp_directory/ (preserved if latest) + ├── old_timestamp_1/ (deleted) + └── old_timestamp_2/ (deleted) + + Notes: + - Only operates if 'migrations' directory exists in current working directory + - Preserves 'latest' symlink and its target directory + - Prompts for confirmation unless force=True + - Logs or prints deletion operations based on verbose setting + - Handles both relative and absolute paths safely + + Example: + >>> cleanup_previous_migration_artifacts(force=True, verbose=True) + # Output: + # Deleting older migration artifacts. + # - Deleting directory: migrations/20240219_120000 + # - Deleting directory: migrations/20240218_120000 + """ + migration_dir_name = "migrations" + if not os.path.exists(migration_dir_name): + io.echo("There is no directory called 'migrations' in PWD. Nothing to do.") + return + should_delete = force or io.get_boolean_response( + text=prompts["migrate.should_cleanup"], default=False + ) + if not should_delete: + return + latest_path = os.path.realpath(os.path.join(migration_dir_name, "latest")) + io.echo("Deleting older migration artifacts.") + for item in os.listdir(migration_dir_name): + if item == "latest": + continue + item_path = os.path.abspath(os.path.join("migrations", item)) + if os.path.isdir(item_path) and item_path != latest_path: + if verbose: + io.echo(f" - Deleting directory: {item_path}") + else: + LOG.debug(f" - Deleting directory: {item_path}") + shutil.rmtree(item_path) + + +def do_ms_deploy_sync_application( + ms_deploy_args_str: str, + destination: str, + destination_archive_path: str, + upload_target_dir: str, + _normalized_application_name: str, +) -> None: + """ + Execute Web Deploy synchronization for an IIS application and process the results. + + Runs msdeploy.exe with specified arguments to export an IIS application, + captures output and errors, and packages the result into a ZIP file if successful. + + Args: + ms_deploy_args_str: Complete Web Deploy command arguments string + destination: Directory for log files + destination_archive_path: Temporary directory for Web Deploy output + upload_target_dir: Target directory for final ZIP file + _normalized_application_name: Sanitized application name for ZIP file + + Raises: + RuntimeError: If Web Deploy process exits with non-zero status + + Process Flow: + 1. Locates msdeploy.exe + 2. Executes Web Deploy with specified arguments + 3. Captures stdout/stderr to log files + 4. On success: + - Creates ZIP from destination_archive_path + - Cleans up temporary directory + 5. 
On failure: + - Logs error + - Raises exception with exit code + + Notes: + - Creates/appends to 'application.log' and 'error.log' in destination directory + - Final ZIP file will be named '{_normalized_application_name}.zip' + - Cleans up temporary archive directory after successful ZIP creation + """ + ms_deploy_exe, use_64bit = get_webdeployv3path() + + start_info = ProcessStartInfo() + start_info.FileName = ms_deploy_exe + start_info.Arguments = ms_deploy_args_str + start_info.RedirectStandardOutput = True + start_info.RedirectStandardError = True + start_info.UseShellExecute = False + start_info.CreateNoWindow = True + + # Start the process + process = Process() + process.StartInfo = start_info + process.Start() + + # Redirect standard output and error to files + with open(os.path.join(destination, "application.log"), "a") as stdout_file: + stdout_file.write(process.StandardOutput.ReadToEnd()) + + with open(os.path.join(destination, "error.log"), "a") as stderr_file: + stderr_file.write(process.StandardError.ReadToEnd()) + + process.WaitForExit() + + if process.ExitCode == 0: + fileoperations.zip_up_folder( + destination_archive_path, + os.path.join(upload_target_dir, f"{_normalized_application_name}.zip"), + ) + shutil.rmtree(destination_archive_path) + else: + io.log_error(f"MSDeploy process exited with code {process.ExitCode}.") + raise RuntimeError( + f"MSDeploy process exited with code {process.ExitCode}. You can find execution logs at .\\migrations\\latest\\error.log')" + ) + + +def construct_ms_deploy_command_for_application( + site: "Site", + application: "Application", + _iis_application_name_value: str, + destination_archive_path: str, +) -> str: + """ + Construct Web Deploy (MSDeploy) command for exporting an IIS application. + + Builds a command string for Web Deploy to synchronize an IIS application + configuration to an archive directory, including application pool settings + and security configurations. + + Args: + site: IIS Site object containing the application + application: IIS Application object to be exported + _iis_application_name_value: Full application path in IIS format + (e.g., "Default Web Site/MyApp") + destination_archive_path: Path where the application archive will be created + + Returns: + str: Complete MSDeploy command string with all necessary parameters and + settings for application export + + Command Components: + - Sync verb for export operation + - Source from IIS application host configuration + - Destination archive directory + - Application pool configuration and parameterization + - IIS application name parameterization + - Security content handling + - HSTS settings handling + + Example: + >>> cmd = construct_ms_deploy_command_for_application( + ... site, + ... app, + ... "Default Web Site/MyApp", + ... "C:\\export\\myapp" + ... ) + >>> # Returns: "-verb:sync -source:apphostconfig=... -dest:archiveDir=..." 
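        Expanded illustration of the returned string for the call above (the path
        and application-pool name are placeholders; the HSTS skip argument is
        omitted whenever hsts_disablement_arg returns an empty string). The real
        value is a single space-joined string; it is wrapped here for readability:

            -verb:sync
            -source:apphostconfig="Default Web Site/MyApp"
            -dest:archiveDir='C:\export\myapp'
            -enableLink:AppPoolExtension
            -declareParam:name='Application Pool',defaultValue='MyAppPool',description='Application pool for application /MyApp',kind=DeploymentObjectAttribute,scope=appHostConfig,match='application/@applicationPool'
            -declareParam:name='IIS Web Application Name',defaultValue='Default Web Site/MyApp'
            -enableRule:CopySecureContent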
+ """ + ms_deploy_verb = "-verb:sync" + ms_deploy_source = f'-source:apphostconfig="{_iis_application_name_value}"' + ms_deploy_dest = f"-dest:archiveDir='{destination_archive_path}'" + ms_deploy_enable_app_pool_ext = "-enableLink:AppPoolExtension" + application_pool_name = application.ApplicationPoolName + ms_deploy_app_pool = ( + "-declareParam:name='Application Pool'," + f"defaultValue='{application_pool_name}'," + f"description='Application pool for application {application.Path}'," + "kind=DeploymentObjectAttribute," + "scope=appHostConfig," + "match='application/@applicationPool'" + ) + + iis_website_application_name_arg = ( + "-declareParam:name='IIS Web Application Name'," + f"defaultValue='{_iis_application_name_value.strip('/')}'" + ) + + copy_secure_content_arg = "-enableRule:CopySecureContent" + + ms_deploy_args = [ + ms_deploy_verb, + ms_deploy_source, + ms_deploy_dest, + ms_deploy_enable_app_pool_ext, + ms_deploy_app_pool, + iis_website_application_name_arg, + hsts_disablement_arg(site), + copy_secure_content_arg, + ] + return " ".join(ms_deploy_args) + + +def normalized_application_name(site: "Site", application: "Application") -> str: + if application.Path == "/": + return site.Name.replace(" ", "") + return f"{site.Name}-{application.Path.strip('/')}" + + +def get_webdeployv3path() -> Tuple[str, bool]: + """ + Locate the Web Deploy V3 (msdeploy.exe) installation path from Windows registry. + + Searches the Windows registry for Web Deploy V3 installation, checking both + 64-bit and 32-bit registry views. Returns the full path to msdeploy.exe and + indicates whether it was found in the 64-bit registry. + + Returns: + tuple: (path_to_msdeploy, is_64bit) where: + - path_to_msdeploy (str): Full path to msdeploy.exe + - is_64bit (bool): True if found in 64-bit registry, False if in 32-bit + + Raises: + RuntimeError: If Web Deploy V3 installation cannot be found in either + registry view, with instructions for installation + + Notes: + - Checks registry key: SOFTWARE\\Microsoft\\IIS Extensions\\MSDeploy\\3 + - Tries 64-bit registry first, falls back to 32-bit + - Returns InstallPath value plus "msdeploy.exe" + + Example: + >>> path, is_64bit = get_webdeployv3path() + >>> # Typical return value: + >>> # ("C:\\Program Files\\IIS\\Microsoft Web Deploy V3\\msdeploy.exe", True) + """ + webdeployv3_key = r"SOFTWARE\Microsoft\IIS Extensions\MSDeploy\3" + use_64bit = True + # Try 64-bit registry first + try: + key = winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, + webdeployv3_key, + 0, + winreg.KEY_READ | winreg.KEY_WOW64_64KEY, + ) + except WindowsError: + use_64bit = False + # If 64-bit fails, try 32-bit + try: + key = winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, + webdeployv3_key, + 0, + winreg.KEY_READ | winreg.KEY_WOW64_32KEY, + ) + except WindowsError: + error = "Couldn't find msdeploy.exe. Follow instructions here: https://learn.microsoft.com/en-us/iis/install/installing-publishing-technologies/installing-and-configuring-web-deploy" + raise RuntimeError(error) + + install_path = winreg.QueryValueEx(key, "InstallPath")[0] + winreg.CloseKey(key) + return install_path + "msdeploy.exe", use_64bit + + +def warn_about_password_protection(site, application): + _iis_application_name_value = iis_application_name_value(site, application) + try: + for vdir in application.VirtualDirectories: + if vdir.Password: + io.log_warning( + f"{_iis_application_name_value} is hosted at {vdir.PhysicalPath} " + "which is password-protected and won't be copied." 
+ ) + except AttributeError: + pass + + +def iis_application_name_value(site: "Site", application: "Application"): + if application.Path == "/": + return site.Name + "/" + else: + return f"{site.Name}\\{application.Path.strip('/')}" + + +def create_noop_ps1_script(upload_target_dir): + script_path = os.path.join(os.path.dirname(__file__), "migrate_scripts", "noop.ps1") + with open(script_path, "r") as source_file: + script = source_file.read() + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", "noop.ps1"), "w" + ) as file: + file.write(script) + + +def create_virtualdir_path_permission_script(physical_paths, upload_target_dir): + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "add_virtual_dir_read_access.ps1" + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Create the physical paths array string + physical_paths_array_string = ",\n ".join([f'"{p}"' for p in set(physical_paths)]) + + # Replace the placeholder in the template + script = script_template.replace( + "# This will be populated dynamically with physical paths", + physical_paths_array_string, + ) + + with open( + os.path.join( + upload_target_dir, "ebmigrateScripts", "add_virtual_dir_read_access.ps1" + ), + "w", + ) as file: + file.write(script) + + +def _arr_enabled() -> bool: + """ + Check if Application Request Routing (ARR) is enabled in IIS configuration. + + Examines the applicationHost.config file to determine if ARR is both installed + and enabled by checking the system.webServer/proxy section's 'enabled' attribute. + + Returns: + bool: True if ARR is installed and enabled in IIS configuration, + False if ARR is not installed or is disabled + + Notes: + - Checks applicationHost.config via ServerManager COM interface + - Returns False if proxy section doesn't exist (ARR not installed) + - Logs debug message if proxy section is missing + - Propagates unexpected exceptions + + Raises: + Exception: Any unexpected errors during configuration access, + except for missing configuration section + """ + server_manager = ServerManager() + try: + proxy_config_section = ( + server_manager.GetApplicationHostConfiguration().GetSection( + "system.webServer/proxy" + ) + ) + if proxy_config_section is None: + return False + return proxy_config_section.GetAttributeValue("enabled") + except COMException: + LOG.debug( + "ConfigurationSection system.webServer/proxy does not exist in applicationHost.config." + ) + pass + except Exception as e: + raise e + + +def export_arr_config(upload_target_dir: str, verbose: bool) -> None: + """ + Export IIS Application Request Routing (ARR) configuration to XML files. + + Exports modified (non-default) settings from ARR-related IIS configuration + sections to XML files. These files can be used to replicate ARR configuration + on other servers during Elastic Beanstalk deployments. + + Args: + upload_target_dir: Base directory for deployment artifacts. 
Configuration files + will be written to '{upload_target_dir}/ebmigrateScripts/' + verbose: If True, provides detailed output about each configuration section + and export operation + + Configuration Sections Exported: + - system.webServer/proxy: ARR proxy settings + - system.webServer/rewrite: URL rewrite rules + - system.webServer/caching: Caching configuration + + Notes: + - Only exports attributes that differ from default values + - Creates one XML file per configuration section: + * arr_config_proxy.xml + * arr_config_rewrite.xml + * arr_config_caching.xml + - Skips sections that don't exist in the current configuration + - Automatically generates ARR import script after export + + Raises: + Exception: If export fails, with detailed error message + + Example XML Output: + + """ + config_sections = [ + "system.webServer/proxy", + "system.webServer/rewrite", + "system.webServer/caching", + ] + if not _arr_enabled() and verbose: + io.echo("No Automatic Request Routing configuration found.") + return + else: + io.echo("Automatic Request Routing (ARR) configuration found.") + + server_manager = ServerManager() + try: + for i, section in enumerate(config_sections, 1): + section_name = section.split("/")[-1] + arr_config_file = f"arr_config_{section_name}.xml" + arr_config_file_path = os.path.join( + upload_target_dir, "ebmigrateScripts", arr_config_file + ) + with open(arr_config_file_path, "w") as file: + config = server_manager.GetApplicationHostConfiguration() + try: + section_obj = config.GetSection(section) + except COMException: + if verbose: + io.echo(f" {i}. Section {section} not found") + continue + + modified_attributes = [ + attr + for attr in section_obj.Attributes + if not attr.IsInheritedFromDefaultValue + ] + + # TODO: Handle child attributes for system.webserver/caching as well + xml_content = f"<{section_name}" + for attr in modified_attributes: + xml_content += f' {attr.Name}="{attr.Value}"' + xml_content += " />" + + file.write(xml_content) + if verbose: + io.echo( + f" {i}. Modified {section_name} configuration exported to {arr_config_file_path}" + ) + if not verbose: + io.echo("Exported ARR config.") + except Exception as e: + io.log_error(f"Failed to export ARR configuration: {str(e)}") + raise + write_arr_import_script_to_source_bundle(upload_target_dir) + + +def write_arr_import_script_to_source_bundle(upload_target_dir: str) -> None: + """ + Generate a PowerShell script for downloading and installing IIS ARR component. + + Creates a script that handles the download and installation of Application Request + Routing (ARR) for IIS. The script includes verification of existing installation + and error handling for download/installation failures. + + Args: + upload_target_dir: Base directory for deployment artifacts. 
The script will be + written to '{upload_target_dir}/ebmigrateScripts/arr_msi_installer.ps1' + + Notes: + - Generated script downloads from Microsoft's official URL + - Creates installers directory at C:\\installers\\arr-install + - Reports issues to aws-elastic-beanstalk-cli GitHub repository + """ + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "arr_msi_installer.ps1" + ) + with open(script_path, "r") as source_file: + script = source_file.read() + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", "arr_msi_installer.ps1"), + "w", + ) as file: + file.write(script) + + +def write_windows_proxy_feature_enabler_script(upload_target_dir): + script_path = os.path.join( + os.path.dirname(__file__), + "migrate_scripts", + "windows_proxy_feature_enabler.ps1", + ) + with open(script_path, "r") as source_file: + script_contents = source_file.read() + + with open( + os.path.join( + upload_target_dir, "ebmigrateScripts", "windows_proxy_feature_enabler.ps1" + ), + "w", + ) as file: + file.write(script_contents) + + +def write_arr_configuration_importer_script(upload_target_dir: str) -> None: + """ + Generate a PowerShell script for importing Application Request Routing (ARR) configuration. + + Creates a script that handles the import of ARR configuration from XML files, + including proxy, rewrite, and caching settings. The script includes backup + functionality and type-safe configuration import. + + Args: + upload_target_dir: Base directory for deployment artifacts. The script will be + written to '{upload_target_dir}/ebmigrateScripts/arr_configuration_importer_script.ps1' + + Notes: + - Generated script requires WebAdministration PowerShell module + - Handles three IIS configuration sections: + * system.webServer/proxy + * system.webServer/rewrite + * system.webServer/caching + - Expects configuration files in C:\\staging\\ebmigrateScripts\\ + """ + script_path = os.path.join( + os.path.dirname(__file__), + "migrate_scripts", + "arr_configuration_importer_script.ps1", + ) + with open(script_path, "r") as source_file: + script_contents = source_file.read() + + with open( + os.path.join( + upload_target_dir, + "ebmigrateScripts", + "arr_configuration_importer_script.ps1", + ), + "w", + ) as file: + file.write(script_contents) + + +def write_custom_site_installer_script( + upload_target_dir: str, + site_name: str, + bindings: List["Binding"], + physical_path: str, + installation_script_name: str, +) -> None: + """ + Generate a PowerShell script for installing and configuring an IIS website. + + Creates an installation script that will be referenced in the Elastic Beanstalk + deployment manifest's custom section. The script handles complete website setup + including app pool creation, website configuration, and permissions. If Application + Request Routing (ARR) is enabled in IIS, additional ARR configuration is included. 
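+
+    Example (illustrative sketch; ``site`` stands for a Microsoft.Web.Administration
+    Site object and the paths and file names shown are hypothetical):
+
+        >>> write_custom_site_installer_script(
+        ...     upload_target_dir,
+        ...     "MySite",
+        ...     list(site.Bindings),
+        ...     "C:\\inetpub\\wwwroot\\MySite",
+        ...     "my_site_installer.ps1",
+        ... )
+        >>> # Writes '{upload_target_dir}/ebmigrateScripts/my_site_installer.ps1',
+        >>> # which the deployment manifest references as an install script.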
+ + Args: + upload_target_dir: Base directory for deployment artifacts + site_name: Name of the IIS website to create + bindings: List of IIS binding objects defining site endpoints + physical_path: Physical path where website content will be deployed + installation_script_name: Name of the PowerShell script file to generate + + Generated Script Features: + - Creates and configures application pool with .NET 4.0 runtime + - Creates website with specified bindings and physical path + - Deploys content using Web Deploy (msdeploy.exe) + - Sets appropriate file system permissions + - Handles ARR configuration if proxy is enabled: + * Installs ARR components if needed + * Imports ARR configuration from XML files + * Configures proxy settings + + Notes: + - Script requires WebAdministration PowerShell module + - Uses site_name for both website and app pool names + - Expects website content at 'C:\\staging\\{site_name}.zip' + - Includes ARR configuration only if proxy is enabled in IIS + - Generated script is referenced in EB deployment manifest + """ + binding_protocol_tuples = [] + invoke_arr_import_script_call = "" + for binding in bindings: + binding_string = binding.BindingInformation + # Always add the binding information regardless of ARR status + if binding_string and binding_string.strip(): + binding_protocol_tuples.append( + f'"{binding_string.strip()}" = "{binding.Protocol.lower()}"' + ) + # Only set ARR import script if ARR is enabled + if _arr_enabled(): + invoke_arr_import_script_call = "Invoke-ARRImportScript" + binding_protocol_powershell_array = "\n".join(binding_protocol_tuples) + + # Read the template script + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "site_installer_template.ps1" + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Replace placeholders in the template + script_content = ( + script_template.replace("{site_name}", site_name) + .replace( + "{binding_protocol_powershell_array}", binding_protocol_powershell_array + ) + .replace("{physical_path}", physical_path) + .replace("{invoke_arr_import_script_call}", invoke_arr_import_script_call) + ) + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", installation_script_name), + "w", + ) as file: + file.write(script_content) + + +def write_custom_site_removal_script( + upload_target_dir: str, site_name: str, uninstallation_script_name: str +) -> None: + """ + Generate a PowerShell script for removing an IIS website during uninstallation. + + Creates an uninstallation script that will be referenced in the Elastic Beanstalk + deployment manifest's custom section, typically used as an uninstall script + in a custom deployment action. 
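+
+    Example (illustrative sketch; the site and script names are hypothetical):
+
+        >>> write_custom_site_removal_script(
+        ...     upload_target_dir, "MySite", "my_site_uninstaller.ps1"
+        ... )
+        >>> # Writes '{upload_target_dir}/ebmigrateScripts/my_site_uninstaller.ps1'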
+ + Args: + upload_target_dir: Base directory for deployment artifacts + site_name: Name of the IIS website to remove + uninstallation_script_name: Name of the PowerShell script file to generate + (must have .ps1 extension) + + Notes: + - Creates script in '{upload_target_dir}/ebmigrateScripts/{uninstallation_script_name}' + - Generated script requires WebAdministration PowerShell module + - Script includes utility functions from ebdeploy_utils.ps1 + """ + # Read the template script + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "site_removal_template.ps1" + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Replace the site_name placeholder + script_contents = script_template.replace("{site_name}", site_name) + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", uninstallation_script_name), + "w", + ) as file: + file.write(script_contents) + + +def write_custom_site_restarter_script( + upload_target_dir: str, site_name: str, restarter_script_name: str +) -> None: + """ + Generate a PowerShell script for restarting an IIS website during deployment. + + Creates a restart script that will be referenced in the Elastic Beanstalk + deployment manifest's custom section, typically used as a restart script + in a custom deployment action. + + Args: + upload_target_dir: Base directory for deployment artifacts + site_name: Name of the IIS website to restart + restarter_script_name: Name of the PowerShell script file to generate + (must have .ps1 extension) + + Notes: + - Creates script in '{upload_target_dir}/ebmigrateScripts/{restarter_script_name}' + - Generated script requires WebAdministration PowerShell module + - Script includes utility functions from ebdeploy_utils.ps1 + """ + # Read the template script + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "site_restart_template.ps1" + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Replace the site_name placeholder + script_contents = script_template.replace("{site_name}", site_name) + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", restarter_script_name), "w" + ) as file: + file.write(script_contents) + + +def write_default_web_site_port_reassignment_script( + upload_target_dir: str, binding: "Binding", port_reassignment_script_name: str +) -> None: + """ + Generate a PowerShell script for IIS Default Web Site port reassignment. 
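+
+    Example (illustrative sketch; ``binding`` stands for an IIS Binding whose
+    BindingInformation has the form "ip:port:hostname", and the script name is
+    hypothetical):
+
+        >>> write_default_web_site_port_reassignment_script(
+        ...     upload_target_dir, binding, "port_reassignment.ps1"
+        ... )
+        >>> # The generated script rebinds the 'Default Web Site' "*:80:" binding
+        >>> # to the ip/port/hostname carried by ``binding``.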
+ + Args: + upload_target_dir: Base directory for deployment artifacts + binding: IIS Binding object containing the target binding configuration + port_reassignment_script_name: Name of the PowerShell script file to generate + (must have .ps1 extension) + + Notes: + - Creates script in '{upload_target_dir}/ebmigrateScripts/{port_reassignment_script_name}' + - Generated script requires WebAdministration PowerShell module + - Script includes utility functions from ebdeploy_utils.ps1 + """ + host, port, domain = binding.BindingInformation.split(":") + + # Read the template script + script_path = os.path.join( + os.path.dirname(__file__), + "migrate_scripts", + "default_web_site_port_reassignment_template.ps1", + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Replace placeholders in the template + script_content = ( + script_template.replace("{host}", host) + .replace("{port}", port) + .replace("{domain}", domain) + ) + + with open( + os.path.join( + upload_target_dir, "ebmigrateScripts", port_reassignment_script_name + ), + "w", + ) as file: + file.write(script_content) + + +def write_ebdeploy_utility_script(upload_target_dir: str) -> None: + """ + Generate a PowerShell utility script containing common deployment functions. + + Creates ebdeploy_utils.ps1, a shared PowerShell module used by other deployment + scripts. This utility script provides common functions for logging and ACL + management during the Elastic Beanstalk deployment process. + + Args: + upload_target_dir: Base directory for deployment artifacts. The script will be + written to '{upload_target_dir}/ebmigrateScripts/ebdeploy_utils.ps1' + + Notes: + - Script is imported by other deployment scripts using dot-sourcing + - All logging functions use UTC timestamps for consistency + - ACL rules include both container and object inheritance + - All access rules are "Allow" type + """ + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "ebdeploy_utils.ps1" + ) + with open(script_path, "r") as source_file: + script_content = source_file.read() + + with open( + os.path.join(upload_target_dir, "ebmigrateScripts", "ebdeploy_utils.ps1"), "w" + ) as file: + file.write(script_content) + + +def write_copy_firewall_config_script( + upload_target_dir: str, sites: List["Site"] +) -> None: + """ + Generate and configure deployment of Windows Firewall rules based on IIS site bindings. + + Creates a PowerShell script that replicates the source environment's firewall + configuration on the target environment. The script is added to the Elastic Beanstalk + deployment manifest for execution during deployment. + + The function: + 1. Extracts HTTP/HTTPS ports from IIS site bindings + 2. Retrieves existing firewall rules for those ports + 3. Generates New-NetFirewallRule commands for each rule + 4. Creates a deployment script in the ebmigrateScripts directory + 5. Adds the script to the deployment manifest's custom section + + Args: + upload_target_dir: Base directory for deployment artifacts. 
The script will be + written to '{upload_target_dir}/ebmigrateScripts/modify_firewall_config.ps1' + + Notes: + - Only processes HTTP and HTTPS site bindings + - Generates inbound firewall rules only + - Preserves original rule properties: + * Display name + * Action (Allow/Block) + * Protocol + * Port specifications + * Enabled state + - Uses noop.ps1 for restart and uninstall operations + - If no relevant firewall rules are found, no script is generated + - Modifies aws-windows-deployment-manifest.json to include the script + + Warning: + Current implementation does not handle cleanup of firewall rules when + sites are removed. Firewall rules persist after site removal. + """ + ports = set() + for site in sites: + for binding in site.Bindings: + host, port, domain = binding.BindingInformation.split(":") + protocol = binding.Protocol + if protocol in ["http", "https"]: + ports.add(port.strip()) + firewall_rules = get_firewall_rules(ports) + powershell_commands = [] + for rule in firewall_rules: + command = ( + f'New-NetFirewallRule -DisplayName "{rule["Name"]}" ' + f'-Direction Inbound -Action {rule["Action"]} ' + f'-Protocol {rule["Protocol"]} -LocalPort {rule["LocalPorts"]} ' + ) + if not rule["Enabled"]: + command += "-Enabled False" + + powershell_commands.append(command) + if not powershell_commands: + return + + # Read the template script + script_path = os.path.join( + os.path.dirname(__file__), "migrate_scripts", "modify_firewall_config.ps1" + ) + with open(script_path, "r") as source_file: + script_template = source_file.read() + + # Replace the placeholder with the actual firewall commands + script_content = script_template.replace( + "{firewall_rules}", "\n".join(powershell_commands) + ) + + with open( + os.path.join( + upload_target_dir, "ebmigrateScripts", "modify_firewall_config.ps1" + ), + "w", + ) as file: + file.write(script_content) + + with open( + os.path.join(upload_target_dir, "aws-windows-deployment-manifest.json") + ) as file: + manifest = json.load(file) + manifest["deployments"]["custom"].append( + create_custom_manifest_section( + "ModifyFirewallConfig", + "modify_firewall_config.ps1", + "noop.ps1", + "noop.ps1", + ) + ) + with open( + os.path.join(upload_target_dir, "aws-windows-deployment-manifest.json"), "w" + ) as file: + json.dump(manifest, file, indent=4) + + +def create_custom_manifest_section( + section_name: str, + install_file: str, + restart_file: str, + uninstall_file: str, + description: Optional[str] = None, +) -> Dict[str, any]: + """ + Create a custom deployment section for the AWS Windows Deployment Manifest. + + Generates a configuration dictionary for the 'custom' section of an Elastic Beanstalk + Windows deployment manifest. This section defines PowerShell scripts to be executed + during installation, restart, and uninstallation phases of deployment. + + Args: + section_name: Name identifier for this custom deployment section + install_file: Name of PowerShell script to run during installation + restart_file: Name of PowerShell script to run during restart + uninstall_file: Name of PowerShell script to run during uninstallation + description: Optional description of the deployment section. + Defaults to section_name if not provided. 
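+
+    Example (mirrors the firewall-configuration usage elsewhere in this module):
+
+        >>> create_custom_manifest_section(
+        ...     "ModifyFirewallConfig",
+        ...     "modify_firewall_config.ps1",
+        ...     "noop.ps1",
+        ...     "noop.ps1",
+        ... )
+        >>> # The returned dictionary is appended to
+        >>> # manifest["deployments"]["custom"] in aws-windows-deployment-manifest.json.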
+ + Returns: + Dictionary containing the custom deployment section configuration: + { + "name": section name, + "description": section description, + "scripts": { + "install": {"file": "ebmigrateScripts\\install_script.ps1"}, + "restart": {"file": "ebmigrateScripts\\restart_script.ps1"}, + "uninstall": {"file": "ebmigrateScripts\\uninstall_script.ps1"} + } + } + + Notes: + - All scripts are expected to be in the 'ebmigrateScripts' directory + - This section will be added to the 'custom' array in the manifest + - The manifest is used by Elastic Beanstalk to orchestrate deployments + """ + ebmigrate_scripts_dir_name = "ebmigrateScripts" + return { + "name": section_name, + "description": description or section_name, + "scripts": { + "install": {"file": f"{ebmigrate_scripts_dir_name}\\{install_file}"}, + "restart": {"file": f"{ebmigrate_scripts_dir_name}\\{restart_file}"}, + "uninstall": {"file": f"{ebmigrate_scripts_dir_name}\\{uninstall_file}"}, + }, + } + + +def is_port_in_rule(port: Union[int, str], local_ports: str) -> bool: + """ + Check if a specific port is covered by a Windows Firewall rule's port specification. + + Evaluates whether a port number matches a firewall rule's local ports definition, + handling both individual ports and port ranges. + + Args: + port: Port number to check (can be integer or string) + local_ports: Port specification string from firewall rule. Can be: + - Individual ports: "80", "443" + - Port ranges: "8081-8083" + - Multiple specifications: "80,443,8081-8083" + - "*" (all ports) + + Returns: + True if the port is covered by the local_ports specification, + False otherwise + + Notes: + - Returns False for empty port specifications or "*" + - Handles comma-separated lists of port specifications + - Port ranges must be numeric and properly formatted (start-end) + - Whitespace around port specifications is ignored + + Examples: + >>> is_port_in_rule(80, "80") + True + >>> is_port_in_rule(80, "80,443") + True + >>> is_port_in_rule(8082, "8081-8083") + True + >>> is_port_in_rule(8080, "*") + False + """ + if not local_ports or local_ports == "*": + return False + + port = str(port) + for part in local_ports.split(","): + part = part.strip() + # Handle ports specified as ranges. e.g. 8081-8083 + if "-" in part: + start, end = part.split("-") + if start.isdigit() and end.isdigit(): + if int(start) <= int(port) <= int(end): + return True + elif part == port: + return True + return False + + +def get_firewall_rules(ports_to_check: Set[int]) -> List[Dict[str, Any]]: + """ + Retrieve Windows Firewall rules that apply to specified HTTP/HTTPS ports. + + Uses the Windows Firewall COM interface (INetFwPolicy2) to enumerate firewall rules + and filter those that affect the specified ports. 
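+
+    Example (illustrative sketch; the ports mirror how this module collects
+    HTTP/HTTPS bindings, which are gathered as strings, and the rule shown is
+    hypothetical):
+
+        >>> rules = get_firewall_rules({"80", "443"})
+        >>> # Each entry resembles:
+        >>> # {'Name': 'World Wide Web Services (HTTP Traffic-In)',
+        >>> #  'ServiceName': '...', 'Protocol': 'TCP', 'LocalPorts': '80',
+        >>> #  'Action': 'Allow', 'Enabled': True}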
+ + Args: + ports_to_check: Set of port numbers (typically HTTP/HTTPS ports) to check + for associated firewall rules + + Returns: + List of dictionaries, each representing a firewall rule with: + - Name: Rule name (str) + - ServiceName: Associated Windows service name (str) + - Protocol: 'TCP' or 'UDP' + - LocalPorts: Port specification string + - Action: 'Allow' or 'Block' + - Enabled: Boolean indicating if rule is active + + Raises: + EnvironmentError: If unable to access or query the Windows Firewall + + Notes: + - Uses Windows Firewall COM interface (HNetCfg.FwPolicy2) + - Only includes rules that explicitly reference the specified ports + - Protocol values are mapped from: + * 6 -> 'TCP' + * 17 -> 'UDP' + - Action values are mapped from: + * 1 -> 'Allow' + * 2 -> 'Block' + """ + try: + fw_policy = win32com.client.Dispatch("HNetCfg.FwPolicy2") + except Exception as e: + io.log_error( + f"Encountered failure during firewall configuration analysis: {str(e)}" + ) + raise EnvironmentError(e) + + rules = fw_policy.Rules + rule_list = [] + + for rule in rules: + if rule.LocalPorts and any( + is_port_in_rule(p, rule.LocalPorts) for p in ports_to_check + ): + rule_list.append( + { + "Name": rule.Name, + "ServiceName": rule.ServiceName, + "Protocol": "TCP" if rule.Protocol == 6 else "UDP", + "LocalPorts": rule.LocalPorts, + "Action": "Allow" if rule.Action == 1 else "Block", + "Enabled": rule.Enabled, + } + ) + return rule_list + + +def write_reinstate_iisstart_htm_default_document_script( + upload_target_dir: str, +) -> None: + """ + Generate a PowerShell script to restore IIS's default document configuration. + + Creates a PowerShell script that ensures 'iisstart.htm' is reinstated as a default + document in IIS's configuration. This is necessary because Elastic Beanstalk's + deployment process removes 'iisstart.htm' from IIS's default document list, which + can affect sites relying on this default document behavior. + + Default documents in IIS determine which files (e.g., 'iisstart.htm', 'default.htm', + 'index.html') are served when a user requests a directory without specifying a + specific file. + + Args: + upload_target_dir: Base directory for deployment artifacts. The script will be + written to '{upload_target_dir}/ebmigrateScripts/reinstate_iisstart_htm_default_document.ps1' + + Notes: + - Script execution is managed by Elastic Beanstalk through its manifest file + - The manifest entry ensures this script runs during deployment + - Uses Add-WebConfigurationProperty cmdlet to modify IIS configuration + - Modifies system.webServer/defaultDocument/files configuration section + - Script is typically deployed to '.\\migrations\\latest\\upload_target\\ebmigrateScripts\' + - Requires IIS WebAdministration module to be available on target system + """ + script_path = os.path.join( + os.path.dirname(__file__), + "migrate_scripts", + "reinstate_iisstart_htm_default_document.ps1", + ) + with open(script_path, "r") as source_file: + script_contents = source_file.read() + + with open( + os.path.join( + upload_target_dir, + "ebmigrateScripts", + "reinstate_iisstart_htm_default_document.ps1", + ), + "w", + ) as file: + file.write(script_contents) + + +# TODO: allow override through .ebextensions or a `--alb-configs alb-configs.json` +def get_listener_configs(sites: List["Site"], ssl_certificate_domain_name: str = None): + """ + Generate complete Elastic Beanstalk listener configurations from IIS site configurations. 
+ + Processes IIS sites to create comprehensive ALB listener configurations, including HTTP + and HTTPS listeners, rules, and process mappings. Handles both protocol types and + automatically determines default processes. + + Args: + sites: List of IIS Site objects to generate listener configs from + ssl_certificate_domain_name: Optional ARN of SSL certificate for HTTPS listeners. + Required if any HTTPS listeners are configured. + + Returns: + List of Elastic Beanstalk option settings containing: + - HTTP/HTTPS listener configurations with default processes + - Listener rules with priorities and conditions + - Process configurations with ports and protocols + - Protocol mappings for each target group + Returns empty list if no valid listener rules are found or on error. + + Notes: + - Processes sites to extract bindings and create corresponding ALB rules + - Handles both HTTP and HTTPS protocols if certificate provided + - Creates process mappings based on port numbers + - Assigns rule priorities based on specificity + - Treats configuration errors as non-fatal + """ + option_settings = [] + try: + site_configs = get_site_configs(sites=sites) + alb_rules = create_alb_rules(site_configs) + + converted_alb_rules = convert_alb_rules_to_option_settings( + alb_rules, ssl_certificate_domain_name + ) + http_listener_rule_option_settings = ( + converted_alb_rules.http_listener_rule_option_settings + ) + if ssl_certificate_domain_name: + https_listener_rule_option_settings = ( + converted_alb_rules.https_listener_rule_option_settings + ) + else: + https_listener_rule_option_settings = [] + process_protocol_mappings = converted_alb_rules.process_protocol_mappings + + if ( + not http_listener_rule_option_settings + and not https_listener_rule_option_settings + ): + return [] + + http_listener_rule_names = _extract_and_join_rule_names( + http_listener_rule_option_settings + ) + if ssl_certificate_domain_name: + https_listener_rule_names = _extract_and_join_rule_names( + https_listener_rule_option_settings + ) + else: + https_listener_rule_names = [] + + http_processes = _extract_process_values(http_listener_rule_option_settings) + if ssl_certificate_domain_name: + https_processes = _extract_process_values( + https_listener_rule_option_settings + ) + else: + https_processes = [] + + default_process_name = None + if "default" in http_processes: + default_process_name = "default" + elif http_processes: + default_process_name = sorted(list(http_processes))[0] + + default_https_process_name = None + if https_processes: + default_https_process_name = sorted(list(https_processes))[0] + + option_settings.extend( + _create_http_listener_settings( + default_process_name, http_listener_rule_names + ) + ) + + if https_listener_rule_option_settings: + option_settings.extend( + _create_https_listener_settings( + default_https_process_name, + https_listener_rule_names, + ssl_certificate_domain_name, + ) + ) + + process_option_settings = _create_process_option_settings( + process_protocol_mappings + ) + + option_settings += ( + http_listener_rule_option_settings + + https_listener_rule_option_settings + + process_option_settings + ) + + return option_settings + except Exception as e: + io.log_warning( + f"Error: {str(e)}. Treating listener rule creation as non-fatal. This might cause environment to be in degraded state." + ) + + +def _create_process_option_settings( + process_protocol_mappings: dict[str, str], +) -> list[dict]: + """ + Create Elastic Beanstalk process option settings from process-protocol mappings. 
+ + Generates configuration settings for each process, including protocol and port settings. + The 'default' process is mapped to port 80, while other processes use their name as the port. + + Args: + process_protocol_mappings: Dictionary mapping process names to their protocols + (e.g., {'default': 'HTTP', '8080': 'HTTPS'}) + + Returns: + List of option settings dictionaries, each containing: + - Namespace: AWS namespace for the process (aws:elasticbeanstalk:environment:process:{process}) + - OptionName: Either 'Protocol' or 'Port' + - Value: The corresponding protocol or port value + + Example: + >>> _create_process_option_settings({'default': 'HTTP', '8080': 'HTTPS'}) + [ + { + 'Namespace': 'aws:elasticbeanstalk:environment:process:default', + 'OptionName': 'Protocol', + 'Value': 'HTTP' + }, + { + 'Namespace': 'aws:elasticbeanstalk:environment:process:default', + 'OptionName': 'Port', + 'Value': '80' + }, + { + 'Namespace': 'aws:elasticbeanstalk:environment:process:8080', + 'OptionName': 'Protocol', + 'Value': 'HTTPS' + }, + { + 'Namespace': 'aws:elasticbeanstalk:environment:process:8080', + 'OptionName': 'Port', + 'Value': '8080' + } + ] + """ + settings = [] + for process, protocol in process_protocol_mappings.items(): + port = "80" if process == "default" else process + namespace = f"aws:elasticbeanstalk:environment:process:{process}" + settings.extend( + [ + { + "Namespace": namespace, + "OptionName": "Protocol", + "Value": protocol.upper(), + }, + {"Namespace": namespace, "OptionName": "Port", "Value": port}, + ] + ) + return settings + + +def _create_listener_settings( + namespace: str, option_name_value_pairs: List[Tuple[str, str]] +) -> list[dict]: + """ + Create Elastic Beanstalk listener option settings from name-value pairs. + + Args: + namespace: The AWS Elastic Beanstalk namespace for the settings + option_name_value_pairs: List of tuples containing (option_name, value) pairs + + Returns: + List of dictionaries, each containing: + - Namespace: The provided namespace + - OptionName: Name of the option + - Value: Value for the option + """ + return [ + {"Namespace": namespace, "OptionName": option_name, "Value": value} + for option_name, value in option_name_value_pairs + ] + + +def _create_http_listener_settings( + default_process_name: str, rule_names: str +) -> list[dict]: + """ + Create HTTP listener configuration settings for Elastic Beanstalk. + + Creates settings for an HTTP listener with specified default process and rules. + + Args: + default_process_name: Name of the default process to handle requests + rule_names: Comma-separated string of rule names + + Returns: + List of option settings configuring an HTTP listener with: + - HTTP protocol + - Specified default process + - Enabled listener + - Specified routing rules + """ + option_name_value_pairs = [ + ("Protocol", "HTTP"), + ("DefaultProcess", default_process_name), + ("ListenerEnabled", "true"), + ("Rules", rule_names), + ] + return _create_listener_settings( + "aws:elbv2:listener:default", option_name_value_pairs + ) + + +def _create_https_listener_settings( + default_process_name: str, rule_names: str, ssl_cert: str +) -> list[dict]: + """ + Create HTTPS listener configuration settings for Elastic Beanstalk. + + Creates settings for an HTTPS listener with specified default process, rules, + and SSL certificate. 
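+
+    Example (illustrative; the certificate ARN is a placeholder):
+
+        >>> _create_https_listener_settings("default", "rule1,rule2", "<certificate-arn>")
+        >>> # Returns settings in namespace 'aws:elbv2:listener:443' for
+        >>> # ('Protocol', 'HTTPS'), ('DefaultProcess', 'default'),
+        >>> # ('ListenerEnabled', 'true'), ('Rules', 'rule1,rule2'),
+        >>> # and ('SSLCertificate', '<certificate-arn>').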
+ + Args: + default_process_name: Name of the default process to handle requests + rule_names: Comma-separated string of rule names + ssl_cert: ARN of the SSL certificate to use for HTTPS + + Returns: + List of option settings configuring an HTTPS listener with: + - HTTPS protocol + - Specified default process + - Enabled listener + - Specified routing rules + - SSL certificate configuration + """ + option_name_value_pairs = [ + ("Protocol", "HTTPS"), + ("DefaultProcess", default_process_name), + ("ListenerEnabled", "true"), + ("Rules", rule_names), + ] + if ssl_cert: + option_name_value_pairs.append(("SSLCertificate", ssl_cert)) + return _create_listener_settings("aws:elbv2:listener:443", option_name_value_pairs) + + +def _extract_and_join_rule_names(option_settings: list[dict]) -> str: + rule_names = {option["Namespace"].split(":")[-1] for option in option_settings} + return ",".join(sorted(rule_names)) + + +def _extract_process_values(option_settings: list[dict]) -> set[str]: + return { + setting["Value"] + for setting in option_settings + if setting["OptionName"] == "Process" + } + + +@dataclass +class ConvertedALBRules: + """ + Container for ALB rule configurations separated by protocol. + + Attributes: + http_listener_rule_option_settings: List of option settings for HTTP listener rules + https_listener_rule_option_settings: List of option settings for HTTPS listener rules + process_protocol_mappings: Dictionary mapping process names to their protocols + """ + + http_listener_rule_option_settings: List[Dict[str, str]] + https_listener_rule_option_settings: List[Dict[str, str]] + process_protocol_mappings: Dict[str, str] + + +def convert_alb_rules_to_option_settings( + alb_rules: List[Dict[str, Any]], ssl_certificate_domain_name: Optional[str] +) -> ConvertedALBRules: + """ + Convert ALB rules into Elastic Beanstalk option settings format. + + Transforms ALB rules into EB option settings, organizing them by protocol (HTTP/HTTPS) + and creating the necessary listener rule configurations. 
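+
+    Example (a minimal sketch; the rule below follows the shape produced by
+    create_alb_rules, with a synthetic target group ARN ending in the IIS port):
+
+        >>> rule = {
+        ...     "Priority": 1,
+        ...     "Protocol": "http",
+        ...     "Conditions": [{"Field": "host-header", "Values": ["example.com"]}],
+        ...     "Actions": [{"Type": "forward", "ForwardConfig": {"TargetGroups": [
+        ...         {"TargetGroupArn": "targetgroup/80", "Weight": 1}]}}],
+        ... }
+        >>> converted = convert_alb_rules_to_option_settings([rule], None)
+        >>> # converted.http_listener_rule_option_settings carries, under namespace
+        >>> # 'aws:elbv2:listenerrule:rule1', the options Priority='1',
+        >>> # Process='default' (port 80 maps to 'default') and HostHeaders='example.com';
+        >>> # converted.process_protocol_mappings == {'default': 'HTTP'}.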
+ + Args: + alb_rules: List of ALB rule dictionaries, each containing: + - Priority: Rule priority number + - Protocol: 'HTTP' or 'HTTPS' + - Conditions: List of routing conditions (host-headers, path-pattern) + - Actions: Forward actions with target group configuration + + Returns: + ConvertedALBRules object containing: + - HTTP listener rule options + - HTTPS listener rule options + - Process to protocol mappings + + Each listener rule option is a dictionary with: + - Namespace: 'aws:elbv2:listenerrule:ruleN' where N is the rule number + - OptionName: One of 'Priority', 'Process', 'HostHeaders', or 'PathPatterns' + - Value: Corresponding value for the option + + Notes: + - Process names are extracted from target group ARNs + - Port 80 is mapped to 'default' process name + - If no host header is specified, path pattern defaults to '*' + """ + http_listener_rule_option_settings: List[Dict[str, str]] = [] + https_listener_rule_option_settings: List[Dict[str, str]] = [] + process_protocol_mappings: Dict[str, str] = dict() + + for i, rule in enumerate(alb_rules, 1): + host_header, path_pattern = None, None + + conditions = rule.get("Conditions", []) + if conditions: + for condition in conditions: + if condition["Field"] == "host-header": + host_header = condition["Values"][0] + if condition["Field"] == "path-pattern": + path_pattern = condition["Values"][0] + + namespace = f"aws:elbv2:listenerrule:rule{i}" + protocol = rule["Protocol"].upper() + listener_rule_option_settings = list() + listener_rule_option_settings.append( + { + "Namespace": namespace, + "OptionName": "Priority", + "Value": str(rule["Priority"]), + } + ) + + target_group = rule["Actions"][0]["ForwardConfig"]["TargetGroups"][0][ + "TargetGroupArn" + ] + process = target_group.split("/")[-1] + if process == "80": + process = "default" + listener_rule_option_settings.append( + {"Namespace": namespace, "OptionName": "Process", "Value": process} + ) + if host_header: + listener_rule_option_settings.append( + { + "Namespace": namespace, + "OptionName": "HostHeaders", + "Value": host_header, + } + ) + if not host_header: + path_pattern = path_pattern or "*" + if path_pattern: + listener_rule_option_settings.append( + { + "Namespace": namespace, + "OptionName": "PathPatterns", + "Value": path_pattern, + } + ) + if protocol.strip().lower() == "http": + http_listener_rule_option_settings.extend(listener_rule_option_settings) + else: + https_listener_rule_option_settings.extend(listener_rule_option_settings) + if protocol.strip().lower() == "http": + process_protocol_mappings[process] = protocol.strip().upper() + elif protocol.strip().lower() == "https" and ssl_certificate_domain_name: + process_protocol_mappings[process] = protocol.strip().upper() + + return ConvertedALBRules( + http_listener_rule_option_settings, + https_listener_rule_option_settings, + process_protocol_mappings, + ) + + +class SiteConfig: + """ + Configuration class representing an IIS website with its binding and path information. + + Args: + name: The name of the IIS website + binding_info: A colon-separated string containing binding information in the format + "ip:port:hostname". 
Example: "*:80:example.com" + physical_path: The filesystem path where the website content is located + protocol: The web protocol used by the site ('http' or 'https') + + Attributes: + name (str): The name of the IIS website + port (int): The port number extracted from binding_info + host_header (Optional[str]): The hostname for the site, or None if not specified + physical_path (str): The filesystem path where the website content is located + protocol (str): The lowercase protocol ('http' or 'https') + rewrite_rules (List): A list of rewrite rules for the site (empty by default) + + Raises: + ValueError: If binding_info doesn't contain enough segments for parsing + """ + + def __init__( + self, name: str, binding_info: str, physical_path: str, protocol: str + ) -> None: + self.name = name + self.port = int(binding_info.split(":")[1]) + self.host_header = binding_info.split(":")[2] or None + self.physical_path = physical_path + self.protocol = protocol.lower() + self.rewrite_rules: List = [] + + +def _parse_binding_info(binding: "Binding") -> Dict[str, Union[str, int]]: + """ + Parse an IIS binding configuration into a dictionary of its components. + + Takes a Binding object from IIS's configuration and splits its BindingInformation + string (format: "IP:port:hostname") into individual components. + + Args: + binding: An IIS Binding object containing BindingInformation and Protocol properties + + Returns: + A dictionary containing: + - 'ip' (str): IP address or '*' for all addresses + - 'port' (int): Port number + - 'host' (str): Hostname or empty string if not specified + - 'protocol' (str): Protocol type ('http' or 'https') + + Raises: + IndexError: If binding.BindingInformation doesn't contain the expected three colon-separated values + """ + parts = binding.BindingInformation.split(":") + return { + "ip": parts[0], + "port": int(parts[1]), + "host": parts[2], + "protocol": binding.Protocol, # This should give us http/https + } + + +def get_site_configs(sites: List["Site"]): + """ + Retrieve and parse IIS site configurations from the local server. 
+ + Connects to IIS using ServerManager and processes each site's: + - Basic site information (name, bindings) + - Physical path information + - URL rewrite rules from web.config files + + Args: + sites: List of IIS Site objects to process + + Returns: + List[SiteConfig]: A list of SiteConfig objects, each containing: + - name: Site name from IIS + - binding_info: Parsed binding information (ip:port:hostname) + - physical_path: Site's root directory path + - protocol: HTTP/HTTPS protocol + - rewrite_rules: List of dictionaries containing URL rewrite rules from web.config: + * name: Rule name + * pattern: URL pattern to match + * action_type: Type of rewrite action (Rewrite/Redirect) + * action_url: Target URL for rewrite + + Notes: + - Processes each site's bindings separately + - Parses web.config files for URL rewrite rules + - Skips invalid web.config files with warning + - Requires administrative access to IIS + """ + site_configs = [] + + for site in sites: + for binding in site.Bindings: + binding_info = _parse_binding_info(binding) + + config = SiteConfig( + name=site.Name, + binding_info=binding.BindingInformation, + physical_path=site.Applications["/"] + .VirtualDirectories["/"] + .PhysicalPath, + protocol=binding_info["protocol"], + ) + + # Get rewrite rules from web.config + web_config_path = os.path.join(config.physical_path, "web.config") + if os.path.exists(web_config_path): + try: + tree = ET.parse(web_config_path) + root = tree.getroot() + rules = root.findall(".//rewrite/rules/rule") + + for rule in rules: + config.rewrite_rules.append( + { + "name": rule.get("name"), + "pattern": rule.find("match").get("url"), + "action_type": rule.find("action").get("type"), + "action_url": rule.find("action").get("url"), + } + ) + except Exception as e: + io.log_warning( + f"Error reading web.config for {site.Name}: {str(e)}. Skipping over rewrite rule identification for {site.Name}" + ) + + site_configs.append(config) + + return site_configs + + +def _sort_rules_by_specificity(rules: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Sort Application Load Balancer rules by their specificity in descending order. + + Rules are sorted based on a scoring system where: + - Path pattern conditions contribute 20 points + - Host header conditions contribute 10 points + The total score determines the rule's specificity, with higher scores indicating + more specific rules. + + Args: + rules: A list of ALB rule dictionaries. Each rule is a dictionary containing: + - 'Actions': List of action configurations + - 'Protocol': String indicating the protocol (e.g., "http") + - 'Conditions' (optional): List of condition dictionaries with 'Field' and 'Values' + + Returns: + A new list containing the same rules sorted by specificity in descending order, + where rules with more specific conditions (higher scores) appear first. + + Example: + A rule with both path-pattern and host-header conditions (score: 30) would be + sorted before a rule with only a host-header condition (score: 10). + """ + + def calculate_rule_specificity(rule): + score = 0 + conditions = rule.get("Conditions", []) + + for condition in conditions: + if condition["Field"] == "host-header": + score += 10 + elif condition["Field"] == "path-pattern": + score += 20 + return score + + return sorted(rules, key=calculate_rule_specificity, reverse=True) + + +def create_alb_rules(site_configs: List["SiteConfig"]) -> List[Dict[str, Any]]: + """ + Create Application Load Balancer (ALB) rules from IIS site configurations. 
+ + Transforms IIS site configurations into ALB rules by: + 1. Grouping sites by port + 2. Creating host-header based rules for each site + 3. Creating path-pattern based rules from site rewrite rules + 4. Sorting rules by specificity and assigning priorities + + Args: + site_configs: List of SiteConfig objects containing IIS site configurations + + Returns: + List of ALB rule dictionaries, each containing: + - Priority: Integer indicating rule precedence (1-based) + - Protocol: The protocol ('http' or 'https') + - Actions: Forward action configuration with target group ARN + - Conditions: List of conditions: + * host-header conditions from site bindings + * path-pattern conditions from rewrite rules + + Process Flow: + 1. Groups sites by port for target group creation + 2. Processes host-header based rules first + 3. Processes URL rewrite rules from web.config + 4. Sorts rules by specificity (host+path > host > path) + 5. Assigns sequential priorities to sorted rules + + Notes: + - Creates synthetic target group ARNs using port numbers + - Deduplicates patterns per host header + - Only processes HTTP/HTTPS protocols + - Host header rules take precedence over path patterns + """ + port_groups = collections.defaultdict(list) + valid_protocols = ["http", "https"] + for config in site_configs: + if config.protocol in valid_protocols: + port_groups[config.port].append(config) + + target_group_template_arn = ( + "arn:aws:elasticloadbalancing:region:account-id:targetgroup/{port}" + ) + target_groups = { + port: target_group_template_arn.format(port=port) for port in port_groups.keys() + } + + unsorted_rules = _process_host_header_based_rules(port_groups, target_groups) + processed_patterns = set() + rewrite_rules = _process_url_rewrite_rules( + port_groups, processed_patterns, target_groups + ) + unsorted_rules.extend(rewrite_rules) + sorted_rules = _sort_rules_by_specificity(unsorted_rules) + return _assign_priorities_after_sorting(sorted_rules) + + +def _process_host_header_based_rules(port_groups, target_groups): + host_header_rules = [] + for iis_port, sites in port_groups.items(): + for site in sites: + host_rule = { + "Actions": [ + { + "Type": "forward", + "ForwardConfig": { + "TargetGroups": [ + {"TargetGroupArn": target_groups[iis_port], "Weight": 1} + ] + }, + } + ], + "Protocol": site.protocol, + } + if site.host_header: + host_rule["Conditions"] = [ + {"Field": "host-header", "Values": [site.host_header]} + ] + host_header_rules.append(host_rule) + continue + return host_header_rules + + +def _process_url_rewrite_rules(port_groups, processed_patterns, target_groups): + rules = [] + for iis_port, sites in port_groups.items(): + for site in sites: + for rule in site.rewrite_rules: + rewrite_rule = _process_url_rewrite_rule( + rule, processed_patterns, site, target_groups, iis_port + ) + rules.append(rewrite_rule) + return rules + + +def _process_url_rewrite_rule(rule, processed_patterns, site, target_groups, iis_port): + # Create unique key for pattern+host combination + pattern_key = f"{rule['pattern']}:{site.host_header}" + + # Skip if we've already processed this pattern for this host + if pattern_key in processed_patterns: + return None + + processed_patterns.add(pattern_key) + + conditions = [] + if site.host_header: + conditions.append({"Field": "host-header", "Values": [site.host_header]}) + + conditions.append( + {"Field": "path-pattern", "Values": [translate_iis_to_alb(rule["pattern"])]} + ) + + return _create_rewrite_rule(conditions, target_groups[iis_port], site.protocol) + 
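+# Worked example (illustrative): a web.config rewrite rule with pattern
+# "/articles/(.*)" on a site bound to "*:80:example.com" becomes an ALB rule whose
+# conditions are host-header=example.com and path-pattern=/articles/* (see
+# translate_iis_to_alb below), forwarding to the synthetic port-80 target group.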
+ +def _create_rewrite_rule( + conditions: List[Dict[str, str | List[str]]], target_group_arn: str, protocol: str +) -> Dict[str, Any]: + return { + "Conditions": conditions, + "Actions": [ + { + "Type": "forward", + "ForwardConfig": { + "TargetGroups": [{"TargetGroupArn": target_group_arn, "Weight": 1}] + }, + } + ], + "Protocol": protocol, + } + + +def _assign_priorities_after_sorting( + sorted_rules: List[Dict[str, Any]], +) -> List[Dict[str, Any]]: + alb_rules = [] + for priority, rule in enumerate(sorted_rules, 1): + rule["Priority"] = priority + alb_rules.append(rule) + return alb_rules + + +def translate_iis_to_alb(iis_pattern: str) -> str: + """ + Convert an IIS (Internet Information Services) URL rewrite pattern to an ALB (Application Load Balancer) path pattern. + + This function takes a URL rewrite pattern typically used in IIS and transforms it into a format compatible with + AWS Application Load Balancer (ALB) path patterns. The transformation involves several steps to simplify and + standardize the pattern. + + Parameters: + iis_pattern (str): The IIS URL rewrite pattern to be converted. This pattern may contain various regex elements + such as anchors, capture groups, character classes, and quantifiers. + + Returns: + str: The converted ALB path pattern. This pattern will be simplified to use '*' as a wildcard character and + will be prefixed with a '/' if it does not already start with one. + + Example: + >>> translate_iis_to_alb("^/products/{id:int}/details$") + '/products/*/details' + >>> translate_iis_to_alb("^/users/([a-zA-Z0-9]+)/profile$") + '/users/*/profile' + >>> translate_iis_to_alb("/articles/(.*)") + '/articles/*' + + Note: + - The function assumes that the input pattern is a valid IIS URL rewrite pattern. + - The resulting ALB pattern will use '*' as a wildcard to match any sequence of characters. + - Complex regex features not covered by the simplification rules will be approximated with '*'. + """ + alb_pattern = iis_pattern.strip("^$") + + # replace .+ with * + alb_pattern = re.sub(r"\.\+", "?*", alb_pattern) + # replace .* with * + alb_pattern = re.sub(r"\.\*", "*", alb_pattern) + # replace {segment} with * + alb_pattern = re.sub("({.*})", "*", alb_pattern) + alb_pattern = alb_pattern.lstrip("^") + # replace groupings of the type "[0-9]+" with "*" + alb_pattern = re.sub(r"\[.*?\]\+", "*", alb_pattern) + # replace groupings of the type "[0-9]*" with "*" + alb_pattern = re.sub(r"\[.*?\]\*", "*", alb_pattern) + # replace groupings of the type "([0-9]*)" with "*" + alb_pattern = re.sub(r"\([^()]*\)", "*", alb_pattern) + + # Ensure pattern starts with / + if not alb_pattern.startswith("/"): + alb_pattern = "/" + alb_pattern + # Remove any double asterisks + alb_pattern = alb_pattern.replace("**", "*") + # Remove any trailing * + alb_pattern = alb_pattern.rstrip("$") + + return alb_pattern diff --git a/ebcli/controllers/migrate_scripts/add_virtual_dir_read_access.ps1 b/ebcli/controllers/migrate_scripts/add_virtual_dir_read_access.ps1 new file mode 100644 index 000000000..2f52c4106 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/add_virtual_dir_read_access.ps1 @@ -0,0 +1,19 @@ +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +$paths = @( + # This will be populated dynamically with physical paths +) +foreach ($path in $paths) { + if (-not (Test-Path $path)) { + Write-HostWithTimestamp "'$path' for virtual directory does not exist. Creating." 
+ New-Item -Path $path -ItemType Directory -Force | Out-Null + } + + $acl = Get-Acl $path + foreach ($rule in $(Get-GenericWebpathACLRules)) { + $acl.AddAccessRule($rule) + } + Set-Acl $path $acl + + Write-HostWithTimestamp "Read permission granted for $path" +} diff --git a/ebcli/controllers/migrate_scripts/arr_configuration_importer_script.ps1 b/ebcli/controllers/migrate_scripts/arr_configuration_importer_script.ps1 new file mode 100644 index 000000000..345972cd4 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/arr_configuration_importer_script.ps1 @@ -0,0 +1,262 @@ +<# + .SYNOPSIS + Imports Application Request Routing (ARR) configuration from XML files. + + .DESCRIPTION + Handles the import of ARR configuration settings, including backup of current + configuration and type-safe import of new settings. + + .NOTES + Requires: + - WebAdministration module + - Administrative privileges + - Configuration files named arr_config_[section].xml +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +function Export-ARRConfig { + param ( + [Parameter(Mandatory=$false)] + [string]$configPath + ) + <# + .SYNOPSIS + Exports current ARR configuration to XML files. + + .DESCRIPTION + Exports modified (non-default) settings for proxy, rewrite, and caching + configurations to separate XML files. + + .PARAMETER configPath + Optional base path for output files. Files will be named {configPath}-{section}.xml + + .NOTES + - Only exports modified settings (different from defaults) + - Creates separate files for each configuration section + #> + + # Get the proxy configuration + $configSections = @( + "system.webServer/proxy", + "system.webServer/rewrite", + "system.webServer/caching" + ) + + $outputPath = "" + try { + foreach ($section in $configSections) { + $sectionName = $section.Split('/')[-1] + if ([string]::IsNullOrEmpty($configPath)) { + $outputPath = ".\\arr_config_$sectionName.xml" + } + else { + $outputPath = "$configPath-$sectionName.xml" + } + $proxyConfig = Get-WebConfiguration -Filter $section + + # Filter attributes that have been modified from defaults + $modifiedAttributes = $proxyConfig.Attributes | + Where-Object { -not $_.IsInheritedFromDefaultValue } + + # Build XML string + $xmlContent = " + param( + [Parameter(Mandatory=$true)] + [string]$SectionPrefix + ) + # Get the proxy section properties + $sectionConfig = Get-WebConfiguration "$SectionPrefix" -pspath 'MACHINE/WEBROOT/APPHOST' + + # Loop through each attribute and determine its type + $propertyMappings = @{} + + foreach ($attr in $sectionConfig.Attributes) { + $propName = $attr.Name + $propValue = $sectionConfig.$propName + $propType = if ($propValue -ne $null) { $propValue.GetType().Name } else { "String" } + # Store in hashtable + $propertyMappings[$propName] = $propType + } + return $propertyMappings +} + + +function Import-ARRConfig { + <# + .SYNOPSIS + Imports ARR configuration from XML files. + + .DESCRIPTION + Reads configuration from XML files and applies settings to IIS, handling + proper type conversion and validation. 
+ + .NOTES + - Creates backup before import + - Handles type conversion for various configuration values + - Logs all changes with detailed output + - Provides backup restoration information on failure + #> + $configSections = @( + "system.webServer/proxy", + "system.webServer/rewrite", + "system.webServer/caching" + ) + + $configurationExists = $false + foreach ($section in $configSections) { + $sectionName = $section.Split('/')[-1] + $outputPath = "C:\\staging\\ebmigrateScripts\\arr_config_$sectionName.xml" + + if (Test-Path $outputPath) { + $configurationExists = $true + break + } + } + if (! $configurationExists) { + Write-HostWithTimestamp "No Automatic Request Routing configuration found." + return + } + try { + # Create backup of current state + $backupPath = "arr-backup" + Export-ARRConfig -configPath $backupPath + + Write-HostWithTimestamp "Applying new ARR configuration" + $i = 1 + foreach ($section in $configSections) { + $sectionName = $section.Split('/')[-1] + $outputPath = "C:\\staging\\ebmigrateScripts\\arr_config_$sectionName.xml" + + Write-HostWithTimestamp "Handling $sectionName at $outputPath" + + if (! $(Test-Path $outputPath)) { + Write-HostWithTimestamp " $($i; $i++). $outputPath doesn't exist. No relevant configuration for $sectionName present." + continue + } + [xml]$config = Get-Content $outputPath + $proxyNode = $config.proxy + + $attributes = $proxyNode.Attributes + + if ([string]::IsNullOrEmpty($attributes)) { + Write-HostWithTimestamp " $($i; $i++). $section -> {}" + continue + } + + $propertyTypeMappings = Get-SectionAttributeTypeMappings $section + foreach ($attr in $proxyNode.Attributes) { + $propName = $attr.Name + $propValue = $attr.Value + if ($propertyTypeMappings.ContainsKey($propName)) { + $expectedType = $propertyTypeMappings[$propName] + + # Convert based on expected type + switch ($expectedType) { + "Boolean" { $propValue = [System.Boolean]::Parse($propValue) } + "Int32" { $propValue = [int]$propValue } + "Int64" { $propValue = [long]$propValue } + "Double" { $propValue = [double]$propValue } + "TimeSpan" { $propValue = [System.TimeSpan]::Parse($propValue) } + "String" { $propValue = [string]$propValue } + default { Write-Host "Warning: Unknown type $expectedType for $propName. Using string."; $propValue = [string]$propValue } + } + + Set-WebConfigurationProperty -pspath 'MACHINE/WEBROOT/APPHOST' ` + -filter $section ` + -name $propName ` + -value $propValue + } else { + Write-WarningWithTimestamp "Ignoring unknown type for $propName from section $section" + } + } + + $output = @" + $($i; $i++). $section -> + { +$($proxyNode.Attributes | ForEach-Object { " $($_.Name): $($_.Value)" } | Out-String) } +"@ + + Write-HostWithTimestamp $output + } + } + catch { + Write-ErrorWithTimestamp "Failed to import ARR configuration: $_" + if (![string]::IsNullOrEmpty($backupPath)) { + if (Test-Path $backupPath) { + Write-HostWithTimestamp "Backup is available at: $backupPath*" + } + } + throw + } +} + +Import-ARRConfig diff --git a/ebcli/controllers/migrate_scripts/arr_msi_installer.ps1 b/ebcli/controllers/migrate_scripts/arr_msi_installer.ps1 new file mode 100644 index 000000000..3d8e581da --- /dev/null +++ b/ebcli/controllers/migrate_scripts/arr_msi_installer.ps1 @@ -0,0 +1,168 @@ +<# + .SYNOPSIS + Downloads and installs IIS Application Request Routing component. + + .DESCRIPTION + Handles the download and installation of ARR module, + including verification of existing installation and error handling. 
+ + .NOTES + Requires: + - Administrative privileges + - Internet access for downloads + - Windows MSI installer +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +function Test-ARRInstalled { + <# + .SYNOPSIS + Checks if ARR is already installed. + + .DESCRIPTION + Verifies ARR installation by checking for system.webServer/proxy configuration section. + + .OUTPUTS + Boolean indicating whether ARR is installed + #> + Write-HostWithTimestamp "Checking for ARR configuration based on 'system.webServer/proxy'" + $arrFeature = Get-WebConfiguration -Filter "system.webServer/proxy" + return ($null -ne $arrFeature) +} + + +function Download-ARR { + param( + [Parameter(Mandatory=$true)] + [string]$ArrInstallPath + ) + <# + .SYNOPSIS + Downloads ARR MSI installer from Microsoft. + + .PARAMETER ArrInstallPath + Directory where the MSI will be downloaded + + .NOTES + - Downloads from Microsoft's official URL + - Handles 404 and 403 errors with user guidance + - Provides GitHub issue reporting information + #> + $arrMSIPath = "https://download.microsoft.com/download/E/9/8/E9849D6A-020E-47E4-9FD0-A023E99B54EB/requestRouter_amd64.msi" + $ebcliGithub = "https://github.com/aws/aws-elastic-beanstalk-cli" + + try { + Write-HostWithTimestamp "Downloading $arrMSIPath into $ArrInstallPath" + Invoke-WebRequest $arrMSIPath -OutFile "$ArrInstallPath\\requestRouter_amd64.msi" + } catch [System.Net.WebException] { + if ($_.Exception.Response.StatusCode.Value__ -eq 404) { + Write-WarningWithTimestamp @" +The Automatic Request Routing (ARR) module MSI *does not exist* at the following path anymore: + + $arrMSIPath + +Install the latest version of ARR manually and report this issue at $ebcliGithub. +"@ + } elseif ($_.Exception.Response.StatusCode.Value__ -eq 403) { + Write-WarningWithTimestamp @" +Failed to download and install the Automatic Request Routing (ARR) module MSI from the following path: + + $arrMSIPath + +Install the latest version of ARR manually and report this issue on $ebcliGithub. +"@ + } + } catch { + Write-Host "Some exception" + } +} + + +function Install-ARRFromMSI { + param( + [Parameter(Mandatory=$true)] + [string]$ArrInstallPath + ) + <# + .SYNOPSIS + Installs ARR module from downloaded MSI. + + .PARAMETER ArrInstallPath + Directory containing the downloaded MSI file + + .NOTES + - Runs installer in quiet mode + - Waits for installation completion + - Provides detailed error reporting + #> + $ebcliGithub = "https://github.com/aws/aws-elastic-beanstalk-cli" + + try { + Write-HostWithTimestamp "Installing ARR and Rewrite modules" + Start-Process msiexec.exe -ArgumentList "/i $ArrInstallPath\\requestRouter_amd64.msi /quiet" -Wait + Write-HostWithTimestamp "Successfully installed the ARR module" + } + catch { + Write-WarningWithTimestamp @" + Failed to install ARR module: + + $($_.Exception.Message) + + Install the latest versions of these modules manually and report this issue on $ebcliGithub. +"@ + } +} + + +function Install-ARR { + <# + .SYNOPSIS + Orchestrates the complete ARR installation process. + + .DESCRIPTION + Creates installation directory, downloads required MSI, + and executes the installation process. 
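+
+    .EXAMPLE
+        PS C:\> Install-ARR
+        Illustrative invocation only: the function takes no parameters and drives
+        Download-ARR and Install-ARRFromMSI using C:\\installers\\arr-install as its working directory.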
+ + .NOTES + - Creates directory at C:\\installers\\arr-install + - Coordinates download and installation functions + - Provides overall process logging + #> + $arrInstallPath = "C:\\installers\\arr-install" + Write-HostWithTimestamp "Create temp dir, $arrInstallPath, to store installers" + New-Item -ItemType Directory -Path $arrInstallPath -Force | Out-Null + + Download-ARR -ArrInstallPath $arrInstallPath + Install-ARRFromMSI -ArrInstallPath $arrInstallPath +} + + +$arrInstalled = $false +if (Test-ARRInstalled) { + Write-HostWithTimestamp "Application Request Routing is already installed." + $arrInstalled = $true +} +try { + if (! $arrInstalled) { + Install-ARR + if (Test-ARRInstalled) { + Write-HostWithTimestamp "Application Request Routing was installed successfully." + } + else { + throw "ARR installation could not be verified." + } + } +} +catch { + Write-ErrorWithTimestamp "ARR Installation failed: $_" + exit 1 +} diff --git a/ebcli/controllers/migrate_scripts/default_web_site_port_reassignment_template.ps1 b/ebcli/controllers/migrate_scripts/default_web_site_port_reassignment_template.ps1 new file mode 100644 index 000000000..47f111fe0 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/default_web_site_port_reassignment_template.ps1 @@ -0,0 +1,70 @@ +<# + .SYNOPSIS + Reassigns the Default Web Site's port 80 binding to a specified configuration. + + .DESCRIPTION + PowerShell script that modifies the Default Web Site's port binding from + port 80 to a specified port configuration. Includes site restart and + logging functionality. + + .NOTES + Requires: + - WebAdministration module + - Administrative privileges + - ebdeploy_utils.ps1 in same directory +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +function Reassign-DefaultWebSitePort { + <# + .SYNOPSIS + Updates Default Web Site bindings and restarts the site. + + .DESCRIPTION + Locates the port 80 binding of Default Web Site and updates it to + the specified binding configuration. Restarts the website to apply changes. + + .OUTPUTS + Logs binding configuration before and after changes. + + .NOTES + - Exits silently if no port 80 binding exists + - Restarts website as current user + - Requires WebAdministration module + #> + $site = Get-Item "IIS:\\Sites\\Default Web Site" + $bindings = $site.Bindings.Collection + + $bindingToUpdate = $bindings | Where-Object { + $_.protocol -eq "http" -and + $_.bindingInformation -eq "*:80:" + } + if (-not $bindingToUpdate) { + Write-HostWithTimestamp "Site, 'Default Web Site', is already running on a non-80 port:" + Get-WebBinding -Name 'Default Web Site' + return + } + $bindingToUpdate.bindingInformation = "{host}:{port}:{domain}" + + Set-ItemProperty 'IIS:\\Sites\\Default Web Site' -Name bindings -Value $bindings + + $username = [Environment]::UserName + Write-HostWithTimestamp "Restarting Site, 'Default Web Site', as $username for new bindings to take effect." 
+ Stop-Website -Name 'Default Web Site' + Start-Website -Name 'Default Web Site' + Write-HostWithTimestamp "Site, 'Default Web Site', has been reassigned to run with the following bindings:" + Get-WebBinding -Name 'Default Web Site' +} + +Reassign-DefaultWebSitePort diff --git a/ebcli/controllers/migrate_scripts/ebdeploy_utils.ps1 b/ebcli/controllers/migrate_scripts/ebdeploy_utils.ps1 new file mode 100644 index 000000000..843210441 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/ebdeploy_utils.ps1 @@ -0,0 +1,135 @@ +# This script hosts functions for convenience and utility. +# It is meant to be imported by the rest of the EB-defined +# scripts during deployment. + +function utcNow { + return $(Get-Date -Format "yyyy-MM-dd HH:mm:ss") +} + +function Write-HostWithTimestamp { + <# + .SYNOPSIS + Write standard output message with UTC timestamp. + + .DESCRIPTION + Writes a message to the host (standard output) prefixed with UTC timestamp. + Supports pipeline input for the message parameter. + + .PARAMETER Message + The message to write to the host. + + .EXAMPLE + Write-HostWithTimestamp "Deployment started" + # Output: [2024-02-20 15:30:45] Deployment started + + .EXAMPLE + "Process completed" | Write-HostWithTimestamp + # Output: [2024-02-20 15:30:45] Process completed + #> + param( + [Parameter(ValueFromPipeline=$true)] + [string]$Message + ) + + Write-Host "[$(utcNow)] $Message" +} + +function Write-ErrorWithTimestamp { + <# + .SYNOPSIS + Write error message with UTC timestamp. + + .DESCRIPTION + Writes a message to the error stream prefixed with UTC timestamp. + Supports pipeline input for the message parameter. + + .PARAMETER Message + The message to write to the error stream. + + .EXAMPLE + Write-ErrorWithTimestamp "Failed to create website" + # Output: [2024-02-20 15:30:45] Failed to create website + #> + param( + [Parameter(ValueFromPipeline=$true)] + [string]$Message + ) + + Write-Error "[$(utcNow)] $Message" +} + +function Write-WarningWithTimestamp { + <# + .SYNOPSIS + Write warning message with UTC timestamp. + + .DESCRIPTION + Writes a message to the warning stream prefixed with UTC timestamp. + Supports pipeline input for the message parameter. + + .PARAMETER Message + The message to write to the warning stream. + + .EXAMPLE + Write-WarningWithTimestamp "Configuration file not found" + # Output: [2024-02-20 15:30:45] Configuration file not found + #> + param( + [Parameter(ValueFromPipeline=$true)] + [string]$Message + ) + + Write-Warning "[$(utcNow)] $Message" +} + +function Get-GenericWebPathACLRules { + <# + .SYNOPSIS + Get standard IIS web application ACL rules. + + .DESCRIPTION + Returns an array of FileSystemAccessRule objects that define standard + read and execute permissions for IIS web applications. These rules + grant necessary access to IIS service accounts and authenticated users. + + .OUTPUTS + System.Security.AccessControl.FileSystemAccessRule[] + Array of three access rules: + 1. IIS_IUSRS: ReadAndExecute with inheritance + 2. IUSR: ReadAndExecute with inheritance + 3. 
Authenticated Users: ReadAndExecute with inheritance + + .NOTES + All rules are configured with: + - ContainerInherit and ObjectInherit flags + - Allow type access + - ReadAndExecute permissions + #> + $rules = @( + [System.Security.AccessControl.FileSystemAccessRule]::new( + "IIS_IUSRS", + "ReadAndExecute", + "ContainerInherit,ObjectInherit", + "None", + "Allow" + ), + + [System.Security.AccessControl.FileSystemAccessRule]::new( + "IUSR", + "ReadAndExecute", + "ContainerInherit,ObjectInherit", + "None", + "Allow" + ), + + [System.Security.AccessControl.FileSystemAccessRule]::new( + "Authenticated Users", + "ReadAndExecute", + "ContainerInherit,ObjectInherit", + "None", + "Allow" + ) + ) + + return $rules +} diff --git a/ebcli/controllers/migrate_scripts/modify_firewall_config.ps1 b/ebcli/controllers/migrate_scripts/modify_firewall_config.ps1 new file mode 100644 index 000000000..9157cd1d0 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/modify_firewall_config.ps1 @@ -0,0 +1,15 @@ +# This script executes PowerShell commands on a remote machine to +# configure the firewall based on configuration of a source machine + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + $utcNow = Get-Date -Format "yyyy-MM-dd HH:mm:ss" + Write-HostWithTimestamp "[$($utcNow)] Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +{firewall_rules} diff --git a/ebcli/controllers/migrate_scripts/noop.ps1 b/ebcli/controllers/migrate_scripts/noop.ps1 new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/noop.ps1 @@ -0,0 +1 @@ + diff --git a/ebcli/controllers/migrate_scripts/reinstate_iisstart_htm_default_document.ps1 b/ebcli/controllers/migrate_scripts/reinstate_iisstart_htm_default_document.ps1 new file mode 100644 index 000000000..f8fffbadb --- /dev/null +++ b/ebcli/controllers/migrate_scripts/reinstate_iisstart_htm_default_document.ps1 @@ -0,0 +1,13 @@ +# This script ensures that iisstart.htm is reinstated as a DefaultDocument + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Add-WebConfigurationProperty -Filter "system.webServer/defaultDocument/files" -Name "." -Value @{value='iisstart.htm'} diff --git a/ebcli/controllers/migrate_scripts/site_installer_template.ps1 b/ebcli/controllers/migrate_scripts/site_installer_template.ps1 new file mode 100644 index 000000000..2f44bc3d5 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/site_installer_template.ps1 @@ -0,0 +1,212 @@ +<# +.SYNOPSIS + Installs and configures an IIS website with specified bindings and application pool. + +.DESCRIPTION + Deploys a website from a ZIP package, configuring IIS bindings, application pool, + and file system permissions. Handles ARR configuration if enabled. 
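+
+    Note: this file is a template. Tokens such as {site_name}, {physical_path},
+    {binding_protocol_powershell_array}, and {invoke_arr_import_script_call} are
+    substituted by the EB CLI when it generates the deployment bundle; the rendered
+    script is then executed on the target instance.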
+ +.NOTES + Requires: + - WebAdministration module + - Web Deploy V3 (msdeploy.exe) + - Administrative privileges + - Source ZIP at C:\\staging\\{site_name}.zip + - ebdeploy_utils.ps1 in same directory +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +$destination = "{physical_path}" +$sourceZip = "C:\staging\{site_name}.zip" +if (-not (Test-Path $sourceZip)) { + Write-HostWithTimestamp "$sourceZip not found. Nothing to do." + exit 0 +} +Write-HostWithTimestamp "Found or expecting source ZIP at $sourceZip" +$websiteName = "{site_name}" +$appPoolName = "{site_name}" + +function Ensure-AppPool { + param( + [Parameter(Mandatory=$true)] + [string]$Name + ) + <# + .SYNOPSIS + Creates and configures an IIS application pool with standard settings. + + .DESCRIPTION + Creates a new application pool if it doesn't exist, configures it with + .NET 4.0 runtime, integrated pipeline mode, and ApplicationPoolIdentity. + + .PARAMETER Name + Name of the application pool to create/configure + + .NOTES + - Silently handles creation if pool already exists + - Sets managed runtime to v4.0 + - Uses integrated pipeline mode + - Uses ApplicationPoolIdentity + #> + try { + Write-HostWithTimestamp "Attempting to create application pool, $Name" + New-WebAppPool -Name $Name -ErrorAction SilentlyContinue + } catch {} + + Write-HostWithTimestamp "Setting AppPool properties of application pool, $Name" + Set-ItemProperty IIS:\\AppPools\\$Name -Name "managedRuntimeVersion" -Value "v4.0" + Set-ItemProperty IIS:\\AppPools\\$Name -Name "managedPipelineMode" -Value "Integrated" + Set-ItemProperty IIS:\\AppPools\\$Name -Name "processModel.identityType" -Value "ApplicationPoolIdentity" + + try { + Write-HostWithTimestamp "Setting AppPool properties of application pool, $Name" + Start-WebAppPool -Name $Name + } + catch {} + Write-HostWithTimestamp "Application pool '$Name' configured and started." +} + +function Ensure-Website { + param( + [Parameter(Mandatory=$true)] + [string]$Name, + + [Parameter(Mandatory=$true)] + [string]$Path, + + [Parameter(Mandatory=$true)] + [string]$AppPoolName + ) + <# + .SYNOPSIS + Creates and configures an IIS website with specified settings. + + .DESCRIPTION + Creates a new IIS website if it doesn't exist, configures its application + pool and bindings. Skips creation if website already exists. + + .PARAMETER Name + Name of the website to create + + .PARAMETER Path + Physical path for the website content + + .PARAMETER AppPoolName + Name of the application pool to use + + .NOTES + - Checks for existing website before creation + - Configures bindings from pre-defined array + - Associates with specified application pool + #> + if (Get-Website -Name $Name) { + Write-HostWithTimestamp "Site, $Name, already exists. Returning." + return + } + + Write-HostWithTimestamp "Creating new Site, $Name, to run on application pool, $AppPoolName." 
+ New-Website -Name $Name -PhysicalPath $Path -ApplicationPool $AppPoolName | Out-Null + Set-ItemProperty IIS:\\Sites\\$Name -Name applicationPool -Value $AppPoolName + + $bindings = @{ + {binding_protocol_powershell_array} + } + + # Create an array of binding objects + $bindingsArray = $bindings.GetEnumerator() | ForEach-Object { + @{ + protocol = $_.Value + bindingInformation = $_.Key + } + } + + Write-HostWithTimestamp "Associating the following bindings with Site, $Name" + $bindingsArray + Set-ItemProperty IIS:\\Sites\\$Name -Name bindings -Value $bindingsArray + Write-HostWithTimestamp "Website, $Name, created or updated." +} + +function Install-Website { + param( + [Parameter(Mandatory=$true)] + [string]$Name + ) + <# + .SYNOPSIS + Deploys website content using Web Deploy and configures permissions. + + .DESCRIPTION + Uses msdeploy.exe to sync website content from a ZIP package, sets + application pool association, and configures file system permissions. + + .PARAMETER Name + Name of the website to install + + .NOTES + - Uses Web Deploy V3 + - Skips app pool and ACL settings during sync + - Applies standard IIS file permissions + - Requires Get-GenericWebPathACLRules from ebdeploy_utils.ps1 + #> + + $msDeploy = "C:\\Program Files\\IIS\\Microsoft Web Deploy V3\\msdeploy.exe" + Write-HostWithTimestamp "Using msdeploy.exe at $msDeploy to sync $sourceZip with its destination" + + & $msDeploy ` + -verb:sync ` + -source:package="$sourceZip" ` + -dest:auto ` + -skip:objectName=appPool ` + -skip:objectName=setAcl ` + + Set-ItemProperty "IIS:\\Sites\\$Name" -Name "applicationPool" -Value $Name + + $acl = Get-Acl '{physical_path}' + + Write-HostWithTimestamp "Granting ReadAndExecute permissions ot IIS_IUSRS, IUSR, and Authenticated Users" + foreach ($rule in $(Get-GenericWebpathACLRules)) { + $acl.AddAccessRule($rule) + } + Set-Acl '{physical_path}' $acl +} + +function Invoke-ARRImportScript { + <# + .SYNOPSIS + Installs Application Request Routing components. + + .DESCRIPTION + Executes the ARR MSI installer script to set up Application Request + Routing features. + + .NOTES + - Script must be at C:\\staging\\ebmigrateScripts\\arr_msi_installer.ps1 + - Silently handles execution errors + #> + try { + & 'C:\\staging\\ebmigrateScripts\\arr_msi_installer.ps1' + Write-HostWithTimestamp "Successfully executed 'C:\\staging\\ebmigrateScripts\\arr_msi_installer.ps1'" + } + catch {} +} + +Ensure-AppPool -Name $appPoolName +New-Item -ItemType Directory -Force -Path $destination | Out-Null +Ensure-Website -Name $websiteName -Path $destination -AppPoolName $appPoolName +Install-Website -Name $websiteName +Write-HostWithTimestamp "Stopping site, $websiteName" +Stop-Website -Name $websiteName -ErrorAction SilentlyContinue +{invoke_arr_import_script_call} +Start-Website -Name $websiteName +Write-HostWithTimestamp "Started site, $websiteName" diff --git a/ebcli/controllers/migrate_scripts/site_removal_template.ps1 b/ebcli/controllers/migrate_scripts/site_removal_template.ps1 new file mode 100644 index 000000000..364419b23 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/site_removal_template.ps1 @@ -0,0 +1,46 @@ +<# + .SYNOPSIS + Removes a specified IIS website during Elastic Beanstalk uninstallation. + + .DESCRIPTION + PowerShell script that safely stops and removes a specified IIS website. + Includes error handling and detailed logging of the removal process. 
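+
+    Note: this file is a template; the {site_name} token is substituted by the
+    EB CLI when the deployment bundle is generated.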
+ + .NOTES + Requires: + - WebAdministration module + - Administrative privileges + - ebdeploy_utils.ps1 in same directory + + .OUTPUTS + Logs the removal process with timestamps: + - Site stop operation + - Site removal operation + - Any errors encountered during the process +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +$websiteName = "{site_name}" + +try { + Write-HostWithTimestamp "Stopping IIS site, $websiteName." + Stop-Website -Name $websiteName + Write-HostWithTimestamp "Successfully stopped IIS site, $websiteName." + Write-HostWithTimestamp "Removing IIS site $websiteName from IIS server." + Remove-Website -Name $websiteName + Write-HostWithTimestamp "Successfully removed site $websiteName from IIS server." +} catch { + Write-ErrorWithTimestamp "Could not remove IIS site ${websiteName}: $_" + throw $_ +} diff --git a/ebcli/controllers/migrate_scripts/site_restart_template.ps1 b/ebcli/controllers/migrate_scripts/site_restart_template.ps1 new file mode 100644 index 000000000..92e391101 --- /dev/null +++ b/ebcli/controllers/migrate_scripts/site_restart_template.ps1 @@ -0,0 +1,46 @@ +<# + .SYNOPSIS + Restarts a specified IIS website during Elastic Beanstalk deployment. + + .DESCRIPTION + PowerShell script that safely stops and starts a specified IIS website. + Includes checks for site existence and detailed logging of the restart process. + + .NOTES + Requires: + - WebAdministration module + - Administrative privileges + - ebdeploy_utils.ps1 in same directory + + .OUTPUTS + Logs the restart process with timestamps, including: + - Site existence verification + - Stop operation + - Start operation + - Cases where site doesn't exist +#> + +. "$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +$websiteName = "{site_name}" + +if (Get-Website -Name $websiteName) { + Write-HostWithTimestamp "Restarting IIS site, $websiteName." + Write-HostWithTimestamp "Stopping ..." + Stop-Website -Name $websiteName + Write-HostWithTimestamp "Starting ..." + Start-Website -Name $websiteName +} +else { + Write-HostWithTimestamp "Website IIS site, $websiteName, doesn't exist. Nothing to restart." +} diff --git a/ebcli/controllers/migrate_scripts/windows_proxy_feature_enabler.ps1 b/ebcli/controllers/migrate_scripts/windows_proxy_feature_enabler.ps1 new file mode 100644 index 000000000..9084593dc --- /dev/null +++ b/ebcli/controllers/migrate_scripts/windows_proxy_feature_enabler.ps1 @@ -0,0 +1,20 @@ +# This script imports Automatic Request Routing (ARR) config from arr_*.xml +# files, if found, exported by some source machine. This script will also +# ensure that the ARR and Rewrite modules are installed from well-known +# locations. + +. 
"$PSScriptRoot\\ebdeploy_utils.ps1" + +if (-not [Environment]::Is64BitProcess) { + Write-HostWithTimestamp "Restarting in 64-bit PowerShell" + $scriptPath = $MyInvocation.MyCommand.Path + $args = "-ExecutionPolicy unrestricted -NonInteractive -NoProfile -File `"$scriptPath`"" + Start-Process "$env:windir\\sysnative\\WindowsPowerShell\\v1.0\\powershell.exe" -ArgumentList $args -Wait -NoNewWindow + exit +} + +Import-Module WebAdministration + +Write-HostWithTimestamp "Installing Web-Application-Proxy Windows feature. This may take a few minutes." +Install-WindowsFeature Web-Application-Proxy +Write-HostWithTimestamp "Successfully installed Web-Application-Proxy" diff --git a/ebcli/core/ebcore.py b/ebcli/core/ebcore.py index b9288b08d..62e860152 100644 --- a/ebcli/core/ebcore.py +++ b/ebcli/core/ebcore.py @@ -32,6 +32,7 @@ from ebcli.controllers.list import ListController from ebcli.controllers.local import LocalController from ebcli.controllers.logs import LogsController +from ebcli.controllers.migrate import MigrateController, MigrateExploreController, MigrateCleanupController from ebcli.controllers.open import OpenController from ebcli.controllers.platform import PlatformController from ebcli.controllers.platform.initialize import PlatformInitController @@ -91,6 +92,9 @@ def setup(self): ListController, LocalController, LogsController, + MigrateController, + MigrateExploreController, + MigrateCleanupController, OpenController, PlatformController, PrintEnvController, diff --git a/ebcli/lib/aws.py b/ebcli/lib/aws.py index d4f13a5bb..fd9982997 100644 --- a/ebcli/lib/aws.py +++ b/ebcli/lib/aws.py @@ -25,6 +25,7 @@ from cement.utils.misc import minimal_logger from ebcli import __version__ +from ebcli.core import fileoperations from ebcli.lib.botopatch import apply_patches from ebcli.lib.utils import static_var from ebcli.objects.exceptions import ServiceError, NotAuthorizedError, \ @@ -198,6 +199,8 @@ def make_api_call(service_name, operation_name, **operation_options): region = _region_name if not region: region = 'default' + if region == 'placeholder': + set_region(fileoperations.get_config_setting('global', 'default_region')) attempt = 0 while True: diff --git a/ebcli/lib/ec2.py b/ebcli/lib/ec2.py index 5218b6cf1..f212586f1 100644 --- a/ebcli/lib/ec2.py +++ b/ebcli/lib/ec2.py @@ -10,13 +10,17 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import collections +import urllib.parse, urllib.request, urllib.error +import socket from cement.utils.misc import minimal_logger from ebcli.lib import aws from ebcli.objects.exceptions import ServiceError, AlreadyExistsError, \ - NotFoundError + NotFoundError, NotAnEC2Instance from ebcli.resources.strings import responses +from ebcli.core import fileoperations, io LOG = minimal_logger(__name__) @@ -134,3 +138,327 @@ def terminate_instance(instance_id): def reboot_instance(instance_id): return _make_api_call('reboot_instances', InstanceIds=[instance_id]) + + +def ensure_vpc_exists(vpc_id): + return _make_api_call('describe_vpcs', + VpcIds=[vpc_id]) + + +# Function to get metadata +def get_instance_metadata(path): + metadata_url = f"http://169.254.169.254/latest/meta-data/{path}" + token_url = "http://169.254.169.254/latest/api/token" + + token_request = urllib.request.Request(token_url, method="PUT") + token_request.add_header("X-aws-ec2-metadata-token-ttl-seconds", "21600") + try: + with urllib.request.urlopen(token_request) as token_response: + token = token_response.read().decode('utf-8') + + metadata_request = urllib.request.Request(metadata_url) + metadata_request.add_header("X-aws-ec2-metadata-token", token) + with urllib.request.urlopen(metadata_request, timeout=5) as response: + return response.read().decode('utf-8') + except (urllib.error.URLError, socket.timeout, ConnectionError) as e: + if _is_timeout_exception(e): + LOG.debug("Communication with IMDSv2 timed out. This is likely not an EC2 instance.") + raise NotAnEC2Instance(e) + raise e + + +def _is_timeout_exception(exception: urllib.error.URLError) -> bool: + return ( + isinstance(exception.__dict__.get('reason', False), TimeoutError) + or 'timed out' in str(exception) + ) + + +def get_current_instance_details(): + instance_id = get_instance_metadata('instance-id') + availability_zone = get_instance_metadata('placement/availability-zone') + region = availability_zone[:-1] + aws.set_region(region) + fileoperations.write_config_setting('global', 'default_region', region) + mac_address = get_instance_metadata('mac') + vpc_id = get_instance_metadata(f'network/interfaces/macs/{mac_address}/vpc-id') + subnet_id = get_instance_metadata(f'network/interfaces/macs/{mac_address}/subnet-id') + try: + ensure_vpc_exists(vpc_id) + instance = describe_instance(instance_id=instance_id) + except Exception as e: + if 'InvalidVpcID.NotFound' in str(e) or f"The vpc ID '{vpc_id}' does not exist" in str(e): + io.log_warning(f'Unable to retrieve details of VPC, {vpc_id}') + vpc_id, subnet_id, instance_id, security_group_ids, tags = None, None, None, [], [] + elif 'InvalidInstanceID.NotFound' in str(e): + vpc_id, subnet_id, instance_id, security_group_ids, tags = None, None, None, [], [] + io.log_warning(f'Unable to retrieve details of instance, {instance_id}') + instance = None + if instance: + security_group_ids = [sg['GroupId'] for sg in instance['SecurityGroups']] + else: + security_group_ids = [] + + try: + tags = instance_tags(instance_id) + except Exception: + # Probably client-error, ignore + tags = [] + + return { + 'InstanceId': instance_id, + 'VpcId': vpc_id, + 'SubnetId': subnet_id, + 'SecurityGroupIds': security_group_ids, + 'Region': region, + 'Tags': tags, + } + + +def list_subnets(vpc_id): + result = _make_api_call( + 'describe_subnets', + Filters=[ + { + 'Name': 'vpc-id', + 'Values': [vpc_id] + }, + ] + ) + return [subnet['SubnetId'] for subnet in result['Subnets']] + + +def list_subnets_azs_interleaved(vpc_id): + result = 
_make_api_call(
+        'describe_subnets',
+        Filters=[
+            {
+                'Name': 'vpc-id',
+                'Values': [vpc_id]
+            },
+        ]
+    )
+
+    subnet_map = collections.defaultdict(list)
+    subnet_ids = []
+    for subnet in result['Subnets']:
+        subnet_map[subnet['AvailabilityZone']].append(subnet['SubnetId'])
+    keys = subnet_map.keys()
+    # Round-robin across availability zones, skipping zones that have run out of
+    # subnets, so the returned list interleaves AZs as the function name promises.
+    while any([v for v in subnet_map.values()]):
+        for key in keys:
+            if subnet_map[key]:
+                subnet_ids.append(subnet_map[key].pop())
+    return subnet_ids
+
+
+def get_instance_volumes(instance_id):
+    response = _make_api_call(
+        'describe_volumes',
+        Filters=[
+            {
+                'Name': 'attachment.instance-id',
+                'Values': [instance_id]
+            }
+        ]
+    )
+    return response['Volumes']
+
+
+def enable_ebs_volume_encryption():
+    _make_api_call('enable_ebs_encryption_by_default')
+
+
+def instance_tags(instance_id):
+    response = _make_api_call(
+        'describe_tags',
+        Filters=[
+            {
+                'Name': 'resource-id',
+                'Values': [instance_id]
+            }
+        ]
+    )
+    return [
+        {
+            "Key": tag['Key'],
+            "Value": tag['Value']
+        }
+        for tag in response['Tags']
+    ]
+
+
+def establish_security_group(ports, env_name, vpc_id):
+    ec2_security_group = get_security_group(f'{env_name}-EC2')
+    if ec2_security_group:
+        ec2_security_group_id = ec2_security_group['GroupId']
+        revoke_security_group_ingress(ec2_security_group_id, ec2_security_group['IpPermissions'])
+    else:
+        ec2_security_group_id = _create_security_group(f'{env_name}-EC2', vpc_id, f'EC2 Security group for {env_name}-EC2')
+
+    alb_security_group = get_security_group(f'{env_name}-ALB')
+    if alb_security_group:
+        alb_security_group_id = alb_security_group['GroupId']
+        revoke_security_group_egress(alb_security_group_id, alb_security_group['IpPermissionsEgress'])
+    else:
+        alb_security_group_id = _create_security_group(f'{env_name}-ALB', vpc_id, f'ALB Security group for {env_name}-ALB')
+
+    ingress_permissions, egress_permissions = create_peer_security_group_permissions(
+        ports,
+        ec2_security_group_id,
+        alb_security_group_id,
+        vpc_id
+    )
+    authorize_security_group_egress(alb_security_group_id, egress_permissions)
+    authorize_security_group_ingress(ec2_security_group_id, ingress_permissions)
+
+    return [
+        {
+            'Namespace': 'aws:elbv2:loadbalancer',
+            'OptionName': 'SecurityGroups',
+            'Value': alb_security_group_id
+        },
+        {
+            'Namespace': 'aws:autoscaling:launchconfiguration',
+            'OptionName': 'SecurityGroups',
+            'Value': ec2_security_group_id
+        },
+    ]
+
+
+def create_peer_security_group_permissions(
+    ports,
+    from_security_group,
+    to_security_group,
+    vpc_id
+):
+    egress_permissions, ingress_permissions = [], []
+    for port in ports:
+        egress_permission = define_group_pair_permission(
+            port,
+            from_security_group,
+            f"Rule to allow {from_security_group} to access {to_security_group} at port {port} over tcp"
+        )
+
+        ingress_permission = define_group_pair_permission(
+            port,
+            to_security_group,
+            f"Rule to allow {to_security_group} to receive traffic from {from_security_group} at {port} over tcp"
+        )
+
+        if vpc_id:
+            ingress_permission["UserIdGroupPairs"][0]["VpcId"] = vpc_id
+            egress_permission["UserIdGroupPairs"][0]["VpcId"] = vpc_id
+
+        egress_permissions.append(egress_permission)
+        ingress_permissions.append(ingress_permission)
+
+    return ingress_permissions, egress_permissions
+
+
+def define_group_pair_permission(port, security_group_id, description):
+    return {
+        'IpProtocol': 'tcp',
+        'FromPort': port,
+        'ToPort': port,
+        "UserIdGroupPairs": [
+            {
+                "Description": description,
+                "GroupId": security_group_id,
+            }
+        ],
+    }
+
+
+def 
revoke_security_group_ingress(security_group_ip, ip_permissions): + kwargs = { + 'GroupId': security_group_ip, + 'IpPermissions': ip_permissions, + } + try: + _make_api_call( + 'revoke_security_group_ingress', + **kwargs + ) + except Exception as e: + if 'MissingParameter' in str(e) or "Either 'ipPermissions' or 'securityGroupRuleIds' should be provided." in str(e): + return + raise e + + +def revoke_security_group_egress(security_group_id, ip_permissions_egress): + kwargs = { + 'GroupId': security_group_id, + 'IpPermissions': ip_permissions_egress, + } + try: + _make_api_call( + 'revoke_security_group_egress', + **kwargs + ) + except Exception as e: + if 'MissingParameter' in str(e) or "Either 'ipPermissions' or 'securityGroupRuleIds' should be provided." in str(e): + return + raise e + + +def authorize_security_group_ingress(security_group_id, ingress_permissions): + kwargs = { + 'GroupId': security_group_id, + 'IpPermissions': ingress_permissions, + } + try: + _make_api_call( + 'authorize_security_group_ingress', + **kwargs + ) + except Exception as e: + if 'MissingParameter' in str(e): + return + raise e + + +def authorize_security_group_egress(security_group_id, egress_permissions): + kwargs = { + 'GroupId': security_group_id, + 'IpPermissions': egress_permissions, + } + try: + _make_api_call( + 'authorize_security_group_egress', + **kwargs + ) + except Exception as e: + if 'MissingParameter' in str(e): + return + if 'already exists' in str(e): + LOG.debug(f"Received non-fatal exception {str(e)} during invocation of ec2::authorize_security_group_egress.") + return + raise e + + +def get_security_group(group_name): + try: + response = _make_api_call( + 'describe_security_groups', + GroupNames=[group_name] + ) + return response['SecurityGroups'][0] + except Exception as e: + if 'InvalidGroup.NotFound' in str(e) or 'does not exist' in str(e): + return None + raise e + + +def _create_security_group(group_name, vpc_id, description): + kwargs = { + 'GroupName': group_name, + 'Description': description, + } + if vpc_id: + kwargs['VpcId'] = vpc_id + + response = _make_api_call( + 'create_security_group', + **kwargs + ) + + return response['GroupId'] diff --git a/ebcli/objects/exceptions.py b/ebcli/objects/exceptions.py index 80a524769..c1a5a31c4 100644 --- a/ebcli/objects/exceptions.py +++ b/ebcli/objects/exceptions.py @@ -209,3 +209,7 @@ class EndOfTestError(EOFError): Must not be raised in source code. 
""" pass + + +class NotAnEC2Instance(EBCLIException): + pass diff --git a/ebcli/objects/requests.py b/ebcli/objects/requests.py index 8eb83be4f..5b80416a7 100644 --- a/ebcli/objects/requests.py +++ b/ebcli/objects/requests.py @@ -60,7 +60,9 @@ def __init__(self, app_name=None, env_name=None, cname=None, platform=None, elb_type=None, shared_lb=None, shared_lb_port=None, enable_spot=None, instance_types=None, spot_max_price=None, on_demand_base_capacity=None, on_demand_above_base_capacity=None, min_instances=None, - max_instances=None): + max_instances=None, block_device_mappings=None, listener_configs=None, + description=None, load_balancer_security_group=None, ec2_security_group=None, + ssl_certificate=None, root_volume=None): self.app_name = app_name self.cname = cname self.env_name = env_name @@ -94,7 +96,7 @@ def __init__(self, app_name=None, env_name=None, cname=None, platform=None, self.scale = None self.option_settings = [] self.compiled = False - self.description = strings['env.description'] + self.description = description or strings['env.description'] self.enable_spot = enable_spot self.instance_types = instance_types self.spot_max_price = spot_max_price @@ -102,6 +104,16 @@ def __init__(self, app_name=None, env_name=None, cname=None, platform=None, self.on_demand_above_base_capacity = on_demand_above_base_capacity self.min_instances = min_instances self.max_instances = max_instances + self.block_device_mappings = block_device_mappings + self.ssl_certificate = ssl_certificate + if listener_configs: + self.merge_option_settings(listener_configs) + if load_balancer_security_group: + self.merge_option_settings([load_balancer_security_group]) + if ec2_security_group: + security_groups_expressed_through_vpc_config = self.vpc and self.vpc.get('securitygroups') + if not security_groups_expressed_through_vpc_config : + self.merge_option_settings([ec2_security_group]) if not self.app_name: raise TypeError(self.__class__.__name__ + ' requires key-word argument app_name') @@ -113,6 +125,13 @@ def __init__(self, app_name=None, env_name=None, cname=None, platform=None, raise TypeError('key-word argument scale must be of type int') else: self.scale = str(scale) + if ssl_certificate: + self.add_option_setting( + namespaces.LOAD_BALANCER_V2, + option_names.SSL_CERT_ID, + ssl_certificate) + if root_volume: + self.merge_option_settings(root_volume) def __eq__(self, other): self_dict = copy.deepcopy(self.__dict__) @@ -139,6 +158,12 @@ def add_option_setting(self, namespace, option_name, value, resource=None): self.option_settings.append(setting) + def merge_option_settings(self, option_settings): + if not option_settings: + return + self.option_settings = self.option_settings or [] + self.option_settings += option_settings + def convert_to_kwargs(self): self.compile_option_settings() return self.get_standard_kwargs() @@ -245,6 +270,12 @@ def compile_common_options(self): namespaces.ENVIRONMENT, option_names.LOAD_BALANCER_TYPE, self.elb_type) + if self.block_device_mappings: + self.add_option_setting( + namespaces.LAUNCH_CONFIGURATION, + option_names.BLOCK_DEVICE_MAPPINGS, + self.block_device_mappings + ) def add_client_defaults(self): if self.template_name: @@ -314,26 +345,26 @@ def compile_vpc_options(self): namespace = namespaces.VPC self.add_option_setting(namespace, option_names.VPC_ID, self.vpc['id']) - if self.vpc['publicip']: + if self.vpc.get('publicip'): self.add_option_setting( namespace, option_names.PUBLIC_IP, self.vpc['publicip'] ) - if self.vpc['elbscheme']: + if 
self.vpc.get('elbscheme'): self.add_option_setting(namespace, option_names.ELB_SCHEME, self.vpc['elbscheme']) - if self.vpc['elbsubnets']: + if self.vpc.get('elbsubnets'): self.add_option_setting(namespace, option_names.ELB_SUBNETS, self.vpc['elbsubnets']) - if self.vpc['ec2subnets']: + if self.vpc.get('ec2subnets'): self.add_option_setting(namespace, option_names.SUBNETS, self.vpc['ec2subnets']) - if self.vpc['securitygroups']: + if self.vpc.get('securitygroups'): self.add_option_setting(namespaces.LAUNCH_CONFIGURATION, option_names.SECURITY_GROUPS, self.vpc['securitygroups']) - if self.vpc['dbsubnets']: + if self.vpc.get('dbsubnets'): self.add_option_setting(namespace, option_names.DB_SUBNETS, self.vpc['dbsubnets']) diff --git a/ebcli/objects/solutionstack.py b/ebcli/objects/solutionstack.py index f1466272c..60d935ea5 100644 --- a/ebcli/objects/solutionstack.py +++ b/ebcli/objects/solutionstack.py @@ -431,6 +431,35 @@ def match_with_pythonified_solution_string( if solution_stack.pythonify() == pythonified_solution_string.lower(): return solution_stack + @classmethod + def match_with_windows_server_version_string( + cls, + solution_stack_list, + windows_server_version_string + ): + if 'windows' not in windows_server_version_string.lower(): + return + try: + version_substring = windows_server_version_string.split('Windows Server ')[1] + version_year = version_substring.split(' ')[0].strip() + except IndexError: + version_year = None + pass + good_match = None + better_match = None + for solution_stack in solution_stack_list: + if 'windows server' in solution_stack.name.lower(): + good_match = solution_stack + if version_year and version_year in solution_stack.name: + # if you already have non-core, stick to it + if better_match and 'core' not in solution_stack.name.lower(): + better_match = solution_stack + # set core or non-core since we don't have anything better than a "good" match + elif not better_match: + better_match = solution_stack + + return better_match or good_match + def __language_version(self, match_number=0): """ Private method returns a the version number of language. 
If there are multiple versions, diff --git a/ebcli/operations/commonops.py b/ebcli/operations/commonops.py index 40bcabf40..ff838bf28 100644 --- a/ebcli/operations/commonops.py +++ b/ebcli/operations/commonops.py @@ -13,6 +13,7 @@ import os import sys import time +import typing from datetime import datetime, timedelta import platform import zipfile @@ -532,9 +533,12 @@ def create_app_version(app_name, process=False, label=None, message=None, staged file_name, file_path = _zip_up_project( version_label, source_control, staged=staged) elif zipfile.is_zipfile(source_bundle): + if not label: + label = f"{source_control.get_version_label()}.zip" file_name, file_path = label, source_bundle - return handle_upload_target(app_name, + return handle_upload_target( + app_name, s3_bucket, s3_key, file_name, @@ -1063,6 +1067,19 @@ def get_region(region_argument, interactive, force_non_interactive=False, platfo return region +def get_region_force_non_interactive(_platform: typing.Optional[str]) -> str: + region = None + if _platform: + region = PlatformVersion.get_region_from_platform_arn(_platform) + + if region: + return region + + # Choose defaults + region_list = get_all_regions() + return region_list[2].name + + def check_credentials(profile, given_profile, given_region, interactive, force_non_interactive): try: # Note, region is None unless explicitly set or read from old eb diff --git a/ebcli/operations/gitops.py b/ebcli/operations/gitops.py index 8f32e6bb3..80dbd6e01 100644 --- a/ebcli/operations/gitops.py +++ b/ebcli/operations/gitops.py @@ -93,7 +93,7 @@ def initialize_codecommit(): source_control_setup = False if not source_control_setup: - io.log_error("Cannot setup CodeCommit because there is no Source Control setup") + LOG.debug("Cannot setup CodeCommit because there is no Source Control setup") return if codecommit.region_supported(): diff --git a/ebcli/operations/solution_stack_ops.py b/ebcli/operations/solution_stack_ops.py index d5ae6f732..49bf23e5c 100644 --- a/ebcli/operations/solution_stack_ops.py +++ b/ebcli/operations/solution_stack_ops.py @@ -91,6 +91,7 @@ def find_solution_stack_from_string(solution_string, find_newer=False): SolutionStack.match_with_solution_string_shorthand, SolutionStack.match_with_solution_string_language_name, SolutionStack.match_with_pythonified_solution_string, + SolutionStack.match_with_windows_server_version_string, ]: if not match: match = solution_string_matcher(available_solution_stacks, solution_string) diff --git a/ebcli/resources/statics.py b/ebcli/resources/statics.py index 6aac27f31..77bde49c4 100644 --- a/ebcli/resources/statics.py +++ b/ebcli/resources/statics.py @@ -69,6 +69,7 @@ class namespaces(object): class option_names(object): BATCH_SIZE = 'BatchSize' BATCH_SIZE_TYPE = 'BatchSizeType' + BLOCK_DEVICE_MAPPINGS = 'BlockDeviceMappings' CONNECTION_DRAINING = 'ConnectionDrainingEnabled' CROSS_ZONE = 'CrossZone' DB_DELETION_POLICY = 'DBDeletionPolicy' diff --git a/ebcli/resources/strings.py b/ebcli/resources/strings.py index 6f9694869..c22f6b6e8 100644 --- a/ebcli/resources/strings.py +++ b/ebcli/resources/strings.py @@ -223,8 +223,7 @@ 'sstacks.notaversion': 'Elastic Beanstalk could not find any supported platforms for the ' 'given version {version}.', 'timeout.error': "The EB CLI timed out after {timeout_in_minutes} minute(s). The operation " - "might still be running. To keep viewing events, run 'eb events -f'. To " - "set timeout duration, use '--timeout MINUTES'.", + "might still be running. 
To keep viewing events, run 'eb events -f'.", 'sc.notfound': 'Git is not set up for this project. EB CLI will deploy a .zip file of the ' 'entire directory.', 'exit.platformworkspacenotsupported': 'This command is not supported outside Application workspaces.', @@ -496,8 +495,6 @@ 'attributes. Unable to continue with deployment.', 'appversion.attribute.success': 'Found attributes for application version {app_version}', - 'codecommit.nosc': 'Cannot setup CodeCommit because there is no Source Control setup, ' - 'continuing with initialization', 'codecommit.norepo': 'Repository does not exist in CodeCommit', 'codecommit.nobranch': 'Branch does not exist in CodeCommit', 'codecommit.badregion': 'AWS CodeCommit is not supported in this region; continuing ' @@ -707,6 +704,8 @@ 'Would you like your new environment to use a shared load balancer?', 'sharedlb.shared_load_balancer_prompt': 'Select a shared load balancer', 'sharedlb.listener_prompt': 'Select a listener port for your shared load balancer', + + 'migrate.should_cleanup': 'Are you sure you would like to cleanup older artifacts within `./migrations/`?', } alerts = { @@ -866,6 +865,64 @@ ] ), + 'migrate.sites': 'Specify a comma-separated list of IIS sites to migrate. If not specified,\n' + 'migrates all available sites on the server.', + 'migrate.environment_name': 'Name for the new Elastic Beanstalk environment. Defaults to EBMigratedApp.', + 'migrate.application_name': 'Name for the new Elastic Beanstalk application. Defaults to EBMigratedEnv.', + + 'migrate.platform': 'Elastic Beanstalk platform runtime for the environment. If not specified,\n' + 'automatically detected from host VM or application.\n' + 'Example: "64bit Windows Server 2016 v2.16.2 running IIS 10.0"', + + 'migrate.execution_role': 'IAM role for executing eb migrate. Uses credentials from:\n' + '1. ~/.aws/config\n' + '2. AWS CLI credential chain (if config not found)', + 'migrate.instance_type': 'EC2 instance type for the Elastic Beanstalk environment. 
Defaults to c5.2xlarge.',
+    'migrate.cname': 'CNAME prefix for the Elastic Beanstalk environment.',
+    'migrate.instance_profile': 'Instance Profile to associate with the environment\'s EC2 instances.',
+    'migrate.service_role': 'IAM service role for Elastic Beanstalk to manage related AWS services.',
+    'migrate.ebs_snapshots': 'Comma-separated list of EBS snapshot IDs to associate with the environment.',
+    'migrate.stream_to_cloudwatch': 'Stream EB CLI debug logs and execution metrics to CloudWatch.',
+    'migrate.use_host_ebs_configuration': 'Generate EBS snapshots from volumes attached to the current instance.',
+    'migrate.keyname': 'EC2 key pair to enable SSH/RDP access to environment instances.\n'
+                       'Useful for investigating instance-level issues not visible in logs.',
+    'migrate.interactive': 'Force interactive mode for the migration process.',
+    'migrate.tags': 'Comma-separated list of key=value pairs to tag new resources:\n'
+                    '- Elastic Beanstalk application\n'
+                    '- Environment\n'
+                    '- Application version',
+    'migrate.copy_deps': 'Include all dependency DLLs in the environment, including those in Global\n'
+                         'Assembly Cache (GAC).',
+    'migrate.archive_only': 'Create only the destination archive directory without deployment.\n'
+                            'The resulting directory can be manually deployed following:\n'
+                            'https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/GettingStarted.DeployApp.html',
+    'migrate.on_prem_mode': 'Execute EB CLI in EC2-agnostic mode.',
+    'migrate.force': 'Force non-interactive mode for the migration process.',
+    'migrate.cleanup': 'Remove all previous migration artifacts from ./migrations/*\n'
+                       ' except ./migrations/latest/',
+    'migrate.explore': 'List all available IIS sites on this server.\n'
+                       'Use --verbose for detailed information.',
+    'migrate.copy_firewall_config': 'Copy source server firewall configuration to the destination\n'
+                                    'for all HTTP ports with active bindings.',
+    'migrate.encrypt_ebs_volumes': 'Enforce encryption for all new EBS volumes.\n'
+                                   'Note: This is an account-wide setting that affects all future\n'
+                                   'EBS volume creation.',
+    'migrate.ssl_certificate_arns': 'Comma-separated list of AWS Certificate Manager (ACM) SSL certificate\n'
+                                    'ARNs to associate with the Application Load Balancer.',
+    'migrate.archive': 'The directory or the ZIP file containing source code that\n'
+                       '`eb migrate --archive-only` previously generated.',
+    'migrate.vpc_config': """VPC config for the environment either in the form of a JSON file or
+a string. 
In both cases, config must have the format: + { + "id": "", + "publicip": "true|false", + "elbscheme": "public|private", + "ec2subnets": [list of subnets IDs for the EC2 instances], + "securitygroups": [list of security group IDs], + "elbsubnets": [list of subnets IDs for the load balancer] + } +""", + 'restore.env': 'The ID of the environment to restore', 'scale.number': 'number of desired instances', diff --git a/requirements.txt b/requirements.txt index 55be5f9a9..e6c0bccd0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,14 @@ -botocore>=1.35.0,<1.36.0 +botocore>=1.35.0,<2 cement==2.10.14 colorama>=0.4.6,<0.5 -pathspec==0.10.1 -python-dateutil>=2.1,<3.0.0 # use the same range that 'botocore' uses +pathspec==0.12.1 +python-dateutil>=2.1,<3.0.0 requests>=2.31,<3 setuptools>=20.0 semantic_version>=2.10.0,<2.11 termcolor>=2.4.0,<3 wcwidth>=0.2.13,<0.3 -PyYAML>=5.3.1,<6.1 # use the same range that 'aws-cli' uses +PyYAML>=5.3.1,<6.1 urllib3>=1.26.5,<2 packaging>=24.2,<25.0 +blessed>=1.20.0 diff --git a/setup.py b/setup.py index 2c0f9e709..f2f24acbd 100755 --- a/setup.py +++ b/setup.py @@ -55,8 +55,8 @@ def parse_requirements(filename): ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223', } -if not sys.platform.startswith('win'): - requires.append('blessed>=1.20.0') +if sys.platform.startswith('win'): + requires.append('pythonnet>=3.0.5,<4') setup_options = dict( @@ -73,7 +73,8 @@ def parse_requirements(filename): package_data={ 'ebcli.lib': ['botocoredata/*/*/*.json'], 'ebcli.containers': ['containerfiles/*'], - 'ebcli.labs': ['cloudwatchfiles/*.config']}, + 'ebcli.labs': ['cloudwatchfiles/*.config'], + 'ebcli.controllers': ['migrate_scripts/*.ps1']}, install_requires=requires, extras_require=extras_require, license="Apache License 2.0", diff --git a/tests/unit/controllers/test_migrate.py b/tests/unit/controllers/test_migrate.py new file mode 100644 index 000000000..2eb58375f --- /dev/null +++ b/tests/unit/controllers/test_migrate.py @@ -0,0 +1,931 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
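+#
+# These unit tests exercise the IIS-to-ALB translation helpers defined in
+# ebcli.controllers.migrate (translate_iis_to_alb, _sort_rules_by_specificity,
+# create_alb_rules, and related utilities). They are skipped on non-Windows
+# platforms because `eb migrate` only supports Windows.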
+ +import unittest +import sys +from unittest import skipIf + +import mock +from typing import List, Dict, Any +import xml.etree.ElementTree as ET + +from ebcli.controllers.migrate import ( + translate_iis_to_alb, + create_alb_rules, + _sort_rules_by_specificity, + get_site_configs, + SiteConfig, + convert_alb_rules_to_option_settings, + ConvertedALBRules, + get_listener_configs, +) + + +@skipIf(not sys.platform.startswith("win"), "`eb migrate` only supports Windows") +class TestMigrateController(unittest.TestCase): + """Tests for the migrate controller module.""" + + def test_translate_iis_to_alb_removes_anchors(self): + """Test that start and end anchors are removed.""" + self.assertEqual("/path", translate_iis_to_alb("^/path$")) + self.assertEqual("/path", translate_iis_to_alb("^/path")) + self.assertEqual("/path", translate_iis_to_alb("/path$")) + + def test_translate_iis_to_alb_simplifies_segment_patterns(self): + """Test that {segment} patterns are replaced with *.""" + self.assertEqual( + "/products/*/details", translate_iis_to_alb("/products/{id:int}/details") + ) + self.assertEqual( + "/users/*/profile", translate_iis_to_alb("/users/{name}/profile") + ) + self.assertEqual("/*/test", translate_iis_to_alb("/{any}/test")) + + def test_translate_iis_to_alb_simplifies_capture_groups(self): + """Test that capture groups are replaced with *.""" + self.assertEqual( + "/products/*/details", translate_iis_to_alb("/products/([0-9]+)/details") + ) + self.assertEqual( + "/users/*/profile", translate_iis_to_alb("/users/([a-zA-Z0-9]+)/profile") + ) + # Note: This test is currently failing because the function removes trailing asterisks + # The expected behavior should be to keep the asterisk + self.assertEqual("/articles/*", translate_iis_to_alb("/articles/(.*)")) + + def test_translate_iis_to_alb_simplifies_character_classes(self): + """Test that character classes are replaced with *.""" + self.assertEqual( + "/products/*/details", translate_iis_to_alb("/products/[0-9]+/details") + ) + self.assertEqual( + "/users/*/profile", translate_iis_to_alb("/users/[a-zA-Z0-9]+/profile") + ) + + def test_translate_iis_to_alb_replaces_plus_with_asterisk(self): + """Test that + quantifier is replaced with *.""" + self.assertEqual("/files/?*", translate_iis_to_alb("/files/.+")) + self.assertEqual("/api/?*", translate_iis_to_alb("/api/.+")) + + def test_translate_iis_to_alb_ensures_leading_slash(self): + """Test that patterns without leading slash get one added.""" + self.assertEqual("/path", translate_iis_to_alb("path")) + self.assertEqual("/api/v1", translate_iis_to_alb("api/v1")) + self.assertEqual("/products/*", translate_iis_to_alb("products/(.*)")) + + def test_translate_iis_to_alb_removes_double_asterisks(self): + """Test that double asterisks are simplified to single asterisk.""" + self.assertEqual("/path/*", translate_iis_to_alb("/path/(.*)(.*)")) + self.assertEqual("/api/*", translate_iis_to_alb("/api/[0-9]+[a-z]+")) + + def test_translate_iis_to_alb_removes_trailing_asterisk(self): + """Test that trailing asterisks are removed.""" + self.assertEqual("/path/*", translate_iis_to_alb("/path/(.*)")) + self.assertEqual("/api/v1/*", translate_iis_to_alb("/api/v1/(.*)")) + + def test_translate_iis_to_alb_complex_patterns(self): + """Test complex pattern combinations.""" + self.assertEqual( + "/api/v*/users/*/profile", + translate_iis_to_alb("^/api/v[0-9]+/users/([a-zA-Z0-9]+)/profile$"), + ) + self.assertEqual("/search/*", translate_iis_to_alb("/search/(.*)")) + self.assertEqual( + "/products/*/reviews/*", 
+ translate_iis_to_alb("/products/{id}/reviews/[0-9]+"), + ) + + +@skipIf(not sys.platform.startswith("win"), "`eb migrate` only supports Windows") +class TestSortRulesBySpecificity(unittest.TestCase): + """Tests for the _sort_rules_by_specificity function.""" + + def test_sort_rules_by_specificity_empty_list(self): + """Test sorting an empty list of rules.""" + rules = [] + sorted_rules = _sort_rules_by_specificity(rules) + self.assertEqual(sorted_rules, []) + + def test_sort_rules_by_specificity_no_conditions(self): + """Test sorting rules with no conditions.""" + rules = [ + {"Actions": [{"Type": "forward"}], "Protocol": "http"}, + {"Actions": [{"Type": "forward"}], "Protocol": "https"}, + ] + sorted_rules = _sort_rules_by_specificity(rules) + # Order should remain the same since no conditions to sort by + self.assertEqual(sorted_rules, rules) + + def test_sort_rules_by_specificity_host_header_only(self): + """Test sorting rules with only host-header conditions.""" + rules = [ + { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "host-header", "Values": ["example.com"]}], + }, + { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "host-header", "Values": ["api.example.com"]}], + }, + ] + sorted_rules = _sort_rules_by_specificity(rules) + # Both rules have the same specificity (host-header only), so order should remain + self.assertEqual(sorted_rules, rules) + + def test_sort_rules_by_specificity_path_pattern_only(self): + """Test sorting rules with only path-pattern conditions.""" + rules = [ + { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "path-pattern", "Values": ["/api/*"]}], + }, + { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "path-pattern", "Values": ["/users/*"]}], + }, + ] + sorted_rules = _sort_rules_by_specificity(rules) + # Both rules have the same specificity (path-pattern only), so order should remain + self.assertEqual(sorted_rules, rules) + + def test_sort_rules_by_specificity_mixed_conditions(self): + """Test sorting rules with mixed condition types.""" + # Define rules with different specificity levels + path_only_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "path-pattern", "Values": ["/api/*"]}], + } + host_only_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [{"Field": "host-header", "Values": ["example.com"]}], + } + both_conditions_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [ + {"Field": "host-header", "Values": ["api.example.com"]}, + {"Field": "path-pattern", "Values": ["/v1/*"]}, + ], + } + no_conditions_rule = {"Actions": [{"Type": "forward"}], "Protocol": "http"} + + # Test with rules in different orders + rules = [ + path_only_rule, + host_only_rule, + both_conditions_rule, + no_conditions_rule, + ] + sorted_rules = _sort_rules_by_specificity(rules) + + # Verify the sorting order: both_conditions > path_only > host_only > no_conditions + # Instead of comparing entire rule objects, check their specificity scores + self.assertEqual(sorted_rules[0]["Protocol"], both_conditions_rule["Protocol"]) + self.assertEqual( + len(sorted_rules[0]["Conditions"]), 2 + ) # Both host and path conditions + self.assertEqual(sorted_rules[1]["Protocol"], path_only_rule["Protocol"]) + self.assertEqual(sorted_rules[1]["Conditions"][0]["Field"], "path-pattern") + self.assertEqual(sorted_rules[2]["Protocol"], 
host_only_rule["Protocol"]) + self.assertEqual(sorted_rules[2]["Conditions"][0]["Field"], "host-header") + self.assertEqual( + sorted_rules[3], no_conditions_rule + ) # No conditions rule can be compared directly + + # Test with a different initial order + rules = [ + no_conditions_rule, + host_only_rule, + path_only_rule, + both_conditions_rule, + ] + sorted_rules = _sort_rules_by_specificity(rules) + + # Verify the sorting order remains consistent + self.assertEqual( + len(sorted_rules[0]["Conditions"]), 2 + ) # Both host and path conditions + self.assertEqual(sorted_rules[1]["Conditions"][0]["Field"], "path-pattern") + self.assertEqual(sorted_rules[2]["Conditions"][0]["Field"], "host-header") + self.assertEqual(sorted_rules[3], no_conditions_rule) + + def test_sort_rules_by_specificity_multiple_same_type(self): + """Test sorting rules with multiple conditions of the same type.""" + # Rule with multiple path patterns + multiple_paths_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [ + {"Field": "path-pattern", "Values": ["/api/*"]}, + {"Field": "path-pattern", "Values": ["/v1/*"]}, + ], + } + + # Rule with multiple host headers + multiple_hosts_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [ + {"Field": "host-header", "Values": ["api.example.com"]}, + {"Field": "host-header", "Values": ["dev.example.com"]}, + ], + } + + # Rule with both types + mixed_rule = { + "Actions": [{"Type": "forward"}], + "Protocol": "http", + "Conditions": [ + {"Field": "host-header", "Values": ["example.com"]}, + {"Field": "path-pattern", "Values": ["/users/*"]}, + ], + } + + rules = [multiple_paths_rule, multiple_hosts_rule, mixed_rule] + sorted_rules = _sort_rules_by_specificity(rules) + + self.assertEqual(multiple_paths_rule, sorted_rules[0]) + self.assertEqual(mixed_rule, sorted_rules[1]) + self.assertEqual(multiple_hosts_rule, sorted_rules[2]) + + +@skipIf(not sys.platform.startswith("win"), "`eb migrate` only supports Windows") +class TestCreateAlbRules(unittest.TestCase): + """Tests for the create_alb_rules function.""" + + def test_create_alb_rules_basic_sites(self): + """Test creating ALB rules from basic site configurations.""" + # Create a single HTTP site + site = SiteConfig( + name="Default Web Site", + binding_info="*:80:example.com", + physical_path="C:\\inetpub\\wwwroot", + protocol="http", + ) + + site.rewrite_rules = [ + { + "name": "Rewrite to index", + "pattern": "^/home/", + "action_type": "Rewrite", + "action_url": "index.html", + } + ] + + # Test with a single HTTP site + rules = create_alb_rules([site]) + + # Verify basic structure + self.assertIsInstance(rules, list) + self.assertEqual(len(rules), 2) # Host rule + rewrite rule + + # Check host-based rule + host_rule = next( + ( + r + for r in rules + if "Conditions" in r and r["Conditions"][0]["Field"] == "host-header" + ), + None, + ) + self.assertIsNotNone(host_rule) + self.assertEqual(host_rule["Protocol"], "http") + self.assertEqual(host_rule["Priority"], 1) # Priority should be assigned + self.assertEqual(host_rule["Conditions"][0]["Values"][0], "example.com") + + # Check target group in actions + self.assertEqual(host_rule["Actions"][0]["Type"], "forward") + self.assertIn( + "TargetGroupArn", + host_rule["Actions"][0]["ForwardConfig"]["TargetGroups"][0], + ) + self.assertIn( + "80", + host_rule["Actions"][0]["ForwardConfig"]["TargetGroups"][0][ + "TargetGroupArn" + ], + ) + + def test_create_alb_rules_multiple_sites(self): + """Test creating ALB rules from 
multiple site configurations.""" + # Create multiple sites + site1 = SiteConfig( + name="Default Web Site", + binding_info="*:80:example.com", + physical_path="C:\\inetpub\\wwwroot", + protocol="http", + ) + + site1.rewrite_rules = [ + { + "name": "Rewrite to index", + "pattern": "^/home/", + "action_type": "Rewrite", + "action_url": "index.html", + } + ] + + site2 = SiteConfig( + name="API Site", + binding_info="*:8080:api.example.com", + physical_path="C:\\inetpub\\apiroot", + protocol="http", + ) + + site2.rewrite_rules = [ + { + "name": "API version rewrite", + "pattern": "^/api/v1/(.+)", + "action_type": "Rewrite", + "action_url": "api.php?path=$1", + }, + { + "name": "API docs rewrite", + "pattern": "^/docs/(.*)", + "action_type": "Rewrite", + "action_url": "documentation.html", + }, + ] + + site3 = SiteConfig( + name="Secure Site", + binding_info="*:443:secure.example.com", + physical_path="C:\\inetpub\\secureroot", + protocol="https", + ) + + rules = create_alb_rules([site1, site2, site3]) + + # Should have rules for all sites and their rewrite rules + self.assertEqual(len(rules), 6) # 3 host rules + 3 rewrite rules + + # Verify protocols + http_rules = [r for r in rules if r["Protocol"] == "http"] + https_rules = [r for r in rules if r["Protocol"] == "https"] + self.assertEqual(len(http_rules), 5) # 2 sites + 3 rewrite rules + self.assertEqual(len(https_rules), 1) # 1 site + + # Verify priorities are assigned sequentially + priorities = [r["Priority"] for r in rules] + self.assertEqual(sorted(priorities), list(range(1, len(rules) + 1))) + + def test_create_alb_rules_rewrite_patterns(self): + """Test that rewrite patterns are properly translated to ALB patterns.""" + # Create site with rewrite rules + site = SiteConfig( + name="API Site", + binding_info="*:8080:api.example.com", + physical_path="C:\\inetpub\\apiroot", + protocol="http", + ) + + site.rewrite_rules = [ + { + "name": "API version rewrite", + "pattern": "^/api/v1/(.+)", + "action_type": "Rewrite", + "action_url": "api.php?path=$1", + }, + { + "name": "API docs rewrite", + "pattern": "^/docs/(.*)", + "action_type": "Rewrite", + "action_url": "documentation.html", + }, + ] + + rules = create_alb_rules([site]) + + # Find the rule for API version rewrite + api_rule = next( + ( + r + for r in rules + if "Conditions" in r + and any( + c["Field"] == "path-pattern" and "/api/v1/*" in c["Values"] + for c in r["Conditions"] + ) + ), + None, + ) + + self.assertIsNotNone(api_rule) + path_condition = next( + c for c in api_rule["Conditions"] if c["Field"] == "path-pattern" + ) + self.assertEqual(path_condition["Values"][0], "/api/v1/*") + + # Find the rule for docs rewrite + docs_rule = next( + ( + r + for r in rules + if "Conditions" in r + and any( + c["Field"] == "path-pattern" and "/docs/*" in c["Values"] + for c in r["Conditions"] + ) + ), + None, + ) + + self.assertIsNotNone(docs_rule) + path_condition = next( + c for c in docs_rule["Conditions"] if c["Field"] == "path-pattern" + ) + self.assertEqual(path_condition["Values"][0], "/docs/*") + + def test_create_alb_rules_rule_specificity_sorting(self): + """Test that rules are sorted by specificity.""" + # Create site with both host header and path pattern rules + site = SiteConfig( + name="Complex Site", + binding_info="*:80:complex.example.com", + physical_path="C:\\inetpub\\complexroot", + protocol="http", + ) + + site.rewrite_rules = [ + { + "name": "Path only rule", + "pattern": "^/path/", + "action_type": "Rewrite", + "action_url": "path.html", + }, + { + "name": "Host 
and path rule", + "pattern": "^/specific/", + "action_type": "Rewrite", + "action_url": "specific.html", + }, + ] + + rules = create_alb_rules([site]) + + # Rules with both host and path conditions should have higher priority (lower number) + host_and_path_rule = next( + (r for r in rules if "Conditions" in r and len(r["Conditions"]) > 1), None + ) + + path_only_rule = next( + ( + r + for r in rules + if "Conditions" in r + and len(r["Conditions"]) == 1 + and r["Conditions"][0]["Field"] == "path-pattern" + ), + None, + ) + + if host_and_path_rule and path_only_rule: + self.assertLess(host_and_path_rule["Priority"], path_only_rule["Priority"]) + + def test_create_alb_rules_empty_input(self): + """Test behavior with empty input.""" + rules = create_alb_rules([]) + self.assertEqual(rules, []) + + def test_create_alb_rules_non_http_protocols(self): + """Test that only HTTP/HTTPS protocols are processed.""" + # Create a site with non-HTTP protocol + site = SiteConfig( + name="FTP Site", + binding_info="*:21:ftp.example.com", + physical_path="C:\\inetpub\\ftproot", + protocol="ftp", + ) + + rules = create_alb_rules([site]) + self.assertEqual(rules, []) # Should ignore non-HTTP/HTTPS sites + + +@skipIf(not sys.platform.startswith("win"), "`eb migrate` only supports Windows") +class TestGetSiteConfigs(unittest.TestCase): + """Tests for the get_site_configs function.""" + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_url_rewrites(self, mock_parse, mock_path_exists): + """Test that URL rewrite rules are correctly parsed from web.config.""" + # Setup mock to indicate web.config exists + mock_path_exists.return_value = True + + # Create a mock XML structure for web.config with URL rewrite rules + mock_xml = """ + + + + + + + + + + + + + + + + + + + + """ + + # Create a mock ElementTree + mock_root = ET.fromstring(mock_xml) + mock_tree = mock.MagicMock() + mock_tree.getroot.return_value = mock_root + mock_parse.return_value = mock_tree + + # Create mock site + site = MockSite( + name="API Site", + physical_path="C:\\inetpub\\apiroot", + binding_info="*:80:api.example.com", + ) + + # Call the function + site_configs = get_site_configs([site]) + + # Verify results + self.assertEqual(len(site_configs), 1) + self.assertEqual(site_configs[0].name, "API Site") + + # Check that rewrite rules were parsed correctly + self.assertEqual(len(site_configs[0].rewrite_rules), 2) + + # Check first rule + self.assertEqual(site_configs[0].rewrite_rules[0]["name"], "Redirect to HTTPS") + self.assertEqual(site_configs[0].rewrite_rules[0]["pattern"], "(.*)") + self.assertEqual(site_configs[0].rewrite_rules[0]["action_type"], "Redirect") + self.assertEqual( + site_configs[0].rewrite_rules[0]["action_url"], "https://{HTTP_HOST}/{R:1}" + ) + + # Check second rule + self.assertEqual(site_configs[0].rewrite_rules[1]["name"], "API Rewrite") + self.assertEqual(site_configs[0].rewrite_rules[1]["pattern"], "^api/v1/(.*)$") + self.assertEqual(site_configs[0].rewrite_rules[1]["action_type"], "Rewrite") + self.assertEqual( + site_configs[0].rewrite_rules[1]["action_url"], "api.php?path={R:1}" + ) + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_multiple_rules_sections( + self, mock_parse, mock_path_exists + ): + """Test parsing web.config with multiple sections.""" + # Setup mock to indicate web.config exists + mock_path_exists.return_value = True + + # Create a mock XML structure with multiple sections + 
mock_xml = """ + + + + + + + + + + + + + + + + + + + """ + + # Create a mock ElementTree + mock_root = ET.fromstring(mock_xml) + mock_tree = mock.MagicMock() + mock_tree.getroot.return_value = mock_root + mock_parse.return_value = mock_tree + + # Create mock site + site = MockSite( + name="Multi Rules Site", + physical_path="C:\\inetpub\\multiroot", + binding_info="*:80:multi.example.com", + ) + + # Call the function + site_configs = get_site_configs([site]) + + # Verify results + self.assertEqual(len(site_configs), 1) + + # Check that both rules were parsed correctly + self.assertEqual(len(site_configs[0].rewrite_rules), 2) + + # Check first rule + self.assertEqual(site_configs[0].rewrite_rules[0]["name"], "Rule1") + self.assertEqual(site_configs[0].rewrite_rules[0]["pattern"], "^site1/") + self.assertEqual(site_configs[0].rewrite_rules[0]["action_type"], "Rewrite") + self.assertEqual(site_configs[0].rewrite_rules[0]["action_url"], "index1.html") + + # Check second rule + self.assertEqual(site_configs[0].rewrite_rules[1]["name"], "Rule2") + self.assertEqual(site_configs[0].rewrite_rules[1]["pattern"], "^site2/") + self.assertEqual(site_configs[0].rewrite_rules[1]["action_type"], "Rewrite") + self.assertEqual(site_configs[0].rewrite_rules[1]["action_url"], "index2.html") + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_malformed_web_config( + self, mock_parse, mock_path_exists + ): + """Test handling of malformed web.config files.""" + # Setup mock to indicate web.config exists + mock_path_exists.return_value = True + + # Make parse throw an exception to simulate malformed XML + mock_parse.side_effect = ET.ParseError("XML syntax error") + + # Create mock site + site = MockSite( + name="Malformed Config Site", + physical_path="C:\\inetpub\\badconfig", + binding_info="*:80:bad.example.com", + ) + + # Call the function - should not raise an exception + site_configs = get_site_configs([site]) + + # Verify results - site should be included but with no rewrite rules + self.assertEqual(len(site_configs), 1) + self.assertEqual(site_configs[0].name, "Malformed Config Site") + self.assertEqual(site_configs[0].rewrite_rules, []) + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_no_rewrite_section( + self, mock_parse, mock_path_exists + ): + """Test handling of web.config files with no rewrite section.""" + # Setup mock to indicate web.config exists + mock_path_exists.return_value = True + + # Create a mock XML structure with no rewrite section + mock_xml = """ + + + + + + + + + + """ + + # Create a mock ElementTree + mock_root = ET.fromstring(mock_xml) + mock_tree = mock.MagicMock() + mock_tree.getroot.return_value = mock_root + mock_parse.return_value = mock_tree + + # Create mock site + site = MockSite( + name="No Rewrite Site", + physical_path="C:\\inetpub\\norewrite", + binding_info="*:80:norewrite.example.com", + ) + + # Call the function + site_configs = get_site_configs([site]) + + # Verify results - site should be included but with no rewrite rules + self.assertEqual(len(site_configs), 1) + self.assertEqual(site_configs[0].name, "No Rewrite Site") + self.assertEqual(site_configs[0].rewrite_rules, []) + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_empty_rules_section( + self, mock_parse, mock_path_exists + ): + """Test handling of web.config files with empty rules section.""" + # Setup mock to indicate 
web.config exists + mock_path_exists.return_value = True + + # Create a mock XML structure with empty rules section + mock_xml = """ + + + + + + + + + """ + + # Create a mock ElementTree + mock_root = ET.fromstring(mock_xml) + mock_tree = mock.MagicMock() + mock_tree.getroot.return_value = mock_root + mock_parse.return_value = mock_tree + + # Create mock site + site = MockSite( + name="Empty Rules Site", + physical_path="C:\\inetpub\\emptyrules", + binding_info="*:80:emptyrules.example.com", + ) + + # Call the function + site_configs = get_site_configs([site]) + + # Verify results - site should be included but with no rewrite rules + self.assertEqual(len(site_configs), 1) + self.assertEqual(site_configs[0].name, "Empty Rules Site") + self.assertEqual(site_configs[0].rewrite_rules, []) + + @mock.patch("os.path.exists") + @mock.patch("xml.etree.ElementTree.parse") + def test_get_site_configs_with_complex_rewrite_rules( + self, mock_parse, mock_path_exists + ): + """Test parsing of complex rewrite rules with conditions.""" + # Setup mock to indicate web.config exists + mock_path_exists.return_value = True + + # Create a mock XML structure with complex rewrite rules + mock_xml = """ + + + + + + + + + + + + + + + + + + + + + """ + + # Create a mock ElementTree + mock_root = ET.fromstring(mock_xml) + mock_tree = mock.MagicMock() + mock_tree.getroot.return_value = mock_root + mock_parse.return_value = mock_tree + + # Create mock site + site = MockSite( + name="Complex Rules Site", + physical_path="C:\\inetpub\\complexrules", + binding_info="*:80:complex.example.com", + ) + + # Call the function + site_configs = get_site_configs([site]) + + # Verify results + self.assertEqual(len(site_configs), 1) + + # Check that both rules were parsed correctly + self.assertEqual(len(site_configs[0].rewrite_rules), 2) + + # Check first rule + self.assertEqual(site_configs[0].rewrite_rules[0]["name"], "Complex Rule") + self.assertEqual( + site_configs[0].rewrite_rules[0]["pattern"], "^products/([0-9]+)/details$" + ) + self.assertEqual(site_configs[0].rewrite_rules[0]["action_type"], "Rewrite") + self.assertEqual( + site_configs[0].rewrite_rules[0]["action_url"], + "product_details.php?id={R:1}", + ) + + # Check second rule + self.assertEqual(site_configs[0].rewrite_rules[1]["name"], "Regex Capture Rule") + self.assertEqual( + site_configs[0].rewrite_rules[1]["pattern"], + "^blog/([a-zA-Z0-9\-]+)/([0-9]{4})/([0-9]{2})/?$", + ) + self.assertEqual(site_configs[0].rewrite_rules[1]["action_type"], "Rewrite") + self.assertEqual( + site_configs[0].rewrite_rules[1]["action_url"], + "blog.php?slug={R:1}&year={R:2}&month={R:3}", + ) + + +# Mock classes for IIS objects +class MockVirtualDirectory: + """Mock for IIS VirtualDirectory object.""" + + def __init__(self, path: str, physical_path: str): + self.Path = path + self.PhysicalPath = physical_path + + +class MockVirtualDirectoryCollection: + """Mock for IIS VirtualDirectory collection.""" + + def __init__(self, virtual_directories: List[MockVirtualDirectory]): + self._virtual_directories = virtual_directories + + def __getitem__(self, key): + for vdir in self._virtual_directories: + if vdir.Path == key: + return vdir + raise KeyError(f"VirtualDirectory with path '{key}' not found") + + +class MockApplication: + """Mock for IIS Application object.""" + + def __init__(self, path: str, physical_path: str): + self.Path = path + self._virtual_directories = [MockVirtualDirectory("/", physical_path)] + self.VirtualDirectories = MockVirtualDirectoryCollection( + 
self._virtual_directories + ) + + +class MockApplicationCollection: + """Mock for IIS Application collection.""" + + def __init__(self, applications: List[MockApplication]): + self._applications = applications + + def __getitem__(self, key): + for app in self._applications: + if app.Path == key: + return app + raise KeyError(f"Application with path '{key}' not found") + + +class MockBinding: + """Mock for IIS Binding object.""" + + def __init__(self, binding_info: str, protocol: str = "http"): + self.BindingInformation = binding_info + self.Protocol = protocol + + +class MockBindingCollection: + """Mock for IIS Binding collection.""" + + def __init__(self, bindings: List[MockBinding]): + self._bindings = bindings + + def __iter__(self): + return iter(self._bindings) + + +class MockSite: + """Mock for IIS Site object.""" + + def __init__( + self, name: str, physical_path: str, binding_info: str, protocol: str = "http" + ): + self.Name = name + self._applications = [MockApplication("/", physical_path)] + self.Applications = MockApplicationCollection(self._applications) + self.Bindings = MockBindingCollection([MockBinding(binding_info, protocol)]) + + +class MockServerManager: + """Mock for IIS ServerManager object.""" + + def __init__(self, sites: List[MockSite]): + self.Sites = sites diff --git a/tests/unit/integration/test_migrate.py b/tests/unit/integration/test_migrate.py new file mode 100644 index 000000000..8e84e164a --- /dev/null +++ b/tests/unit/integration/test_migrate.py @@ -0,0 +1,2176 @@ +import shutil +import typing +from unittest import skipUnless + +import pytest +import os +import subprocess +from pathlib import Path +from unittest.mock import patch, MagicMock +import unittest +import sys + +if sys.platform.startswith("win"): + import clr + + clr.AddReference("System.Reflection") + clr.AddReference(r"C:\Windows\System32\inetsrv\Microsoft.Web.Administration.dll") + clr.AddReference("System") + clr.AddReference("System.Core") + try: + clr.AddReference("System.DirectoryServices.AccountManagement") + from System.DirectoryServices.AccountManagement import ( + PrincipalContext, + ContextType, + ContextType, + UserPrincipal, + PrincipalSearcher, + ) + from System.Collections.Generic import HashSet, Queue + from System.Reflection import Assembly + from Microsoft.Web.Administration import ( + ServerManager, + Binding, + Site, + Application, + ) + from System.Diagnostics import Process, ProcessStartInfo + from System.Runtime.InteropServices import COMException + except: + # TODO: Make this raise on source machine; Pass this on dumb-terminals + pass + + import json + from ebcli.objects.exceptions import NotAnEC2Instance, NotFoundError + + # TODO: preserve current state of the IIS server prior to execution of any of the tests + @skipUnless(os.getenv('EB_IIS_TESTS') == '1', "Run this test suite only if explicitly instructed to.") + class TestEBMigrateIntegration(unittest.TestCase): + @classmethod + def setUpClass(cls): + """Setup that runs once for the entire test class""" + test_dir = Path(".") / "testDir" + test_dir.mkdir(exist_ok=True) + cls.state_file = test_dir / "iis_state_file.json" + cls.backup_state() + cls.original_dir = os.getcwd() + os.chdir(test_dir) + + # Save existing firewall rules for HTTP/HTTPS ports + cls.saved_firewall_rules = cls.get_existing_firewall_rules() + + # After backing up, wipe all sites + server_manager = ServerManager() + for site in list(server_manager.Sites): + server_manager.Sites.Remove(site) + server_manager.CommitChanges() + + @staticmethod + def 
get_existing_firewall_rules(): + """Get existing firewall rules for HTTP/HTTPS ports""" + try: + result = subprocess.run( + [ + "powershell", + "-Command", + 'Get-NetFirewallRule | Where-Object { $_.DisplayName -like "*EB-CLI-Test*" } | Select-Object -Property DisplayName | ConvertTo-Json', + ], + capture_output=True, + text=True, + check=True, + ) + + if result.stdout.strip(): + try: + return json.loads(result.stdout) + except json.JSONDecodeError: + return [] + return [] + except subprocess.CalledProcessError: + return [] + + def setUp(self): + """Setup that runs before each test method""" + # Clear any sites that might exist from previous tests + self.server_manager = ServerManager() + for site in list(self.server_manager.Sites): + self.server_manager.Sites.Remove(site) + self.server_manager.CommitChanges() + + # Only create Default Web Site for tests that need it + self.default_site = self.server_manager.Sites.Add( + "Default Web Site", "http", "*:80:", "c:\\inetpub\\wwwroot" + ) + self.server_manager.CommitChanges() + + @staticmethod + def get_existing_firewall_rules(): + """Get existing firewall rules for HTTP/HTTPS ports""" + try: + result = subprocess.run( + [ + "powershell", + "-Command", + 'Get-NetFirewallRule | Where-Object { $_.DisplayName -like "*EB-CLI-Test*" } | Select-Object -Property DisplayName | ConvertTo-Json', + ], + capture_output=True, + text=True, + check=True, + ) + + if result.stdout.strip(): + try: + return json.loads(result.stdout) + except json.JSONDecodeError: + return [] + return [] + except subprocess.CalledProcessError: + return [] + + def create_test_firewall_rules(self, http_port, https_port): + """Create test firewall rules for HTTP and HTTPS ports""" + try: + # Create HTTP rule (Allow) + subprocess.run( + [ + "powershell", + "-Command", + f'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTP" -Direction Inbound -Action Allow -Protocol TCP -LocalPort {http_port} -Enabled True', + ], + check=True, + ) + + # Create HTTPS rule (Block) + subprocess.run( + [ + "powershell", + "-Command", + f'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTPS" -Direction Inbound -Action Block -Protocol TCP -LocalPort {https_port} -Enabled True', + ], + check=True, + ) + except subprocess.CalledProcessError as e: + pytest.skip(f"Failed to create firewall rules: {e}") + + def cleanup_test_firewall_rules(self): + """Remove test firewall rules""" + try: + subprocess.run( + [ + "powershell", + "-Command", + 'Remove-NetFirewallRule -DisplayName "EB-CLI-Test-HTTP" -ErrorAction SilentlyContinue', + ] + ) + + subprocess.run( + [ + "powershell", + "-Command", + 'Remove-NetFirewallRule -DisplayName "EB-CLI-Test-HTTPS" -ErrorAction SilentlyContinue', + ] + ) + except Exception: + pass + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_single_default_web_site( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test basic migration of Default Web Site with default settings. 
+ """ + from ebcli.core.ebcore import EB + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["EBMigratedApp"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app + app = EB(argv=["migrate", "--archive-only"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = upload_target_dir / "aws-windows-deployment-manifest.json" + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "msDeploy" in manifest["deployments"] + + # Verify Default Web Site deployment configuration + ms_deploy_sections = manifest["deployments"]["msDeploy"] + assert any( + section["name"] == "Default Web Site" + for section in ms_deploy_sections + ) + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_single_custom_site( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of a custom site running on port 8080. 
+ """ + from ebcli.core.ebcore import EB + + # Remove Default Web Site and create custom site on port 8080 + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + server_manager.Sites.Remove(default_site) + custom_site = server_manager.Sites.Add( + "Custom Site", "http", "*:8080:", "c:\\inetpub\\customsite" + ) + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["CustomSite"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with specific site name + app = EB(argv=["migrate", "--archive-only", "--sites", "Custom Site"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = upload_target_dir / "aws-windows-deployment-manifest.json" + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "custom" in manifest["deployments"] + + # Verify Custom Site deployment configuration + custom_sections = manifest["deployments"]["custom"] + assert any( + section["name"] == "Custom Site" for section in custom_sections + ) + + # Verify installation script exists + install_script_path = None + for section in custom_sections: + if section["name"] == "Custom Site": + install_script_path = section["scripts"]["install"]["file"] + break + + assert install_script_path is not None + full_script_path = upload_target_dir / install_script_path + assert full_script_path.exists() + + # Verify port 8080 is configured in the installation script + with open(full_script_path) as script_file: + script_content = script_file.read() + assert "*:8080:" in script_content + + @classmethod + def backup_state(cls): + """Persist current IIS state including ARR proxy configuration""" + server_manager = ServerManager() + + # Get ARR configuration sections + config = server_manager.GetApplicationHostConfiguration() + arr_sections = { + "proxy": "system.webServer/proxy", + "rewrite": "system.webServer/rewrite", + "caching": "system.webServer/caching", + } + + arr_config = {} + for section_name, section_path in arr_sections.items(): + try: + section = config.GetSection(section_path) + if section: + arr_config[section_name] = {} + for attr in section.Attributes: + if not attr.IsInheritedFromDefaultValue: + # Handle TimeSpan values specially + if attr.Value.__class__.__name__.endswith("TimeSpan"): + arr_config[section_name][ + attr.Name + ] = attr.Value.TotalSeconds + else: + arr_config[section_name][attr.Name] = attr.Value + except Exception as e: + print(f"Warning: Could not backup ARR section {section_name}: {e}") + + state = { + "arr_config": arr_config, + "sites": [ + { + "name": site.Name, + "bindings": [b.BindingInformation for b in site.Bindings], + "applications": [ + { + "path": app.Path, + "pool": app.ApplicationPoolName, + "vdirs": [ + { + "path": vdir.Path, + "physical_path": vdir.PhysicalPath, + } + for vdir in app.VirtualDirectories + ], + } + for app 
in site.Applications + ], + } + for site in server_manager.Sites + ], + } + + with open(cls.state_file, "w") as f: + json.dump(state, f, indent=2) + + @classmethod + def restore_state(cls): + """Restore IIS state including ARR configuration""" + server_manager = ServerManager() + + if not cls.state_file.exists(): + return + + with open(cls.state_file, "r") as f: + state = json.load(f) + + # Restore ARR configuration first + if "arr_config" in state: + config = server_manager.GetApplicationHostConfiguration() + for section_name, attributes in state["arr_config"].items(): + section_path = f"system.webServer/{section_name}" + try: + section = config.GetSection(section_path) + if section: + for attr_name, attr_value in attributes.items(): + # If this is a timeout attribute, convert seconds back to TimeSpan + if attr_name == "timeout": + from System import TimeSpan + + attr_value = TimeSpan.FromSeconds(attr_value) + section.SetAttributeValue(attr_name, attr_value) + except Exception as e: + print( + f"Warning: Could not restore ARR section {section_name}: {e}" + ) + + # Remove all current sites except those in saved state + saved_site_names = {site["name"] for site in state["sites"]} + for site in list(server_manager.Sites): + if site.Name not in saved_site_names: + server_manager.Sites.Remove(site) + + # Restore sites from saved state + for site_state in state["sites"]: + site = server_manager.Sites.Add( + site_state["name"], "http", "*:80:", "c:\\inetpub\\wwwroot" + ) + + # Clear default binding + site.Bindings.Clear() + + # Restore bindings + for binding in site_state["bindings"]: + site.Bindings.Add(binding, "http") + + # Restore applications and virtual directories + for app_state in site_state["applications"]: + if app_state["path"] != "/": # Root app already exists + app = site.Applications.Add( + app_state["path"], + app_state["vdirs"][0]["physical_path"], + ) + app.ApplicationPoolName = app_state["pool"] + + # Add additional virtual directories + app = site.Applications[app_state["path"]] + for vdir_state in app_state["vdirs"][1:]: + app.VirtualDirectories.Add( + vdir_state["path"], vdir_state["physical_path"] + ) + + server_manager.CommitChanges() + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_multiple_sites( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of multiple custom sites running on different ports. 
+ """ + from ebcli.core.ebcore import EB + + # Remove Default Web Site and create multiple custom sites on different ports + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + server_manager.Sites.Remove(default_site) + + # Create first custom site on port 8080 + custom_site1 = server_manager.Sites.Add( + "Custom Site 1", "http", "*:8080:", "c:\\inetpub\\customsite1" + ) + + # Create second custom site on port 8082 + custom_site2 = server_manager.Sites.Add( + "Custom Site 2", "http", "*:8082:", "c:\\inetpub\\customsite2" + ) + + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["MultiSiteEnv"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with multiple site names + app = EB( + argv=[ + "migrate", + "--archive-only", + "--sites", + "Custom Site 1,Custom Site 2", + ] + ) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = upload_target_dir / "aws-windows-deployment-manifest.json" + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "custom" in manifest["deployments"] + + # Verify both custom sites are in the deployment configuration + custom_sections = manifest["deployments"]["custom"] + + # Check for Custom Site 1 + site1_section = next( + ( + section + for section in custom_sections + if section["name"] == "Custom Site 1" + ), + None, + ) + assert site1_section is not None + + # Check for Custom Site 2 + site2_section = next( + ( + section + for section in custom_sections + if section["name"] == "Custom Site 2" + ), + None, + ) + assert site2_section is not None + + # Verify installation scripts exist and contain correct port bindings + # For Custom Site 1 + install_script_path1 = site1_section["scripts"]["install"]["file"] + full_script_path1 = upload_target_dir / install_script_path1 + assert full_script_path1.exists() + + with open(full_script_path1) as script_file: + script_content = script_file.read() + assert "*:8080:" in script_content + + # For Custom Site 2 + install_script_path2 = site2_section["scripts"]["install"]["file"] + full_script_path2 = upload_target_dir / install_script_path2 + assert full_script_path2.exists() + + with open(full_script_path2) as script_file: + script_content = script_file.read() + assert "*:8082:" in script_content + + # Verify that each site has its own installation script + assert install_script_path1 != install_script_path2 + + ms_deploy_section = manifest["deployments"]["msDeploy"] + + assert ms_deploy_section == [] + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + 
@patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_site_with_multiple_applications( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of Default Web Site with multiple applications. + Ensures that the manifest contains correct references to Default Web Site + and its additional application at "/application1". + + This test: + 1. Creates content in both application directories + 2. Runs the migration process + 3. Verifies the manifest structure + 4. Extracts and inspects the application bundles + 5. Checks for parameter files and archive.xml files + """ + import zipfile + import tempfile + import shutil + from ebcli.core.ebcore import EB + + # Setup Default Web Site with an additional application + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + + # Get the physical path of the default web site + default_physical_path = ( + default_site.Applications["/"].VirtualDirectories["/"].PhysicalPath + ) + + # Create content for the default web site + with open(os.path.join(default_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Default Web Site Root</h1><p>This is the main application.</p></body></html>

" + ) + + with open(os.path.join(default_physical_path, "web.config"), "w") as f: + f.write( + """ + + + + + + + + + """ + ) + + # Create physical path for the additional application + app_physical_path = "c:\\inetpub\\application1" + os.makedirs(app_physical_path, exist_ok=True) + + # Create content for the additional application + with open(os.path.join(app_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Application1</h1><p>This is the additional application.</p></body></html>

" + ) + + with open(os.path.join(app_physical_path, "web.config"), "w") as f: + f.write( + """ + + + + + + + + + """ + ) + + # Add an additional application to Default Web Site at "/application1" + additional_app = default_site.Applications.Add( + "/application1", app_physical_path + ) + additional_app.ApplicationPoolName = "DefaultAppPool" + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["DefaultWebSiteWithApps"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app + app = EB(argv=["migrate", "--archive-only"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = upload_target_dir / "aws-windows-deployment-manifest.json" + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "msDeploy" in manifest["deployments"] + + # Verify Default Web Site deployment configuration + ms_deploy_sections = manifest["deployments"]["msDeploy"] + + # Check for root application (Default Web Site) + default_site_section = next( + ( + section + for section in ms_deploy_sections + if section["name"] == "Default Web Site" + ), + None, + ) + assert default_site_section is not None + assert default_site_section["parameters"]["iisPath"] == "/" + assert ( + default_site_section["parameters"]["iisWebSite"] + == "Default Web Site" + ) + + # Check for additional application (/application1) + + app1_section = [ + section + for section in ms_deploy_sections + if section["name"] == "Default Web Site\\application1" + ][0] + + assert app1_section["parameters"]["iisPath"] == "/application1" + assert app1_section["parameters"]["iisWebSite"] == "Default Web Site" + + # Verify both application bundles exist + default_site_bundle = ( + upload_target_dir + / f"{default_site_section['parameters']['appBundle']}" + ) + assert default_site_bundle.exists() + + app1_bundle = ( + upload_target_dir / f"{app1_section['parameters']['appBundle']}" + ) + assert app1_bundle.exists() + + # Extract and inspect the application bundles + def extract_and_verify_bundle(bundle_path, expected_files): + temp_dir = tempfile.mkdtemp() + try: + with zipfile.ZipFile(bundle_path, "r") as zip_ref: + zip_ref.extractall(temp_dir) + + # Check for expected files + for file_path in expected_files: + full_path = os.path.join(temp_dir, file_path) + assert os.path.exists( + full_path + ), f"Expected file {file_path} not found in bundle" + + return temp_dir + except: + shutil.rmtree(temp_dir) + raise + + # Verify default site bundle contents + default_site_temp_dir = extract_and_verify_bundle( + default_site_bundle, + ["archive.xml", "parameters.xml", "systemInfo.xml"], + ) + + # Verify application1 bundle contents + app1_temp_dir = extract_and_verify_bundle( + app1_bundle, ["archive.xml", "parameters.xml", "systemInfo.xml"] + ) + + # Check for parameter files and verify their structure + with open( + os.path.join(default_site_temp_dir, "parameters.xml"), "r" 
+ ) as f: + params_content = f.read() + assert "IIS Web Application Name" in params_content + assert "Default Web Site" in params_content + + with open(os.path.join(app1_temp_dir, "parameters.xml"), "r") as f: + params_content = f.read() + assert "IIS Web Application Name" in params_content + assert "Default Web Site\\application1" in params_content + + # Clean up temp directories + shutil.rmtree(default_site_temp_dir) + shutil.rmtree(app1_temp_dir) + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_site_with_multiple_applications_and_virtual_directories( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of Default Web Site with multiple applications and virtual directories. + + This test: + 1. Creates Default Web Site with an additional application at "/application1" + 2. Adds virtual directories to both the Default Web Site ("/virtualdirectory1") and + the application ("/virtualdirectory2") + 3. Runs the migration process + 4. Verifies the manifest contains correct references to all components + """ + import zipfile + import tempfile + import shutil + from ebcli.core.ebcore import EB + + # Setup Default Web Site with an additional application and virtual directories + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + + # Get the physical path of the default web site + default_physical_path = ( + default_site.Applications["/"].VirtualDirectories["/"].PhysicalPath + ) + + # Create content for the default web site + with open(os.path.join(default_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Default Web Site Root</h1><p>This is the main application.</p></body></html>

" + ) + + # Create physical path for the additional application + app_physical_path = "c:\\inetpub\\application1" + os.makedirs(app_physical_path, exist_ok=True) + + # Create content for the additional application + with open(os.path.join(app_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Application1</h1><p>This is the additional application.</p></body></html>

" + ) + + # Create physical paths for virtual directories + vdir1_physical_path = "c:\\inetpub\\virtualdirectory1" + vdir2_physical_path = "c:\\inetpub\\virtualdirectory2" + os.makedirs(vdir1_physical_path, exist_ok=True) + os.makedirs(vdir2_physical_path, exist_ok=True) + + # Create content for virtual directories + with open(os.path.join(vdir1_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Virtual Directory 1</h1><p>This is virtual directory 1.</p></body></html>

" + ) + + with open(os.path.join(vdir2_physical_path, "index.html"), "w") as f: + f.write( + "

<html><body><h1>Virtual Directory 2</h1><p>This is virtual directory 2.</p></body></html>

" + ) + + # Add an additional application to Default Web Site at "/application1" + additional_app = default_site.Applications.Add( + "/application1", app_physical_path + ) + additional_app.ApplicationPoolName = "DefaultAppPool" + + # Add virtual directory to Default Web Site + default_site.Applications["/"].VirtualDirectories.Add( + "/virtualdirectory1", vdir1_physical_path + ) + + # Add virtual directory to the additional application + additional_app.VirtualDirectories.Add( + "/virtualdirectory2", vdir2_physical_path + ) + + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["DefaultWebSiteWithVDirs"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app + app = EB(argv=["migrate", "--archive-only"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = upload_target_dir / "aws-windows-deployment-manifest.json" + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "msDeploy" in manifest["deployments"] + assert "custom" in manifest["deployments"] + + # Verify Default Web Site deployment configuration + ms_deploy_sections = manifest["deployments"]["msDeploy"] + + # Check for root application (Default Web Site) using list comprehension + default_site_sections = [ + section + for section in ms_deploy_sections + if section["name"] == "Default Web Site" + ] + assert len(default_site_sections) > 0 + default_site_section = default_site_sections[0] + assert default_site_section["parameters"]["iisPath"] == "/" + assert ( + default_site_section["parameters"]["iisWebSite"] + == "Default Web Site" + ) + + # Check for additional application (/application1) using list comprehension + app1_sections = [ + section + for section in ms_deploy_sections + if section["name"] == "Default Web Site\\application1" + ] + assert len(app1_sections) > 0 + app1_section = app1_sections[0] + assert app1_section["parameters"]["iisPath"] == "/application1" + assert app1_section["parameters"]["iisWebSite"] == "Default Web Site" + + # Verify both application bundles exist + default_site_bundle = ( + upload_target_dir + / f"{default_site_section['parameters']['appBundle']}" + ) + assert default_site_bundle.exists() + + app1_bundle = ( + upload_target_dir / f"{app1_section['parameters']['appBundle']}" + ) + assert app1_bundle.exists() + + # Verify virtual directory permission script exists using list comprehension + custom_sections = manifest["deployments"]["custom"] + vdir_permission_sections = [ + section + for section in custom_sections + if section["name"] == "FixVirtualDirPermissions" + ] + assert len(vdir_permission_sections) > 0 + vdir_permission_section = vdir_permission_sections[0] + + # Verify the permission script file exists + permission_script_path = vdir_permission_section["scripts"]["install"][ + "file" + ] + full_script_path = upload_target_dir / permission_script_path + assert full_script_path.exists() + + 
# Verify the permission script contains paths to both virtual directories + with open(full_script_path) as script_file: + script_content = script_file.read() + assert vdir1_physical_path in script_content + assert vdir2_physical_path in script_content + + # Extract and inspect the application bundles to verify virtual directories are included + def extract_and_verify_bundle(bundle_path, expected_files): + temp_dir = tempfile.mkdtemp() + try: + with zipfile.ZipFile(bundle_path, "r") as zip_ref: + zip_ref.extractall(temp_dir) + + # Check for expected files + for file_path in expected_files: + full_path = os.path.join(temp_dir, file_path) + assert os.path.exists( + full_path + ), f"Expected file {file_path} not found in bundle" + + return temp_dir + except: + shutil.rmtree(temp_dir) + raise + + # Verify default site bundle contains virtual directory configuration + default_site_temp_dir = extract_and_verify_bundle( + default_site_bundle, + ["archive.xml", "parameters.xml", "systemInfo.xml"], + ) + + # Verify application1 bundle contains virtual directory configuration + app1_temp_dir = extract_and_verify_bundle( + app1_bundle, ["archive.xml", "parameters.xml", "systemInfo.xml"] + ) + + # Clean up temp directories + shutil.rmtree(default_site_temp_dir) + shutil.rmtree(app1_temp_dir) + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + @patch("ebcli.controllers.migrate.establish_instance_profile") + @patch("ebcli.operations.createops.create_default_service_role") + @patch("ebcli.controllers.migrate.get_unique_cname") + @patch("ebcli.controllers.migrate.do_encrypt_ebs_volumes") + @patch("ebcli.objects.requests.CreateEnvironmentRequest") + @patch("ebcli.operations.createops.make_new_env") + @patch( + "ebcli.controllers.migrate.commonops.elasticbeanstalk.application_version_exists" + ) + @patch( + "ebcli.controllers.migrate.commonops.elasticbeanstalk.get_storage_location" + ) + @patch("ebcli.controllers.migrate.commonops.s3.get_object_info") + @patch("ebcli.controllers.migrate.commonops.s3.upload_application_version") + def test_migrate_site_with_tags_passed_in_through_stdin( + self, + mock_upload_application_version, + mock_get_object_info, + mock_get_storage_location, + mock_application_version_exists, + mock_make_new_env, + mock_create_env_request, + mock_do_encrypt_ebs_volumes, + mock_get_unique_cname, + mock_create_service_role, + mock_establish_instance_profile, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of Default Web Site with tags parameter. + + This test: + 1. Ensures Default Web Site exists + 2. Invokes `eb migrate` with `--tags key1=val1,key2=val2` + 3. 
Verifies that create_app_version_and_environment is called with correct tags + """ + from ebcli.core.ebcore import EB + + # Setup mock responses + mock_create_env_request_object = MagicMock() + mock_create_env_request.return_value = mock_create_env_request_object + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_establish_instance_profile.return_value = ( + "aws-elasticbeanstalk-ec2-role" + ) + mock_create_service_role.return_value = "aws-elasticbeanstalk-service-role" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["DefaultWebSiteWithTags"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + mock_get_unique_cname.return_value = ( + "default-web-site-with-tags.elasticbeanstalk.com" + ) + mock_application_version_exists.return_value = None + mock_get_storage_location.return_value = "my-s3-bucket-location" + mock_get_object_info.side_effect = NotFoundError + + # Define the tags we'll pass to the command + tags = "key1=val1,key2=val2" + expected_tags = [ + {"Key": "key1", "Value": "val1"}, + {"Key": "key2", "Value": "val2"}, + ] + + # Create and run EB CLI app with tags parameter + app = EB(argv=["migrate", "--tags", tags]) + app.setup() + app.run() + + # Verify CreateEnvironmentRequest was called with the correct tags + call_kwargs = mock_create_env_request.call_args[1] + assert call_kwargs["app_name"] == "DefaultWebSite" + assert call_kwargs["env_name"] == ["DefaultWebSiteWithTags"] + assert ( + call_kwargs["platform"] + == "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + ) + assert call_kwargs["version_label"].startswith("app-") + assert call_kwargs["instance_profile"] == "aws-elasticbeanstalk-ec2-role" + assert call_kwargs["service_role"] == "aws-elasticbeanstalk-service-role" + assert call_kwargs["key_name"] is None + assert call_kwargs["tags"] == [ + {"Key": "key1", "Value": "val1"}, + {"Key": "key2", "Value": "val2"}, + ] + assert call_kwargs["vpc"] == {} + assert call_kwargs["elb_type"] == "application" + assert call_kwargs["instance_types"] == "c5.2xlarge" + assert call_kwargs["min_instances"] == "1" + assert call_kwargs["max_instances"] == "4" + assert call_kwargs["block_device_mappings"] == [] + assert call_kwargs["listener_configs"] == [ + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "DefaultProcess", + "Value": "default", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "ListenerEnabled", + "Value": "true", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Rules", + "Value": "rule1", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Priority", + "Value": "1", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Process", + "Value": "default", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "PathPatterns", + "Value": "*", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:default", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:default", + "OptionName": "Port", + "Value": "80", + }, + ] + assert ( + call_kwargs["cname"] + == "default-web-site-with-tags.elasticbeanstalk.com" + ) + assert 
call_kwargs["description"] == "Environment created by `eb migrate`" + assert call_kwargs["load_balancer_security_group"] is None + assert call_kwargs["ec2_security_group"] is None + assert call_kwargs["root_volume"] == [ + { + "Namespace": "aws:autoscaling:launchconfiguration", + "OptionName": "RootVolumeSize", + "Value": "60", + } + ] + + mock_make_new_env.assert_called_once_with( + mock_create_env_request_object, interactive=False, timeout=15 + ) + mock_do_encrypt_ebs_volumes.assert_not_called() + call_args = mock_upload_application_version.call_args[0] + assert call_args[0] == "my-s3-bucket-location" + assert call_args[1].startswith("DefaultWebSite/app-") + assert call_args[2] == os.path.join( + os.getcwd(), "migrations", "latest", "upload_target.zip" + ) + + call_args, call_kwargs = mock_create_env_request.call_args + assert ( + call_kwargs.get("tags") == expected_tags + ), f"Tags parameter was not passed to CreateEnvironmentRequest, instead got: {call_kwargs.get('tags')}" + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.lib.ec2.establish_security_group") + @patch("ebcli.controllers.migrate.construct_environment_vpc_config") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.controllers.migrate.establish_instance_profile") + @patch("ebcli.operations.createops.create_default_service_role") + @patch("ebcli.controllers.migrate.get_unique_cname") + @patch("ebcli.controllers.migrate.do_encrypt_ebs_volumes") + @patch("ebcli.objects.requests.CreateEnvironmentRequest") + @patch("ebcli.operations.createops.make_new_env") + @patch( + "ebcli.controllers.migrate.commonops.elasticbeanstalk.application_version_exists" + ) + @patch( + "ebcli.controllers.migrate.commonops.elasticbeanstalk.get_storage_location" + ) + @patch("ebcli.controllers.migrate.commonops.s3.get_object_info") + @patch("ebcli.controllers.migrate.commonops.s3.upload_application_version") + def test_migrate_site_with_default_tags_and_vpc_config( + self, + mock_upload_application_version, + mock_get_object_info, + mock_get_storage_location, + mock_application_version_exists, + mock_make_new_env, + mock_create_env_request, + mock_do_encrypt_ebs_volumes, + mock_get_unique_cname, + mock_create_service_role, + mock_establish_instance_profile, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_construct_environment_vpc_config, + mock_establish_security_group, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of Default Web Site with tags parameter. + + This test: + 1. Ensures Default Web Site exists + 2. Invokes `eb migrate` with `--tags key1=val1,key2=val2` + 3. 
Verifies that create_app_version_and_environment is called with correct tags + """ + from ebcli.core.ebcore import EB + + # Define the tags we'll pass to the command + instance_tags = [ + {"Key": "instancetag1", "Value": "val1"}, + {"Key": "instancetag2", "Value": "val2"}, + ] + + # Setup mock responses + mock_create_env_request_object = MagicMock() + mock_create_env_request.return_value = mock_create_env_request_object + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_establish_instance_profile.return_value = ( + "aws-elasticbeanstalk-ec2-role" + ) + mock_create_service_role.return_value = "aws-elasticbeanstalk-service-role" + mock_construct_environment_vpc_config.return_value = ( + {"id": "vpc-id"}, + "us-west-2", + "i-1234124adsf", + instance_tags, + ) + mock_establish_security_group.return_value = ("sg-lbsg", "sg-ec2sg") + mock_get_unique_environment_name.return_value = ["DefaultWebSiteWithTags"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + mock_get_unique_cname.return_value = ( + "default-web-site-with-tags.elasticbeanstalk.com" + ) + mock_get_unique_cname.return_value = ( + "default-web-site-with-tags.elasticbeanstalk.com" + ) + mock_application_version_exists.return_value = None + mock_get_storage_location.return_value = "my-s3-bucket-location" + mock_get_object_info.side_effect = NotFoundError + + # Create and run EB CLI app with tags parameter + app = EB(argv=["migrate", "--encrypt-ebs-volume"]) + app.setup() + app.run() + + # Verify CreateEnvironmentRequest was called with the correct parameters + # Verify CreateEnvironmentRequest was called with the correct tags + call_kwargs = mock_create_env_request.call_args[1] + assert call_kwargs["app_name"] == "DefaultWebSite" + assert call_kwargs["env_name"] == ["DefaultWebSiteWithTags"] + assert ( + call_kwargs["platform"] + == "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + ) + assert call_kwargs["version_label"].startswith("app-") + assert call_kwargs["instance_profile"] == "aws-elasticbeanstalk-ec2-role" + assert call_kwargs["service_role"] == "aws-elasticbeanstalk-service-role" + assert call_kwargs["key_name"] is None + assert call_kwargs["tags"] == instance_tags + assert call_kwargs["vpc"] == {"id": "vpc-id"} + assert call_kwargs["elb_type"] == "application" + assert call_kwargs["instance_types"] == "c5.2xlarge" + assert call_kwargs["min_instances"] == "1" + assert call_kwargs["max_instances"] == "4" + assert call_kwargs["block_device_mappings"] == [] + assert call_kwargs["listener_configs"] == [ + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "DefaultProcess", + "Value": "default", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "ListenerEnabled", + "Value": "true", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Rules", + "Value": "rule1", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Priority", + "Value": "1", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Process", + "Value": "default", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "PathPatterns", + "Value": "*", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:default", + "OptionName": 
"Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:default", + "OptionName": "Port", + "Value": "80", + }, + ] + assert ( + call_kwargs["cname"] + == "default-web-site-with-tags.elasticbeanstalk.com" + ) + assert call_kwargs["description"] == "Environment created by `eb migrate`" + assert call_kwargs["load_balancer_security_group"] is None + assert call_kwargs["ec2_security_group"] is None + assert call_kwargs["root_volume"] == [ + { + "Namespace": "aws:autoscaling:launchconfiguration", + "OptionName": "RootVolumeSize", + "Value": "60", + } + ] + + mock_make_new_env.assert_called_once_with( + mock_create_env_request_object, interactive=False, timeout=15 + ) + mock_do_encrypt_ebs_volumes.assert_called_once_with() + call_args = mock_upload_application_version.call_args[0] + assert call_args[0] == "my-s3-bucket-location" + assert call_args[1].startswith("DefaultWebSite/app-") + assert call_args[2] == os.path.join( + os.getcwd(), "migrations", "latest", "upload_target.zip" + ) + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_site_with_special_firewall_rules__not_copied_over_unless_referenced_in_bindings( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration with firewall configuration copying. + + This test verifies that when --copy-firewall-config is passed to eb migrate: + 1. It looks up firewall configuration on the current machine + 2. It generates a firewall config PS1 script + 3. It references the script in the manifest file + """ + from ebcli.core.ebcore import EB + + try: + http_port = 8080 + https_port = 8443 + self.create_test_firewall_rules(http_port, https_port) + # Setup Default Web Site with an additional application + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + + # Get the physical path of the default web site + default_physical_path = ( + default_site.Applications["/"].VirtualDirectories["/"].PhysicalPath + ) + + # Create content for the default web site + with open(os.path.join(default_physical_path, "index.html"), "w") as f: + f.write("
<html><body><h1>Default Web Site Root</h1></body></html>
") + + # Create physical path for the additional application + app_physical_path = "c:\\inetpub\\application1" + os.makedirs(app_physical_path, exist_ok=True) + + # Create content for the additional application + with open(os.path.join(app_physical_path, "index.html"), "w") as f: + f.write("
<html><body><h1>Application1</h1></body></html>
") + + # Add an additional application to Default Web Site at "/application1" + additional_app = default_site.Applications.Add( + "/application1", app_physical_path + ) + additional_app.ApplicationPoolName = "DefaultAppPool" + server_manager.CommitChanges() + + # Setup mock responses + mock_create_env_request_object = MagicMock() + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = [ + "DefaultWebSiteWithFirewall" + ] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with copy-firewall-config flag + app = EB(argv=["migrate", "--archive-only", "--copy-firewall-config"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = ( + upload_target_dir / "aws-windows-deployment-manifest.json" + ) + assert manifest_path.exists() + + # Verify firewall config script was generated + firewall_script_path = ( + upload_target_dir + / "ebmigrateScripts" + / "modify_firewall_config.ps1" + ) + assert firewall_script_path.exists() + + # Verify the firewall script contains the expected commands + with open(firewall_script_path) as f: + script_content = f.read() + assert ( + 'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTP"' + not in script_content + ) + assert ( + 'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTPS"' + not in script_content + ) + + # Verify manifest references the firewall script + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "custom" in manifest["deployments"] + + # Check for ModifyFirewallConfig section using list comprehension + firewall_sections = [ + section + for section in manifest["deployments"]["custom"] + if section["name"] == "ModifyFirewallConfig" + ] + assert len(firewall_sections) > 0 + + firewall_section = firewall_sections[0] + assert ( + firewall_section["scripts"]["install"]["file"] + == "ebmigrateScripts\\modify_firewall_config.ps1" + ) + assert ( + firewall_section["scripts"]["restart"]["file"] + == "ebmigrateScripts\\noop.ps1" + ) + assert ( + firewall_section["scripts"]["uninstall"]["file"] + == "ebmigrateScripts\\noop.ps1" + ) + finally: + self.cleanup_test_firewall_rules() + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + def test_migrate_site_with_special_firewall_rules( + self, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration with firewall configuration copying. + + This test verifies that when --copy-firewall-config is passed to eb migrate: + 1. It looks up firewall configuration on the current machine + 2. It generates a firewall config PS1 script + 3. 
It references the script in the manifest file + """ + from ebcli.core.ebcore import EB + + try: + http_port = 8080 + https_port = 8443 + self.create_test_firewall_rules(http_port, https_port) + # Setup Default Web Site with an additional application + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + default_site.Bindings.Add("*:8080:", "http") + default_site.Bindings.Add("*:8443:", "https") + + # Get the physical path of the default web site + default_physical_path = ( + default_site.Applications["/"].VirtualDirectories["/"].PhysicalPath + ) + + # Create content for the default web site + with open(os.path.join(default_physical_path, "index.html"), "w") as f: + f.write("
<html><body><h1>Default Web Site Root</h1></body></html>
") + + # Create physical path for the additional application + app_physical_path = "c:\\inetpub\\application1" + os.makedirs(app_physical_path, exist_ok=True) + + # Create content for the additional application + with open(os.path.join(app_physical_path, "index.html"), "w") as f: + f.write("
<html><body><h1>Application1</h1></body></html>
") + + # Add an additional application to Default Web Site at "/application1" + additional_app = default_site.Applications.Add( + "/application1", app_physical_path + ) + additional_app.ApplicationPoolName = "DefaultAppPool" + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = [ + "DefaultWebSiteWithFirewall" + ] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with copy-firewall-config flag + app = EB(argv=["migrate", "--archive-only", "--copy-firewall-config"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify manifest was created and has correct structure + manifest_path = ( + upload_target_dir / "aws-windows-deployment-manifest.json" + ) + assert manifest_path.exists() + + # Verify firewall config script was generated + firewall_script_path = ( + upload_target_dir + / "ebmigrateScripts" + / "modify_firewall_config.ps1" + ) + assert firewall_script_path.exists() + + # Verify the firewall script contains the expected commands + with open(firewall_script_path) as f: + script_content = f.read() + assert ( + 'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTP"' + in script_content + ) + assert ( + 'New-NetFirewallRule -DisplayName "EB-CLI-Test-HTTPS"' + in script_content + ) + assert "-Action Allow" in script_content + assert "-Action Block" in script_content + assert "-Protocol TCP" in script_content + assert f"-LocalPort {http_port}" in script_content + assert f"-LocalPort {https_port}" in script_content + + # Verify manifest references the firewall script + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "custom" in manifest["deployments"] + + # Check for ModifyFirewallConfig section using list comprehension + firewall_sections = [ + section + for section in manifest["deployments"]["custom"] + if section["name"] == "ModifyFirewallConfig" + ] + assert len(firewall_sections) > 0 + + firewall_section = firewall_sections[0] + assert ( + firewall_section["scripts"]["install"]["file"] + == "ebmigrateScripts\\modify_firewall_config.ps1" + ) + assert ( + firewall_section["scripts"]["restart"]["file"] + == "ebmigrateScripts\\noop.ps1" + ) + assert ( + firewall_section["scripts"]["uninstall"]["file"] + == "ebmigrateScripts\\noop.ps1" + ) + finally: + self.cleanup_test_firewall_rules() + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + @patch( + "ebcli.controllers.migrate.MigrateController.create_app_version_and_environment" + ) + def test_migrate_multiple_sites_with_listener_configs( + self, + mock_create_app_version_and_environment, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + 
mock_get_apps, + ): + """ + Test migration of multiple sites with listener configurations. + + This test verifies: + 1. Creation of three HTTP sites: Admin (8080), Reporting (8081), Payment (8082) + 2. Addition of HTTPS binding (8443) to Admin site + 3. Generation of listener configurations in listener_configs.json + 4. Proper process option settings for each site + 5. Correct listener rule configurations + + The test uses --copy-firewall-config flag but no --ssl-certificate-arn, + so only HTTP listener configurations should be generated. + """ + from ebcli.core.ebcore import EB + + # Save original ARR configuration + server_manager = ServerManager() + config = server_manager.GetApplicationHostConfiguration() + original_arr_config = None + original_arr_enabled = False + + http_ports = [8080, 8081, 8082] + https_port = 8443 + try: + self.get_and_save_original_arr_config(config) + + # Explicitly disable ARR + try: + proxy_section = config.GetSection("system.webServer/proxy") + if proxy_section is not None: + proxy_section.SetAttributeValue("enabled", False) + server_manager.CommitChanges() + print("ARR proxy has been disabled for this test") + else: + print("ARR proxy section not found in configuration") + except Exception as e: + print(f"Warning: Could not disable ARR: {e}") + + # Setup firewall rules for our test ports + for port in http_ports: + self.create_test_firewall_rules(port, https_port) + + # Remove Default Web Site and create multiple custom sites + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + server_manager.Sites.Remove(default_site) + + # Create Admin site on port 8080 with HTTPS on 8443 + admin_site = server_manager.Sites.Add( + "Admin", "http", "*:8080:", "c:\\inetpub\\admin" + ) + # Add HTTPS binding to Admin site + admin_site.Bindings.Add("*:8443:", "https") + + # Create Reporting site on port 8081 + reporting_site = server_manager.Sites.Add( + "Reporting", "http", "*:8081:", "c:\\inetpub\\reporting" + ) + + # Create Payment site on port 8082 + payment_site = server_manager.Sites.Add( + "Payment", "http", "*:8082:", "c:\\inetpub\\payment" + ) + + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["MultiSiteListenerEnv"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with copy-firewall-config flag but no SSL certificate + app = EB(argv=["migrate", "--copy-firewall-config"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + # Verify listener_configs.json was created + listener_configs_path = migrations_dir / "listener_configs.json" + assert listener_configs_path.exists() + + # Load and verify listener configurations + with open(listener_configs_path) as f: + listener_configs = json.load(f) + listener_configs = [ + OptionSetting(option_setting) + for option_setting in listener_configs["listener_configs"] + ] + expected_listener_configs = [ + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "DefaultProcess", + "Value": "8080", 
+ }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "ListenerEnabled", + "Value": "true", + }, + { + "Namespace": "aws:elbv2:listener:default", + "OptionName": "Rules", + "Value": "rule1,rule3,rule4", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Priority", + "Value": "1", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "Process", + "Value": "8080", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule1", + "OptionName": "PathPatterns", + "Value": "*", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule3", + "OptionName": "Priority", + "Value": "3", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule3", + "OptionName": "Process", + "Value": "8081", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule3", + "OptionName": "PathPatterns", + "Value": "*", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule4", + "OptionName": "Priority", + "Value": "4", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule4", + "OptionName": "Process", + "Value": "8082", + }, + { + "Namespace": "aws:elbv2:listenerrule:rule4", + "OptionName": "PathPatterns", + "Value": "*", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8080", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8080", + "OptionName": "Port", + "Value": "8080", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8081", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8081", + "OptionName": "Port", + "Value": "8081", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8082", + "OptionName": "Protocol", + "Value": "HTTP", + }, + { + "Namespace": "aws:elasticbeanstalk:environment:process:8082", + "OptionName": "Port", + "Value": "8082", + }, + ] + expected_listener_configs = [ + OptionSetting(option_setting) + for option_setting in expected_listener_configs + ] + assert set(listener_configs) == set(expected_listener_configs) + finally: + self.cleanup_test_firewall_rules() + self.revert_arr_config_changes( + original_arr_config, original_arr_enabled + ) + + @patch("ebcli.lib.elasticbeanstalk.get_application_names") + @patch("ebcli.lib.elasticbeanstalk.create_application_version") + @patch("ebcli.controllers.migrate.get_unique_environment_name") + @patch("ebcli.controllers.migrate.establish_platform") + @patch("ebcli.controllers.migrate.test_environment_exists") + @patch("ebcli.lib.ec2.get_instance_metadata") + @patch( + "ebcli.controllers.migrate.MigrateController.create_app_version_and_environment" + ) + @patch("ebcli.controllers.migrate._arr_enabled") + def test_migrate_multiple_sites_with_arr_configuration( + self, + mock_arr_enabled, + mock_create_app_version_and_environment, + mock_get_metadata, + mock_test_environment_exists, + mock_establish_platform, + mock_get_unique_environment_name, + mock_create_version, + mock_get_apps, + ): + """ + Test migration of multiple sites with ARR (Application Request Routing) configuration. + + This test verifies: + 1. Creation of three HTTP sites: Router (port 80), Reporting (port 8081), Admin (port 8082) + 2. Addition of HTTPS binding (port 8443) to Admin site + 3. Enabling of ARR (Application Request Routing) in IIS + 4. Generation of ARR configuration scripts in the deployment package + 5. Absence of listener configurations in listener_configs.json (since ARR is enabled) + 6. 
Proper manifest configuration for ARR-enabled sites + + The test uses --copy-firewall-config flag but since ARR is enabled, + no listener configurations should be generated. + """ + from ebcli.core.ebcore import EB + + # Save original ARR configuration + server_manager = ServerManager() + config = server_manager.GetApplicationHostConfiguration() + original_arr_config = None + original_arr_enabled = False + + # Mock ARR as enabled + mock_arr_enabled.return_value = True + + try: + self.get_and_save_original_arr_config(config) + + # Setup firewall rules for our test ports + http_ports = [80, 8081, 8082] + https_port = 8443 + for port in http_ports: + self.create_test_firewall_rules(port, https_port) + + # Remove Default Web Site and create multiple custom sites + server_manager = ServerManager() + default_site = server_manager.Sites[ + "Default Web Site" + ] # Get fresh reference + server_manager.Sites.Remove(default_site) + + # Create Router site on port 80 + router_site = server_manager.Sites.Add( + "Router", "http", "*:80:", "c:\\inetpub\\router" + ) + # Add HTTPS binding to Admin site + router_site.Bindings.Add("*:443:", "https") + + # Create Reporting site on port 8081 + reporting_site = server_manager.Sites.Add( + "Reporting", "http", "*:8081:", "c:\\inetpub\\reporting" + ) + + # Create Admin site on port 8082 with HTTPS on 8443 + admin_site = server_manager.Sites.Add( + "Admin", "http", "*:8082:", "c:\\inetpub\\admin" + ) + + server_manager.CommitChanges() + + # Setup mock responses + mock_get_apps.return_value = [] + mock_create_version.return_value = "v1" + mock_get_metadata.side_effect = NotAnEC2Instance + mock_get_unique_environment_name.return_value = ["MultiSiteArrEnv"] + mock_establish_platform.return_value = "arn:aws:elasticbeanstalk:us-west-2::platform/.NET 6 running on 64bit Amazon Linux 2023/3.4.0" + mock_test_environment_exists.side_effect = NotFoundError + + # Create and run EB CLI app with copy-firewall-config flag + app = EB(argv=["migrate"]) + app.setup() + app.run() + + # Verify source bundle was created + migrations_dir = Path(".") / "migrations" / "latest" + assert migrations_dir.exists() + + upload_target_dir = migrations_dir / "upload_target" + assert upload_target_dir.exists() + + # Verify listener_configs.json was NOT created (since ARR is enabled) + listener_configs_path = migrations_dir / "listener_configs.json" + assert not listener_configs_path.exists() + + # Verify manifest was created and has correct structure + manifest_path = ( + upload_target_dir / "aws-windows-deployment-manifest.json" + ) + assert manifest_path.exists() + + with open(manifest_path) as f: + manifest = json.load(f) + assert manifest["manifestVersion"] == 1 + assert "custom" in manifest["deployments"] + + # Verify ARR configuration scripts exist + arr_msi_installer_path = ( + upload_target_dir / "ebmigrateScripts" / "arr_msi_installer.ps1" + ) + assert arr_msi_installer_path.exists() + + arr_config_importer_path = ( + upload_target_dir + / "ebmigrateScripts" + / "arr_configuration_importer_script.ps1" + ) + assert arr_config_importer_path.exists() + + windows_proxy_enabler_path = ( + upload_target_dir + / "ebmigrateScripts" + / "windows_proxy_feature_enabler.ps1" + ) + assert windows_proxy_enabler_path.exists() + + # Verify ARR configuration sections in manifest + custom_sections = manifest["deployments"]["custom"] + + # Check for WindowsProxyFeatureEnabler section + proxy_feature_sections = [ + section + for section in custom_sections + if section["name"] == "WindowsProxyFeatureEnabler" 
+ ] + assert len(proxy_feature_sections) > 0 + + # Check for ArrConfigurationImporterScript section + arr_config_sections = [ + section + for section in custom_sections + if section["name"] == "ArrConfigurationImporterScript" + ] + assert len(arr_config_sections) > 0 + + # Verify site installation scripts exist for all three sites + router_install_script = None + reporting_install_script = None + admin_install_script = None + + for section in custom_sections: + if section["name"] == "Router": + router_script_path = section["scripts"]["install"]["file"] + router_install_script = ( + upload_target_dir / router_script_path + ) + elif section["name"] == "Reporting": + reporting_script_path = section["scripts"]["install"][ + "file" + ] + reporting_install_script = ( + upload_target_dir / reporting_script_path + ) + elif section["name"] == "Admin": + admin_script_path = section["scripts"]["install"]["file"] + admin_install_script = upload_target_dir / admin_script_path + + assert ( + router_install_script is not None + and router_install_script.exists() + ) + assert ( + reporting_install_script is not None + and reporting_install_script.exists() + ) + assert ( + admin_install_script is not None + and admin_install_script.exists() + ) + + # Verify ARR configuration in site installation scripts + with open(router_install_script) as f: + router_script = f.read() + assert "Invoke-ARRImportScript" in router_script + assert "*:80:" in router_script + assert "*:443:" in router_script + + with open(reporting_install_script) as f: + reporting_script = f.read() + assert "*:8081:" in reporting_script + + with open(admin_install_script) as f: + admin_script = f.read() + assert "*:8082:" in admin_script + + # Verify firewall config script was generated + firewall_script_path = ( + upload_target_dir + / "ebmigrateScripts" + / "modify_firewall_config.ps1" + ) + assert not firewall_script_path.exists() + + finally: + self.cleanup_test_firewall_rules() + self.revert_arr_config_changes( + original_arr_config, original_arr_enabled + ) + + def tearDown(self): + os.unlink(os.path.join("migrations", "latest")) + shutil.rmtree(os.path.join("migrations")) + + @classmethod + def tearDownClass(cls): + """Cleanup that runs once after all tests in the class""" + try: + cls.restore_state() + finally: + if cls.state_file.exists(): + cls.state_file.unlink() + os.chdir(cls.original_dir) + cls._delete_testDir_if_exists() + + @classmethod + def _delete_testDir_if_exists(cls): + if os.path.exists("testDir"): + if sys.platform == "win32": + subprocess.run(["rd", "/s", "/q", "testDir"], shell=True) + else: + shutil.rmtree("testDir") + + def revert_arr_config_changes(self, original_arr_config, original_arr_enabled): + try: + if original_arr_config is not None: + server_manager = ServerManager() + config = server_manager.GetApplicationHostConfiguration() + proxy_section = config.GetSection("system.webServer/proxy") + + if proxy_section is not None: + # Restore enabled state + proxy_section.SetAttributeValue("enabled", original_arr_enabled) + + # Restore other attributes + for attr_name, attr_value in original_arr_config.items(): + # Handle TimeSpan values + if attr_name == "timeout": + from System import TimeSpan + + attr_value = TimeSpan.FromSeconds(attr_value) + proxy_section.SetAttributeValue(attr_name, attr_value) + + server_manager.CommitChanges() + print("Original ARR configuration has been restored") + except Exception as e: + print(f"Warning: Could not restore ARR configuration: {e}") + + def 
get_and_save_original_arr_config(self, config): + # Get and save the original ARR proxy configuration + try: + proxy_section = config.GetSection("system.webServer/proxy") + if proxy_section is not None: + original_arr_enabled = proxy_section.GetAttributeValue("enabled") + original_arr_config = {} + for attr in proxy_section.Attributes: + if not attr.IsInheritedFromDefaultValue: + # Handle TimeSpan values specially + if attr.Value.__class__.__name__.endswith("TimeSpan"): + original_arr_config[attr.Name] = attr.Value.TotalSeconds + else: + original_arr_config[attr.Name] = attr.Value + except Exception as e: + print(f"Warning: Could not backup ARR configuration: {e}") + + class OptionSetting: + def __init__(self, option_setting_dict: typing.Dict[str, str]): + self.namespace = option_setting_dict["Namespace"] + self.value = option_setting_dict["Value"] + self.option = option_setting_dict["OptionName"] + + def __hash__(self): + return hash((self.namespace, self.value, self.option)) + + def __eq__(self, other): + return ( + self.namespace == other.namespace + and self.value == other.value + and self.option == other.option + ) + + def __lt__(self, other): + return ( + self.namespace < other.namespace + or self.value < other.value + or self.option < other.option + ) + + def __repr__(self): + return f"(Namespace: {self.namespace}, Value: {self.value}, OptionName: {self.option})" diff --git a/tests/unit/lib/test_ec2.py b/tests/unit/lib/test_ec2.py new file mode 100644 index 000000000..a9a1402a1 --- /dev/null +++ b/tests/unit/lib/test_ec2.py @@ -0,0 +1,358 @@ +# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
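+# Unit tests for the ebcli.lib.ec2 helpers used by "eb migrate": get_current_instance_details, get_instance_metadata (IMDSv2 token flow), and _is_timeout_exception.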
+import os +import shutil +import socket +import urllib.error +import urllib.request + +import unittest +import mock + +from ebcli.lib import ec2 +from ebcli.objects.exceptions import NotAnEC2Instance + + +class TestEC2(unittest.TestCase): + def setUp(self): + self.test_dir = 'testDir' + self.ebcli_root = os.getcwd() + + if not os.path.exists(self.test_dir): + os.makedirs(self.test_dir) + os.chdir(self.test_dir) + + def tearDown(self): + os.chdir(self.ebcli_root) + if os.path.exists(self.test_dir): + shutil.rmtree(self.test_dir) + + @mock.patch('ebcli.lib.ec2.get_instance_metadata') + @mock.patch('ebcli.lib.ec2.describe_instance') + @mock.patch('ebcli.lib.ec2.ensure_vpc_exists') + @mock.patch('ebcli.lib.ec2.aws.set_region') + @mock.patch('ebcli.lib.ec2.fileoperations.write_config_setting') + @mock.patch('ebcli.lib.ec2.instance_tags') + def test_get_current_instance_details_success( + self, + instance_tags_mock, + write_config_setting_mock, + set_region_mock, + ensure_vpc_exists_mock, + describe_instance_mock, + get_instance_metadata_mock + ): + # Setup mocks + get_instance_metadata_mock.side_effect = [ + 'i-1234567890abcdef0', # instance-id + 'us-west-2a', # availability-zone + '0a:1b:2c:3d:4e:5f', # mac + 'vpc-12345678', # vpc-id + 'subnet-12345678' # subnet-id + ] + + describe_instance_mock.return_value = { + 'InstanceId': 'i-1234567890abcdef0', + 'SecurityGroups': [ + {'GroupId': 'sg-12345678'}, + {'GroupId': 'sg-87654321'} + ] + } + + instance_tags_mock.return_value = [ + {'Key': 'Name', 'Value': 'test-instance'}, + {'Key': 'Environment', 'Value': 'test'} + ] + + # Call the function + result = ec2.get_current_instance_details() + + # Verify results + self.assertEqual('i-1234567890abcdef0', result['InstanceId']) + self.assertEqual('vpc-12345678', result['VpcId']) + self.assertEqual('subnet-12345678', result['SubnetId']) + self.assertEqual(['sg-12345678', 'sg-87654321'], result['SecurityGroupIds']) + self.assertEqual('us-west-2', result['Region']) + self.assertEqual([ + {'Key': 'Name', 'Value': 'test-instance'}, + {'Key': 'Environment', 'Value': 'test'} + ], result['Tags']) + + # Verify mock calls + get_instance_metadata_mock.assert_any_call('instance-id') + get_instance_metadata_mock.assert_any_call('placement/availability-zone') + get_instance_metadata_mock.assert_any_call('mac') + get_instance_metadata_mock.assert_any_call('network/interfaces/macs/0a:1b:2c:3d:4e:5f/vpc-id') + get_instance_metadata_mock.assert_any_call('network/interfaces/macs/0a:1b:2c:3d:4e:5f/subnet-id') + + set_region_mock.assert_called_once_with('us-west-2') + write_config_setting_mock.assert_called_once_with('global', 'default_region', 'us-west-2') + ensure_vpc_exists_mock.assert_called_once_with('vpc-12345678') + describe_instance_mock.assert_called_once_with(instance_id='i-1234567890abcdef0') + instance_tags_mock.assert_called_once_with('i-1234567890abcdef0') + + @mock.patch('ebcli.lib.ec2.get_instance_metadata') + @mock.patch('ebcli.lib.ec2.describe_instance') + @mock.patch('ebcli.lib.ec2.ensure_vpc_exists') + @mock.patch('ebcli.lib.ec2.aws.set_region') + @mock.patch('ebcli.lib.ec2.fileoperations.write_config_setting') + @mock.patch('ebcli.lib.ec2.io.log_warning') + def test_get_current_instance_details_vpc_not_found( + self, + log_warning_mock, + write_config_setting_mock, + set_region_mock, + ensure_vpc_exists_mock, + describe_instance_mock, + get_instance_metadata_mock + ): + # Setup mocks + get_instance_metadata_mock.side_effect = [ + 'i-1234567890abcdef0', # instance-id + 'us-west-2a', # availability-zone 
+ '0a:1b:2c:3d:4e:5f', # mac + 'vpc-12345678', # vpc-id + 'subnet-12345678' # subnet-id + ] + + # Simulate VPC not found error + ensure_vpc_exists_mock.side_effect = Exception("InvalidVpcID.NotFound") + + # Call the function + result = ec2.get_current_instance_details() + + # Verify results + self.assertIsNone(result['InstanceId']) + self.assertIsNone(result['VpcId']) + self.assertIsNone(result['SubnetId']) + self.assertEqual([], result['SecurityGroupIds']) + self.assertEqual('us-west-2', result['Region']) + self.assertEqual([], result['Tags']) + + # Verify warning was logged + log_warning_mock.assert_called_once_with('Unable to retrieve details of VPC, vpc-12345678') + + # Verify describe_instance was not called + describe_instance_mock.assert_not_called() + + @mock.patch('ebcli.lib.ec2.get_instance_metadata') + @mock.patch('ebcli.lib.ec2.describe_instance') + @mock.patch('ebcli.lib.ec2.ensure_vpc_exists') + @mock.patch('ebcli.lib.ec2.aws.set_region') + @mock.patch('ebcli.lib.ec2.fileoperations.write_config_setting') + @mock.patch('ebcli.lib.ec2.io.log_warning') + def test_get_current_instance_details_instance_not_found( + self, + log_warning_mock, + write_config_setting_mock, + set_region_mock, + ensure_vpc_exists_mock, + describe_instance_mock, + get_instance_metadata_mock + ): + # Setup mocks + instance_id = 'i-1234567890abcdef0' + get_instance_metadata_mock.side_effect = [ + instance_id, # instance-id + 'us-west-2a', # availability-zone + '0a:1b:2c:3d:4e:5f', # mac + 'vpc-12345678', # vpc-id + 'subnet-12345678' # subnet-id + ] + + # Simulate instance not found error + describe_instance_mock.side_effect = Exception("InvalidInstanceID.NotFound") + + # Call the function + result = ec2.get_current_instance_details() + + # Verify results + self.assertIsNone(result['InstanceId']) + self.assertIsNone(result['VpcId']) + self.assertIsNone(result['SubnetId']) + self.assertEqual([], result['SecurityGroupIds']) + self.assertEqual('us-west-2', result['Region']) + self.assertEqual([], result['Tags']) + + # Verify warning was logged - use the actual message from the code + log_warning_mock.assert_called_once_with('Unable to retrieve details of instance, None') + + @mock.patch('ebcli.lib.ec2.get_instance_metadata') + @mock.patch('ebcli.lib.ec2.describe_instance') + @mock.patch('ebcli.lib.ec2.ensure_vpc_exists') + @mock.patch('ebcli.lib.ec2.aws.set_region') + @mock.patch('ebcli.lib.ec2.fileoperations.write_config_setting') + @mock.patch('ebcli.lib.ec2.instance_tags') + def test_get_current_instance_details_tags_exception( + self, + instance_tags_mock, + write_config_setting_mock, + set_region_mock, + ensure_vpc_exists_mock, + describe_instance_mock, + get_instance_metadata_mock + ): + # Setup mocks + get_instance_metadata_mock.side_effect = [ + 'i-1234567890abcdef0', # instance-id + 'us-west-2a', # availability-zone + '0a:1b:2c:3d:4e:5f', # mac + 'vpc-12345678', # vpc-id + 'subnet-12345678' # subnet-id + ] + + describe_instance_mock.return_value = { + 'InstanceId': 'i-1234567890abcdef0', + 'SecurityGroups': [ + {'GroupId': 'sg-12345678'} + ] + } + + # Simulate exception when getting tags + instance_tags_mock.side_effect = Exception("Client error") + + # Call the function + result = ec2.get_current_instance_details() + + # Verify results + self.assertEqual('i-1234567890abcdef0', result['InstanceId']) + self.assertEqual('vpc-12345678', result['VpcId']) + self.assertEqual('subnet-12345678', result['SubnetId']) + self.assertEqual(['sg-12345678'], result['SecurityGroupIds']) + self.assertEqual('us-west-2', 
result['Region']) + self.assertEqual([], result['Tags']) # Tags should be empty due to exception + + @mock.patch('ebcli.lib.ec2.get_instance_metadata') + def test_get_current_instance_details_not_an_ec2_instance( + self, + get_instance_metadata_mock + ): + # Simulate not being on an EC2 instance + get_instance_metadata_mock.side_effect = NotAnEC2Instance("Not an EC2 instance") + + # Call the function and expect exception + with self.assertRaises(NotAnEC2Instance): + ec2.get_current_instance_details() + + @mock.patch('ebcli.lib.ec2.urllib.request.Request') + @mock.patch('ebcli.lib.ec2.urllib.request.urlopen') + def test_get_instance_metadata_success( + self, + urlopen_mock, + request_mock + ): + # Setup mocks for successful metadata retrieval + token_response_mock = mock.MagicMock() + token_response_mock.read.return_value = b'mock-token' + token_response_mock.__enter__.return_value = token_response_mock + + metadata_response_mock = mock.MagicMock() + metadata_response_mock.read.return_value = b'metadata-value' + metadata_response_mock.__enter__.return_value = metadata_response_mock + + urlopen_mock.side_effect = [token_response_mock, metadata_response_mock] + + # Call the function + result = ec2.get_instance_metadata('instance-id') + + # Verify result + self.assertEqual('metadata-value', result) + + # Verify request creation + request_mock.assert_any_call('http://169.254.169.254/latest/api/token', method="PUT") + request_mock.assert_any_call('http://169.254.169.254/latest/meta-data/instance-id') + + @mock.patch('ebcli.lib.ec2.urllib.request.Request') + @mock.patch('ebcli.lib.ec2.urllib.request.urlopen') + @mock.patch('ebcli.lib.ec2._is_timeout_exception') + def test_get_instance_metadata_timeout( + self, + is_timeout_exception_mock, + urlopen_mock, + request_mock + ): + # Setup mock for timeout + timeout_error = socket.timeout("Timed out") + urlopen_mock.side_effect = timeout_error + is_timeout_exception_mock.return_value = True + + # Call the function and expect exception + with self.assertRaises(NotAnEC2Instance): + ec2.get_instance_metadata('instance-id') + + @mock.patch('ebcli.lib.ec2.urllib.request.Request') + @mock.patch('ebcli.lib.ec2.urllib.request.urlopen') + def test_get_instance_metadata_url_error_timeout( + self, + urlopen_mock, + request_mock + ): + # Setup mock for URLError with timeout reason + url_error = urllib.error.URLError("URL error") + url_error.reason = TimeoutError("Timed out") + urlopen_mock.side_effect = url_error + + # Call the function and expect exception + with self.assertRaises(NotAnEC2Instance): + ec2.get_instance_metadata('instance-id') + + @mock.patch('ebcli.lib.ec2.urllib.request.Request') + @mock.patch('ebcli.lib.ec2.urllib.request.urlopen') + def test_get_instance_metadata_url_error_with_timeout_string( + self, + urlopen_mock, + request_mock + ): + # Setup mock for URLError with timeout in string + url_error = urllib.error.URLError("timed out") + urlopen_mock.side_effect = url_error + + # Call the function and expect exception + with self.assertRaises(NotAnEC2Instance): + ec2.get_instance_metadata('instance-id') + + @mock.patch('ebcli.lib.ec2.urllib.request.Request') + @mock.patch('ebcli.lib.ec2.urllib.request.urlopen') + def test_get_instance_metadata_connection_error( + self, + urlopen_mock, + request_mock + ): + # Setup mock for ConnectionError + urlopen_mock.side_effect = ConnectionError("Connection refused") + + # Call the function and expect exception + with self.assertRaises(ConnectionError): + ec2.get_instance_metadata('instance-id') + + def 
test_is_timeout_exception_with_timeout_error(self): + # Create a URLError with TimeoutError reason + url_error = urllib.error.URLError("URL error") + url_error.reason = TimeoutError("Timed out") + + # Verify it's detected as a timeout + self.assertTrue(ec2._is_timeout_exception(url_error)) + + def test_is_timeout_exception_with_timeout_string(self): + # Create a URLError with "timed out" in the string + url_error = urllib.error.URLError("Connection timed out") + + # Verify it's detected as a timeout + self.assertTrue(ec2._is_timeout_exception(url_error)) + + def test_is_timeout_exception_not_timeout(self): + # Create a URLError without timeout indication + url_error = urllib.error.URLError("Connection refused") + + # Verify it's not detected as a timeout + self.assertFalse(ec2._is_timeout_exception(url_error)) diff --git a/tests/unit/objects/test_solutionstack.py b/tests/unit/objects/test_solutionstack.py index 36e1b6b46..4e4c31cd5 100644 --- a/tests/unit/objects/test_solutionstack.py +++ b/tests/unit/objects/test_solutionstack.py @@ -529,6 +529,61 @@ def test_group_solution_stacks_by_language_name(self): [solution_stack['SolutionStack'] for solution_stack in grouped_solution_stacks] ) + def test_match_with_windows_server_version_string(self): + """ + Test the match_with_windows_server_version_string method which finds the most appropriate + EB Windows platform based on an input string. + """ + # Setup test data + solution_stack_list = [ + '64bit Windows Server 2025 v2.18.0 running IIS 10.0', + '64bit Windows Server Core 2022 v2.18.0 running IIS 10.0', + '64bit Windows Server 2016 v2.18.0 running IIS 10.0', + '64bit Windows Server Core 2016 v2.18.0 running IIS 10.0', + '64bit Windows Server 2019 v2.18.0 running IIS 10.0', + '64bit Amazon Linux 2017.09 v4.4.0 running Node.js' + ] + solution_stacks = [SolutionStack(s) for s in solution_stack_list] + + # Test case 1: Match with Windows Server 2016 - should prefer non-Core version + result = SolutionStack.match_with_windows_server_version_string( + solution_stacks, + 'Microsoft Windows Server 2016 Datacenter' + ) + self.assertIsNotNone(result) + self.assertEqual('64bit Windows Server 2016 v2.18.0 running IIS 10.0', result.name) + + # Test case 2: Match with Windows Server 2022 - only Core version available + result = SolutionStack.match_with_windows_server_version_string( + solution_stacks, + 'Windows Server 2022' + ) + self.assertIsNotNone(result) + self.assertEqual('64bit Windows Server Core 2022 v2.18.0 running IIS 10.0', result.name) + + # Test case 3: Generic Windows Server - should return first match + result = SolutionStack.match_with_windows_server_version_string( + solution_stacks, + 'Windows Server' + ) + self.assertIsNotNone(result) + # Should return any Windows Server match + self.assertTrue('Windows Server' in result.name) + + # Test case 4: No match for non-Windows input + result = SolutionStack.match_with_windows_server_version_string( + solution_stacks, + 'Amazon Linux' + ) + self.assertIsNone(result) + + # Test case 5: Empty solution stack list + result = SolutionStack.match_with_windows_server_version_string( + [], + 'Microsoft Windows Server 2016 Datacenter' + ) + self.assertIsNone(result) + def test_solution_string_sorting(self): solution_stacks = [ '64bit Amazon Linux 2017.09 v4.4.0 running Node.js', diff --git a/tests/unit/operations/test_commonops.py b/tests/unit/operations/test_commonops.py index 6b441143c..c3b855178 100644 --- a/tests/unit/operations/test_commonops.py +++ b/tests/unit/operations/test_commonops.py @@ 
-504,7 +504,7 @@ def test_wait_for_success_events__timeout_reached( commonops.wait_for_success_events(create_environment_events[0].request_id) self.assertEqual( - "The EB CLI timed out after 10 minute(s). The operation might still be running. To keep viewing events, run 'eb events -f'. To set timeout duration, use '--timeout MINUTES'.", + "The EB CLI timed out after 10 minute(s). The operation might still be running. To keep viewing events, run 'eb events -f'.", str(context_manager.exception) ) @@ -592,7 +592,7 @@ def test_wait_for_compose_events__timeout_is_reached( ) log_error_mock.assert_called_once_with( - "The EB CLI timed out after {timeout_in_minutes} minute(s). The operation might still be running. To keep viewing events, run 'eb events -f'. To set timeout duration, use '--timeout MINUTES'." + "The EB CLI timed out after {timeout_in_minutes} minute(s). The operation might still be running. To keep viewing events, run 'eb events -f'." ) def test_sleep(self): diff --git a/tests/unit/operations/test_gitops.py b/tests/unit/operations/test_gitops.py index 66b17c298..c3998499e 100644 --- a/tests/unit/operations/test_gitops.py +++ b/tests/unit/operations/test_gitops.py @@ -213,7 +213,6 @@ def test_initialize_codecommit__source_control_is_not_setup( get_repository_interactive_mock.assert_not_called() get_branch_interactive_mock.assert_not_called() print_current_codecommit_settings_mock.assert_not_called() - log_error_mock.assert_called_once_with('Cannot setup CodeCommit because there is no Source Control setup') @mock.patch('ebcli.operations.gitops.get_repo_default_for_current_environment') def test_get_default_repository(