diff --git a/py-scripts/lf_interop_throughput.py b/py-scripts/lf_interop_throughput.py
index 9013cfc75..af60abf55 100755
--- a/py-scripts/lf_interop_throughput.py
+++ b/py-scripts/lf_interop_throughput.py
@@ -1,26 +1,26 @@
 #!/usr/bin/env python3
-# flake8: noqa
-"""
+r"""
 NAME: lf_interop_throughput.py

-    PURPOSE: lf_interop_throughput.py will provide the available devices and allows user to run the wifi capacity test
-    on particular devices by specifying direction as upload, download and bidirectional including different types of loads and incremental capacity.
+    PURPOSE: lf_interop_throughput.py provides the available devices and allows the user to run the wifi capacity test
+    on particular devices by specifying the direction as upload, download or bidirectional, including different types of loads and incremental capacity.
     Will also run the interopability test on particular devices by specifying direction as upload, download and bidirectional.

     TO PERFORM THROUGHPUT TEST:

     EXAMPLE-1:
     Command Line Interface to run download scenario with desired resources
-    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --device_list 1.10,1.12
+    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --device_list 1.10,1.12

     EXAMPLE-2:
     Command Line Interface to run download scenario with incremental capacity
-    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --security wpa2 --upstream_port eth1 --test_duration 1m --download 1000000
+    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --security wpa2 --upstream_port eth1 --test_duration 1m --download 1000000
     --traffic_type lf_udp --incremental_capacity 1,2

     EXAMPLE-3:
     Command Line Interface to run upload scenario with packet size
-    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --security wpa2 --upstream_port eth1 --test_duration 1m --download 0 --upload 1000000 --traffic_type lf_udp --packet_size 17
+    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --security wpa2 --upstream_port eth1 --test_duration 1m --download 0 --upload 1000000
+    --traffic_type lf_udp --packet_size 17

     EXAMPLE-4:
     Command Line Interface to run bi-directional scenario with load_type intended load

@@ -40,7 +40,7 @@
     EXAMPLE-7:
     Command Line Interface to run the test with precleanup
     python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --precleanup
-
+
     EXAMPLE-8:
     Command Line Interface to run the test with postcleanup
     python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --postcleanup
@@ -53,7 +53,8 @@

     EXAMPLE-1:
     Command Line Interface to run download scenario with desired resources
-    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --do_interopability --device_list 1.10,1.12
+    python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --do_interopability
+    --device_list 1.10,1.12

     EXAMPLE-2:
     Command Line Interface to run bi-directional scenario in Interop web-GUI

@@ -82,7 +83,7 @@
     The following sentence will be displayed Enter the
desired resources to run the test: Please enter the port numbers seperated by commas ','. - Example: + Example: Enter the desired resources to run the test:1.10,1.11,1.12,1.13,1.202,1.203,1.303 STATUS: BETA RELEASE @@ -104,7 +105,6 @@ import importlib import logging import json -import pandas as pd import shutil logger = logging.getLogger(__name__) @@ -116,62 +116,62 @@ if 'py-json' not in sys.path: sys.path.append(os.path.join(os.path.abspath('..'), 'py-json')) -import time -import argparse -from LANforge import LFUtils +import time # noqa: E402 +import argparse # noqa: E402 +from LANforge import LFUtils # noqa: F401, E402 realm = importlib.import_module("py-json.realm") Realm = realm.Realm -from lf_report import lf_report -from lf_graph import lf_bar_graph_horizontal -from lf_graph import lf_line_graph - -from datetime import datetime, timedelta +from lf_report import lf_report # noqa: E402 +from lf_graph import lf_bar_graph_horizontal # noqa: E402 +from lf_graph import lf_line_graph # noqa: E402 +from datetime import datetime, timedelta # noqa: E402 lf_logger_config = importlib.import_module("py-scripts.lf_logger_config") + class Throughput(Realm): def __init__(self, - tos, - ssid=None, - security=None, - password=None, - name_prefix=None, - upstream=None, - num_stations=10, - host="localhost", - port=8080, - test_name=None, - device_list=[], - result_dir=None, - ap_name="", - traffic_type=None, - incremental_capacity=None, - incremental=False, - # packet_size=None, - report_timer="2m", - direction="", - side_a_min_rate=0, side_a_max_rate=0, - side_b_min_rate=56, side_b_max_rate=0, - side_a_min_pdu=-1,side_b_min_pdu=-1, - number_template="00000", - test_duration="2m", - use_ht160=False, - load_type=None, - _debug_on=False, - dowebgui=False, - precleanup=False, - do_interopability=False, - ip="localhost", - user_list=[], real_client_list=[], real_client_list1=[], hw_list=[], laptop_list=[], android_list=[], mac_list=[], windows_list=[], linux_list=[], - total_resources_list=[], working_resources_list=[], hostname_list=[], username_list=[], eid_list=[], - devices_available=[], input_devices_list=[], mac_id1_list=[], mac_id_list=[],overall_avg_rssi=[]): + tos, + ssid=None, + security=None, + password=None, + name_prefix=None, + upstream=None, + num_stations=10, + host="localhost", + port=8080, + test_name=None, + device_list=[], + result_dir=None, + ap_name="", + traffic_type=None, + incremental_capacity=None, + incremental=False, + # packet_size=None, + report_timer="2m", + direction="", + side_a_min_rate=0, side_a_max_rate=0, + side_b_min_rate=56, side_b_max_rate=0, + side_a_min_pdu=-1, side_b_min_pdu=-1, + number_template="00000", + test_duration="2m", + use_ht160=False, + load_type=None, + _debug_on=False, + dowebgui=False, + precleanup=False, + do_interopability=False, + ip="localhost", + user_list=[], real_client_list=[], real_client_list1=[], hw_list=[], laptop_list=[], android_list=[], mac_list=[], windows_list=[], linux_list=[], + total_resources_list=[], working_resources_list=[], hostname_list=[], username_list=[], eid_list=[], + devices_available=[], input_devices_list=[], mac_id1_list=[], mac_id_list=[], overall_avg_rssi=[]): super().__init__(lfclient_host=host, - lfclient_port=port), + lfclient_port=port), self.ssid_list = [] - self.signal_list=[] - self.channel_list=[] - self.mode_list=[] - self.link_speed_list=[] + self.signal_list = [] + self.channel_list = [] + self.mode_list = [] + self.link_speed_list = [] self.upstream = upstream self.host = host self.port = port @@ 
-187,12 +187,12 @@ def __init__(self, self.direction = direction self.tos = tos.split(",") self.number_template = number_template - self.incremental_capacity=incremental_capacity - self.load_type=load_type + self.incremental_capacity = incremental_capacity + self.load_type = load_type self.debug = _debug_on self.name_prefix = name_prefix self.test_duration = test_duration - self.report_timer=report_timer + self.report_timer = report_timer self.station_profile = self.new_station_profile() self.cx_profile = self.new_l3_cx_profile() self.station_profile.lfclient_url = self.lfclient_url @@ -209,8 +209,8 @@ def __init__(self, self.cx_profile.side_a_max_bps = side_a_max_rate self.cx_profile.side_b_min_bps = side_b_min_rate self.cx_profile.side_b_max_bps = side_b_max_rate - self.cx_profile.side_a_min_pdu= side_a_min_pdu - self.cx_profile.side_b_min_pdu= side_b_min_pdu + self.cx_profile.side_a_min_pdu = side_a_min_pdu + self.cx_profile.side_b_min_pdu = side_b_min_pdu self.hw_list = hw_list self.laptop_list = laptop_list self.android_list = android_list @@ -229,31 +229,30 @@ def __init__(self, self.user_list = user_list self.mac_id_list = mac_id_list self.mac_id1_list = mac_id1_list - self.overall_avg_rssi=overall_avg_rssi + self.overall_avg_rssi = overall_avg_rssi self.dowebgui = dowebgui - self.do_interopability=do_interopability + self.do_interopability = do_interopability self.ip = ip self.device_found = False - self.incremental=incremental - self.precleanup=precleanup + self.incremental = incremental + self.precleanup = precleanup def os_type(self): """ Determines OS type of selected devices. - """ response = self.json_get("/resource/all") if "resources" not in response.keys(): logger.error("There are no real devices.") exit(1) - for key,value in response.items(): + for key, value in response.items(): if key == "resources": for element in value: - for a,b in element.items(): + for a, b in element.items(): self.hw_list.append(b['hw version']) # print(self.hw_list) - for hw_version in self.hw_list: + for hw_version in self.hw_list: if "Win" in hw_version: self.windows_list.append(hw_version) elif "Linux" in hw_version: @@ -265,115 +264,111 @@ def os_type(self): self.android_list.append(hw_version) self.laptop_list = self.windows_list + self.linux_list + self.mac_list - def phantom_check(self): """ Checks for non-phantom resources and ports, categorizes them, and prepares a list of available devices for testing. 
""" - port_eid_list,same_eid_list,original_port_list=[],[],[] + port_eid_list, same_eid_list, original_port_list = [], [], [] # Retrieve all resources from the LANforge - response=self.json_get("/resource/all") + response = self.json_get("/resource/all") if "resources" not in response.keys(): logger.error("There are no real devices.") exit(1) # Iterate over the response to categorize resources - for key,value in response.items(): + for key, value in response.items(): if key == "resources": for element in value: - for(a,b) in element.items(): + for (a, b) in element.items(): # Check if the resource is not phantom - if b['phantom'] == False: + if b['phantom'] is False: self.working_resources_list.append(b["hw version"]) # Categorize based on hw version (type of device) if "Win" in b['hw version']: self.eid_list.append(b['eid']) self.windows_list.append(b['hw version']) - self.devices_available.append(b['eid'] +" " +'Win'+" "+ b['hostname']) + self.devices_available.append(b['eid'] + " " + 'Win' + " " + b['hostname']) elif "Linux" in b['hw version']: if 'ct' not in b['hostname']: if 'lf' not in b['hostname']: self.eid_list.append(b['eid']) self.linux_list.append(b['hw version']) - self.devices_available.append(b['eid'] +" " +'Lin'+" "+ b['hostname']) + self.devices_available.append(b['eid'] + " " + 'Lin' + " " + b['hostname']) elif "Apple" in b['hw version']: self.eid_list.append(b['eid']) self.mac_list.append(b['hw version']) - self.devices_available.append(b['eid'] +" " +'Mac'+" "+ b['hostname']) + self.devices_available.append(b['eid'] + " " + 'Mac' + " " + b['hostname']) else: self.eid_list.append(b['eid']) self.android_list.append(b['hw version']) - self.devices_available.append(b['eid'] +" " +'android'+" "+ b['user']) - + self.devices_available.append(b['eid'] + " " + 'android' + " " + b['user']) + # Retrieve all ports from the endpoint - response_port=self.json_get("/port/all") + response_port = self.json_get("/port/all") if "interfaces" not in response_port.keys(): logger.error("Error: 'interfaces' key not found in port data") exit(1) - - # mac_id1_list=[] + # mac_id1_list=[] - # Iterate over port information to filter and categorize ports + # Iterate over port information to filter and categorize ports for interface in response_port['interfaces']: - for port,port_data in interface.items(): + for port, port_data in interface.items(): # Check conditions for non-phantom ports - if(not port_data['phantom'] and not port_data['down'] and port_data['parent dev'] == "wiphy0" and port_data['alias'] != 'p2p0'): + if (not port_data['phantom'] and not port_data['down'] and port_data['parent dev'] == "wiphy0" and port_data['alias'] != 'p2p0'): # Check if the port's parent device matches with an eid in the eid_list for id in self.eid_list: - if id+'.' in port: + if id + '.' in port: original_port_list.append(port) - port_eid_list.append(str(self.name_to_eid(port)[0])+'.'+str(self.name_to_eid(port)[1])) - self.mac_id1_list.append(str(self.name_to_eid(port)[0])+'.'+str(self.name_to_eid(port)[1])+' '+port_data['mac']) + port_eid_list.append(str(self.name_to_eid(port)[0]) + '.' + str(self.name_to_eid(port)[1])) + self.mac_id1_list.append(str(self.name_to_eid(port)[0]) + '.' 
+ str(self.name_to_eid(port)[1]) + ' ' + port_data['mac']) # Check for matching eids between eid_list and port_eid_list for i in range(len(self.eid_list)): for j in range(len(port_eid_list)): - if self.eid_list[i]==port_eid_list[j]: + if self.eid_list[i] == port_eid_list[j]: same_eid_list.append(self.eid_list[i]) - same_eid_list=[_eid + ' ' for _eid in same_eid_list] + same_eid_list = [_eid + ' ' for _eid in same_eid_list] for eid in same_eid_list: for device in self.devices_available: if eid in device: self.user_list.append(device) - - # If self.device_list is provided, check availability against devices_available if len(self.device_list) != 0: - devices_list=self.device_list - available_list=[] - not_available=[] + devices_list = self.device_list + available_list = [] + not_available = [] # Iterate over each input device in devices_list for input_device in devices_list.split(','): - found=False + found = False # Check if input_device exists in devices_available for device in self.devices_available: if input_device + " " in device: available_list.append(input_device) - found =True + found = True break - if found == False: + if found is False: not_available.append(input_device) logger.warning(input_device + " is not available to run the test") - + # If available_list is not empty, log info and set self.device_found to True - if len(available_list)>0: + if len(available_list) > 0: logger.info("Test is intiated on these devices {}".format(available_list)) - devices_list=','.join(available_list) - self.device_found=True + devices_list = ','.join(available_list) + self.device_found = True else: - devices_list="" - self.device_found=False + devices_list = "" + self.device_found = False logger.warning("Test can not be initiated on any selected devices") else: @@ -382,13 +377,13 @@ def phantom_check(self): devices_list = input("Enter the desired resources to run the test:") # If no devices are selected or only comma is entered, log an error and return False - if(devices_list=="" or devices_list==","): + if (devices_list == "" or devices_list == ","): logger.error("Selected Devices are not available in the lanforge") - return False,self.real_client_list - + return False, self.real_client_list + # Split devices_list into resource_eid_list - resource_eid_list=devices_list.split(',') - logger.info("devices list {}".format(devices_list, resource_eid_list)) + resource_eid_list = devices_list.split(',') + logger.info("devices list {}".format(devices_list, resource_eid_list)) # noqa: F523 resource_eid_list2 = [eid + ' ' for eid in resource_eid_list] # Create resource_eid_list1 by appending dot to each eid in resource_eid_list @@ -406,42 +401,42 @@ def phantom_check(self): for i in resource_eid_list2: for j in range(len(self.user_list)): - if i in self.user_list[j]: + if i in self.user_list[j]: self.real_client_list.append(self.user_list[j]) self.real_client_list1.append(self.user_list[j][:25]) # print("real_client_list",self.real_client_list) # print("real_client_list1",self.real_client_list1) - self.num_stations=len(self.real_client_list) + self.num_stations = len(self.real_client_list) # Iterate over resource_eid_list2 and mac_id1_list to populate mac_id_list for eid in resource_eid_list2: for i in self.mac_id1_list: if eid in i: - self.mac_id_list.append(i.strip(eid+' ')) + self.mac_id_list.append(i.strip(eid + ' ')) # Check if incremental_capacity is provided and ensure selected devices are sufficient - if (len(self.incremental_capacity)>0 and 
int(self.incremental_capacity.split(',')[-1])>len(self.mac_id_list)): + if (len(self.incremental_capacity) > 0 and int(self.incremental_capacity.split(',')[-1]) > len(self.mac_id_list)): logger.error("Devices selected is less than given incremental capacity") - return False,self.real_client_list + return False, self.real_client_list else: - return True,self.real_client_list + return True, self.real_client_list - def get_signal_and_channel_data(self,station_names): + def get_signal_and_channel_data(self, station_names): """ Retrieves signal strength, channel, mode, and link speed data for the specified stations. """ - - signal_list,channel_list,mode_list,link_speed_list=[],[],[],[] + + signal_list, channel_list, mode_list, link_speed_list = [], [], [], [] interfaces_dict = dict() try: port_data = self.json_get('/ports/all/')['interfaces'] except KeyError: logger.error("Error: 'interfaces' key not found in port data") exit(1) - + for port in port_data: interfaces_dict.update(port) for sta in station_names: @@ -467,17 +462,15 @@ def get_signal_and_channel_data(self,station_names): link_speed_list.append(interfaces_dict[sta]['rx-rate']) else: link_speed_list.append('-') - return signal_list,channel_list,mode_list,link_speed_list + return signal_list, channel_list, mode_list, link_speed_list - - - def get_ssid_list(self,station_names): + def get_ssid_list(self, station_names): """ Retrieves the SSID for the specified stations. """ ssid_list = [] - + try: port_data = self.json_get('/ports/all/')['interfaces'] except KeyError: @@ -504,10 +497,13 @@ def build(self): return self.cx_profile.created_cx def create_cx(self): - direction='' + ''' + Creates a connection profile. + ''' + direction = '' # Determine direction based on side_a_min_bps and side_b_min_bps - if int(self.cx_profile.side_b_min_bps)!=2560 and int(self.cx_profile.side_a_min_bps)!=2560: + if int(self.cx_profile.side_b_min_bps) != 2560 and int(self.cx_profile.side_a_min_bps) != 2560: self.direction = "Bi-direction" direction = 'Bi-di' elif int(self.cx_profile.side_b_min_bps) != 2560: @@ -517,8 +513,8 @@ def create_cx(self): if int(self.cx_profile.side_a_min_bps) != 2560: self.direction = "Upload" direction = 'UL' - traffic_type=(self.traffic_type.strip("lf_")).upper() - traffic_direction_list,cx_list,traffic_type_list=[],[],[] + traffic_type = (self.traffic_type.strip("lf_")).upper() + traffic_direction_list, cx_list, traffic_type_list = [], [], [] for client in range(len(self.real_client_list)): traffic_direction_list.append(direction) traffic_type_list.append(traffic_type) @@ -528,15 +524,15 @@ def create_cx(self): for i in self.real_client_list1: for j in traffic_direction_list: for k in traffic_type_list: - cxs="%s_%s_%s"% (i,k,j) - cx_names=cxs.replace(" ","") + cxs = "%s_%s_%s" % (i, k, j) + cx_names = cxs.replace(" ", "") cx_list.append(cx_names) logger.info('cx_list{}'.format(cx_list)) - count=0 + count = 0 # creating duplicate created_cx's for precleanup of CX's if there are already existed - if self.precleanup==True: - self.cx_profile.created_cx={k:[k+'-A',k+'-B'] for k in cx_list} + if self.precleanup is True: + self.cx_profile.created_cx = {k: [k + '-A', k + '-B'] for k in cx_list} self.pre_cleanup() # for ip_tos in range(len(self.tos)): @@ -544,10 +540,10 @@ def create_cx(self): logger.info("Creating connections for endpoint type: %s cx-count: %s" % ( self.traffic_type, self.cx_profile.get_cx_count())) self.cx_profile.create(endp_type=self.traffic_type, side_a=[self.input_devices_list[device]], - 
side_b=self.upstream,sleep_time=0,cx_name="%s" % (cx_list[count])) - count +=1 + side_b=self.upstream, sleep_time=0, cx_name="%s" % (cx_list[count])) + count += 1 logger.info("cross connections with created") - + # def start(self,print_pass=False, print_fail=False): # if(len(self.cx_profile.created_cx))>0: # # print(type(self.cx_profile.created_cx),self.cx_profile.created_cx.keys()) @@ -561,13 +557,13 @@ def create_cx(self): # self.json_post(req_url, data) # self.cx_profile.start_cx() - def start_specific(self,cx_list): + def start_specific(self, cx_list): """ Starts specific connections from the given list and sets a report timer for them. """ logging.info("Test started at : {0} ".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) - if len(self.cx_profile.created_cx) >0: + if len(self.cx_profile.created_cx) > 0: for cx in cx_list: req_url = "cli-json/set_cx_report_timer" data = { @@ -587,7 +583,10 @@ def start_specific(self,cx_list): }, debug_=self.debug) # self.cx_profile.start_cx_specific(cx_list) - def stop_specific(self,cx_list): + def stop_specific(self, cx_list): + ''' + Stops specific connections from the given list. + ''' logger.info("Stopping specific CXs...") for cx_name in cx_list: if self.debug: @@ -599,84 +598,124 @@ def stop_specific(self,cx_list): }, debug_=self.debug) def stop(self): - + ''' + Stops all connections + ''' self.cx_profile.stop_cx() self.station_profile.admin_down() - + def pre_cleanup(self): + ''' + Pre-cleanup function + ''' self.cx_profile.cleanup() def cleanup(self): + ''' + Cleanup function + ''' logger.info("cleanup done") self.cx_profile.cleanup() - def monitor(self,iteration,individual_df,device_names,incremental_capacity_list,overall_start_time,overall_end_time): - - - throughput, upload,download,upload_throughput,download_throughput,connections_upload, connections_download = {}, [], [],[],[],{},{} - drop_a, drop_a_per, drop_b, drop_b_per,state,state_of_device= [], [], [], [], [], [] - test_stopped_by_user=False + def get_cx_states(self, device_names): + ''' + Get the cx states of the devices (ie Run, Stopped, WAITING) + ''' + cx_state_list = [] + for device in device_names: + try: + device_data = self.json_get('/cx/all')[device] + cx_state_list.append(device_data['state']) + except KeyError: + logger.error("Error: %s key not found in cx data", device) + return cx_state_list + + def monitor(self, iteration, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time): + ''' + Monitor the performance of the devices + ''' + + throughput, upload, download, upload_throughput, download_throughput, connections_upload, connections_download = {}, [], [], [], [], {}, {} + drop_a, drop_a_per, drop_b, drop_b_per, state, state_of_device = [], [], [], [], [], [] # noqa: F841 + test_stopped_by_user = False if (self.test_duration is None) or (int(self.test_duration) <= 1): raise ValueError("Monitor test duration should be > 1 second") if self.cx_profile.created_cx is None: raise ValueError("Monitor needs a list of Layer 3 connections") - - - start_time = datetime.now() - logger.info("Monitoring cx and endpoints") - end_time = start_time + timedelta(seconds=int(self.test_duration)) - self.overall=[] - + self.overall = [] + # Initialize variables for real-time connections data - index=-1 + index = -1 connections_upload = dict.fromkeys(list(self.cx_profile.created_cx.keys()), float(0)) connections_download = dict.fromkeys(list(self.cx_profile.created_cx.keys()), float(0)) connections_upload_realtime = 
dict.fromkeys(list(self.cx_profile.created_cx.keys()), float(0))
         connections_download_realtime = dict.fromkeys(list(self.cx_profile.created_cx.keys()), float(0))
-
+
+        logger.info("Waiting for cx to start")
+
+        # Poll get_cx_states() until at least one CX reports 'Run'
+        max_retries = 20
+        cx_states_down = True
+        while cx_states_down:
+            max_retries -= 1
+            states = self.get_cx_states(list(self.cx_profile.created_cx.keys()))
+            logger.info("states: {}".format(states))
+
+            for cx_state in states:
+                if cx_state == 'Run':
+                    cx_states_down = False
+            time.sleep(2)
+
+            if cx_states_down and max_retries == 0:
+                logger.error("CXs are not coming up. Exiting the test")
+                exit(1)
+
+        start_time = datetime.now()
+        logger.info("Monitoring cx and endpoints")
+        end_time = start_time + timedelta(seconds=int(self.test_duration))
+
         # Initialize lists for throughput and drops for each connection
-        [(upload.append([]), download.append([]), drop_a.append([]), drop_b.append([]),state.append([])) for i in range(len(self.cx_profile.created_cx))]
-
+        [(upload.append([]), download.append([]), drop_a.append([]), drop_b.append([]), state.append([])) for i in range(len(self.cx_profile.created_cx))]
+
         # If using web GUI, set runtime directory
         if self.dowebgui:
             runtime_dir = self.result_dir
-
+
         # Continuously collect data until end time is reached
         while datetime.now() < end_time:
             index += 1
-
-            signal_list,channel_list,mode_list,link_speed_list=self.get_signal_and_channel_data(self.input_devices_list)
+
+            signal_list, channel_list, mode_list, link_speed_list = self.get_signal_and_channel_data(self.input_devices_list)
             # Fetch required throughput data from Lanforge
             response = list(
                 self.json_get('/cx/%s?fields=%s' % (
-                    ','.join(self.cx_profile.created_cx.keys()), ",".join(['bps rx a', 'bps rx b', 'rx drop %25 a', 'rx drop %25 b','state']))).values())[2:]
+                    ','.join(self.cx_profile.created_cx.keys()), ",".join(['bps rx a', 'bps rx b', 'rx drop %25 a', 'rx drop %25 b', 'state']))).values())[2:]

             # Extracting and storing throughput data
             throughput[index] = list(
                 map(lambda i: [x for x in i.values()], response))

             if self.dowebgui:
                 individual_df_data = []
                 temp_upload, temp_download, temp_drop_a, temp_drop_b = [], [], [], []
-
+
                 # Initialize temporary lists for each connection
                 [(temp_upload.append([]), temp_download.append([]), temp_drop_a.append([]), temp_drop_b.append([])) for i in range(len(self.cx_profile.created_cx))]
-
+
                 # Populate temporary lists with current throughput data
                 for i in range(len(throughput[index])):
-                    if throughput[index][i][4]!='Run':
-                        temp_upload[i].append(0)
-                        temp_download[i].append(0)
-                        temp_drop_a[i].append(0)
-                        temp_drop_b[i].append(0)
+                    if throughput[index][i][4] != 'Run':
+                        temp_upload[i].append(0)
+                        temp_download[i].append(0)
+                        temp_drop_a[i].append(0)
+                        temp_drop_b[i].append(0)
                     else:
                         temp_upload[i].append(throughput[index][i][1])
                         temp_download[i].append(throughput[index][i][0])
                         temp_drop_a[i].append(throughput[index][i][2])
                         temp_drop_b[i].append(throughput[index][i][3])
-
-
+
                 # Calculate average throughput and drop percentages
                 upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in temp_upload]
                 download_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in temp_download]
@@ -688,22 +727,25 @@ def monitor(self,iteration,individual_df,device_names,incremental_capacity_list,
                 connections_download_realtime.update({keys[i]: float(f"{(download_throughput[i]):.2f}")})
             for i in
range(len(upload_throughput)): connections_upload_realtime.update({keys[i]: float(f"{(upload_throughput[i]):.2f}")}) - time_difference = abs(end_time - datetime.now()) - overall_time_difference=abs(overall_end_time-datetime.now()) - overall_total_hours=overall_time_difference.total_seconds() / 3600 - overall_remaining_minutes=(overall_total_hours % 1) * 60 - timestamp=datetime.now().strftime("%d/%m %I:%M:%S %p") - remaining_minutes_instrf=[str(int(overall_total_hours)) + " hr and " + str(int(overall_remaining_minutes)) + " min" if int(overall_total_hours) != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0] - + time_difference = abs(end_time - datetime.now()) # noqa: F841 + overall_time_difference = abs(overall_end_time-datetime.now()) + overall_total_hours = overall_time_difference.total_seconds() / 3600 + overall_remaining_minutes = (overall_total_hours % 1) * 60 + timestamp = datetime.now().strftime("%d/%m %I:%M:%S %p") + remaining_minutes_instrf = [str(int(overall_total_hours)) + " hr and " + str(int(overall_remaining_minutes)) + " min" if int(overall_total_hours) + != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0] + # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe for i in range(len(download_throughput)): - individual_df_data.extend([download_throughput[i],upload_throughput[i],drop_a_per[i],drop_b_per[i],int(signal_list[i]),link_speed_list[i]]) - + individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i]]) + # Storing Overall throughput data for all devices and also start time, end time, remaining time and status of test running - individual_df_data.extend([round(sum(download_throughput),2),round(sum(upload_throughput),2),sum(drop_a_per),sum(drop_a_per),iteration+1,timestamp,overall_start_time.strftime("%d/%m %I:%M:%S %p"),overall_end_time.strftime("%d/%m %I:%M:%S %p"),remaining_minutes_instrf,', '.join(str(n) for n in incremental_capacity_list),'Running']) - + individual_df_data.extend([round(sum(download_throughput), 2), round(sum(upload_throughput), 2), sum(drop_a_per), sum(drop_a_per), iteration+1, + timestamp, overall_start_time.strftime("%d/%m %I:%M:%S %p"), overall_end_time.strftime("%d/%m %I:%M:%S %p"), remaining_minutes_instrf, + ', '.join(str(n) for n in incremental_capacity_list), 'Running']) + # Append data to individual_df and save to CSV - individual_df.loc[len(individual_df)]=individual_df_data + individual_df.loc[len(individual_df)] = individual_df_data individual_df.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False) # Check if test was stopped by the user @@ -712,11 +754,11 @@ def monitor(self,iteration,individual_df,device_names,incremental_capacity_list, data = json.load(file) if data["status"] != "Running": logger.warning('Test is stopped by the user') - test_stopped_by_user=True + test_stopped_by_user = True break - + # Adjust sleep time based on elapsed time since start - d=datetime.now() + d = datetime.now() if d - start_time <= timedelta(hours=1): time.sleep(5) elif d - start_time > timedelta(hours=1) or d - start_time <= timedelta( @@ -751,57 +793,58 @@ def monitor(self,iteration,individual_df,device_names,incremental_capacity_list, else: # If not using web GUI, sleep based on report timer - individual_df_data=[] + individual_df_data = [] time.sleep(self.report_timer) # Aggregate data from throughput - + for index, key in enumerate(throughput): for i in range(len(throughput[key])): - 
upload[i],download[i],drop_a[i],drop_b[i]=[],[],[],[] - if throughput[key][i][4]!='Run': + upload[i], download[i], drop_a[i], drop_b[i] = [], [], [], [] + if throughput[key][i][4] != 'Run': upload[i].append(0) download[i].append(0) drop_a[i].append(0) drop_b[i].append(0) - + else: upload[i].append(throughput[key][i][1]) download[i].append(throughput[key][i][0]) drop_a[i].append(throughput[key][i][2]) drop_b[i].append(throughput[key][i][3]) - # Calculate average throughput and drop percentages upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in upload] download_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in download] drop_a_per = [float(round(sum(i) / len(i), 2)) for i in drop_a] drop_b_per = [float(round(sum(i) / len(i), 2)) for i in drop_b] - - # Calculate overall time difference and timestamp - timestamp=datetime.now().strftime("%d/%m %I:%M:%S %p") + timestamp = datetime.now().strftime("%d/%m %I:%M:%S %p") # # time_difference = abs(end_time - datetime.now()) - overall_time_difference=abs(overall_end_time-datetime.now()) + overall_time_difference = abs(overall_end_time - datetime.now()) # # total_hours = time_difference.total_seconds() / 3600 - overall_total_hours=overall_time_difference.total_seconds() / 3600 - overall_remaining_minutes=(overall_total_hours % 1) * 60 - remaining_minutes_instrf=[str(int(overall_total_hours)) + " hr and " + str(int(overall_remaining_minutes)) + " min" if int(overall_total_hours) != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0] - + overall_total_hours = overall_time_difference.total_seconds() / 3600 + overall_remaining_minutes = (overall_total_hours % 1) * 60 + remaining_minutes_instrf = [str(int(overall_total_hours)) + " hr and " + str(int(overall_remaining_minutes)) + " min" if int(overall_total_hours) + != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0] + # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe for i in range(len(download_throughput)): - individual_df_data.extend([download_throughput[i],upload_throughput[i],drop_a_per[i],drop_b_per[i],int(signal_list[i]),link_speed_list[i]]) + individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i]]) # Storing Overall throughput data for all devices and also start time, end time, remaining time and status of test running - individual_df_data.extend([round(sum(download_throughput),2),round(sum(upload_throughput),2),sum(drop_a_per),sum(drop_a_per),iteration+1,timestamp,overall_start_time.strftime("%d/%m %I:%M:%S %p"),overall_end_time.strftime("%d/%m %I:%M:%S %p"),remaining_minutes_instrf,', '.join(str(n) for n in incremental_capacity_list),'Running']) - individual_df.loc[len(individual_df)]=individual_df_data + individual_df_data.extend([round(sum(download_throughput), 2), round(sum(upload_throughput), 2), sum(drop_a_per), sum(drop_a_per), iteration+1, + timestamp, overall_start_time.strftime("%d/%m %I:%M:%S %p"), overall_end_time.strftime("%d/%m %I:%M:%S %p"), remaining_minutes_instrf, + ', '.join(str(n) for n in incremental_capacity_list), 'Running']) + + individual_df.loc[len(individual_df)] = individual_df_data individual_df.to_csv('throughput_data.csv', index=False) - + for index, key in enumerate(throughput): for i in range(len(throughput[key])): - upload[i],download[i],drop_a[i],drop_b[i]=[],[],[],[] - if throughput[key][i][4]!='Run': + upload[i], download[i], drop_a[i], drop_b[i] = [], [], [], [] + if 
throughput[key][i][4] != 'Run': upload[i].append(0) download[i].append(0) drop_a[i].append(0) @@ -811,38 +854,39 @@ def monitor(self,iteration,individual_df,device_names,incremental_capacity_list, download[i].append(throughput[key][i][0]) drop_a[i].append(throughput[key][i][2]) drop_b[i].append(throughput[key][i][3]) - - - individual_df_data=[] + + individual_df_data = [] upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in upload] download_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in download] drop_a_per = [float(round(sum(i) / len(i), 2)) for i in drop_a] drop_b_per = [float(round(sum(i) / len(i), 2)) for i in drop_b] - signal_list,channel_list,mode_list,link_speed_list=self.get_signal_and_channel_data(self.input_devices_list) + signal_list, channel_list, mode_list, link_speed_list = self.get_signal_and_channel_data(self.input_devices_list) # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe after test stopped for i in range(len(download_throughput)): - individual_df_data.extend([download_throughput[i],upload_throughput[i],drop_a_per[i],drop_b_per[i],int(signal_list[i]),link_speed_list[i]]) - timestamp=datetime.now().strftime("%d/%m %I:%M:%S %p") + individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i]]) + timestamp = datetime.now().strftime("%d/%m %I:%M:%S %p") + # If it's the last iteration, append final metrics and 'Stopped' status + if iteration+1 == len(incremental_capacity_list): + individual_df_data.extend([round(sum(download_throughput), 2), round(sum(upload_throughput), 2), sum(drop_a_per), sum(drop_a_per), iteration+1, timestamp, + overall_start_time.strftime("%d/%m %I:%M:%S %p"), timestamp, 0, ', '.join(str(n) for n in incremental_capacity_list), 'Stopped']) - # If it's the last iteration, append final metrics and 'Stopped' status - if iteration+1 == len(incremental_capacity_list): - - individual_df_data.extend([round(sum(download_throughput),2),round(sum(upload_throughput),2),sum(drop_a_per),sum(drop_a_per),iteration+1,timestamp,overall_start_time.strftime("%d/%m %I:%M:%S %p"),timestamp,0,', '.join(str(n) for n in incremental_capacity_list),'Stopped']) - # If the test was stopped by the user, append metrics and 'Stopped' status elif test_stopped_by_user: + individual_df_data.extend([round(sum(download_throughput), 2), round(sum(upload_throughput), 2), sum(drop_a_per), sum(drop_a_per), iteration+1, timestamp, + overall_start_time.strftime("%d/%m %I:%M:%S %p"), timestamp, 0, ', '.join(str(n) for n in incremental_capacity_list), 'Stopped']) - individual_df_data.extend([round(sum(download_throughput),2),round(sum(upload_throughput),2),sum(drop_a_per),sum(drop_a_per),iteration+1,timestamp,overall_start_time.strftime("%d/%m %I:%M:%S %p"),timestamp,0,', '.join(str(n) for n in incremental_capacity_list),'Stopped']) - # Otherwise, append metrics and 'Stopped' status with overall end time else: - individual_df_data.extend([round(sum(download_throughput),2),round(sum(upload_throughput),2),sum(drop_a_per),sum(drop_a_per),iteration+1,timestamp,overall_start_time.strftime("%d/%m %I:%M:%S %p"),overall_end_time.strftime("%d/%m %I:%M:%S %p"),remaining_minutes_instrf,', '.join(str(n) for n in incremental_capacity_list),'Stopped']) - individual_df.loc[len(individual_df)]=individual_df_data + individual_df_data.extend([round(sum(download_throughput), 2), round(sum(upload_throughput), 2), sum(drop_a_per), 
sum(drop_a_per), iteration+1, timestamp, + overall_start_time.strftime("%d/%m %I:%M:%S %p"), overall_end_time.strftime("%d/%m %I:%M:%S %p"), remaining_minutes_instrf, + ', '.join(str(n) for n in incremental_capacity_list), 'Stopped']) + + individual_df.loc[len(individual_df)] = individual_df_data # Save individual_df to CSV based on web GUI status - if self.dowebgui : + if self.dowebgui: individual_df.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False) individual_df.to_csv('throughput_data.csv', index=False) else: @@ -852,22 +896,21 @@ def monitor(self,iteration,individual_df,device_names,incremental_capacity_list, keys = list(connections_download.keys()) for i in range(len(download_throughput)): - connections_download.update({keys[i]: float(f"{(download_throughput[i] ):.2f}")}) + connections_download.update({keys[i]: float(f"{(download_throughput[i]):.2f}")}) for i in range(len(upload_throughput)): - connections_upload.update({keys[i]: float(f"{(upload_throughput[i] ):.2f}")}) + connections_upload.update({keys[i]: float(f"{(upload_throughput[i]):.2f}")}) logger.info("connections download {}".format(connections_download)) logger.info("connections upload {}".format(connections_upload)) + return individual_df, test_stopped_by_user - return individual_df,test_stopped_by_user - - def perform_intended_load(self,iteration,incremental_capacity_list): + def perform_intended_load(self, iteration, incremental_capacity_list): """ Configures the intended load for each connection endpoint based on the provided iteration and incremental capacity. """ - + for k in self.cx_profile.created_cx.values(): endp_side_a = { "alias": k[0], @@ -875,7 +918,7 @@ def perform_intended_load(self,iteration,incremental_capacity_list): # # "resource": side_a_resource, # # "port": side_a_info[2], # # "type": endp_type, - "min_rate": int(int(self.cx_profile.side_a_min_bps)/int(incremental_capacity_list[iteration])), + "min_rate": int(int(self.cx_profile.side_a_min_bps) / int(incremental_capacity_list[iteration])), # # "max_rate": int(int(self.cx_profile.side_a_max_bps)/int(incremental_capacity_list[iteration])), # # "min_pkt": self.side_a_min_pdu, # # "max_pkt": self.side_a_max_pdu, @@ -888,149 +931,140 @@ def perform_intended_load(self,iteration,incremental_capacity_list): # # "resource": side_b_resource, # # "port": side_b_info[2], # # "type": endp_type, - "min_rate": int(int(self.cx_profile.side_b_min_bps)/int(incremental_capacity_list[iteration])), + "min_rate": int(int(self.cx_profile.side_b_min_bps) / int(incremental_capacity_list[iteration])), # # "max_rate": int(int(self.cx_profile.side_b_max_bps)/int(incremental_capacity_list[iteration])), # # "min_pkt": self.side_b_min_pdu, # # "max_pkt": self.side_b_max_pdu, # # "ip_port": ip_port_b, # # "multi_conn": self.mconn_B, } - - # POST endpoint configuration for side_a and side_b + + # POST endpoint configuration for side_a and side_b url = "/cli-json/add_endp" self.json_post(_req_url=url, - _data=endp_side_a, - # # debug_=debug_, - # # suppress_related_commands_=suppress_related_commands - ) + _data=endp_side_a, + # debug_=debug_, + # suppress_related_commands_=suppress_related_commands + ) self.json_post(_req_url=url, - _data=endp_side_b, - # # debug_=debug_, - # # suppress_related_commands_=suppress_related_commands - ) - - - + _data=endp_side_b, + # debug_=debug_, + # suppress_related_commands_=suppress_related_commands + ) def check_incremental_list(self): """ Checks and generates a list of incremental capacities for connections. 
- """ - if (len(self.incremental_capacity)==0 and self.do_interopability!=True and self.incremental): - self.incremental_capacity=input("Enter the incremental load to run the test:") + if (len(self.incremental_capacity) == 0 and self.do_interopability is not True and self.incremental): + self.incremental_capacity = input("Enter the incremental load to run the test:") - cx_incremental_capacity_lists=[] - incremental_capacity_list_values=[] - device_list_length=len(self.mac_id_list) + cx_incremental_capacity_lists = [] + incremental_capacity_list_values = [] + device_list_length = len(self.mac_id_list) # Check if 'incremental_capacity' is not specified - if len(self.incremental_capacity)==0: - incremental_capacity_1=[device_list_length] - - elif(device_list_length!=0 and len(self.incremental_capacity.split(","))>0): - device_list_length=len(self.mac_id_list) - incremental_capacity_length=len(self.incremental_capacity.split(",")) + if len(self.incremental_capacity) == 0: + incremental_capacity_1 = [device_list_length] + + elif (device_list_length != 0 and len(self.incremental_capacity.split(",")) > 0): + device_list_length = len(self.mac_id_list) + incremental_capacity_length = len(self.incremental_capacity.split(",")) # Handle single incremental capacity specification - if incremental_capacity_length==1: - temp_incremental_capacity_list=[] - incremental_capacity=int(self.incremental_capacity.split(",")[0]) + if incremental_capacity_length == 1: + temp_incremental_capacity_list = [] + incremental_capacity = int(self.incremental_capacity.split(",")[0]) # Generate incremental capacity list - for i in range(incremental_capacity,device_list_length): + for i in range(incremental_capacity, device_list_length): if i % incremental_capacity == 0: temp_incremental_capacity_list.append(i) - + # Ensure the last capacity covers all devices if device_list_length not in temp_incremental_capacity_list: temp_incremental_capacity_list.append(device_list_length) - incremental_capacity_1=temp_incremental_capacity_list + incremental_capacity_1 = temp_incremental_capacity_list # Handle multiple incremental capacities specification else: - incremental_capacity_1=self.incremental_capacity.split(",") + incremental_capacity_1 = self.incremental_capacity.split(",") # Generate lists of incremental capacities for i in range(len(incremental_capacity_1)): - new_cx_list=[] - if i==0: - x=1 + new_cx_list = [] + if i == 0: + x = 1 else: - x=cx_incremental_capacity_lists[-1][-1]+1 - for j in range(x,int(incremental_capacity_1[i])+1): + x = cx_incremental_capacity_lists[-1][-1] + 1 + for j in range(x, int(incremental_capacity_1[i]) + 1): new_cx_list.append(j) incremental_capacity_list_values.append(new_cx_list[-1]) cx_incremental_capacity_lists.append(new_cx_list) # Check completeness: last capacity list should cover all devices - if incremental_capacity_list_values[-1]==device_list_length: + if incremental_capacity_list_values[-1] == device_list_length: return True else: return False - - - def get_incremental_capacity_list(self): """ - Generates lists of incremental capacities and connection names for the created connections. 
- """ - - cx_incremental_capacity_lists,cx_incremental_capacity_names_lists,incremental_capacity_list_values=[],[],[] + + cx_incremental_capacity_lists, cx_incremental_capacity_names_lists, incremental_capacity_list_values = [], [], [] created_cx_lists_keys = list(self.cx_profile.created_cx.keys()) - device_list_length=len(created_cx_lists_keys) + device_list_length = len(created_cx_lists_keys) # Check if incremental capacity is not provided - if len(self.incremental_capacity)==0: - incremental_capacity_1=[device_list_length] - + if len(self.incremental_capacity) == 0: + incremental_capacity_1 = [device_list_length] + # Check if device list is not empty and incremental capacity is provided - elif(device_list_length!=0 and len(self.incremental_capacity.split(","))>0): - device_list_length=len(created_cx_lists_keys) - incremental_capacity_length=len(self.incremental_capacity.split(",")) + elif (device_list_length != 0 and len(self.incremental_capacity.split(",")) > 0): + device_list_length = len(created_cx_lists_keys) + incremental_capacity_length = len(self.incremental_capacity.split(",")) # Handle case with a single incremental capacity value - if incremental_capacity_length==1: - temp_incremental_capacity_list=[] - incremental_capacity=int(self.incremental_capacity.split(",")[0]) + if incremental_capacity_length == 1: + temp_incremental_capacity_list = [] + incremental_capacity = int(self.incremental_capacity.split(",")[0]) # Calculate increments based on the provided capacity - for i in range(incremental_capacity,device_list_length): + for i in range(incremental_capacity, device_list_length): if i % incremental_capacity == 0: temp_incremental_capacity_list.append(i) # Ensure the device list length itself is included in the increments if device_list_length not in temp_incremental_capacity_list: temp_incremental_capacity_list.append(device_list_length) - incremental_capacity_1=temp_incremental_capacity_list + incremental_capacity_1 = temp_incremental_capacity_list # Handle case with multiple incremental capacity values else: - incremental_capacity_1=self.incremental_capacity.split(",") + incremental_capacity_1 = self.incremental_capacity.split(",") # Generate lists of incremental capacities and connection names for i in range(len(incremental_capacity_1)): - new_cx_list=[] - new_cx_names_list=[] - if i==0: - x=1 + new_cx_list = [] + new_cx_names_list = [] + if i == 0: + x = 1 else: - x=cx_incremental_capacity_lists[-1][-1]+1 + x = cx_incremental_capacity_lists[-1][-1] + 1 # Generate capacity list and corresponding names - for j in range(x,int(incremental_capacity_1[i])+1): + for j in range(x, int(incremental_capacity_1[i]) + 1): new_cx_list.append(j) new_cx_names_list.append(created_cx_lists_keys[j-1]) @@ -1038,308 +1072,326 @@ def get_incremental_capacity_list(self): incremental_capacity_list_values.append(new_cx_list[-1]) cx_incremental_capacity_lists.append(new_cx_list) cx_incremental_capacity_names_lists.append(new_cx_names_list) - return cx_incremental_capacity_names_lists,cx_incremental_capacity_lists,created_cx_lists_keys,incremental_capacity_list_values + return cx_incremental_capacity_names_lists, cx_incremental_capacity_lists, created_cx_lists_keys, incremental_capacity_list_values - def generate_report(self,iterations_before_test_stopped_by_user,incremental_capacity_list,data=None,data1=None,report_path='',result_dir_name='Throughput_Test_report', + def generate_report(self, iterations_before_test_stopped_by_user, incremental_capacity_list, data=None, data1=None, report_path='', 
result_dir_name='Throughput_Test_report',
                         selected_real_clients_names=None):
+        '''
+        Generate a report for the throughput test.
+        '''
         if self.do_interopability:
-            result_dir_name="Interopability_Test_report"
+            result_dir_name = "Interopability_Test_report"
         self.ssid_list = self.get_ssid_list(self.input_devices_list)
-        self.signal_list,self.channel_list,self.mode_list,self.link_speed_list=self.get_signal_and_channel_data(self.input_devices_list)
+        self.signal_list, self.channel_list, self.mode_list, self.link_speed_list = self.get_signal_and_channel_data(self.input_devices_list)
         if selected_real_clients_names is not None:
             self.num_stations = selected_real_clients_names
-        # Initialize the report object
-        if self.do_interopability==False:
+        # Initialize the report object
+        if self.do_interopability is False:
             report = lf_report(_output_pdf="throughput.pdf", _output_html="throughput.html", _path=report_path,
-                           _results_dir_name=result_dir_name)
+                               _results_dir_name=result_dir_name)
         report_path = report.get_path()
         report_path_date_time = report.get_path_date_time()
         # df.to_csv(os.path.join(report_path_date_time, 'throughput_data.csv'))
-        shutil.move('throughput_data.csv',report_path_date_time)
+        shutil.move('throughput_data.csv', report_path_date_time)
         logger.info("path: {}".format(report_path))
         logger.info("path_date_time: {}".format(report_path_date_time))
         report.set_title("Throughput Test")
         report.build_banner()
-
+
         # objective title and description
         report.set_obj_html(_obj_title="Objective",
-                            _obj="The Candela Client Capacity test is designed to measure an Access Point’s client capacity and performance when handling different amounts of Real clients like android, Linux,"
-                            " windows, and IOS. The test allows the user to increase the number of clients in user-defined steps for each test iteration and measure the per client and the overall throughput for"
-                            " this test, we aim to assess the capacity of network to handle high volumes of traffic while"
-                            " each trial. Along with throughput other measurements made are client connection times, Station 4-Way Handshake time, DHCP times, and more. The expected behavior is for the"
-                            " AP to be able to handle several stations (within the limitations of the AP specs) and make sure all Clients get a fair amount of airtime both upstream and downstream. An AP that"
-                            "scales well will not show a significant overall throughput decrease as more Real clients are added.")
+                            _obj="The Candela Client Capacity test is designed to measure an Access Point’s client capacity and performance when handling"
+                                 " different numbers of real clients such as Android, Linux, Windows, and iOS. The test allows the user to increase the"
+                                 " number of clients in user-defined steps for each test iteration and measures the per-client and the overall throughput"
+                                 " for each trial. In this test, we aim to assess the capacity of the network to handle high volumes of traffic."
+                                 " Along with throughput, other measurements made are client connection times, Station 4-Way Handshake time, DHCP times,"
+                                 " and more. The expected behavior is for the AP to be able to handle several stations (within the limitations of the AP"
+                                 " specs) and make sure all clients get a fair amount of airtime both upstream and downstream. An AP that scales well"
+                                 " will not show a significant overall throughput decrease as more real clients are added.")
         report.build_objective()
         report.set_obj_html(_obj_title="Input Parameters",
                             _obj="The below tables provides the input parameters for the test")
         report.build_objective()

         # Initialize counts and lists for device types
-        android_devices,windows_devices,linux_devices,ios_devices=0,0,0,0
-        all_devices_names=[]
-        device_type=[]
-        packet_size_text=''
-        total_devices=""
-        if self.cx_profile.side_a_min_pdu==-1:
-            packet_size_text='AUTO'
+        android_devices, windows_devices, linux_devices, ios_devices = 0, 0, 0, 0
+        all_devices_names = []
+        device_type = []
+        packet_size_text = ''
+        total_devices = ""
+        if self.cx_profile.side_a_min_pdu == -1:
+            packet_size_text = 'AUTO'
         else:
-            packet_size_text=str(self.cx_profile.side_a_min_pdu)+' Bytes'
+            packet_size_text = str(self.cx_profile.side_a_min_pdu) + ' Bytes'

         # Determine load type name based on self.load_type
-        if self.load_type=="wc_intended_load":
-            load_type_name="Intended Load"
+        if self.load_type == "wc_intended_load":
+            load_type_name = "Intended Load"
         else:
-            load_type_name="Per Client Load"
+            load_type_name = "Per Client Load"

         for i in self.real_client_list:
-            split_device_name=i.split(" ")
+            split_device_name = i.split(" ")
             if 'android' in split_device_name:
-                all_devices_names.append(split_device_name[2] + ("(Android)") )
+                all_devices_names.append(split_device_name[2] + ("(Android)"))
                 device_type.append("Android")
-                android_devices+=1
+                android_devices += 1
             elif 'Win' in split_device_name:
                 all_devices_names.append(split_device_name[2] + ("(Windows)"))
                 device_type.append("Windows")
-                windows_devices+=1
+                windows_devices += 1
             elif 'Lin' in split_device_name:
                 all_devices_names.append(split_device_name[2] + ("(Linux)"))
                 device_type.append("Linux")
-                linux_devices+=1
+                linux_devices += 1
             elif 'Mac' in split_device_name:
                 all_devices_names.append(split_device_name[2] + ("(Mac)"))
                 device_type.append("Mac")
-                ios_devices+=1
+                ios_devices += 1

         # Build total_devices string based on counts
-        if android_devices>0:
-            total_devices+= f" Android({android_devices})"
-        if windows_devices>0:
-            total_devices+= f" Windows({windows_devices})"
-        if linux_devices>0:
-            total_devices+= f" Linux({linux_devices})"
-        if ios_devices>0:
-            total_devices+= f" IOS({ios_devices})"
+        if android_devices > 0:
+            total_devices += f" Android({android_devices})"
+        if windows_devices > 0:
+            total_devices += f" Windows({windows_devices})"
+        if linux_devices > 0:
+            total_devices += f" Linux({linux_devices})"
+        if ios_devices > 0:
+            total_devices += f" IOS({ios_devices})"

         # Determine incremental_capacity_data based on self.incremental_capacity
-        if len(self.incremental_capacity)==1:
-            incremental_capacity_data=str(self.incremental_capacity[0])
-        elif(len(self.incremental_capacity)>1):
-            self.incremental_capacity=self.incremental_capacity.split(',')
-            incremental_capacity_data=', '.join(self.incremental_capacity)
+        if len(self.incremental_capacity) == 1:
+            incremental_capacity_data = str(self.incremental_capacity[0])
+        elif (len(self.incremental_capacity) > 1):
+            self.incremental_capacity = self.incremental_capacity.split(',')
+            incremental_capacity_data = ', '.join(self.incremental_capacity)
         else:
-            incremental_capacity_data="None"
-
+            incremental_capacity_data = "None"
+
         # Construct test_setup_info dictionary for test setup table
         test_setup_info = {
-            "Test name" : self.test_name,
-            "Device List": ", ".join(all_devices_names),
-            "No of Devices": "Total"+
f"({str(self.num_stations)})" + total_devices, - "Increment":incremental_capacity_data, - "Traffic Duration in minutes" : round(int(self.test_duration)*len(incremental_capacity_list)/60,2), - "Traffic Type" : (self.traffic_type.strip("lf_")).upper(), - "Traffic Direction" : self.direction, - "Upload Rate(Mbps)" : str(round(int(self.cx_profile.side_a_min_bps)/1000000,2)) + "Mbps", - "Download Rate(Mbps)" : str(round(int(self.cx_profile.side_b_min_bps)/1000000,2)) + "Mbps", - "Load Type" : load_type_name, - "Packet Size" : packet_size_text - } + "Test name": self.test_name, + "Device List": ", ".join(all_devices_names), + "No of Devices": "Total" + f"({str(self.num_stations)})" + total_devices, + "Increment": incremental_capacity_data, + "Traffic Duration in minutes": round(int(self.test_duration) * len(incremental_capacity_list) / 60, 2), + "Traffic Type": (self.traffic_type.strip("lf_")).upper(), + "Traffic Direction": self.direction, + "Upload Rate(Mbps)": str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps", + "Download Rate(Mbps)": str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps", + "Load Type": load_type_name, + "Packet Size": packet_size_text + } report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration") - + # Loop through iterations and build graphs, tables for each iteration for i in range(len(iterations_before_test_stopped_by_user)): # rssi_signal_data=[] - devices_on_running=[] - download_data=[] - upload_data=[] - devices_data_to_create_bar_graph=[] + devices_on_running = [] + download_data = [] + upload_data = [] + devices_data_to_create_bar_graph = [] # signal_data=[] - direction_in_table=[] - packet_size_in_table=[] - upload_list,download_list=[],[] - rssi_data=[] - data_iter=data[data['Iteration']==i+1] - + direction_in_table = [] + packet_size_in_table = [] + upload_list, download_list = [], [] + rssi_data = [] + data_iter = data[data['Iteration'] == i + 1] + # for sig in self.signal_list[0:int(incremental_capacity_list[i])]: # signal_data.append(int(sig)*(-1)) # rssi_signal_data.append(signal_data) - # Fetch devices_on_running from real_client_list + # Fetch devices_on_running from real_client_list for j in range(data1[i][-1]): devices_on_running.append(self.real_client_list[j].split(" ")[-1]) # Fetch download_data and upload_data based on load_type and direction - for k in devices_on_running: + for k in devices_on_running: # individual_device_data=[] # Checking individual device download and upload rate by searching device name in dataframe columns_with_substring = [col for col in data_iter.columns if k in col] filtered_df = data_iter[columns_with_substring] - if self.load_type=="wc_intended_load": - if self.direction=="Bi-direction": + if self.load_type == "wc_intended_load": + if self.direction == "Bi-direction": # Append download and upload data from filtered dataframe - download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1]) - upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1]) + download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1]) + upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1]) + + rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) / + len(filtered_df[[col for col in filtered_df.columns if 
"RSSI" in col][0]].values.tolist()), 2))*-1) - rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1) # Calculate and append upload and download throughput to lists - upload_list.append(str(round((int(self.cx_profile.side_a_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - download_list.append(str(round((int(self.cx_profile.side_b_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - if self.cx_profile.side_a_min_pdu==-1: + upload_list.append(str(round((int(self.cx_profile.side_a_min_bps)/1000000)/int(incremental_capacity_list[i]), 2)) + "Mbps") + download_list.append(str(round((int(self.cx_profile.side_b_min_bps)/1000000)/int(incremental_capacity_list[i]), 2)) + "Mbps") + if self.cx_profile.side_a_min_pdu == -1: packet_size_in_table.append('AUTO') else: packet_size_in_table.append(self.cx_profile.side_a_min_pdu) direction_in_table.append(self.direction) - elif self.direction=='Download': + elif self.direction == 'Download': # Append download data from filtered dataframe - download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1]) + download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1]) # Append 0 for upload data upload_data.append(0) - rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1) + rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) / + len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1) # Calculate and append upload and download throughput to lists - upload_list.append(str(round((int(self.cx_profile.side_a_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - download_list.append(str(round((int(self.cx_profile.side_b_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - if self.cx_profile.side_a_min_pdu==-1: + upload_list.append(str(round((int(self.cx_profile.side_a_min_bps)/1000000)/int(incremental_capacity_list[i]), 2)) + "Mbps") + download_list.append(str(round((int(self.cx_profile.side_b_min_bps)/1000000)/int(incremental_capacity_list[i]), 2)) + "Mbps") + if self.cx_profile.side_a_min_pdu == -1: packet_size_in_table.append('AUTO') else: packet_size_in_table.append(self.cx_profile.side_a_min_pdu) direction_in_table.append(self.direction) - elif self.direction=='Upload': + elif self.direction == 'Upload': # Calculate and append upload and download throughput to lists - upload_list.append(str(round((int(self.cx_profile.side_a_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - download_list.append(str(round((int(self.cx_profile.side_b_min_bps)/1000000)/int(incremental_capacity_list[i]),2)) + "Mbps") - - rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1) + upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps") + download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / 
-
+
+                else:
-
-                    if self.direction=="Bi-direction":
+
+                    if self.direction == "Bi-direction":
                         # Append download and upload data from filtered dataframe
-                        download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1])
-                        upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
-                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1)
+                        download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1])
+                        upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
+                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+                                                   len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2))*-1)
                         # Calculate and append upload and download throughput to lists
-                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps)/1000000,2)) + "Mbps")
-                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps)/1000000,2)) + "Mbps")
+                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
+                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
-                        if self.cx_profile.side_a_min_pdu==-1:
+                        if self.cx_profile.side_a_min_pdu == -1:
                             packet_size_in_table.append('AUTO')
                         else:
                             packet_size_in_table.append(self.cx_profile.side_a_min_pdu)
                         direction_in_table.append(self.direction)
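Under "Intended Load" (handled above) the configured side-A/side-B rate is treated as an aggregate and divided by the number of clients running in the iteration, while the "Per Client Load" branches here report the full configured rate for every client. The arithmetic, worked with placeholder values:

    # Intended-load division sketch; the rate and client count are examples.
    side_b_min_bps = 1000000            # configured download rate in bps
    clients_running = 4                 # int(incremental_capacity_list[i])
    per_client = round((int(side_b_min_bps) / 1000000) / clients_running, 2)
    print(str(per_client) + "Mbps")     # 0.25Mbps each under intended load
    # Per-client load would instead report 1.0Mbps for each of the four clients.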
-                    elif self.direction=='Download':
+                    elif self.direction == 'Download':
                         # Append download data from filtered dataframe
-                        download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1])
+                        download_data.append(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[-1])
                         # Append 0 for upload data
                         upload_data.append(0)
-                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1)
+                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+                                                   len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2))*-1)
                         # Calculate and append upload and download throughput to lists
-                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps)/1000000,2)) + "Mbps")
-                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps)/1000000,2)) + "Mbps")
+                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
+                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
-                        if self.cx_profile.side_a_min_pdu==-1:
+                        if self.cx_profile.side_a_min_pdu == -1:
                             packet_size_in_table.append('AUTO')
                         else:
                             packet_size_in_table.append(self.cx_profile.side_a_min_pdu)
                         direction_in_table.append(self.direction)
-                    elif self.direction=='Upload':
+                    elif self.direction == 'Upload':
                         # Calculate and append upload and download throughput to lists
-                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps)/1000000,2)) + "Mbps")
-                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps)/1000000,2)) + "Mbps")
-                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist())/len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()),2))*-1)
+                        upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
+                        download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+                        rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+                                                   len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
                         # Append upload data from filtered dataframe
-                        upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
+                        upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
                         # Append 0 for download data
                         download_data.append(0)
-                        if self.cx_profile.side_a_min_pdu==-1:
+                        if self.cx_profile.side_a_min_pdu == -1:
                             packet_size_in_table.append('AUTO')
                         else:
                             packet_size_in_table.append(self.cx_profile.side_a_min_pdu)
                         direction_in_table.append(self.direction)
-            data_set_in_graph,trimmed_data_set_in_graph=[],[]
+            data_set_in_graph, trimmed_data_set_in_graph = [], []
             # Depending on the test direction, retrieve corresponding throughput data,
             # organize it into datasets for graphing, and calculate real-time average throughput values accordingly.
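One caveat about the real_time_data strings assembled just below: a backslash continuation inside an f-string keeps the literal open, so the indentation of each continuation line is embedded in the rendered text as a run of spaces. If that padding is unwanted, implicit concatenation of adjacent literals avoids it; a sketch with stand-in averages:

    # Implicit-concatenation alternative; the averages are placeholders.
    download_avg, upload_avg = 42.5, 17.3
    real_time_data = (
        "Real Time Throughput: Average achieved Throughput: "
        f"Download : {download_avg} Mbps, "
        f"Upload : {upload_avg} Mbps"
    )
    print(real_time_data)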
- if self.direction=="Bi-direction": - download_values_list=data['Overall Download'][data['Iteration']==i+1].values.tolist() - upload_values_list=data['Overall Upload'][data['Iteration']==i+1].values.tolist() + if self.direction == "Bi-direction": + download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist() + upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist() data_set_in_graph.append(download_values_list) data_set_in_graph.append(upload_values_list) devices_data_to_create_bar_graph.append(download_data) devices_data_to_create_bar_graph.append(upload_data) - label_data=['Download','Upload'] - real_time_data=f"Real Time Throughput: Average achieved Throughput: Download : {round(((sum(download_data[0:int(incremental_capacity_list[i])]))/len(download_data[0:int(incremental_capacity_list[i])])),2)} Mbps, Upload : {round((sum(upload_data[0:int(incremental_capacity_list[i])])/len(upload_data[0:int(incremental_capacity_list[i])])),2)} Mbps" - - elif self.direction=='Download': - download_values_list=data['Overall Download'][data['Iteration']==i+1].values.tolist() + label_data = ['Download', 'Upload'] + real_time_data = f"Real Time Throughput: Average achieved Throughput: Download : \ + {round(((sum(download_data[0:int(incremental_capacity_list[i])])) / len(download_data[0:int(incremental_capacity_list[i])])), 2)} Mbps, Upload : \ + {round((sum(upload_data[0:int(incremental_capacity_list[i])]) / len(upload_data[0:int(incremental_capacity_list[i])])), 2)} Mbps" + + elif self.direction == 'Download': + download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist() data_set_in_graph.append(download_values_list) devices_data_to_create_bar_graph.append(download_data) - label_data=['Download'] - real_time_data=f"Real Time Throughput: Average achieved Throughput: Download : {round(((sum(download_data[0:int(incremental_capacity_list[i])]))/len(download_data[0:int(incremental_capacity_list[i])])),2)} Mbps" - - elif self.direction=='Upload': - upload_values_list=data['Overall Upload'][data['Iteration']==i+1].values.tolist() + label_data = ['Download'] + real_time_data = f"Real Time Throughput: Average achieved Throughput: Download : \ + {round(((sum(download_data[0:int(incremental_capacity_list[i])])) / len(download_data[0:int(incremental_capacity_list[i])])), 2)} Mbps" + + elif self.direction == 'Upload': + upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist() data_set_in_graph.append(upload_values_list) devices_data_to_create_bar_graph.append(upload_data) - label_data=['Upload'] - real_time_data=f"Real Time Throughput: Average achieved Throughput: Upload : {round((sum(upload_data[0:int(incremental_capacity_list[i])])/len(upload_data[0:int(incremental_capacity_list[i])])),2)} Mbps" - - if len(incremental_capacity_list)>1: - report.set_custom_html(f"