Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
136 changes: 136 additions & 0 deletions scripts/dumper/axi_elaborate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
import pandas as pd
from axi_utils import INT_FIELDS, raise_exception

class AXIElaborate:
    """
    Processes and analyzes AXI transaction DataFrames.

    Responsibilities:
    - Resolve per-beat write addresses for W entries from their preceding
      AW entry (per source file / interface).
    - Filter transactions by type and arbitrary field values.
    """

    def __init__(self, df):
        # DataFrame of parsed AXI transactions; expected columns include
        # 'type', 'addr', 'size', 'last', 'source_file' and 'time'.
        self.df = df

    def resolve_write_addresses(self):
        """
        Resolves write addresses for W transactions using preceding AW entries.
        Groups by source_file to avoid cross-contamination between interfaces.

        Returns:
            pd.DataFrame: DataFrame with resolved addresses in W entries.
        """
        resolved_dfs = [
            self.resolve_write_addresses_per_interface(group)
            for _, group in self.df.groupby('source_file', sort=False)
        ]

        # pd.concat raises ValueError on an empty list; an empty input
        # DataFrame simply has nothing to resolve, so leave it unchanged.
        if resolved_dfs:
            self.df = pd.concat(resolved_dfs, ignore_index=True)
        return self.df


    def resolve_write_addresses_per_interface(self, df):
        """
        Resolves and fills in missing 'addr' fields for W transactions based
        on the most recent AW entry within the same file (interface).

        Parameters:
            df (pd.DataFrame): Subset of AXI transactions from a single source file.

        Returns:
            pd.DataFrame: Updated copy of the DataFrame with resolved addresses.

        Raises:
            RuntimeError: If a W entry appears without a preceding AW entry.
        """
        current_aw = None
        w_counter = 0
        df = df.copy()

        for idx, row in df.iterrows():
            if row.get('type') == 'AW':
                # New write burst: remember base address and transfer size,
                # and restart the beat counter.
                current_aw = {'addr': row.get('addr'), 'size': row.get('size')}
                w_counter = 0

            elif row.get('type') == 'W':
                if current_aw is None:
                    # Orphan W beat: never returns (raise_exception raises).
                    raise_exception(RuntimeError,
                        f"[{row.get('source_file')}] Found W entry without preceding AW at {row.get('time')} ps")

                # AXI 'size' encodes log2(bytes per beat); each beat of an
                # incrementing burst advances the address by that amount.
                bytes_per_transfer = 1 << int(current_aw['size'])
                df.at[idx, 'addr'] = current_aw['addr'] + w_counter * bytes_per_transfer
                w_counter += 1

                if row.get('last') == 1:
                    # Burst complete; the next W must follow a fresh AW.
                    current_aw = None
                    w_counter = 0

        return df


    def apply_field_filters(self, df_subset, **kwargs):
        """
        Helper method to apply key-value equality filters to a DataFrame subset.
        Hex strings (e.g. '0x70000000') are automatically converted to integers.

        Parameters:
            df_subset (pd.DataFrame): DataFrame to filter.
            kwargs: Field filters, one column name per keyword.

        Returns:
            pd.DataFrame: Filtered DataFrame with a reset index.

        Raises:
            ValueError: If a column is missing or a hex value is malformed.
        """
        for key, val in kwargs.items():
            if key not in df_subset.columns:
                raise_exception(ValueError, f"Column '{key}' not found in DataFrame.")

            if isinstance(val, str) and val.startswith('0x'):
                try:
                    val = int(val, 16)
                except ValueError:
                    raise_exception(ValueError, f"Invalid hex value for {key}: '{val}'")

            df_subset = df_subset[df_subset[key] == val]

        return df_subset.reset_index(drop=True)


    def select_aw(self, **kwargs):
        """
        Selects AW transactions which match field filters.

        Returns:
            pd.DataFrame: Filtered AW transactions.
        """
        df_aw = self.df[self.df['type'] == 'AW']
        return self.apply_field_filters(df_aw, **kwargs)


    def select_w(self, **kwargs):
        """
        Selects W transactions which match field filters.

        Returns:
            pd.DataFrame: Filtered W transactions.
        """
        df_w = self.df[self.df['type'] == 'W']
        return self.apply_field_filters(df_w, **kwargs)


    def filter_transactions(self, tx_type, **kwargs):
        """
        Filters transactions by type and additional field criteria.

        Parameters:
            tx_type (str): Transaction type ('AW', 'W'); case-insensitive.
            kwargs: Additional field filters (e.g., addr='0x70000000').

        Returns:
            pd.DataFrame: Filtered subset of transactions.

        Raises:
            ValueError: For unsupported transaction types.
        """
        tx_type = tx_type.upper()

        if tx_type == "AW":
            return self.select_aw(**kwargs)
        elif tx_type == "W":
            return self.select_w(**kwargs)
        else:
            raise_exception(ValueError, f"Unsupported transaction type '{tx_type}'")

73 changes: 73 additions & 0 deletions scripts/dumper/axi_parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
from pathlib import Path
import pandas as pd
import json
import re
from axi_utils import INT_FIELDS, raise_exception, cprint, gprint, rprint

class AXIParser:
    """
    Parses raw AXI log files into structured pandas DataFrames.
    """
    def __init__(self):
        pass


    def clean_and_parse(self, line):
        """
        Cleans one raw AXI log line and decodes it into a dict.
        """

        cleaned = line.strip()
        cleaned = re.sub(r',\s*}', '}', cleaned)                   # drop trailing comma before '}'
        cleaned = re.sub(r"(0x[\da-fA-FxX]+)", r'"\1"', cleaned)   # quote bare hex literals
        return json.loads(cleaned.replace("'", '"'))


    def parse_axi_dump(self, file_path):
        """
        Parses a single AXI log file and returns a DataFrame of all transaction
        entries. Known integer fields are converted; all others are stringified.
        Lines that fail to parse are reported and skipped.
        """

        def convert(field, value):
            # Integer fields may arrive as hex strings or plain numbers.
            if field in INT_FIELDS:
                if isinstance(value, str) and value.startswith('0x'):
                    return int(value, 16)
                return int(value)
            return str(value)

        entries = []
        cprint(f"Parsing {file_path.name}")
        with open(file_path, 'r') as file:
            for raw in file:
                try:
                    record = self.clean_and_parse(raw)
                    entries.append({k: convert(k, v) for k, v in record.items()})
                except Exception as e:
                    rprint(f"Error parsing line:\n{raw.strip()}\n -> {e}")
        return pd.DataFrame(entries)

    def collect_df(self, directory, pattern="axi_trace_mem_tile_*.log"):
        """
        Collects all matching AXI log files from a directory into one combined
        DataFrame, tagged per-row with its source file and sorted by
        (source_file, time).
        """

        directory = Path(directory)
        frames = []

        for file_path in sorted(directory.glob(pattern)):
            frame = self.parse_axi_dump(file_path)
            frame['source_file'] = file_path.name
            frames.append(frame)

        if not frames:
            raise_exception(FileNotFoundError, f"No AXI log files found in '{directory}' matching '{pattern}'")

        combined = pd.concat(frames, ignore_index=True)

        # Put traceability columns first for readability.
        if 'source_file' in combined.columns and 'time' in combined.columns:
            leading = ['source_file', 'time']
            combined = combined[leading + [c for c in combined.columns if c not in ('source_file', 'time')]]

        combined.sort_values(by=['source_file', 'time'], inplace=True)
        return combined
43 changes: 43 additions & 0 deletions scripts/dumper/axi_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import re
import pandas as pd

# AXI log fields whose values are parsed and stored as integers
# (all other fields are kept as strings).
INT_FIELDS = {'addr', 'atop', 'burst', 'cache', 'id', 'len', 'size'}

def print_rgb(message: str):
    """
    Print *message* with \\gb{...}, \\rb{...}, \\yb{...}, \\cb{...} markers
    rendered as ANSI bold green/red/yellow/cyan text.
    """
    # (marker pattern, ANSI bold-colour replacement) pairs.
    markers = (
        (r"\\gb\{(.*?)\}", "\033[1;32m\\1\033[0m"),
        (r"\\rb\{(.*?)\}", "\033[1;31m\\1\033[0m"),
        (r"\\yb\{(.*?)\}", "\033[1;33m\\1\033[0m"),
        (r"\\cb\{(.*?)\}", "\033[1;36m\\1\033[0m"),
    )
    rendered = message
    for pattern, ansi in markers:
        rendered = re.sub(pattern, ansi, rendered)
    print(rendered)

def gprint(msg):
    """Print *msg* wrapped in the bold-green marker."""
    print_rgb(f"\\gb{{{msg}}}")


def rprint(msg):
    """Print *msg* wrapped in the bold-red marker."""
    print_rgb(f"\\rb{{{msg}}}")


def yprint(msg):
    """Print *msg* wrapped in the bold-yellow marker."""
    print_rgb(f"\\yb{{{msg}}}")


def cprint(msg):
    """Print *msg* wrapped in the bold-cyan marker."""
    print_rgb(f"\\cb{{{msg}}}")

def raise_exception(extype: type[Exception], msg: str):
    """
    Print *msg* in bold red (via rprint), then raise ``extype(msg)``.

    Parameters:
        extype: Exception class to instantiate and raise.
        msg: Message printed and passed to the exception constructor.

    This function never returns normally.
    """
    rprint(msg)
    raise extype(msg)

def format_for_display(df):
    """
    Return a copy of *df* with the integer AXI fields rendered as
    lowercase hex strings; null cells become the literal string "None".
    """
    formatted = df.copy()

    def to_hex(value):
        # pd.notnull guards against both None and NaN cells.
        return f"0x{int(value):x}" if pd.notnull(value) else "None"

    for column in INT_FIELDS:
        if column in formatted.columns:
            formatted[column] = formatted[column].apply(to_hex)
    return formatted

def save_to_csv(df, file_name):
    """
    Write the given DataFrame to *file_name* as CSV, omitting the index column.
    """
    df.to_csv(file_name, index=False)
56 changes: 56 additions & 0 deletions scripts/dumper/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#!/usr/bin/env python3

import sys
import argparse
from pathlib import Path

# Automatically add parent dir of 'util/' to sys.path
sys.path.append(str(Path(__file__).resolve().parents[1]))

from axi_parser import AXIParser
from axi_elaborate import AXIElaborate
import axi_utils

def parse_arguments():
    """
    Build the command-line interface and parse sys.argv.

    Returns:
        argparse.Namespace: Holds the --type, --addr and --logdir values.
    """
    cli = argparse.ArgumentParser(
        description="Filter and analyze AXI transactions from log files."
    )
    # (flags, options) specification for each supported argument.
    arg_specs = [
        (("--type", "-t"),
         dict(type=str, required=True,
              help="Transaction type to filter (e.g., AW, W, AR, etc.)")),
        (("--addr", "-a"),
         dict(type=str, required=True,
              help="Address to filter for (e.g., 0x7000b080)")),
        (("--logdir", "-l"),
         dict(type=str, default="axi_log/",
              help="Directory containing AXI log files (default: axi_log/)")),
    ]
    for flags, options in arg_specs:
        cli.add_argument(*flags, **options)
    return cli.parse_args()


def main():
    """
    Entry point: parse CLI arguments, load the AXI logs, resolve write
    addresses, filter by type/address, and dump results to CSV files.
    """
    args = parse_arguments()

    parser = AXIParser()
    df = parser.collect_df(args.logdir)

    elab = AXIElaborate(df)

    # Snapshot of the raw combined trace, written before address resolution.
    axi_utils.save_to_csv(df, "elab.csv")
    df = elab.resolve_write_addresses()

    df_filtered = elab.filter_transactions(args.type, addr=args.addr)
    axi_utils.save_to_csv(axi_utils.format_for_display(df_filtered), "write_trans.csv")


# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()