2 changes: 2 additions & 0 deletions .gitignore
@@ -1,6 +1,8 @@
# file types
*.pdf
*.npz
*.npy
*.json
*~

# folders
202 changes: 202 additions & 0 deletions lab_analysis/MatrixCvG_allPixels.py
@@ -0,0 +1,202 @@
import numpy as np
from scipy.optimize import curve_fit
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import mplhep as hep
import argparse
import json

# import MatrixVTH.linear_func as linear_func
from scipy.stats import norm

hep.style.use("ATLAS")

from SmartPixStyle import *
from Analyze import inspectPath

# Linear function used for the threshold-vs-VTH fits
def linear_func(x, a, b):
    return a * x + b


# Argument parser
parser = argparse.ArgumentParser(description='Compute the conversion gain (CvG) for all pixels from S-curve data.')
parser.add_argument("-i", '--inFilePath', type=str, required=True, help='Input file path')
parser.add_argument("-o", '--outDir', type=str, default=None, help='Output directory (defaults to the input file directory)')
args = parser.parse_args()

# Load data and info
inData = np.load(args.inFilePath)
features = inData["features"]

# get information
info = inspectPath(os.path.dirname(args.inFilePath))
print(info)

# get output directory
outDir = args.outDir if args.outDir else os.path.dirname(args.inFilePath)
os.makedirs(outDir, exist_ok=True)
# os.chmod(outDir, mode=0o777)
print("Computing CvG for all pixels and bit across all settings...")
store_CvG = []
store_VthOff = []

bit_VthOff = {0: [], 1: [], 2: []}
bit_CvG = {0: [], 1: [], 2: []}

nSettings, nPixels, nBits, nFeatures = features.shape

# Initialize a list to store the data
fit_data = []

for iB in range(nBits):
    for iP in range(nPixels):
        x = []
        y = []
        for iS in range(nSettings):
            vth = features[iS, iP, iB, 1]
            value = features[iS, iP, iB, 2]  # 50% electron value
            if value > 0 and vth > 0:
                x.append(vth)
                y.append(value)
        x = np.array(x)
        y = np.array(y)
        if len(x) >= 2:
            try:
                mask = y > 0
                linearRegion = x[mask] > 0.03  # restrict the fit to the linear region above 0.03 V (earlier cut: 0.05)
                popt, pcov = curve_fit(linear_func, x[mask][linearRegion], y[mask][linearRegion])
                print(x[mask][linearRegion], y[mask][linearRegion])
                a, b = popt
                CvG = 1 / a * 1e6  # slope a is in e-/V, so 1/a scaled by 1e6 gives the conversion gain in µV/e⁻
                vth_offset = -b / a  # V
                if CvG > 0:
                    bit_CvG[iB].append(CvG)
                    bit_VthOff[iB].append(vth_offset)  # V
                    store_CvG.append((iP, iB, CvG))
                    store_VthOff.append((iP, iB, vth_offset))

                    # Save the data for this pixel and bit
                    fit_data.append({
                        "pixel": iP,
                        "bit": iB,
                        "x": x[mask][linearRegion].tolist(),
                        "y": y[mask][linearRegion].tolist(),
                        "CvG": CvG,
                        "vth_offset": vth_offset
                    })
            except RuntimeError:
                continue

# Save the fit data to a JSON file
with open(os.path.join(outDir, 'fit_data.json'), 'w') as json_file:
    json.dump(fit_data, json_file, indent=4)

# Read the fit data back from the JSON file
with open(os.path.join(outDir, 'fit_data.json'), 'r') as json_file:
    fit_data = json.load(json_file)

# Iterate over each bit and plot the data
for iB in range(nBits):
    x_all = []
    y_all = []

    # Collect x and y data for all pixels for the current bit
    for entry in fit_data:
        if entry['bit'] == iB:
            x_all.extend(entry['x'])
            y_all.extend(entry['y'])

    # Convert to numpy arrays
    x_all = np.array(x_all)
    y_all = np.array(y_all)

    # Perform a linear fit
    popt, _ = curve_fit(linear_func, x_all, y_all)
    a, b = popt

    # Generate fitted line
    x_fit = np.linspace(min(x_all), max(x_all), 100)
    y_fit = linear_func(x_fit, a, b)

    # Plot the data and the fit
    plt.figure()
    plt.scatter(x_all, y_all, label='Data', color='blue', alpha=0.6, s=10)
    plt.plot(x_fit, y_fit, label=f'Fit: y = {a:.2f}x + {b:.2f}', color='red')
    plt.xlim(0.02, 0.09)
    plt.title(f'Bit {iB}')
    plt.xlabel('Vth [V]')
    plt.ylabel('S-curve half max [e-]')
    plt.legend()
    plt.grid()
    # Display fit parameters on the plot with a border at the bottom-right
    plt.text(0.95, 0.05, f'Threshold [e-] = {a:.2f} * V$_{{TH}}$ [V] + {b:.2f}', transform=plt.gca().transAxes,
             fontsize=10, verticalalignment='bottom', horizontalalignment='right',
             bbox=dict(edgecolor='black', facecolor='none', boxstyle='round,pad=0.5'))
    plt.savefig(os.path.join(outDir, f'CvG_AllPixelsFit_Bit_{iB}.png'))

# # ====== CvG Histogram Plotting ======
# fig, axs = plt.subplots(1, 3, figsize=(18, 5))
# for iB in range(nBits):
# vals = np.array(bit_CvG[iB])
# if len(vals) == 0:
# continue
# mu, std = norm.fit(vals)
# axs[iB].hist(vals, bins=30, color='skyblue', edgecolor='black', alpha=0.7)
# axs[iB].set_title(f'Bit {iB}: μ = {mu:.2f} µV/e⁻, σ = {std:.2f}')
# axs[iB].set_xlabel("CvG [µV/e⁻]")
# axs[iB].set_ylabel("Count")
# axs[iB].grid(True)
# plt.tight_layout()
# plt.savefig(os.path.join(outDir, "CvG_Histograms_PerBit.pdf"))
# plt.close()

# fig, axs = plt.subplots(1, 3, figsize=(18, 5))
# for iB in range(nBits):
# vals = np.array(bit_VthOff[iB])
# if len(vals) == 0:
# continue
# mu, std = norm.fit(vals)
# axs[iB].hist(vals, bins=30, color='skyblue', edgecolor='black', alpha=0.7)
# axs[iB].set_title(f'Bit {iB}: Vth offset = {mu:.2f} V, σ = {std:.2f}')
# axs[iB].set_xlabel("Vth offset [mV]")
# axs[iB].set_ylabel("Count")
# axs[iB].grid(True)
# plt.tight_layout()
# plt.savefig(os.path.join(outDir, "vthOffset_Histograms_PerBit.pdf"))
# plt.close()

# # Combined histogram
# combined_vals = np.concatenate([np.array(v) for v in bit_CvG.values()])
# mu, std = norm.fit(combined_vals)
# plt.figure(figsize=(8,6))
# plt.hist(combined_vals, bins=40, color='salmon', edgecolor='black', alpha=0.75)
# plt.title(f'All Bits Combined: μ = {mu:.2f} µV/e⁻, σ = {std:.2f}')
# plt.xlabel("CvG [µV/e⁻]")
# plt.ylabel("Count")
# plt.grid(True)
# plt.savefig(os.path.join(outDir, "CvG_Histogram_Combined.pdf"))
# plt.close()

# # Combined histogram
# combined_vals = np.concatenate([np.array(v) for v in bit_VthOff.values()])
# mu, std = norm.fit(combined_vals)
# plt.figure(figsize=(8,6))
# plt.hist(combined_vals, bins=40, color='salmon', edgecolor='black', alpha=0.75)
# plt.title(f'All Bits Combined: μ = {mu:.2f} V, σ = {std:.2f}')
# plt.xlabel("vth offset [V]")
# plt.ylabel("Count")
# plt.grid(True)
# plt.savefig(os.path.join(outDir, "vth_offset_Histogram_Combined.pdf"))
# plt.close()

# # Save CvG data as (iP, iB, CvG)
# CvG_array = np.array(store_CvG)
# vth_offset_array = np.array(store_VthOff)
# save_path1 = os.path.join(outDir, "CvG_data.npy")
# save_path2 = os.path.join(outDir, "vth_offset_data.npy")
# np.save(save_path1, CvG_array)
# np.save(save_path2, vth_offset_array)
# print(f"Saved CvG data to: {save_path1}")
# print(f"Saved vth offset data to: {save_path2}")
11 changes: 7 additions & 4 deletions lab_analysis/MatrixNPix.py
@@ -104,7 +104,9 @@ def gaussian(x, amplitude, mean, std_dev):

hist_vals, bin_edges = np.histogram(temp, bins=bins, density=False) # inData[name][iB]
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
ax.step(bin_centers, hist_vals, where='mid', linewidth=1.5, color='black', label='Data')
data_mean = np.mean(temp)
data_std_dev = np.std(temp)
ax.step(bin_centers, hist_vals, where='mid', linewidth=1.5, color='black', label=f"Data\n($\\bar{{X}}$={data_mean:.2f}, $X_{{RMS}}$={data_std_dev:.2f})")
# ax.hist(features[iS:,:,iB:,config["idx"]].flatten(), bins=bins, histtype="step", linewidth=1.5, color='black', label='Data') # plot data histogram # inData[name][iB]

mean = 0
@@ -121,15 +123,16 @@ def gaussian(x, amplitude, mean, std_dev):
# restrict fit to certain region
mask = bin_centers != np.nan
if "fitRange" in config.keys():
print("Only fitting in the range: ", config["fitRange"][iB])
mask = ((bin_centers > mean - 3.0 * rms) & (bin_centers < mean + 3.0 * rms))
# print("Only fitting in the range: ", config["fitRange"][iB])
mask = ((bin_centers > mean - 3.0 * rms) & (bin_centers < mean + 3.0 * rms))
print("Only fitting in the range: [", mean - 3.0 * rms, ", ", mean + 3.0 * rms, "]")
# perform fit
popt, _ = curve_fit(gaussian, bin_centers[mask], hist_vals[mask], p0=p0)
# evaluate fit and plot
y_fit = gaussian(bin_centers, *popt) # evaluate gaussian at bins
amplitude, mean, std_dev = popt
std_dev = abs(std_dev) # from the gaussian the reported value could be +/- but just report positive
ax.plot(bin_centers[mask], y_fit[mask], color='r', label='Gaussian Fit''\n'fr'({mean:.2f},{std_dev:.2f})', alpha=0.5) # fit
ax.plot(bin_centers[mask], y_fit[mask], color='r', label='Gaussian Fit''\n'fr'($\mu=${mean:.2f}, $\sigma$={std_dev:.2f})', alpha=0.5) # fit
except:
    print("Fit failed")

32 changes: 32 additions & 0 deletions lab_analysis/README.md
@@ -0,0 +1,32 @@
To run dynamic_parseDNNresults.py (used to find and/or parse on-chip NN results): pass the location of readout.csv (the output file from the on-chip NN), plus one of the following:
1. -d flag: use when dnn_RTL_out.csv has been produced and copied to the same location as readout.csv. Passing this flag finds the best timestamp at which to evaluate the on-chip NN results, and also produces the corresponding confusion matrix and the accuracy of the on-chip NN w.r.t. the RTL results.
2. -s flag followed by a timestamp number: use to evaluate the on-chip NN results at a particular timestamp.
If both of the above flags are passed, the script evaluates the on-chip NN results at the user-defined timestamp and compares the resulting output with the RTL prediction; example invocations are sketched below.
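
For example (the run-directory path below is hypothetical):
```
# find the best timestamp and compare against RTL output (expects dnn_RTL_out.csv next to readout.csv)
python3 dynamic_parseDNNresults.py -r /path/to/run_dir/ -d

# evaluate at a fixed timestamp (here 20) when no RTL file is available
python3 dynamic_parseDNNresults.py -r /path/to/run_dir/ -s 20
```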

Analysis of CvG data (across all 256 pixels) to obtain the conversion equations that take us from VTH [V] to VTH [electrons]:
Note: a future iteration of the CvG routine will improve upon directory management; please bear with the workaround described below for now.

1. Folder organization: ensure the directory is named in the usual format (date_MatrixCvG_vMin_vMax...). This directory must contain the folders nPix0, nPix1, ..., nPix255, and each nPix folder must contain the data for the various VTH settings used.
2. Running the analysis: python3 launchAnalysis.py -i data_MatrixCvG_vMin_vMax... (add --doFit if you want the S-curves fit with a Gaussian)
3. Final plots: python3 MatrixCvG_allPixels.py -i data_MatrixCvG_vMin_vMax.../plots/scurve_data.npz
One can use a shell script like the following to help move directories:
```
master="./2025.11.20_11.24.21_MatrixCvG_vMin0.001_vMax0.400_vStep0.00100_nSample1365.000_vdda0.900_BXCLKf10.00_BxCLKDly45.00_injDly75.00_Ibias0.600"
list="dirs.txt"

while IFS= read -r d; do
[ -z "$d" ] && continue # skip empty lines
mv -- "$d" "$master"/
done < "$list"

cd "$master"

for d in *_nPix*; do
# extract from first occurrence of "nPix" to end
new="${d#*nPix}"
new="nPix${new}" # ensure it starts with nPix
mv -- "$d" "$new"
done
```
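
Here dirs.txt is simply a text file listing, one per line, the directories to be moved into the master directory. Since the rename loop above expects names matching `*_nPix*`, one way to generate it might be:
```
# list the run directories to move, one per line (pattern is a guess; adjust to your naming)
ls -d ./*_nPix*/ > dirs.txt
```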

To perform analysis on power measurements: pass the location of the Ivddd.csv file using the -f flag. The subsequent arguments help disentangle the double peaks typically observed in power-consumption histograms/values: the -c argument is the user's guess for the power value that separates the two peaks, and the -r argument sets the maximum range of the power-histogram plot in the case of P_noise measurements.
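
A hypothetical invocation (the script name and the numeric values are illustrative placeholders, not actual defaults):
```
# -c is the guessed power value separating the two peaks; -r caps the histogram range (P_noise case)
python3 powerAnalysis.py -f /path/to/Ivddd.csv -c 12 -r 50
```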
63 changes: 42 additions & 21 deletions lab_analysis/dynamic_parseDNNresults.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,17 @@
import numpy as np
import argparse
import numpy as np
import argparse, os
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from SmartPixStyle import *
import mplhep as hep

hep.style.use("ATLAS")

# To-do:
# - Update directories in config.py / config.json to reflect this. This way, one does not have to hard-code the results paths in config.py/config.json before running efficiency.py

class SaveOnlyAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
@@ -12,8 +21,9 @@ def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, int(values))

parser = argparse.ArgumentParser(description='Parse DNN results from CSV files')
parser.add_argument("-r", "--readout", type=str, default='./readout.csv', help='Path to readout CSV file')
parser.add_argument("-d", "--dnn_rtl", type=str, default='./dnn_RTL_out.csv', help='Path to DNN RTL output CSV file')
parser.add_argument("-r", "--readout", type=str, default='./', help='Path to readout CSV file')
# parser.add_argument("-d", "--dnn_rtl", type=str, default='./dnn_RTL_out.csv', help='Path to DNN RTL output CSV file')
parser.add_argument("-d", "--dnn_rtl", action='store_true', help='Use DNN RTL CSV file if exists')
parser.add_argument("-p", "--true_pt", type=str, default=None, help='Path to true pt values for inputs')
parser.add_argument("-s", "--save-only", nargs="?", type=int, action=SaveOnlyAction, default=None, help="If set, only save the final results without evaluating accuracy. Pass the time stamp parameter at which the DNN output is parsed. Defaults to 20 if no value is provided.")
args = parser.parse_args()
@@ -47,21 +57,22 @@ def eval_dnn_result(results_file, bit_iter):
    final_results = np.array(final_results)
    return final_results

results_file_to_evaluate = np.genfromtxt(args.readout, delimiter=',', dtype=int)
loc_to_files = os.path.join(args.readout, '')
results_file_to_evaluate = np.genfromtxt(f'{loc_to_files}readout.csv', delimiter=',', dtype=int)  # file name is hard-coded: it is the Spacely software default and no other name has been used in the past
# Output DNN results based on time-stamp guess when RTL file has not been produced
if(args.save_only is not None):
if(args.save_only is not None and args.dnn_rtl is False):
    # If save-only is specified, we only save the results at the given time stamp and exit
    print(f"Saving results at time stamp {args.save_only} as --save-only parameter has been passed.")
    final_results = eval_dnn_result(results_file_to_evaluate, args.save_only)
    np.savetxt('final_results.csv', final_results, delimiter=',', fmt='%d')
    np.save('final_results.npy', final_results)
    np.savetxt(f'{loc_to_files}final_results_ts{args.save_only}.csv', final_results, delimiter=',', fmt='%d')
    np.save(f'{loc_to_files}final_results_ts{args.save_only}.npy', final_results)
    print(f"Results saved at time stamp {args.save_only}. Exiting without further evaluation.\nTo evaluate accuracy omit the --save-only parameter and pass the RTL file.")
    exit(0)

# If RTL file is provided, obtain the best time stamp based on training set (200 events)
results_file = results_file_to_evaluate[:200]

dnn_RTL_out = np.genfromtxt(args.dnn_rtl, delimiter=',', dtype=int)
dnn_RTL_out = np.genfromtxt(f'{loc_to_files}dnn_RTL_out.csv', delimiter=',', dtype=int)
expected_train_results = dnn_RTL_out[:200]

assert len(results_file) == len(expected_train_results), "Mismatch in number of events between results file and expected train results"
@@ -101,8 +112,12 @@ def eval_dnn_result(results_file, bit_iter):
best_score = np.argmin(score[12:]) + 12
# print("Passing time stamp = ", best_score, "(evaluating at next time stamp to ensure evaluation in a safe output time range of dnn0 and dnn1).")
final_results = eval_dnn_result(results_file_to_evaluate, best_score)
np.savetxt('final_results.csv', final_results, delimiter=',', fmt='%d')
np.save('final_results.npy', final_results)
if args.save_only is not None:
    print(f"\n\nSaving results at time stamp {args.save_only} as --save-only parameter has been passed.")
    print(f"Overriding best score with --save-only parameter: {score[args.save_only]}.")
    final_results = eval_dnn_result(results_file_to_evaluate, args.save_only)
    best_score = args.save_only  # keep the saved file names consistent with the time stamp actually evaluated
np.savetxt(f'{loc_to_files}final_results_ts{best_score}.csv', final_results, delimiter=',', fmt='%d')
np.save(f'{loc_to_files}final_results_ts{best_score}.npy', final_results)

assert final_results.shape == dnn_RTL_out.shape

@@ -118,19 +133,25 @@ def eval_dnn_result(results_file, bit_iter):
print(f"Percentage of matches: {percentage_match:.2f}%")


# Create confusion matrix from the results
# Generate and plot confusion matrix
true_values = dnn_RTL_out.flatten() # Flattening in case of multi-dimensional arrays
predicted_values = final_results.flatten()
# Generate confusion matrix
cm = confusion_matrix(true_values, predicted_values, labels=[0, 1, 2])
# Plot confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['0','1','2'], yticklabels=['0','1','2'])
# sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['High p$_T$', 'Low p$_T$ negative', 'Low p$_T$ positive'], yticklabels=['High p$_T$', 'Low p$_T$ negative', 'Low p$_T$ positive'])
plt.ylabel('RTL label', fontsize=12)
plt.xlabel('Predicted label', fontsize=12)
plt.title('Confusion Matrix', fontsize=14)
plt.savefig('final_results_confusion_matrix.pdf', dpi=300)
fig, ax = plt.subplots(figsize=(8,6))
# sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['0','1','2'], yticklabels=['0','1','2'])
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['High p$_T$', 'Low p$_T$ -ve', 'Low p$_T$ +ve'], yticklabels=['High p$_T$', 'Low p$_T$ -ve', 'Low p$_T$ +ve'], cbar_kws={'label': 'Number of events'}, annot_kws={"size": 16})
ax.set_xlabel('Predicted label', fontsize=18, loc='center')#, labelpad=10)
ax.set_ylabel('RTL label', fontsize=18, loc='center')#, labelpad=10)
ax.tick_params(axis='both', which='both', length=0, labelsize=16) # removes tick marks
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
SmartPixLabel(ax, 0, 1.003, size=18)
ax.text(0.33, 1.005, f"ROIC V1, ID 23, SuperPixel 1", transform=ax.transAxes, fontsize=12, color="black", ha='left', va='bottom')
# ax.text(0.05, 0.85, f"ROIC V{int(info['ChipVersion'])}, ID {int(info['ChipID'])}, SuperPixel {int(info['SuperPix'])}", transform=ax.transAxes, fontsize=12, color="black", ha='left', va='bottom')
# plt.title('Confusion Matrix', fontsize=14)
plt.savefig(f'{loc_to_files}final_results_confusion_matrix.pdf', bbox_inches='tight', dpi=300)


# =====================================================
@@ -186,7 +207,7 @@ def efficiency_error(eff, N):
import os
from matplotlib.ticker import MultipleLocator

info = inspectPath(os.path.dirname(args.readout))
info = inspectPath(os.path.dirname(f'{loc_to_files}readout.csv'))
print(info)

# load true y-profile