1 change: 0 additions & 1 deletion input.json

This file was deleted.

(Several binary files changed; contents not shown.)
15 changes: 15 additions & 0 deletions python/models/models_onnx/model_minimal.onnx
@@ -0,0 +1,15 @@
(Binary ONNX protobuf rendered as text: produced by pytorch 2.7.1, main_graph contains a single Flatten node mapping "input" to "output"; contents not human-readable.)
(Two further binary files changed; contents not shown.)
31 changes: 31 additions & 0 deletions python/scripts/export_identity_crisis.py
@@ -0,0 +1,31 @@
print("Exporting Identity Crisis model to ONNX format...")

import torch
import sys

sys.path.append('/Users/elenapashkova/GravyTesting-Internal/python/models/models_onnx')

try:
from identity_crisis_model import IdentityCrisisModel

model = IdentityCrisisModel()
model.eval()
dummy_input = torch.randn(1, 1, 28, 28)

print("Testing forward pass...")
with torch.no_grad():
output = model(dummy_input)
print(f"Model output shape: {output.shape}")

print("Exporting to ONNX with opset version 13...")
torch.onnx.export(
model, dummy_input, "model_identity_crisis.onnx",
input_names=["input"], output_names=["output"],
opset_version=13 # Changed from 11 to 13
)
print("✅ Exported model_identity_crisis.onnx successfully!")

except Exception as e:
print(f"❌ Error: {e}")
import traceback
traceback.print_exc()
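
Not part of the diff, but a quick way to sanity-check this export is to run the resulting ONNX file and compare it with the PyTorch output computed above. This is a minimal sketch assuming the onnxruntime package is installed; the names sess and onnx_out are illustrative.

    # Sketch only -- assumes onnxruntime is installed; run in the same session as the export above.
    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession("model_identity_crisis.onnx")
    onnx_out = sess.run(["output"], {"input": dummy_input.numpy()})[0]
    print("Max abs diff vs PyTorch:", np.abs(onnx_out - output.numpy()).max())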
91 changes: 91 additions & 0 deletions python/scripts/export_supported_weird_fixed.py
@@ -0,0 +1,91 @@
print("Creating fixed Supported Weird model with Conv2D instead of Conv1D...")

import torch
import torch.nn as nn
import os

class SupportedWeirdModelFixed(nn.Module):
def __init__(self):
super().__init__()
# First conv layer (keep as is)
self.conv1 = nn.Conv2d(1, 8, 3, padding=1)

# Fix: Change Conv1d to Conv2d
self.conv2 = nn.Conv2d(8, 16, 3, padding=1) # Was Conv1d, now Conv2d

# Rest of the architecture (basic operations)
self.pool = nn.MaxPool2d(2)
self.flatten = nn.Flatten()
self.linear = nn.Linear(3136, 1) # 16*14*14 = 3136

def forward(self, x):
# Input: [1, 1, 28, 28]
x = torch.relu(self.conv1(x)) # [1, 8, 28, 28]
x = torch.relu(self.conv2(x)) # [1, 16, 28, 28] - Now using Conv2d
x = self.pool(x) # [1, 16, 14, 14]
x = self.flatten(x) # [1, 3136]
x = self.linear(x) # [1, 1]
return x

try:
model = SupportedWeirdModelFixed()
model.eval()
dummy_input = torch.randn(1, 1, 28, 28)

print("Testing forward pass...")
with torch.no_grad():
output = model(dummy_input)
print(f"Input shape: {dummy_input.shape}")
print(f"Output shape: {output.shape}")

print("Exporting to ONNX...")
torch.onnx.export(
model, dummy_input, "model_supported_weird_fixed.onnx",
input_names=["input"], output_names=["output"],
opset_version=11
)

# Move to models directory
os.rename("model_supported_weird_fixed.onnx", "python/models/models_onnx/model_supported_weird_fixed.onnx")
print("✅ Exported model_supported_weird_fixed.onnx successfully!")

# Also create the original with Conv1d for comparison
print("\nCreating original version with Conv1d for comparison...")

class SupportedWeirdModelOriginal(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 8, 3, padding=1)
self.conv1d = nn.Conv1d(8, 16, 3, padding=1) # The problematic Conv1d
self.pool = nn.MaxPool2d(2)
self.flatten = nn.Flatten()
self.linear = nn.Linear(3136, 1)

def forward(self, x):
x = torch.relu(self.conv1(x)) # [1, 8, 28, 28]
# Reshape for Conv1d: [1, 8, 784] (flatten spatial dims)
x = x.view(x.size(0), x.size(1), -1) # [1, 8, 784]
x = torch.relu(self.conv1d(x)) # [1, 16, 782] - Conv1d output
# Reshape back for pooling
x = x.view(x.size(0), x.size(1), 28, 28) # Back to [1, 16, 28, 28]
x = self.pool(x) # [1, 16, 14, 14]
x = self.flatten(x) # [1, 3136]
x = self.linear(x) # [1, 1]
return x

original_model = SupportedWeirdModelOriginal()
original_model.eval()

torch.onnx.export(
original_model, dummy_input, "model_supported_weird_original.onnx",
input_names=["input"], output_names=["output"],
opset_version=11
)

os.rename("model_supported_weird_original.onnx", "python/models/models_onnx/model_supported_weird_original.onnx")
print("✅ Also exported original version for comparison!")

except Exception as e:
print(f"❌ Error: {e}")
import traceback
traceback.print_exc()
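
Since this script exports both a fixed (Conv2d) and an original (Conv1d) graph, one way to confirm which operator actually landed in each file is to list the node types. A minimal sketch, assuming the onnx package is installed and the files sit at the paths used by os.rename above; it is not part of the PR.

    # Sketch only -- assumes the onnx package is installed.
    import onnx

    for path in ("python/models/models_onnx/model_supported_weird_fixed.onnx",
                 "python/models/models_onnx/model_supported_weird_original.onnx"):
        graph = onnx.load(path).graph
        print(path, [node.op_type for node in graph.node])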
40 changes: 40 additions & 0 deletions python/scripts/generate_io_identity_crisis.py
@@ -0,0 +1,40 @@
print("Generating input/output for Identity Crisis model...")

import torch
import json
import os
import sys

# Add the models directory to Python path - using relative path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'models', 'models_onnx')))

try:
from identity_crisis_model import IdentityCrisisModel

model = IdentityCrisisModel()
model.eval()
dummy_input = torch.randn(1, 1, 28, 28)

with torch.no_grad():
output = model(dummy_input)

input_data = dummy_input.numpy().tolist()
output_data = output.detach().numpy().tolist()

io_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'testing', 'core', 'input_output_data', 'model_identity_crisis'))
os.makedirs(io_dir, exist_ok=True)

with open(os.path.join(io_dir, "input.json"), "w") as f:
json.dump({"input": input_data}, f)

with open(os.path.join(io_dir, "output.json"), "w") as f:
json.dump({"output": output_data}, f)

print("✅ Successfully saved input/output JSONs to:", io_dir)
print("Input shape:", dummy_input.shape)
print("Output shape:", output.shape)

except Exception as e:
print(f"❌ Error: {e}")
import traceback
traceback.print_exc()
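
For completeness, the saved pair can later be read back into tensors when replaying the case, for example against the exported ONNX model. A minimal sketch reusing the io_dir defined above; it is not part of the PR.

    # Sketch only -- reuses io_dir, torch and json from the script above.
    with open(os.path.join(io_dir, "input.json")) as f:
        replay_input = torch.tensor(json.load(f)["input"])      # [1, 1, 28, 28]
    with open(os.path.join(io_dir, "output.json")) as f:
        expected_output = torch.tensor(json.load(f)["output"])  # [1, 1]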
87 changes: 87 additions & 0 deletions python/scripts/identity_crisis_model.py
@@ -0,0 +1,87 @@
print("Creating Identity Crisis Model...")

import torch
import torch.nn as nn

try:
class IdentityCrisisModel(nn.Module):
def __init__(self):
super(IdentityCrisisModel, self).__init__()

# Conflicting normalization layers
self.batch_norm = nn.BatchNorm2d(8)
self.group_norm = nn.GroupNorm(2, 8)
self.instance_norm = nn.InstanceNorm2d(8)

# Conflicting activation functions
self.relu = nn.ReLU()
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)

# Extreme dropout patterns
self.light_dropout = nn.Dropout(0.1)
self.heavy_dropout = nn.Dropout(0.95)

# Conflicting pooling operations
self.max_pool = nn.MaxPool2d(2)
self.avg_pool = nn.AvgPool2d(2)

# Core layers
self.conv1 = nn.Conv2d(1, 8, 3, padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, padding=1)

# Flatten and unflatten (identity crisis about shape)
self.flatten = nn.Flatten()
self.unflatten = nn.Unflatten(1, (16, 7, 7))

# Final layers
self.linear1 = nn.Linear(784, 64)
self.linear2 = nn.Linear(64, 1)

def forward(self, x):
# Initial convolution
x = self.conv1(x) # [1, 8, 28, 28]

# Apply conflicting normalizations (last one wins)
x = self.batch_norm(x)
x = self.group_norm(x)
x = self.instance_norm(x)

# Apply conflicting activations
x = self.relu(x)
x = self.leaky_relu(x) # ReLU already made it positive

# Apply conflicting dropout patterns
x = self.light_dropout(x)
x = self.heavy_dropout(x) # Heavy dropout dominates

# Second convolution
x = self.conv2(x) # [1, 16, 28, 28]

# Apply conflicting pooling and average results
x1 = self.max_pool(x) # [1, 16, 14, 14]
x2 = self.avg_pool(x) # [1, 16, 14, 14]
x = (x1 + x2) / 2

# Pool to 7x7
x = nn.functional.adaptive_avg_pool2d(x, (7, 7)) # [1, 16, 7, 7]

# Flatten then unflatten (the true identity crisis!)
x = self.flatten(x) # [1, 784]
x = self.unflatten(x) # [1, 16, 7, 7] - back to original shape

# Final flatten for linear layers
x = self.flatten(x) # [1, 784]

# Linear layers
x = self.linear1(x) # [1, 64]
x = self.relu(x)
x = self.linear2(x) # [1, 1]

return x

print("IdentityCrisisModel class created successfully!")

except Exception as e:
print(f"Error creating model: {e}")
import traceback
traceback.print_exc()
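
Worth noting: the heavy Dropout(0.95) only has an effect in train mode. Once the model is put in eval() for export, both dropout layers become identity ops and the forward pass is deterministic. A minimal sketch illustrating this; it is not part of the PR.

    # Sketch only -- dropout is active in train mode but disabled in eval mode.
    m = IdentityCrisisModel()
    x = torch.randn(1, 1, 28, 28)
    m.train()
    a, b = m(x), m(x)        # dropout masks are resampled, so these usually differ
    m.eval()
    with torch.no_grad():
        c, d = m(x), m(x)    # dropout disabled, batch norm uses running stats: identical
    print(torch.equal(a, b), torch.equal(c, d))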
68 changes: 68 additions & 0 deletions python/scripts/identity_crisis_simple.py
@@ -0,0 +1,68 @@
print("Creating Simple Identity Crisis Model (without unflatten)...")

import torch
import torch.nn as nn

try:
class SimpleIdentityCrisisModel(nn.Module):
def __init__(self):
super(SimpleIdentityCrisisModel, self).__init__()

# Conflicting normalization layers (but skip InstanceNorm for compatibility)
self.batch_norm = nn.BatchNorm2d(8)
self.group_norm = nn.GroupNorm(2, 8)

# Only ReLU (skip LeakyReLU for compatibility)
self.relu = nn.ReLU()

# Light dropout only
self.dropout = nn.Dropout(0.5)

# Only max pooling (skip averaging operations)
self.pool = nn.MaxPool2d(2)

# Core layers
self.conv1 = nn.Conv2d(1, 8, 3, padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, padding=1)

# Simple flatten
self.flatten = nn.Flatten()

# Final layers
self.linear1 = nn.Linear(3136, 64) # 16*14*14 = 3136
self.linear2 = nn.Linear(64, 1)

def forward(self, x):
# Initial convolution
x = self.conv1(x) # [1, 8, 28, 28]

# Apply normalizations (last one wins)
x = self.batch_norm(x)
x = self.group_norm(x)

# Activation
x = self.relu(x)

# Dropout
x = self.dropout(x)

# Second convolution
x = self.conv2(x) # [1, 16, 28, 28]

# Simple pooling
x = self.pool(x) # [1, 16, 14, 14]

# Flatten and linear layers
x = self.flatten(x) # [1, 3136]
x = self.linear1(x) # [1, 64]
x = self.relu(x)
x = self.linear2(x) # [1, 1]

return x

print("SimpleIdentityCrisisModel class created successfully!")

except Exception as e:
print(f"Error creating model: {e}")
import traceback
traceback.print_exc()
40 changes: 40 additions & 0 deletions python/scripts/test_minimal_model.py
@@ -0,0 +1,40 @@
print("Testing Minimal Model that should work with JSTProve...")

import torch
import torch.nn as nn
import os

class MinimalModel(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()

def forward(self, x):
return self.flatten(x)

try:
model = MinimalModel()
model.eval()
dummy_input = torch.randn(1, 1, 28, 28)

print("Testing forward pass...")
with torch.no_grad():
output = model(dummy_input)
print(f"Input shape: {dummy_input.shape}")
print(f"Output shape: {output.shape}")

print("Exporting to ONNX...")
torch.onnx.export(
model, dummy_input, "model_minimal.onnx",
input_names=["input"], output_names=["output"],
opset_version=11
)

# Move to models directory
os.rename("model_minimal.onnx", "python/models/models_onnx/model_minimal.onnx")
print("✅ Exported model_minimal.onnx successfully!")

except Exception as e:
print(f"❌ Error: {e}")
import traceback
traceback.print_exc()
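
If the goal is a model that JSTProve accepts, a cheap extra check is to validate the exported graph and confirm the declared opset. A minimal sketch, assuming the onnx package is installed; it is not part of the PR.

    # Sketch only -- assumes the onnx package is installed.
    import onnx

    m = onnx.load("python/models/models_onnx/model_minimal.onnx")
    onnx.checker.check_model(m)        # raises if the graph is malformed
    print(m.opset_import[0].version)   # expected: 11, matching opset_version above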