Today I was working on getting the classic MNIST dataset working with PyTorch.
It worked in the end, but I ran into some trouble at first; I fixed it quickly and now it runs fine.
Here is the full code for it:
# Let us use some helper functions from Daniel Bourke's pytorch-deep-learning repo
import requests
from pathlib import Path
import time

if Path("helper_functions.py").is_file():
    print("Already have helper_functions.py")
else:
    print("downloading the file...")
    time.sleep(2)
    r = requests.get('https://github.com/mrdbourke/pytorch-deep-learning/raw/main/helper_functions.py')
    with open('helper_functions.py', 'wb') as f:
        f.write(r.content)
    print("download finished")

# import everything from it (this is where accuracy_fn comes from)
from helper_functions import *
import torch
import torchvision
from torchvision.transforms import ToTensor
from torchvision.datasets import MNIST

# device-agnostic setup: use the GPU if one is available
# (torch has to be imported before we can call torch.cuda.is_available())
device = 'cuda' if torch.cuda.is_available() else 'cpu'
train_data = MNIST(root="Model_dataset", train=True, download=True, transform=ToTensor(), target_transform=None)
test_data = MNIST(root="Model_dataset", train=False, download=True, transform=ToTensor(), target_transform=None)
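Before batching, it's worth a quick sanity check on what we just downloaded. MNIST images are single-channel 28x28 tensors, with 60,000 training and 10,000 test samples; the single colour channel is also why the model below takes input_shape=1.

# Quick look at one sample and the dataset sizes
image, label = train_data[0]
print(image.shape)                      # torch.Size([1, 28, 28]) -> one colour channel, 28x28 pixels
print(len(train_data), len(test_data))  # 60000 10000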
from torch.utils.data import DataLoader

BATCH_SIZE = 32
train_dataloader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)  # no need to shuffle the test data
print(len(train_dataloader), len(test_dataloader))  # 1875 and 313 batches of 32
class_name = train_data.classes
import torch.nn as nn

class PuttingAllTogether(nn.Module):
    """A small CNN: two convolutional blocks followed by a linear classifier."""
    def __init__(self, input_shape: int, hidden_units: int, output_shape: int):
        super().__init__()
        self.conv_block = nn.Sequential(
            nn.Conv2d(in_channels=input_shape, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)  # halves the spatial size: 28x28 -> 14x14
        )
        self.conv_block_2 = nn.Sequential(
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)  # halves it again: 14x14 -> 7x7
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=hidden_units * 7 * 7,  # there is a trick to calculating this
                      out_features=output_shape)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv_block(x)
        x = self.conv_block_2(x)
        x = self.classifier(x)
        return x
model_final = PuttingAllTogether(input_shape=1, hidden_units=10, output_shape=len(class_name))
# Time for the calculating trick: pass a random image-sized tensor through the model
# and read the flatten size off the conv output (or the shape error message)
rand_image_tensor = torch.randn(size=(1, 28, 28))
print(rand_image_tensor.shape)  # torch.Size([1, 28, 28])
# model_final(rand_image_tensor.unsqueeze(0))  # unsqueeze(0) adds the batch dimension
10 * 7 * 7  # hidden_units * 7 * 7 = 490 input features for the linear layer
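If you'd rather see the 7x7 directly than work it out from an error message, here is a minimal sketch: the two MaxPool2d layers each halve the spatial size (28 -> 14 -> 7), so running a dummy batch through just the conv blocks shows exactly what reaches the classifier.

# Pass a dummy batch through the conv blocks only, to read off the flatten size
dummy = torch.randn(1, 1, 28, 28)  # (batch, channels, height, width)
conv_out = model_final.conv_block_2(model_final.conv_block(dummy))
print(conv_out.shape)  # torch.Size([1, 10, 7, 7]) -> 10 * 7 * 7 = 490 features after Flatten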
# Picking a loss function and an optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model_final.parameters(), lr=0.1)
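A side note that matters for the accuracy computation below: nn.CrossEntropyLoss works on the raw logits, and because softmax preserves ordering, taking argmax over the logits picks the same class as taking it over the softmaxed probabilities, so the extra softmax call is optional. A quick check:

# softmax never changes which class has the highest score
logits = torch.tensor([[2.0, 0.5, -1.0]])
print(logits.argmax(dim=1))                        # tensor([0])
print(torch.softmax(logits, dim=1).argmax(dim=1))  # tensor([0])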
# Let us check the shapes of one batch so we are clear everything lines up
for X_test, y_test in test_dataloader:
    print(f"X_test shape: {X_test.shape}, y_test shape: {y_test.shape}")
    break  # one batch is enough for a sanity check
from tqdm.auto import tqdm

### TRAIN TIME
epochs = 4
model_final = model_final.to(device)  # move the model to the target device once, before training

for epoch in tqdm(range(epochs)):
    train_loss, train_acc = 0, 0  # reset the running metrics at the start of every epoch
    model_final.train()
    # Loop through the training batches
    for batch, (X, y) in enumerate(train_dataloader):
        X, y = X.to(device), y.to(device)
        y_pred = model_final(X)
        loss = loss_fn(y_pred, y)
        train_loss += loss.item()  # .item() takes the value so we don't keep the whole graph around
        train_acc += accuracy_fn(y, y_pred.argmax(dim=1))  # argmax over the class dimension
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Print out what's happening
    # Divide the total train loss and accuracy by the number of training batches
    train_loss /= len(train_dataloader)
    train_acc /= len(train_dataloader)
    print(f"Train acc: {train_acc} | train_loss: {train_loss}")
    # Evaluate on the test set at the end of every epoch
    test_loss, test_acc = 0, 0
    model_final.eval()
    with torch.inference_mode():
        for X_test, y_test in test_dataloader:
            X_test, y_test = X_test.to(device), y_test.to(device)
            test_pred = model_final(X_test)
            test_loss += loss_fn(test_pred, y_test).item()
            test_acc += accuracy_fn(y_test, test_pred.argmax(dim=1))
        test_loss /= len(test_dataloader)
        test_acc /= len(test_dataloader)
    # Print out what's happening
    print(f"\ntest loss: {test_loss} | test acc: {test_acc}")
from mlxtend.plotting import plot_confusion_matrix
from torchmetrics import ConfusionMatrix
import matplotlib.pyplot as plt

# Visualise one test sample
image, label = test_data[0]
plt.imshow(image.squeeze(), cmap='gray')  # squeeze drops the channel dimension for plotting
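The plot_confusion_matrix and ConfusionMatrix imports above never actually get used, so here is a minimal sketch of how they could round things off. It assumes a torchmetrics version that takes the task argument, and it relies on test_dataloader not shuffling, so the collected predictions line up with test_data.targets.

# Collect predictions over the whole test set...
y_preds = []
model_final.eval()
with torch.inference_mode():
    for X, y in test_dataloader:
        y_logits = model_final(X.to(device))
        y_preds.append(y_logits.argmax(dim=1).cpu())
y_pred_tensor = torch.cat(y_preds)

# ...then build and plot the confusion matrix
confmat = ConfusionMatrix(task="multiclass", num_classes=len(class_name))
confmat_tensor = confmat(preds=y_pred_tensor, target=test_data.targets)
fig, ax = plot_confusion_matrix(conf_mat=confmat_tensor.numpy(),
                                class_names=class_name,
                                figsize=(10, 7))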