From e6c7f42c46a6414f12b2d8820e00e4474be7bbd8 Mon Sep 17 00:00:00 2001
From: Aladdin Persson
Date: Wed, 24 Mar 2021 21:57:40 +0100
Subject: [PATCH 1/7] revised some code examples

---
 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py    | 51 ++++--------
 ML/Pytorch/Basics/pytorch_simple_CNN.py      | 67 ++++++---------
 ML/Pytorch/Basics/pytorch_simple_fullynet.py | 86 ++++++++++----------
 ML/Pytorch/Basics/pytorch_tensorbasics.py    |  3 +
 4 files changed, 90 insertions(+), 117 deletions(-)

diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
index 4a8e2b9..f12d855 100644
--- a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
+++ b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
@@ -8,15 +8,14 @@ Programmed by Aladdin Persson
 
 # Imports
 import torch
-import torchvision
-import torch.nn as nn  # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
-import torch.optim as optim  # For all Optimization algorithms, SGD, Adam, etc.
-import torch.nn.functional as F  # All functions that don't have any parameters
-from torch.utils.data import (
-    DataLoader,
-)  # Gives easier dataset managment and creates mini batches
-import torchvision.datasets as datasets  # Has standard datasets we can import in a nice way
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For a nice progress bar!
 
 # Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -29,7 +28,7 @@ num_classes = 10
 sequence_length = 28
 learning_rate = 0.005
 batch_size = 64
-num_epochs = 2
+num_epochs = 3
 
 # Recurrent neural network (many-to-one)
 class RNN(nn.Module):
@@ -101,18 +100,12 @@ class RNN_LSTM(nn.Module):
 
 # Load Data
-train_dataset = datasets.MNIST(
-    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
-)
-
-test_dataset = datasets.MNIST(
-    root="dataset/", train=False, transform=transforms.ToTensor(), download=True
-)
-
+train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
+test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
 test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
 
-# Initialize network
+# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
 model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
 
 # Loss and optimizer
@@ -121,7 +114,7 @@ optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
 # Train Network
 for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(train_loader):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
         # Get data to cuda if possible
         data = data.to(device=device).squeeze(1)
         targets = targets.to(device=device)
@@ -134,16 +127,11 @@ for epoch in range(num_epochs):
         optimizer.zero_grad()
         loss.backward()
 
-        # gradient descent or adam step
+        # gradient descent update step/adam step
         optimizer.step()
 
 # Check accuracy on training & test to see how good our model
 def check_accuracy(loader, model):
-    if loader.dataset.train:
-        print("Checking accuracy on training data")
-    else:
-        print("Checking accuracy on test data")
-
     num_correct = 0
     num_samples = 0
 
@@ -160,13 +148,10 @@ def check_accuracy(loader, model):
             num_correct += (predictions == y).sum()
             num_samples += predictions.size(0)
 
-    print(
-        f"Got {num_correct} / {num_samples} with \
-          accuracy {float(num_correct)/float(num_samples)*100:.2f}"
-    )
-    # Set model back to train
+    # Toggle model back to train
     model.train()
+    return num_correct / num_samples
 
 
-check_accuracy(train_loader, model)
-check_accuracy(test_loader, model)
+print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
+print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py
index fe5c1fd..d18d948 100644
--- a/ML/Pytorch/Basics/pytorch_simple_CNN.py
+++ b/ML/Pytorch/Basics/pytorch_simple_CNN.py
@@ -1,34 +1,33 @@
 """
-Example code of a simple CNN network training on MNIST dataset.
-The code is intended to show how to create a CNN network as well
-as how to initialize loss, optimizer, etc. in a simple way to get
-training to work with function that checks accuracy as well.
+A simple walkthrough of how to code a convolutional neural network (CNN)
+using the PyTorch library. For demonstration we train it on the very
+common MNIST dataset of handwritten digits. In this code we go through
+how to create the network as well as initialize a loss function, optimizer,
+check accuracy and more.
-Video explanation: https://youtu.be/wnK3uWv_WkU
-Got any questions leave a comment on youtube :)
-
-Programmed by Aladdin Persson
-* 2020-04-08 Initial coding
+Programmed by Aladdin Persson
+* 2020-04-08: Initial coding
+* 2021-03-24: More detailed comments and small revision of the code
 
 """
 
 # Imports
 import torch
-import torch.nn as nn  # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
-import torch.optim as optim  # For all Optimization algorithms, SGD, Adam, etc.
-import torch.nn.functional as F  # All functions that don't have any parameters
-from torch.utils.data import (
-    DataLoader,
-)  # Gives easier dataset managment and creates mini batches
-import torchvision.datasets as datasets  # Has standard datasets we can import in a nice way
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For nice progress bar!
 
 # Simple CNN
 class CNN(nn.Module):
     def __init__(self, in_channels=1, num_classes=10):
         super(CNN, self).__init__()
         self.conv1 = nn.Conv2d(
-            in_channels=1,
+            in_channels=in_channels,
             out_channels=8,
             kernel_size=(3, 3),
             stride=(1, 1),
@@ -51,7 +50,6 @@ class CNN(nn.Module):
         x = self.pool(x)
         x = x.reshape(x.shape[0], -1)
         x = self.fc1(x)
-
         return x
 
 
@@ -59,24 +57,20 @@ class CNN(nn.Module):
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Hyperparameters
-in_channel = 1
+in_channels = 1
 num_classes = 10
 learning_rate = 0.001
 batch_size = 64
-num_epochs = 5
+num_epochs = 3
 
 # Load Data
-train_dataset = datasets.MNIST(
-    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
-)
+train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
+test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_dataset = datasets.MNIST(
-    root="dataset/", train=False, transform=transforms.ToTensor(), download=True
-)
 test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
 
 # Initialize network
-model = CNN().to(device)
+model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
 
 # Loss and optimizer
 criterion = nn.CrossEntropyLoss()
@@ -84,7 +78,7 @@ optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
 # Train Network
 for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(train_loader):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
         # Get data to cuda if possible
         data = data.to(device=device)
         targets = targets.to(device=device)
@@ -101,14 +95,7 @@ for epoch in range(num_epochs):
         optimizer.step()
 
 # Check accuracy on training & test to see how good our model
-
-
 def check_accuracy(loader, model):
-    if loader.dataset.train:
-        print("Checking accuracy on training data")
-    else:
-        print("Checking accuracy on test data")
-
     num_correct = 0
     num_samples = 0
 model.eval()
 
@@ -123,12 +110,10 @@ def check_accuracy(loader, model):
             num_correct += (predictions == y).sum()
             num_samples += predictions.size(0)
 
-    print(
-        f"Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}"
-    )
 
     model.train()
+    return num_correct/num_samples
 
 
-check_accuracy(train_loader, model)
-check_accuracy(test_loader, model)
+print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
+print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
\ No newline at end of file
diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
index 9d1b5a2..fa0a9f4 100644
--- a/ML/Pytorch/Basics/pytorch_simple_fullynet.py
+++ b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
@@ -1,60 +1,69 @@
 """
-Working code of a simple Fully Connected (FC) network training on MNIST dataset.
-The code is intended to show how to create a FC network as well
-as how to initialize loss, optimizer, etc. in a simple way to get
-training to work with function that checks accuracy as well.
+A simple walkthrough of how to code a fully connected neural network
+using the PyTorch library. For demonstration we train it on the very
+common MNIST dataset of handwritten digits. In this code we go through
+how to create the network as well as initialize a loss function, optimizer,
+check accuracy and more.
 
-Video explanation: https://youtu.be/Jy4wM2X21u0
-Got any questions leave a comment on youtube :)
-
-Programmed by Aladdin Persson
-* 2020-04-08 Initial coding
+Programmed by Aladdin Persson
+* 2020-04-08: Initial coding
+* 2021-03-24: Added more detailed comments, also removed part of
+              check_accuracy which would only work specifically on MNIST.
 
 """
 
 # Imports
 import torch
-import torchvision
-import torch.nn as nn  # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
-import torch.optim as optim  # For all Optimization algorithms, SGD, Adam, etc.
-import torch.nn.functional as F  # All functions that don't have any parameters
-from torch.utils.data import (
-    DataLoader,
-)  # Gives easier dataset managment and creates mini batches
-import torchvision.datasets as datasets  # Has standard datasets we can import in a nice way
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For nice progress bar!
 
-# Create Fully Connected Network
+# Here we create our simple neural network. For more details here we are subclassing and
+# inheriting from nn.Module, this is the most general way to create your networks and
+# allows for more flexibility. I encourage you to also check out nn.Sequential which
+# would be easier to use in this scenario but I wanted to show you something that
+# "always" works.
 class NN(nn.Module):
     def __init__(self, input_size, num_classes):
         super(NN, self).__init__()
+        # Our first linear layer takes input_size, in this case 784 nodes to 50
+        # and our second linear layer takes 50 to the num_classes we have, in
+        # this case 10.
         self.fc1 = nn.Linear(input_size, 50)
         self.fc2 = nn.Linear(50, num_classes)
 
     def forward(self, x):
+        """
+        x here is the mnist images and we run it through fc1, fc2 that we created above.
+        we also add a ReLU activation function in between and for that (since it has no parameters)
+        I recommend using nn.functional (F)
+        """
+
         x = F.relu(self.fc1(x))
         x = self.fc2(x)
         return x
 
 
-# Set device
+# Set device cuda for GPU if it's available otherwise run on the CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Hyperparameters
+# Hyperparameters of our neural network which depend on the dataset, and
+# also just experimenting to see what works well (learning rate for example).
 input_size = 784
 num_classes = 10
 learning_rate = 0.001
 batch_size = 64
-num_epochs = 1
+num_epochs = 3
 
-# Load Data
-train_dataset = datasets.MNIST(
-    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
-)
+# Load Training and Test data
+train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
+test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_dataset = datasets.MNIST(
-    root="dataset/", train=False, transform=transforms.ToTensor(), download=True
-)
 test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
 
 # Initialize network
@@ -66,7 +75,7 @@ optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
 # Train Network
 for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(train_loader):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
         # Get data to cuda if possible
         data = data.to(device=device)
         targets = targets.to(device=device)
@@ -85,15 +94,9 @@ for epoch in range(num_epochs):
     # gradient descent or adam step
     optimizer.step()
 
+
 # Check accuracy on training & test to see how good our model
-
-
 def check_accuracy(loader, model):
-    if loader.dataset.train:
-        print("Checking accuracy on training data")
-    else:
-        print("Checking accuracy on test data")
-
     num_correct = 0
     num_samples = 0
     model.eval()
@@ -109,12 +112,9 @@ def check_accuracy(loader, model):
             num_correct += (predictions == y).sum()
             num_samples += predictions.size(0)
 
-    print(
-        f"Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}"
-    )
-
     model.train()
+    return num_correct/num_samples
 
 
-check_accuracy(train_loader, model)
-check_accuracy(test_loader, model)
+print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
+print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
diff --git a/ML/Pytorch/Basics/pytorch_tensorbasics.py b/ML/Pytorch/Basics/pytorch_tensorbasics.py
index 7d9bd30..6e63ebc 100644
--- a/ML/Pytorch/Basics/pytorch_tensorbasics.py
+++ b/ML/Pytorch/Basics/pytorch_tensorbasics.py
@@ -11,6 +11,9 @@ But also other things such as setting the device (GPU/CPU) and converting
 between different types (int, float etc) and how to convert a tensor to an
 numpy array and vice-versa.
+Programmed by Aladdin Persson +* 2020-06-27: Initial coding + """ import torch From d945e7ae47611e5cc00488db6eb5468246bec8c3 Mon Sep 17 00:00:00 2001 From: Aladdin Persson Date: Wed, 24 Mar 2021 22:01:16 +0100 Subject: [PATCH 2/7] update --- ML/Pytorch/CNN_architectures/pytorch_inceptionet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py b/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py index 38faef9..bdc2057 100644 --- a/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py +++ b/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py @@ -11,7 +11,7 @@ Programmed by Aladdin Persson # Imports import torch -import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions +from torch import nn class GoogLeNet(nn.Module): From 31c404822a38c2e0e07ad9d2bf33f0650ff620dd Mon Sep 17 00:00:00 2001 From: Aladdin Persson Date: Wed, 24 Mar 2021 22:03:12 +0100 Subject: [PATCH 3/7] small revisions to code examples --- ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py | 1 + ML/Pytorch/Basics/pytorch_simple_CNN.py | 1 + ML/Pytorch/Basics/pytorch_simple_fullynet.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py index f12d855..489caeb 100644 --- a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py +++ b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py @@ -130,6 +130,7 @@ for epoch in range(num_epochs): # gradient descent update step/adam step optimizer.step() + # Check accuracy on training & test to see how good our model def check_accuracy(loader, model): num_correct = 0 diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py index d18d948..45c94e1 100644 --- a/ML/Pytorch/Basics/pytorch_simple_CNN.py +++ b/ML/Pytorch/Basics/pytorch_simple_CNN.py @@ -94,6 +94,7 @@ for epoch in range(num_epochs): # gradient descent or adam step optimizer.step() + # Check accuracy on training & test to see how good our model def check_accuracy(loader, model): num_correct = 0 diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py index fa0a9f4..4e08a9d 100644 --- a/ML/Pytorch/Basics/pytorch_simple_fullynet.py +++ b/ML/Pytorch/Basics/pytorch_simple_fullynet.py @@ -95,7 +95,7 @@ for epoch in range(num_epochs): optimizer.step() -# Check accuracy on training & test to see how good our model +# Check accuracy on training & test to see how good our model is def check_accuracy(loader, model): num_correct = 0 num_samples = 0 From 80690d56f8b13c239b024165f96ef4a135320087 Mon Sep 17 00:00:00 2001 From: Aladdin Persson Date: Wed, 24 Mar 2021 22:07:20 +0100 Subject: [PATCH 4/7] small revisions to code examples --- ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py | 1 - ML/Pytorch/Basics/pytorch_simple_CNN.py | 1 - ML/Pytorch/Basics/pytorch_simple_fullynet.py | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py index 489caeb..f12d855 100644 --- a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py +++ b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py @@ -130,7 +130,6 @@ for epoch in range(num_epochs): # gradient descent update step/adam step optimizer.step() - # Check accuracy on training & test to see how good our model def check_accuracy(loader, model): num_correct = 0 diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py index 45c94e1..d18d948 
100644
--- a/ML/Pytorch/Basics/pytorch_simple_CNN.py
+++ b/ML/Pytorch/Basics/pytorch_simple_CNN.py
@@ -94,7 +94,6 @@ for epoch in range(num_epochs):
         # gradient descent or adam step
         optimizer.step()
 
-
 # Check accuracy on training & test to see how good our model
 def check_accuracy(loader, model):
     num_correct = 0
diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
index 4e08a9d..fa0a9f4 100644
--- a/ML/Pytorch/Basics/pytorch_simple_fullynet.py
+++ b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
@@ -95,7 +95,7 @@ for epoch in range(num_epochs):
     optimizer.step()
 
 
-# Check accuracy on training & test to see how good our model is
+# Check accuracy on training & test to see how good our model
 def check_accuracy(loader, model):
     num_correct = 0
     num_samples = 0
From b1e23795281428bacb022a75bb36012bf00017e3 Mon Sep 17 00:00:00 2001
From: Aladdin Persson
Date: Wed, 24 Mar 2021 22:09:25 +0100
Subject: [PATCH 5/7] test

---
 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py    | 157 -------------------
 ML/Pytorch/Basics/pytorch_simple_CNN.py      | 119 --------------
 ML/Pytorch/Basics/pytorch_simple_fullynet.py | 120 --------------
 3 files changed, 396 deletions(-)
 delete mode 100644 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
 delete mode 100644 ML/Pytorch/Basics/pytorch_simple_CNN.py
 delete mode 100644 ML/Pytorch/Basics/pytorch_simple_fullynet.py

diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
deleted file mode 100644
index f12d855..0000000
--- a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Example code of a simple RNN, GRU, LSTM on the MNIST dataset.
-
-Programmed by Aladdin Persson
-* 2020-05-09 Initial coding
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For a nice progress bar!
-
-# Set device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters
-input_size = 28
-hidden_size = 256
-num_layers = 2
-num_classes = 10
-sequence_length = 28
-learning_rate = 0.005
-batch_size = 64
-num_epochs = 3
-
-# Recurrent neural network (many-to-one)
-class RNN(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden and cell states
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate RNN
-        out, _ = self.rnn(x, h0)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden state of the last time step
-        out = self.fc(out)
-        return out
-
-
-# Recurrent neural network with GRU (many-to-one)
-class RNN_GRU(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN_GRU, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden and cell states
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate GRU
-        out, _ = self.gru(x, h0)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden state of the last time step
-        out = self.fc(out)
-        return out
-
-
-# Recurrent neural network with LSTM (many-to-one)
-class RNN_LSTM(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN_LSTM, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden and cell states
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate LSTM
-        out, _ = self.lstm(
-            x, (h0, c0)
-        )  # out: tensor of shape (batch_size, seq_length, hidden_size)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden state of the last time step
-        out = self.fc(out)
-        return out
-
-
-# Load Data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
-model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device).squeeze(1)
-        targets = targets.to(device=device)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent update step/adam step
-        optimizer.step()
-
-# Check accuracy on training & test to see how good our model
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-
-    # Set model to eval
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device).squeeze(1)
-            y = y.to(device=device)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-    # Toggle model back to train
-    model.train()
-    return num_correct / num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py
deleted file mode 100644
index d18d948..0000000
--- a/ML/Pytorch/Basics/pytorch_simple_CNN.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-A simple walkthrough of how to code a convolutional neural network (CNN)
-using the PyTorch library. For demonstration we train it on the very
-common MNIST dataset of handwritten digits. In this code we go through
-how to create the network as well as initialize a loss function, optimizer,
-check accuracy and more.
-
-Programmed by Aladdin Persson
-* 2020-04-08: Initial coding
-* 2021-03-24: More detailed comments and small revision of the code
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For nice progress bar!
-
-# Simple CNN
-class CNN(nn.Module):
-    def __init__(self, in_channels=1, num_classes=10):
-        super(CNN, self).__init__()
-        self.conv1 = nn.Conv2d(
-            in_channels=in_channels,
-            out_channels=8,
-            kernel_size=(3, 3),
-            stride=(1, 1),
-            padding=(1, 1),
-        )
-        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
-        self.conv2 = nn.Conv2d(
-            in_channels=8,
-            out_channels=16,
-            kernel_size=(3, 3),
-            stride=(1, 1),
-            padding=(1, 1),
-        )
-        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        x = self.pool(x)
-        x = F.relu(self.conv2(x))
-        x = self.pool(x)
-        x = x.reshape(x.shape[0], -1)
-        x = self.fc1(x)
-        return x
-
-
-# Set device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters
-in_channels = 1
-num_classes = 10
-learning_rate = 0.001
-batch_size = 64
-num_epochs = 3
-
-# Load Data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network
-model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device)
-        targets = targets.to(device=device)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent or adam step
-        optimizer.step()
-
-# Check accuracy on training & test to see how good our model
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device)
-            y = y.to(device=device)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-
-    model.train()
-    return num_correct/num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
\ No newline at end of file
diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
deleted file mode 100644
index fa0a9f4..0000000
--- a/ML/Pytorch/Basics/pytorch_simple_fullynet.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-A simple walkthrough of how to code a fully connected neural network
-using the PyTorch library. For demonstration we train it on the very
-common MNIST dataset of handwritten digits. In this code we go through
-how to create the network as well as initialize a loss function, optimizer,
-check accuracy and more.
-
-Programmed by Aladdin Persson
-* 2020-04-08: Initial coding
-* 2021-03-24: Added more detailed comments, also removed part of
-              check_accuracy which would only work specifically on MNIST.
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For nice progress bar!
-
-# Here we create our simple neural network. For more details here we are subclassing and
-# inheriting from nn.Module, this is the most general way to create your networks and
-# allows for more flexibility. I encourage you to also check out nn.Sequential which
-# would be easier to use in this scenario but I wanted to show you something that
-# "always" works.
-class NN(nn.Module):
-    def __init__(self, input_size, num_classes):
-        super(NN, self).__init__()
-        # Our first linear layer takes input_size, in this case 784 nodes to 50
-        # and our second linear layer takes 50 to the num_classes we have, in
-        # this case 10.
-        self.fc1 = nn.Linear(input_size, 50)
-        self.fc2 = nn.Linear(50, num_classes)
-
-    def forward(self, x):
-        """
-        x here is the mnist images and we run it through fc1, fc2 that we created above.
-        we also add a ReLU activation function in between and for that (since it has no parameters)
-        I recommend using nn.functional (F)
-        """
-
-        x = F.relu(self.fc1(x))
-        x = self.fc2(x)
-        return x
-
-
-# Set device cuda for GPU if it's available otherwise run on the CPU
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters of our neural network which depend on the dataset, and
-# also just experimenting to see what works well (learning rate for example).
-input_size = 784
-num_classes = 10
-learning_rate = 0.001
-batch_size = 64
-num_epochs = 3
-
-# Load Training and Test data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network
-model = NN(input_size=input_size, num_classes=num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device)
-        targets = targets.to(device=device)
-
-        # Get to correct shape
-        data = data.reshape(data.shape[0], -1)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent or adam step
-        optimizer.step()
-
-
-# Check accuracy on training & test to see how good our model
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device)
-            y = y.to(device=device)
-            x = x.reshape(x.shape[0], -1)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-    model.train()
-    return num_correct/num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
From d01c4b98af5bb12fa25723764a8008caa7885379 Mon Sep 17 00:00:00 2001
From: Aladdin Persson
Date: Wed, 24 Mar 2021 22:12:45 +0100
Subject: [PATCH 6/7] revisions to code examples

---
 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py    | 157 +++++++++++++++++++
 ML/Pytorch/Basics/pytorch_simple_CNN.py      | 119 ++++++++++++++
 ML/Pytorch/Basics/pytorch_simple_fullynet.py | 120 ++++++++++++++
 3 files changed, 396 insertions(+)
 create mode 100644 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
 create mode 100644 ML/Pytorch/Basics/pytorch_simple_CNN.py
 create mode 100644 ML/Pytorch/Basics/pytorch_simple_fullynet.py

diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
new file mode 100644
index 0000000..f12d855
--- /dev/null
+++ b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
@@ -0,0 +1,157 @@
+"""
+Example code of a simple RNN, GRU, LSTM on the MNIST dataset.
+
+Programmed by Aladdin Persson
+* 2020-05-09 Initial coding
+
+"""
+
+# Imports
+import torch
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For a nice progress bar!
+
+# Set device
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Hyperparameters
+input_size = 28
+hidden_size = 256
+num_layers = 2
+num_classes = 10
+sequence_length = 28
+learning_rate = 0.005
+batch_size = 64
+num_epochs = 3
+
+# Recurrent neural network (many-to-one)
+class RNN(nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, num_classes):
+        super(RNN, self).__init__()
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
+        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
+
+    def forward(self, x):
+        # Set initial hidden and cell states
+        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
+
+        # Forward propagate RNN
+        out, _ = self.rnn(x, h0)
+        out = out.reshape(out.shape[0], -1)
+
+        # Decode the hidden state of the last time step
+        out = self.fc(out)
+        return out
+
+
+# Recurrent neural network with GRU (many-to-one)
+class RNN_GRU(nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, num_classes):
+        super(RNN_GRU, self).__init__()
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
+        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
+
+    def forward(self, x):
+        # Set initial hidden and cell states
+        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
+
+        # Forward propagate GRU
+        out, _ = self.gru(x, h0)
+        out = out.reshape(out.shape[0], -1)
+
+        # Decode the hidden state of the last time step
+        out = self.fc(out)
+        return out
+
+
+# Recurrent neural network with LSTM (many-to-one)
+class RNN_LSTM(nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, num_classes):
+        super(RNN_LSTM, self).__init__()
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
+        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
+
+    def forward(self, x):
+        # Set initial hidden and cell states
+        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
+        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
+
+        # Forward propagate LSTM
+        out, _ = self.lstm(
+            x, (h0, c0)
+        )  # out: tensor of shape (batch_size, seq_length, hidden_size)
+        out = out.reshape(out.shape[0], -1)
+
+        # Decode the hidden state of the last time step
+        out = self.fc(out)
+        return out
+
+
+# Load Data
+train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
+test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
+train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
+test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
+
+# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
+model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
+
+# Loss and optimizer
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+
+# Train Network
+for epoch in range(num_epochs):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
+        # Get data to cuda if possible
+        data = data.to(device=device).squeeze(1)
+        targets = targets.to(device=device)
+
+        # forward
+        scores = model(data)
+        loss = criterion(scores, targets)
+
+        # backward
+        optimizer.zero_grad()
+        loss.backward()
+
+        # gradient descent update step/adam step
+        optimizer.step()
+
+# Check accuracy on training & test to see how good our model
+def check_accuracy(loader, model):
+    num_correct = 0
+    num_samples = 0
+
+    # Set model to eval
+    model.eval()
+
+    with torch.no_grad():
+        for x, y in loader:
+            x = x.to(device=device).squeeze(1)
+            y = y.to(device=device)
+
+            scores = model(x)
+            _, predictions = scores.max(1)
+            num_correct += (predictions == y).sum()
+            num_samples += predictions.size(0)
+
+    # Toggle model back to train
+    model.train()
+    return num_correct / num_samples
+
+
+print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
+print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py
new file mode 100644
index 0000000..d18d948
--- /dev/null
+++ b/ML/Pytorch/Basics/pytorch_simple_CNN.py
@@ -0,0 +1,119 @@
+"""
+A simple walkthrough of how to code a convolutional neural network (CNN)
+using the PyTorch library. For demonstration we train it on the very
+common MNIST dataset of handwritten digits. In this code we go through
+how to create the network as well as initialize a loss function, optimizer,
+check accuracy and more.
+
+Programmed by Aladdin Persson
+* 2020-04-08: Initial coding
+* 2021-03-24: More detailed comments and small revision of the code
+
+"""
+
+# Imports
+import torch
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For nice progress bar!
+
+# Simple CNN
+class CNN(nn.Module):
+    def __init__(self, in_channels=1, num_classes=10):
+        super(CNN, self).__init__()
+        self.conv1 = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=8,
+            kernel_size=(3, 3),
+            stride=(1, 1),
+            padding=(1, 1),
+        )
+        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2 = nn.Conv2d(
+            in_channels=8,
+            out_channels=16,
+            kernel_size=(3, 3),
+            stride=(1, 1),
+            padding=(1, 1),
+        )
+        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        x = self.pool(x)
+        x = F.relu(self.conv2(x))
+        x = self.pool(x)
+        x = x.reshape(x.shape[0], -1)
+        x = self.fc1(x)
+        return x
+
+
+# Set device
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Hyperparameters
+in_channels = 1
+num_classes = 10
+learning_rate = 0.001
+batch_size = 64
+num_epochs = 3
+
+# Load Data
+train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
+test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
+train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
+test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
+
+# Initialize network
+model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
+
+# Loss and optimizer
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+
+# Train Network
+for epoch in range(num_epochs):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
+        # Get data to cuda if possible
+        data = data.to(device=device)
+        targets = targets.to(device=device)
+
+        # forward
+        scores = model(data)
+        loss = criterion(scores, targets)
+
+        # backward
+        optimizer.zero_grad()
+        loss.backward()
+
+        # gradient descent or adam step
+        optimizer.step()
+
+# Check accuracy on training & test to see how good our model
+def check_accuracy(loader, model):
+    num_correct = 0
+    num_samples = 0
+    model.eval()
+
+    with torch.no_grad():
+        for x, y in loader:
+            x = x.to(device=device)
+            y = y.to(device=device)
+
+            scores = model(x)
+            _, predictions = scores.max(1)
+            num_correct += (predictions == y).sum()
+            num_samples += predictions.size(0)
+
+
+    model.train()
+    return num_correct/num_samples
+
+
+print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
+print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
\ No newline at end of file
diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
new file mode 100644
index 0000000..fa0a9f4
--- /dev/null
+++ b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
@@ -0,0 +1,120 @@
+"""
+A simple walkthrough of how to code a fully connected neural network
+using the PyTorch library. For demonstration we train it on the very
+common MNIST dataset of handwritten digits. In this code we go through
+how to create the network as well as initialize a loss function, optimizer,
+check accuracy and more.
+
+Programmed by Aladdin Persson
+* 2020-04-08: Initial coding
+* 2021-03-24: Added more detailed comments, also removed part of
+              check_accuracy which would only work specifically on MNIST.
+
+"""
+
+# Imports
+import torch
+import torchvision  # torch package for vision related things
+import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
+import torchvision.datasets as datasets  # Standard datasets
+import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
+from torch import optim  # For optimizers like SGD, Adam, etc.
+from torch import nn  # All neural network modules
+from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
+from tqdm import tqdm  # For nice progress bar!
+
+# Here we create our simple neural network. For more details here we are subclassing and
+# inheriting from nn.Module, this is the most general way to create your networks and
+# allows for more flexibility. I encourage you to also check out nn.Sequential which
+# would be easier to use in this scenario but I wanted to show you something that
+# "always" works.
+class NN(nn.Module):
+    def __init__(self, input_size, num_classes):
+        super(NN, self).__init__()
+        # Our first linear layer takes input_size, in this case 784 nodes to 50
+        # and our second linear layer takes 50 to the num_classes we have, in
+        # this case 10.
+        self.fc1 = nn.Linear(input_size, 50)
+        self.fc2 = nn.Linear(50, num_classes)
+
+    def forward(self, x):
+        """
+        x here is the mnist images and we run it through fc1, fc2 that we created above.
+        we also add a ReLU activation function in between and for that (since it has no parameters)
+        I recommend using nn.functional (F)
+        """
+
+        x = F.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
+
+
+# Set device cuda for GPU if it's available otherwise run on the CPU
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Hyperparameters of our neural network which depend on the dataset, and
+# also just experimenting to see what works well (learning rate for example).
+input_size = 784 +num_classes = 10 +learning_rate = 0.001 +batch_size = 64 +num_epochs = 3 + +# Load Training and Test data +train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True) +test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True) +train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) +test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True) + +# Initialize network +model = NN(input_size=input_size, num_classes=num_classes).to(device) + +# Loss and optimizer +criterion = nn.CrossEntropyLoss() +optimizer = optim.Adam(model.parameters(), lr=learning_rate) + +# Train Network +for epoch in range(num_epochs): + for batch_idx, (data, targets) in enumerate(tqdm(train_loader)): + # Get data to cuda if possible + data = data.to(device=device) + targets = targets.to(device=device) + + # Get to correct shape + data = data.reshape(data.shape[0], -1) + + # forward + scores = model(data) + loss = criterion(scores, targets) + + # backward + optimizer.zero_grad() + loss.backward() + + # gradient descent or adam step + optimizer.step() + + +# Check accuracy on training & test to see how good our model +def check_accuracy(loader, model): + num_correct = 0 + num_samples = 0 + model.eval() + + with torch.no_grad(): + for x, y in loader: + x = x.to(device=device) + y = y.to(device=device) + x = x.reshape(x.shape[0], -1) + + scores = model(x) + _, predictions = scores.max(1) + num_correct += (predictions == y).sum() + num_samples += predictions.size(0) + + model.train() + return num_correct/num_samples + + +print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}") +print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}") From 37762890f0fc714f1f65b30e555fac4455b7f926 Mon Sep 17 00:00:00 2001 From: Aladdin Persson Date: Wed, 24 Mar 2021 22:18:05 +0100 Subject: [PATCH 7/7] updated readme with new links --- README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 5786b9b..8fdc76b 100644 --- a/README.md +++ b/README.md @@ -38,22 +38,22 @@ If you have any specific video suggestion please make a comment on YouTube :) ### Basics * [![Youtube Link][logo]](https://youtu.be/x9JiIFvlUwk)   [Tensor Basics](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_tensorbasics.py) -* [![Youtube Link][logo]](https://youtu.be/Jy4wM2X21u0)   [Feedforward Neural Network](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_simple_fullynet.py#L26-L35) -* [![Youtube Link][logo]](https://youtu.be/wnK3uWv_WkU)   [Convolutional Neural Network](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/157a5f458f272a513eb6b4a19d6613aec32dc21c/ML/Pytorch/Basics/pytorch_simple_CNN.py#L25-L41) -* [![Youtube Link][logo]](https://youtu.be/Gl2WXLIMvKA)   [Recurrent Neural Network](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py) -* [![Youtube Link][logo]](https://youtu.be/jGst43P-TJA)   [Bidirectional Recurrent Neural Network](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_bidirectional_lstm.py) -* [![Youtube Link][logo]](https://youtu.be/g6kQl_EFn84)   [Loading and saving 
model](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_loadsave.py#L26-L34) -* [![Youtube Link][logo]](https://youtu.be/ZoZHd0Zm3RY)   [Custom Dataset (Images)](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/aba36b89b438ca8f608a186f4d61d1b60c7f24e0/ML/Pytorch/Basics/custom_dataset/custom_dataset.py#L12-L29) -* [![Youtube Link][logo]](https://youtu.be/9sHcLvVXsns)   [Custom Dataset (Text)](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/custom_dataset_txt/loader_customtext.py) -* [![Youtube Link][logo]](https://youtu.be/qaDe0qQZ5AQ)   [Transfer Learning and finetuning](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_pretrain_finetune.py#L33-L54) -* [![Youtube Link][logo]](https://youtu.be/Zvd276j9sZ8)   [Data augmentation using Torchvision](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_transforms.py#L56-L72) +* [![Youtube Link][logo]](https://youtu.be/Jy4wM2X21u0)   [Feedforward Neural Network](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_simple_fullynet.py) +* [![Youtube Link][logo]](https://youtu.be/wnK3uWv_WkU)   [Convolutional Neural Network](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_simple_CNN.py) +* [![Youtube Link][logo]](https://youtu.be/Gl2WXLIMvKA)   [Recurrent Neural Network](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py) +* [![Youtube Link][logo]](https://youtu.be/jGst43P-TJA)   [Bidirectional Recurrent Neural Network](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_bidirectional_lstm.py) +* [![Youtube Link][logo]](https://youtu.be/g6kQl_EFn84)   [Loading and saving model](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_loadsave.py) +* [![Youtube Link][logo]](https://youtu.be/ZoZHd0Zm3RY)   [Custom Dataset (Images)](https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/Basics/custom_dataset) +* [![Youtube Link][logo]](https://youtu.be/9sHcLvVXsns)   [Custom Dataset (Text)](https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/Basics/custom_dataset_txt) +* [![Youtube Link][logo]](https://youtu.be/qaDe0qQZ5AQ)   [Transfer Learning and finetuning](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_pretrain_finetune.py) +* [![Youtube Link][logo]](https://youtu.be/Zvd276j9sZ8)   [Data augmentation using Torchvision](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_transforms.py) * [![Youtube Link][logo]](https://youtu.be/rAdLwKJBvPM)   [Data augmentation using Albumentations](https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/Basics/albumentations_tutorial) -* [![Youtube Link][logo]](https://youtu.be/RLqsxWaQdHE)   [TensorBoard Example](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/79f2e1928906f3cccbae6c024f3f79fd05262cd1/ML/Pytorch/Basics/pytorch_tensorboard_.py#L72-L120) -* [![Youtube Link][logo]](https://youtu.be/y6IEcEBRZks)   [Calculate Mean and STD of 
Images](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/55637e6afbb8cc8be6a63e04bbc899704f862911/ML/Pytorch/Basics/pytorch_std_mean.py#L41-L53) -* [![Youtube Link][logo]](https://youtu.be/RKHopFfbPao)   [Simple Progress bar]() +* [![Youtube Link][logo]](https://youtu.be/RLqsxWaQdHE)   [TensorBoard Example](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_tensorboard_.py) +* [![Youtube Link][logo]](https://youtu.be/y6IEcEBRZks)   [Calculate Mean and STD of Images](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_std_mean.py) +* [![Youtube Link][logo]](https://youtu.be/RKHopFfbPao)   [Simple Progress bar](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_progress_bar.py) * [![Youtube Link][logo]](https://youtu.be/1SZocGaCAr8)   [Deterministic Behavior](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/set_deterministic_behavior/pytorch_set_seeds.py) -* [![Youtube Link][logo]](https://youtu.be/P31hB37g4Ak)   [Learning Rate Scheduler](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_lr_ratescheduler.py#L45-L78) -* [![Youtube Link][logo]](https://youtu.be/xWQ-p_o0Uik)   [Initialization of weights](https://github.com/AladdinPerzon/Machine-Learning-Collection/blob/804c45e83b27c59defb12f0ea5117de30fe25289/ML/Pytorch/Basics/pytorch_init_weights.py#L35-L49) +* [![Youtube Link][logo]](https://youtu.be/P31hB37g4Ak)   [Learning Rate Scheduler](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_lr_ratescheduler.py) +* [![Youtube Link][logo]](https://youtu.be/xWQ-p_o0Uik)   [Initialization of weights](https://github.com/aladdinpersson/Machine-Learning-Collection/blob/master/ML/Pytorch/Basics/pytorch_init_weights.py) ### More Advanced