diff --git a/CK+_VGG19/Confusion Matrix.png b/CK+_VGG19/Confusion Matrix.png
new file mode 100644
index 0000000..c1e0f30
Binary files /dev/null and b/CK+_VGG19/Confusion Matrix.png differ
diff --git a/CK.py b/CK.py
index a0c2a05..559b1c2 100644
--- a/CK.py
+++ b/CK.py
@@ -1,90 +1,91 @@
-from __future__ import print_function
-from PIL import Image
-import numpy as np
-import h5py
-import torch.utils.data as data
-
-
-class CK(data.Dataset):
-    """`CK+ Dataset.
-
-    Args:
-        train (bool, optional): If True, creates dataset from training set, otherwise
-            creates from test set.
-        transform (callable, optional): A function/transform that takes in an PIL image
-            and returns a transformed version. E.g, ``transforms.RandomCrop``
-
-    there are 135,177,75,207,84,249,54 images in data
-    we choose 123,159,66,186,75,225,48 images for training
-    we choose 12,8,9,21,9,24,6 images for testing
-    the split are in order according to the fold number
-    """
-
-    def __init__(self, split='Training', fold = 1, transform=None):
-        self.transform = transform
-        self.split = split # training set or test set
-        self.fold = fold # the k-fold cross validation
-        self.data = h5py.File('./data/CK_data.h5', 'r', driver='core')
-
-        number = len(self.data['data_label']) #981
-        sum_number = [0,135,312,387,594,678,927,981] # the sum of class number
-        test_number = [12,18,9,21,9,24,6] # the number of each class
-
-        test_index = []
-        train_index = []
-
-        for j in xrange(len(test_number)):
-            for k in xrange(test_number[j]):
-                if self.fold != 10: #the last fold start from the last element
-                    test_index.append(sum_number[j]+(self.fold-1)*test_number[j]+k)
-                else:
-                    test_index.append(sum_number[j+1]-1-k)
-
-        for i in xrange(number):
-            if i not in test_index:
-                train_index.append(i)
-
-        print(len(train_index),len(test_index))
-
-        # now load the picked numpy arrays
-        if self.split == 'Training':
-            self.train_data = []
-            self.train_labels = []
-            for ind in xrange(len(train_index)):
-                self.train_data.append(self.data['data_pixel'][train_index[ind]])
-                self.train_labels.append(self.data['data_label'][train_index[ind]])
-
-        elif self.split == 'Testing':
-            self.test_data = []
-            self.test_labels = []
-            for ind in xrange(len(test_index)):
-                self.test_data.append(self.data['data_pixel'][test_index[ind]])
-                self.test_labels.append(self.data['data_label'][test_index[ind]])
-
-    def __getitem__(self, index):
-        """
-        Args:
-            index (int): Index
-
-        Returns:
-            tuple: (image, target) where target is index of the target class.
-        """
-        if self.split == 'Training':
-            img, target = self.train_data[index], self.train_labels[index]
-        elif self.split == 'Testing':
-            img, target = self.test_data[index], self.test_labels[index]
-        # doing this so that it is consistent with all other datasets
-        # to return a PIL Image
-        img = img[:, :, np.newaxis]
-        img = np.concatenate((img, img, img), axis=2)
-        img = Image.fromarray(img)
-        if self.transform is not None:
-            img = self.transform(img)
-        return img, target
-
-    def __len__(self):
-        if self.split == 'Training':
-            return len(self.train_data)
-        elif self.split == 'Testing':
-            return len(self.test_data)
-
+from __future__ import print_function
+from PIL import Image
+import numpy as np
+import h5py
+import torch.utils.data as data
+
+
+class CK(data.Dataset):
+    """`CK+ Dataset.
+
+    Args:
+        split (string, optional): 'Training' or 'Testing'; selects which part
+            of the fold split to load.
+        fold (int, optional): which of the 10 cross-validation folds to use (1-10).
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version.
E.g., ``transforms.RandomCrop``
+
+    there are 135,177,75,207,84,249,54 images in the dataset
+    we choose 123,159,66,186,75,225,48 images for training
+    we choose 12,18,9,21,9,24,6 images for testing
+    the splits are ordered by class and fold number
+    """
+
+    def __init__(self, split='Training', fold = 1, transform=None):
+        self.transform = transform
+        self.split = split # training set or test set
+        self.fold = fold # the k-fold cross validation
+        self.data = h5py.File('./data/CK_data.h5', 'r', driver='core')
+
+        number = len(self.data['data_label']) # 981
+        sum_number = [0,135,312,387,594,678,927,981] # cumulative class counts
+        test_number = [12,18,9,21,9,24,6] # test images per class and fold
+
+        test_index = []
+        train_index = []
+
+        for j in range(len(test_number)):
+            for k in range(test_number[j]):
+                if self.fold != 10: # the last fold starts from the last element
+                    test_index.append(sum_number[j]+(self.fold-1)*test_number[j]+k)
+                else:
+                    test_index.append(sum_number[j+1]-1-k)
+
+        for i in range(number):
+            if i not in test_index:
+                train_index.append(i)
+
+        print(len(train_index),len(test_index))
+
+        # now load the selected numpy arrays
+        if self.split == 'Training':
+            self.train_data = []
+            self.train_labels = []
+            for ind in range(len(train_index)):
+                self.train_data.append(self.data['data_pixel'][train_index[ind]])
+                self.train_labels.append(self.data['data_label'][train_index[ind]])
+
+        elif self.split == 'Testing':
+            self.test_data = []
+            self.test_labels = []
+            for ind in range(len(test_index)):
+                self.test_data.append(self.data['data_pixel'][test_index[ind]])
+                self.test_labels.append(self.data['data_label'][test_index[ind]])
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: (image, target) where target is index of the target class.
+ """ + if self.split == 'Training': + img, target = self.train_data[index], self.train_labels[index] + elif self.split == 'Testing': + img, target = self.test_data[index], self.test_labels[index] + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = img[:, :, np.newaxis] + img = np.concatenate((img, img, img), axis=2) + img = Image.fromarray(img) + if self.transform is not None: + img = self.transform(img) + return img, target + + def __len__(self): + if self.split == 'Training': + return len(self.train_data) + elif self.split == 'Testing': + return len(self.test_data) + diff --git a/Readme.md b/Readme.md index 6d55852..1677a61 100644 --- a/Readme.md +++ b/Readme.md @@ -2,17 +2,19 @@ A CNN based pytorch implementation on facial expression recognition (FER2013 and CK+), achieving 73.112% (state-of-the-art) in FER2013 and 94.64% in CK+ dataset ## Demos ## -![Image text](https://raw.githubusercontent.com/WuJie1010/Facial-Expression-Recognition.Pytorch/master/demo/1.png) -![Image text](https://raw.githubusercontent.com/WuJie1010/Facial-Expression-Recognition.Pytorch/master/demo/2.png) +![Image text](https://github.com/xmtybb/Facial-Expression-Recognition.Pytorch/blob/xmtybb-patch-1/images/results/3.png) +![Image text](https://github.com/xmtybb/Facial-Expression-Recognition.Pytorch/blob/xmtybb-patch-1/images/results/4.png) ## Dependencies ## -- Python 2.7 -- Pytorch >=0.2.0 +- Python 3.7 +- Pytorch 1.8.1 - h5py (Preprocessing) - sklearn (plot confusion matrix) ## Visualize for a test image by a pre-trained model ## -- Firstly, download the pre-trained model from https://drive.google.com/open?id=1Oy_9YmpkSKX1Q8jkOhJbz3Mc7qjyISzU (or https://pan.baidu.com/s/1gCL0TlCwKctAy_5yhzHy5Q, key: g2d3) and then put it in the "FER2013_VGG19" folder; Next, Put the test image (rename as 1.jpg) into the "images" folder, then +- Download the "FER2013_VGG19" pre-trained model from https://drive.google.com/open?id=1Oy_9YmpkSKX1Q8jkOhJbz3Mc7qjyISzU (or https://pan.baidu.com/s/1gCL0TlCwKctAy_5yhzHy5Q, key: g2d3) +- +- for CK+/VGG19 - python visualize.py ## FER2013 Dataset ## diff --git a/images/3.jpeg b/images/3.jpeg new file mode 100644 index 0000000..1486f80 Binary files /dev/null and b/images/3.jpeg differ diff --git a/images/4.jpeg b/images/4.jpeg new file mode 100644 index 0000000..0ff1cb5 Binary files /dev/null and b/images/4.jpeg differ diff --git a/images/results/3.png b/images/results/3.png new file mode 100644 index 0000000..44c7599 Binary files /dev/null and b/images/results/3.png differ diff --git a/images/results/4.png b/images/results/4.png new file mode 100644 index 0000000..305108f Binary files /dev/null and b/images/results/4.png differ diff --git a/images/results/l.png b/images/results/l.png new file mode 100644 index 0000000..f1e505c Binary files /dev/null and b/images/results/l.png differ diff --git a/mainpro_CK+.py b/mainpro_CK+.py index bc7cffa..1a12fde 100644 --- a/mainpro_CK+.py +++ b/mainpro_CK+.py @@ -1,176 +1,178 @@ -'''Train CK+ with PyTorch.''' -# 10 crop for data enhancement -from __future__ import print_function - -import torch -import torch.nn as nn -import torch.optim as optim -import torch.nn.functional as F -import torch.backends.cudnn as cudnn -import torchvision -import transforms as transforms -import numpy as np -import os -import argparse -import utils -from CK import CK -from torch.autograd import Variable -from models import * - -parser = argparse.ArgumentParser(description='PyTorch CK+ CNN Training') 
-parser.add_argument('--model', type=str, default='VGG19', help='CNN architecture') -parser.add_argument('--dataset', type=str, default='CK+', help='dataset') -parser.add_argument('--fold', default=1, type=int, help='k fold number') -parser.add_argument('--bs', default=128, type=int, help='batch_size') -parser.add_argument('--lr', default=0.01, type=float, help='learning rate') -parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') -opt = parser.parse_args() - -use_cuda = torch.cuda.is_available() - -best_Test_acc = 0 # best PrivateTest accuracy -best_Test_acc_epoch = 0 -start_epoch = 0 # start from epoch 0 or last checkpoint epoch - -learning_rate_decay_start = 20 # 50 -learning_rate_decay_every = 1 # 5 -learning_rate_decay_rate = 0.8 # 0.9 - -cut_size = 44 -total_epoch = 60 - -path = os.path.join(opt.dataset + '_' + opt.model, str(opt.fold)) - -# Data -print('==> Preparing data..') -transform_train = transforms.Compose([ - transforms.RandomCrop(cut_size), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), -]) - -transform_test = transforms.Compose([ - transforms.TenCrop(cut_size), - transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), -]) - -trainset = CK(split = 'Training', fold = opt.fold, transform=transform_train) -trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.bs, shuffle=True, num_workers=1) -testset = CK(split = 'Testing', fold = opt.fold, transform=transform_test) -testloader = torch.utils.data.DataLoader(testset, batch_size=5, shuffle=False, num_workers=1) - -# Model -if opt.model == 'VGG19': - net = VGG('VGG19') -elif opt.model == 'Resnet18': - net = ResNet18() - -if opt.resume: - # Load checkpoint. - print('==> Resuming from checkpoint..') - assert os.path.isdir(path), 'Error: no checkpoint directory found!' 
- checkpoint = torch.load(os.path.join(path,'Test_model.t7')) - - net.load_state_dict(checkpoint['net']) - best_Test_acc = checkpoint['best_Test_acc'] - best_Test_acc_epoch = checkpoint['best_Test_acc_epoch'] - start_epoch = best_Test_acc_epoch + 1 -else: - print('==> Building model..') - -if use_cuda: - net.cuda() - -criterion = nn.CrossEntropyLoss() -optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9, weight_decay=5e-4) - -# Training -def train(epoch): - print('\nEpoch: %d' % epoch) - global Train_acc - net.train() - train_loss = 0 - correct = 0 - total = 0 - - if epoch > learning_rate_decay_start and learning_rate_decay_start >= 0: - frac = (epoch - learning_rate_decay_start) // learning_rate_decay_every - decay_factor = learning_rate_decay_rate ** frac - current_lr = opt.lr * decay_factor - utils.set_lr(optimizer, current_lr) # set the decayed rate - else: - current_lr = opt.lr - print('learning_rate: %s' % str(current_lr)) - - - for batch_idx, (inputs, targets) in enumerate(trainloader): - if use_cuda: - inputs, targets = inputs.cuda(), targets.cuda() - optimizer.zero_grad() - inputs, targets = Variable(inputs), Variable(targets) - outputs = net(inputs) - loss = criterion(outputs, targets) - loss.backward() - utils.clip_gradient(optimizer, 0.1) - optimizer.step() - - train_loss += loss.data[0] - _, predicted = torch.max(outputs.data, 1) - total += targets.size(0) - correct += predicted.eq(targets.data).cpu().sum() - - utils.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' - % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) - - Train_acc = 100.*correct/total - -def test(epoch): - global Test_acc - global best_Test_acc - global best_Test_acc_epoch - net.eval() - PrivateTest_loss = 0 - correct = 0 - total = 0 - for batch_idx, (inputs, targets) in enumerate(testloader): - bs, ncrops, c, h, w = np.shape(inputs) - inputs = inputs.view(-1, c, h, w) - - if use_cuda: - inputs, targets = inputs.cuda(), targets.cuda() - inputs, targets = Variable(inputs, volatile=True), Variable(targets) - outputs = net(inputs) - outputs_avg = outputs.view(bs, ncrops, -1).mean(1) # avg over crops - - loss = criterion(outputs_avg, targets) - PrivateTest_loss += loss.data[0] - _, predicted = torch.max(outputs_avg.data, 1) - total += targets.size(0) - correct += predicted.eq(targets.data).cpu().sum() - - utils.progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' - % (PrivateTest_loss / (batch_idx + 1), 100. * correct / total, correct, total)) - # Save checkpoint. 
- Test_acc = 100.*correct/total - - if Test_acc > best_Test_acc: - print('Saving..') - print("best_Test_acc: %0.3f" % Test_acc) - state = {'net': net.state_dict() if use_cuda else net, - 'best_Test_acc': Test_acc, - 'best_Test_acc_epoch': epoch, - } - if not os.path.isdir(opt.dataset + '_' + opt.model): - os.mkdir(opt.dataset + '_' + opt.model) - if not os.path.isdir(path): - os.mkdir(path) - torch.save(state, os.path.join(path, 'Test_model.t7')) - best_Test_acc = Test_acc - best_Test_acc_epoch = epoch - -for epoch in range(start_epoch, total_epoch): - train(epoch) - test(epoch) - -print("best_Test_acc: %0.3f" % best_Test_acc) -print("best_Test_acc_epoch: %d" % best_Test_acc_epoch) +'''Train CK+ with PyTorch.''' +# 10 crop for data enhancement +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn +import torchvision +import transforms as transforms +import numpy as np +import os +import argparse +import utils +from CK import CK +from torch.autograd import Variable +from models import * + +parser = argparse.ArgumentParser(description='PyTorch CK+ CNN Training') +parser.add_argument('--model', type=str, default='VGG19', help='CNN architecture') +parser.add_argument('--dataset', type=str, default='CK+', help='dataset') +parser.add_argument('--fold', default=1, type=int, help='k fold number') +parser.add_argument('--bs', default=128, type=int, help='batch_size') +parser.add_argument('--lr', default=0.01, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') +opt = parser.parse_args() + +use_cuda = torch.cuda.is_available() + +best_Test_acc = 0 # best PrivateTest accuracy +best_Test_acc_epoch = 0 +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +learning_rate_decay_start = 20 # 50 +learning_rate_decay_every = 1 # 5 +learning_rate_decay_rate = 0.8 # 0.9 + +cut_size = 44 +total_epoch = 60 + +path = os.path.join(opt.dataset + '_' + opt.model, str(opt.fold)) + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(cut_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), +]) + +transform_test = transforms.Compose([ + transforms.TenCrop(cut_size), + transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), +]) + +trainset = CK(split = 'Training', fold = opt.fold, transform=transform_train) +trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.bs, shuffle=True, num_workers=0) +testset = CK(split = 'Testing', fold = opt.fold, transform=transform_test) +testloader = torch.utils.data.DataLoader(testset, batch_size=5, shuffle=False, num_workers=0) + +# Model +if opt.model == 'VGG19': + net = VGG('VGG19') +elif opt.model == 'Resnet18': + net = ResNet18() + +if opt.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir(path), 'Error: no checkpoint directory found!' 
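+    # NOTE: this resume branch still expects the old dict-style
+    # 'Test_model.t7' checkpoint, while new runs save a bare state_dict to
+    # 'Test_model.t8' (see test() below). Resuming from a '.t8' file would
+    # need something like the following instead, and the accuracy metadata
+    # would not be available:
+    #     net.load_state_dict(torch.load(os.path.join(path, 'Test_model.t8')))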
+    checkpoint = torch.load(os.path.join(path,'Test_model.t7'))
+
+    net.load_state_dict(checkpoint['net'])
+    best_Test_acc = checkpoint['best_Test_acc']
+    best_Test_acc_epoch = checkpoint['best_Test_acc_epoch']
+    start_epoch = best_Test_acc_epoch + 1
+else:
+    print('==> Building model..')
+
+if use_cuda:
+    net.cuda()
+
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9, weight_decay=5e-4)
+
+# Training
+def train(epoch):
+    print('\nEpoch: %d' % epoch)
+    global Train_acc
+    net.train()
+    train_loss = 0
+    correct = 0
+    total = 0
+
+    if epoch > learning_rate_decay_start and learning_rate_decay_start >= 0:
+        frac = (epoch - learning_rate_decay_start) // learning_rate_decay_every
+        decay_factor = learning_rate_decay_rate ** frac
+        current_lr = opt.lr * decay_factor
+        utils.set_lr(optimizer, current_lr) # set the decayed rate
+    else:
+        current_lr = opt.lr
+    print('learning_rate: %s' % str(current_lr))
+
+
+    for batch_idx, (inputs, targets) in enumerate(trainloader):
+        if use_cuda:
+            inputs, targets = inputs.cuda(), targets.cuda()
+        optimizer.zero_grad()
+        inputs, targets = Variable(inputs), Variable(targets)
+        outputs = net(inputs)
+        loss = criterion(outputs, targets)
+        loss.backward()
+        utils.clip_gradient(optimizer, 0.1)
+        optimizer.step()
+
+        train_loss += loss.item()  # .item() yields a Python float (PyTorch >= 0.4)
+        _, predicted = torch.max(outputs.data, 1)
+        total += targets.size(0)
+        correct += predicted.eq(targets.data).cpu().sum()
+
+        utils.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
+            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
+
+    Train_acc = 100.*correct/total
+
+def test(epoch):
+    global Test_acc
+    global best_Test_acc
+    global best_Test_acc_epoch
+    net.eval()
+    PrivateTest_loss = 0
+    correct = 0
+    total = 0
+    for batch_idx, (inputs, targets) in enumerate(testloader):
+        bs, ncrops, c, h, w = np.shape(inputs)
+        inputs = inputs.view(-1, c, h, w)
+
+        if use_cuda:
+            inputs, targets = inputs.cuda(), targets.cuda()
+        inputs, targets = Variable(inputs, volatile=True), Variable(targets)  # volatile is a no-op in PyTorch >= 0.4
+        outputs = net(inputs)
+        outputs_avg = outputs.view(bs, ncrops, -1).mean(1) # avg over crops
+
+        loss = criterion(outputs_avg, targets)
+        PrivateTest_loss += loss.item()
+        _, predicted = torch.max(outputs_avg.data, 1)
+        total += targets.size(0)
+        correct += predicted.eq(targets.data).cpu().sum()
+
+        utils.progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
+            % (PrivateTest_loss / (batch_idx + 1), 100. * correct / total, correct, total))
+    # Save checkpoint.
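+    # NOTE: the block below saves only net.state_dict() to 'Test_model.t8'.
+    # The matching load, as used in plot_CK+_confusion_matrix.py and
+    # visualize.py, is:
+    #     net.load_state_dict(torch.load(os.path.join(path, 'Test_model.t8')))
+    # The old '.t7' checkpoints instead required:
+    #     checkpoint = torch.load(path_to_t7)
+    #     net.load_state_dict(checkpoint['net'])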
+    Test_acc = 100.*correct/total
+
+    if Test_acc > best_Test_acc:
+        print('Saving..')
+        print("best_Test_acc: %0.3f" % Test_acc)
+        '''
+        state = {'net': net.state_dict() if use_cuda else net,
+            'best_Test_acc': Test_acc,
+            'best_Test_acc_epoch': epoch,
+        }
+        '''
+        if not os.path.isdir(opt.dataset + '_' + opt.model):
+            os.mkdir(opt.dataset + '_' + opt.model)
+        if not os.path.isdir(path):
+            os.mkdir(path)
+        torch.save(net.state_dict(), os.path.join(path, 'Test_model.t8'))
+        best_Test_acc = Test_acc
+        best_Test_acc_epoch = epoch
+
+for epoch in range(start_epoch, total_epoch):
+    train(epoch)
+    test(epoch)
+
+print("best_Test_acc: %0.3f" % best_Test_acc)
+print("best_Test_acc_epoch: %d" % best_Test_acc_epoch)
diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..78d9062
Binary files /dev/null and b/models/__pycache__/__init__.cpython-37.pyc differ
diff --git a/models/__pycache__/resnet.cpython-37.pyc b/models/__pycache__/resnet.cpython-37.pyc
new file mode 100644
index 0000000..72b4714
Binary files /dev/null and b/models/__pycache__/resnet.cpython-37.pyc differ
diff --git a/models/__pycache__/vgg.cpython-37.pyc b/models/__pycache__/vgg.cpython-37.pyc
new file mode 100644
index 0000000..84f5f23
Binary files /dev/null and b/models/__pycache__/vgg.cpython-37.pyc differ
diff --git a/models/vgg.py b/models/vgg.py
index de75bb3..4c18939 100644
--- a/models/vgg.py
+++ b/models/vgg.py
@@ -3,7 +3,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.autograd import Variable
-
+# from functions.stn import STNFunction, STNFunctionBCHW
 
 cfg = {
     'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
@@ -18,8 +18,10 @@ def __init__(self, vgg_name):
         super(VGG, self).__init__()
         self.features = self._make_layers(cfg[vgg_name])
         self.classifier = nn.Linear(512, 7)
+        # self.SpatialTransformer = STNFunction()
 
     def forward(self, x):
+        # out = self.SpatialTransformer(x)
         out = self.features(x)
         out = out.view(out.size(0), -1)
         out = F.dropout(out, p=0.5, training=self.training)
diff --git a/plot_CK+_confusion_matrix.py b/plot_CK+_confusion_matrix.py
index 66348c7..f776995 100644
--- a/plot_CK+_confusion_matrix.py
+++ b/plot_CK+_confusion_matrix.py
@@ -75,21 +75,25 @@ def plot_confusion_matrix(cm, classes,
 
 total = 0
 all_target = []
-for i in xrange(10):
+# evaluate a single fold only
+for i in range(1):
     print("%d fold" % (i+1))
     path = os.path.join(opt.dataset + '_' + opt.model, '%d' %(i+1))
-    checkpoint = torch.load(os.path.join(path, 'Test_model.t7'))
+
+    # checkpoint = torch.load(os.path.join(path, 'Test_model.t7'))
+    # net.load_state_dict(checkpoint['net'])
+    net.load_state_dict(torch.load(os.path.join(path, 'Test_model.t8')))
 
-    net.load_state_dict(checkpoint['net'])
-    net.cuda()
+
+    # net.cuda()
     net.eval()
     testset = CK(split = 'Testing', fold = i+1, transform=transform_test)
-    testloader = torch.utils.data.DataLoader(testset, batch_size=5, shuffle=False, num_workers=1)
+    testloader = torch.utils.data.DataLoader(testset, batch_size=5, shuffle=False, num_workers=0)
     for batch_idx, (inputs, targets) in enumerate(testloader):
         bs, ncrops, c, h, w = np.shape(inputs)
         inputs = inputs.view(-1, c, h, w)
-        inputs, targets = inputs.cuda(), targets.cuda()
+        # inputs, targets = inputs.cuda(), targets.cuda()
         inputs, targets = Variable(inputs, volatile=True), Variable(targets)
         outputs = net(inputs)
         outputs_avg = outputs.view(bs, ncrops, -1).mean(1) # avg over crops
diff --git a/visualize.py b/visualize.py
index 5133971..4b2f6e4 100644
--- a/visualize.py
+++ b/visualize.py
@@ -36,18 +36,21 @@ def rgb2gray(rgb):
 img = Image.fromarray(img)
 inputs = transform_test(img)
 
-class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
+# class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] # for FER2013
+class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Contempt'] # for CK+
 
 net = VGG('VGG19')
-checkpoint = torch.load(os.path.join('FER2013_VGG19', 'PrivateTest_model.t7'))
-net.load_state_dict(checkpoint['net'])
-net.cuda()
+# checkpoint = torch.load(os.path.join('FER2013_VGG19', 'PrivateTest_model.t7'))
+# net.load_state_dict(checkpoint['net'])
+# net.cuda()
+
+net.load_state_dict(torch.load(os.path.join('CK+_VGG19', '1', 'Test_model.t8')))
 net.eval()
 
 ncrops, c, h, w = np.shape(inputs)
 inputs = inputs.view(-1, c, h, w)
-inputs = inputs.cuda()
+# inputs = inputs.cuda()
 inputs = Variable(inputs, volatile=True)
 outputs = net(inputs)
 
@@ -88,7 +91,7 @@ def rgb2gray(rgb):
 # show emojis
 #plt.show()
-plt.savefig(os.path.join('images/results/l.png'))
+plt.savefig('images/results/1.png')
 plt.close()
 
 print("The Expression is %s" %str(class_names[int(predicted.cpu().numpy())]))
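
Taken together, the changes above move inference to a CPU-only, state_dict-based flow. The sketch below condenses the new visualize.py path end to end; it is illustrative, not part of the patch, and assumes a checkpoint at "CK+_VGG19/1/Test_model.t8", a face image at "images/1.jpg", and that torchvision's transforms can stand in for the repo's local transforms module.

```python
# Condensed CK+ inference sketch (assumed paths; torchvision transforms used
# in place of the repo's local transforms module).
import os
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from models import VGG  # the repo's VGG implementation

cut_size = 44
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Contempt']  # CK+

transform_test = transforms.Compose([
    transforms.TenCrop(cut_size),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(c) for c in crops])),
])

# grayscale face, resized and replicated to 3 channels as in CK.__getitem__
gray = np.array(Image.open('images/1.jpg').convert('L').resize((48, 48)))
img = Image.fromarray(np.stack([gray, gray, gray], axis=2))
inputs = transform_test(img)  # shape: (ncrops, c, h, w)

net = VGG('VGG19')
net.load_state_dict(torch.load(os.path.join('CK+_VGG19', '1', 'Test_model.t8'),
                               map_location='cpu'))
net.eval()

with torch.no_grad():            # modern replacement for volatile=True
    outputs = net(inputs)        # one row of class scores per crop
score = outputs.mean(0)          # average the ten crops
print("The Expression is %s" % class_names[int(score.argmax())])
```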