# -*- coding: utf-8 -*-
"""Experiments with the OMG-Emotion Dataset using the Face Channel
More information: Barros, P., Jirak, D., Weber, C., & Wermter, S. (2015). Multimodal emotional state recognition using sequence-dependent deep hierarchical features. Neural Networks, 72, 140-151.
Parameters:
baseDirectory (String): Base directory where the experiment will be saved.
datasetFolderTrain (String): Folder where the audios used for training the model are stored
datasetFolderTest (String): Folder where the audios used for testing the model are stored
experimentName (String): Name of the experiment.
logManager (LogManager):
Author: Pablo Barros
Created on: 02.05.2018
Last Update: 16.06.2018
"""
import matplotlib
matplotlib.use('Agg')
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from keras import backend as K
def set_keras_backend(backend):
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        reload(K)
        assert K.backend() == backend
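
# Usage note (added for clarity, not part of the original experiment logic):
# set_keras_backend() is meant to be called before any Keras models are built,
# which is why it is invoked at the bottom of this script before runModel().
# Also note that reload() is a builtin in Python 2, which this script targets
# (it uses Python 2 print statements); under Python 3 it would have to be
# imported first, e.g.:
#
#   from importlib import reload   # Python 3 only
#   set_keras_backend("tensorflow")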
def runModel():
    from KEF.Controllers import ExperimentManager
    from KEF.DataLoaders import DataLoader_OMG_Emotion_Face
    from KEF.Implementations import Vision_CNN_OMG_Emotion_Face

    numberOfClasses = 8

    dataDirectory = "/data/OMG_Faces_10Faces/"
    videosDirectory = "/data/datasets/OMG-Emotion/faces_extraced_all/"
    dataFileTrain = "/data/datasets/OMG-Emotion/omg_TrainVideos.csv"
    dataFileValidation = "/data/datasets/OMG-Emotion/omg_ValidationVideos.csv"

    """ Initialize all the parameters and modules necessary

        image size: 64,64
    """
    experimentManager = ExperimentManager.ExperimentManager(dataDirectory, "FER+PreTrained", verbose=True)

    grayScale = True
    preProcessingProperties = [(64, 64), grayScale]
""" Loading the training and testing data
"""
dataLoader = DataLoader_OMG_Emotion_Face.DataLoader_OMG_Face(experimentManager.logManager, preProcessingProperties)
#
#
dataLoader.loadValidationData(videosDirectory, dataFileValidation)
dataLoader.loadTrainData(videosDirectory, dataFileTrain)
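
    # The calls above assume the loader exposes dataTrain and dataValidation
    # objects whose .dataX arrays hold the preprocessed face crops (here
    # 64x64 grayscale, as set in preProcessingProperties). This is inferred
    # from how dataTrain.dataX.shape[1:] is consumed below, not from the KEF
    # documentation.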
#""" Creating and tuning the CNN
#"""
cnnModel = Vision_CNN_OMG_Emotion_Face.Vision_CNN_OMG_Face(experimentManager, "CNN", experimentManager.plotManager)
#
cnnModel.buildModel(dataLoader.dataTrain.dataX.shape[1:])
##
cnnModel.train(dataLoader.dataTrain, dataLoader.dataValidation, False)
##
cnnModel.save(experimentManager.modelDirectory)
##
print "Public Test Evaluation"
cnnModel.evaluate(dataLoader.dataValidation)
print "Private Test Evaluation"
cnnModel.evaluate(dataLoader.dataTest)
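
# Note: runModel() evaluates dataLoader.dataTest, but only the training and
# validation splits are loaded above. If the KEF loader provides a
# test-loading method (an assumption, not verified here), it would need to be
# called inside runModel first; the method name and CSV path below are
# hypothetical, shown only to illustrate the missing step:
#
#   dataFileTest = "/data/datasets/OMG-Emotion/omg_TestVideos.csv"   # hypothetical path
#   dataLoader.loadTestData(videosDirectory, dataFileTest)           # hypothetical API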
set_keras_backend("tensorflow")
print K.backend
if K.backend == "tensorflow":
import tensorflow as tf
sess = tf.Session()
# from keras import backend as K
K.set_session(sess)
with tf.device('/gpu:1'):
runModel()
else:
runModel()
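
# A minimal fallback sketch (illustrative only; the original experiment pins
# the run to '/gpu:1'): on a machine with a single GPU, or to let TensorFlow
# choose device placement itself, the same session setup can be used without
# the explicit tf.device() scope:
#
#   if K.backend() == "tensorflow":
#       import tensorflow as tf
#       K.set_session(tf.Session())
#       runModel()
#   else:
#       runModel()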