-
Notifications
You must be signed in to change notification settings - Fork 22
/
train_test.py
225 lines (175 loc) · 7.63 KB
/
train_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
"""Entry-point script for training and testing pedestrian action prediction
models on the JAAD and PIE datasets.

Model, training and data options are read from YAML config files; see run().
"""
import os
import yaml
import getopt
import sys
from numba import cuda
import numpy as np
from tensorflow.keras import backend as K
from action_predict import action_prediction
from action_predict import ActionPredict
#from new_model import NewModel, HybridModel, MultiRNN3D, MultiRNN3D_MATT
from jaad_data import JAAD
# if use PIE data:
from pie_data import PIE
import tensorflow as tf
# Require at least one visible GPU, then cap per-GPU memory at 7 GiB so the
# process does not grab the whole card.
gpus = tf.config.experimental.list_physical_devices('GPU')
assert len(gpus) > 0, "Not enough GPU hardware devices available"
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
    # NOTE(review): TF documents memory growth and virtual-device
    # configuration as mutually exclusive on the same physical GPU —
    # confirm this pair of calls does not raise at startup.
    tf.config.experimental.set_virtual_device_configuration(
        gpu,
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=7168)]
    )
# Local dataset roots — change these to your own paths.
path_jaad = "/home/steven/submission_T_IV/JAAD"
path_pie = "/media/steven/MEDIA/PIE"
# (Earlier TF1-style session/GPU configuration experiments removed; the
# tf.config calls above supersede them.)
def write_to_yaml(yaml_path=None, data=None):
    """Serialize run results/configs to a YAML file.

    Args:
        yaml_path (str, optional): destination path of the YAML file
            (the original docstring wrongly documented this as ``model_path``).
        data (dict, optional): results or configuration dict from the run.
    """
    with open(yaml_path, 'w') as yamlfile:
        yaml.dump(data, yamlfile)
def run(config_file=None):
    """Run train and test on the dataset(s) specified in a configuration file.

    Reads the default config, overlays the model config, then for every
    dataset listed in ``exp_opts['datasets']`` trains the configured model,
    evaluates it on the test split, and writes ``results.yaml`` and
    ``configs.yaml`` next to the saved model.

    Args:
        config_file (str): path to configuration file in YAML format.

    Raises:
        ValueError: if a configured dataset is neither 'pie' nor 'jaad'.
    """
    print(config_file)
    # Read default config file.
    configs_default = 'config_files/configs_default.yaml'
    with open(configs_default, 'r') as f:
        configs = yaml.safe_load(f)
    with open(config_file, 'r') as f:
        model_configs = yaml.safe_load(f)
    # Overlay model-specific options onto the defaults.
    for k in ['model_opts', 'net_opts']:
        if k in model_configs:
            configs[k].update(model_configs[k])
    # Minimum track size = observation length + (max) time to event.
    tte = configs['model_opts']['time_to_event'] if isinstance(configs['model_opts']['time_to_event'], int) else \
        configs['model_opts']['time_to_event'][1]
    configs['data_opts']['min_track_size'] = configs['model_opts']['obs_length'] + tte
    # Update model and training options per dataset listed in the config file.
    for dataset_idx, dataset in enumerate(model_configs['exp_opts']['datasets']):
        configs['data_opts']['sample_type'] = 'beh' if 'beh' in dataset else 'all'
        configs['model_opts']['overlap'] = 0.6 if 'pie' in dataset else 0.8
        configs['model_opts']['dataset'] = dataset.split('_')[0]
        configs['train_opts']['batch_size'] = model_configs['exp_opts']['batch_size'][dataset_idx]
        configs['train_opts']['lr'] = model_configs['exp_opts']['lr'][dataset_idx]
        configs['train_opts']['epochs'] = model_configs['exp_opts']['epochs'][dataset_idx]
        model_name = configs['model_opts']['model']
        # Remove 'speed' from the observation inputs for JAAD RNN models.
        # BUGFIX: the original statement here was a self-assignment no-op
        # even though the comment said speed should be removed for jaad.
        if 'RNN' in model_name and 'jaad' in dataset:
            configs['model_opts']['obs_input_type'] = [
                inp for inp in configs['model_opts']['obs_input_type'] if inp != 'speed']
        for k, v in configs.items():
            print(k, v)
        # Per-model batch-size overrides (memory-bound models need smaller batches).
        if model_name in ['ConvLSTM']:
            configs['train_opts']['batch_size'] = 2
        if model_name in ['C3D', 'I3D']:
            configs['train_opts']['batch_size'] = 4
        if model_name in ['PCPA']:
            configs['train_opts']['batch_size'] = 1
        if 'MultiRNN' in model_name:
            configs['train_opts']['batch_size'] = 8
        if model_name in ['TwoStream']:
            configs['train_opts']['batch_size'] = 16
        # Instantiate the dataset interface with the local path.
        if configs['model_opts']['dataset'] == 'pie':
            imdb = PIE(data_path=path_pie)
        elif configs['model_opts']['dataset'] == 'jaad':
            imdb = JAAD(data_path=path_jaad)
        else:
            # BUGFIX: previously `imdb` was left unbound and the code crashed
            # later with an opaque NameError; fail fast with a clear message.
            raise ValueError(
                "Unsupported dataset {!r}; expected 'pie' or 'jaad'".format(
                    configs['model_opts']['dataset']))
        # Get the train/test sequences (validation is disabled by default).
        beh_seq_train = imdb.generate_data_trajectory_sequence('train', **configs['data_opts'])
        beh_seq_val = None
        # Uncomment the line below to use a validation set:
        # beh_seq_val = imdb.generate_data_trajectory_sequence('val', **configs['data_opts'])
        beh_seq_test = imdb.generate_data_trajectory_sequence('test', **configs['data_opts'])
        # Build the model class for the configured architecture.
        method_class = action_prediction(configs['model_opts']['model'])(**configs['net_opts'])
        # Train and save the model.
        saved_files_path = method_class.train(beh_seq_train, beh_seq_val, **configs['train_opts'],
                                              model_opts=configs['model_opts'])
        # Test and evaluate the model.
        acc, auc, f1, precision, recall = method_class.test(beh_seq_test, saved_files_path)
        # Save the evaluation results and the configs used for this run.
        data = {}
        data['results'] = {}
        data['results']['acc'] = float(acc)
        data['results']['auc'] = float(auc)
        data['results']['f1'] = float(f1)
        data['results']['precision'] = float(precision)
        data['results']['recall'] = float(recall)
        write_to_yaml(yaml_path=os.path.join(saved_files_path, 'results.yaml'), data=data)
        data = configs
        write_to_yaml(yaml_path=os.path.join(saved_files_path, 'configs.yaml'), data=data)
        print('Model saved to {}'.format(saved_files_path))
def usage():
    """Print command-line help for this script to stdout."""
    help_lines = (
        'Benchmark for evaluating pedestrian action prediction.',
        'Script for training and testing models.',
        'Usage: python train_test.py [options]',
        'Options:',
        '-h, --help\t\t Displays this help',
        '-c, --config_file\t Path to config file',
        '',
    )
    for line in help_lines:
        print(line)
if __name__ == '__main__':
    # Parse command-line options.
    # BUGFIX: the long option must be declared with a trailing '='
    # ('config_file=') because it takes an argument; without it, getopt
    # rejects '--config_file=<path>' with "must not have an argument".
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'config_file='])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)
    config_file = None
    for o, a in opts:
        if o in ["-h", "--help"]:
            usage()
            sys.exit(2)
        elif o in ['-c', '--config_file']:
            config_file = a
    # A config file is mandatory; print a red error banner and exit otherwise.
    if not config_file:
        print('\x1b[1;37;41m' + 'ERROR: Provide path to config file!' + '\x1b[0m')
        usage()
        sys.exit(2)
    run(config_file=config_file)