hparams.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: ZuoXiang
@contact: [email protected]
@file: hparams.py
@time: 2019/4/16 17:54
@desc:
"""
import tensorflow as tf


hparams = tf.contrib.training.HParams(
    # Training:
    image_size=224,
    num_category_classes=48,
    num_attribute_classes=1000,
    num_epochs=20,
    batch_size=10,
    buffer_size=100,  # Number of input images held in the shuffle buffer.
    train_images_num=207300,
    dropout_keep_prob=0.5,
    model_dir='model',
    init=False,
    train_stage='landmark',
    max_checkpoints=1000,
    # Optimization Flags
    weight_decay=0.0005,
    # The name of the optimizer, one of "adadelta", "adagrad", "adam",
    # "ftrl", "momentum", "sgd" or "rmsprop".
    optimizer='adadelta',
    # adadelta parameters
    adadelta_rho=0.95,
    # adagrad parameters
    adagrad_initial_accumulator_value=0.1,
    # adam parameters
    adam_beta1=0.9,
    adam_beta2=0.999,
    opt_epsilon=1.0,
    # ftrl parameters
    ftrl_learning_rate_power=-0.5,
    ftrl_initial_accumulator_value=0.1,
    ftrl_l1=0.0,
    ftrl_l2=0.0,
    # momentum parameters
    momentum=0.9,
    # rmsprop parameters
    rmsprop_momentum=0.9,
    rmsprop_decay=0.9,
    # Learning Rate Flags
    # Specifies how the learning rate is decayed. One of "fixed",
    # "exponential", or "polynomial".
    learning_rate_decay_type='exponential',
    learning_rate=0.001,
    end_learning_rate=0.00001,
    label_smoothing=0.0,
    learning_rate_decay_factor=0.94,
    num_epochs_per_decay=2.0,
    sync_replicas=False,
    replicas_to_aggregate=1,
    moving_average_decay=None,  # The decay to use for the moving average.
    image_path='/home/zuoxiang/data/deepfashion/Img'
)
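

# --- Illustrative sketch (not part of the original file) --------------------
# A minimal example of how the flags above could be consumed, loosely
# following TF-Slim's train_image_classifier.py conventions. The helper names
# `_configure_learning_rate` and `_configure_optimizer` are assumptions for
# illustration; the repo's actual training code may wire these differently.
def _configure_learning_rate(global_step):
    """Sketch: build a decayed learning-rate tensor from the flags above."""
    decay_steps = int(hparams.train_images_num / hparams.batch_size *
                      hparams.num_epochs_per_decay)
    if hparams.learning_rate_decay_type == 'exponential':
        return tf.train.exponential_decay(
            hparams.learning_rate, global_step, decay_steps,
            hparams.learning_rate_decay_factor, staircase=True)
    elif hparams.learning_rate_decay_type == 'fixed':
        return tf.constant(hparams.learning_rate, name='fixed_learning_rate')
    elif hparams.learning_rate_decay_type == 'polynomial':
        return tf.train.polynomial_decay(
            hparams.learning_rate, global_step, decay_steps,
            end_learning_rate=hparams.end_learning_rate, power=1.0)
    raise ValueError('Unrecognized learning_rate_decay_type: %s' %
                     hparams.learning_rate_decay_type)


def _configure_optimizer(learning_rate):
    """Sketch: map hparams.optimizer onto the matching tf.train optimizer."""
    if hparams.optimizer == 'adadelta':
        return tf.train.AdadeltaOptimizer(
            learning_rate, rho=hparams.adadelta_rho,
            epsilon=hparams.opt_epsilon)
    elif hparams.optimizer == 'adagrad':
        return tf.train.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=hparams.adagrad_initial_accumulator_value)
    elif hparams.optimizer == 'adam':
        return tf.train.AdamOptimizer(
            learning_rate, beta1=hparams.adam_beta1,
            beta2=hparams.adam_beta2, epsilon=hparams.opt_epsilon)
    elif hparams.optimizer == 'ftrl':
        return tf.train.FtrlOptimizer(
            learning_rate,
            learning_rate_power=hparams.ftrl_learning_rate_power,
            initial_accumulator_value=hparams.ftrl_initial_accumulator_value,
            l1_regularization_strength=hparams.ftrl_l1,
            l2_regularization_strength=hparams.ftrl_l2)
    elif hparams.optimizer == 'momentum':
        return tf.train.MomentumOptimizer(
            learning_rate, momentum=hparams.momentum)
    elif hparams.optimizer == 'rmsprop':
        return tf.train.RMSPropOptimizer(
            learning_rate, decay=hparams.rmsprop_decay,
            momentum=hparams.rmsprop_momentum, epsilon=hparams.opt_epsilon)
    elif hparams.optimizer == 'sgd':
        return tf.train.GradientDescentOptimizer(learning_rate)
    raise ValueError('Unknown optimizer: %s' % hparams.optimizer)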


def hparams_debug_string():
    """Return all hyperparameters as a sorted, formatted string."""
    values = hparams.values()
    hp = ['  %s: %s' % (name, values[name]) for name in sorted(values)]
    return 'Hyperparameters:\n' + '\n'.join(hp)
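

# Example usage (illustrative): print the configuration, optionally
# overriding values from a comma-separated string via HParams.parse (e.g.
# fed from a command-line flag). The override string here is made up.
if __name__ == '__main__':
    hparams.parse('batch_size=32,learning_rate=0.01')
    print(hparams_debug_string())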