[data]
kaldi_root =
train = /home/sooda/speech/kaldi/egs/timit/s5/data/train/train.feats
dev = /home/sooda/speech/kaldi/egs/timit/s5/data/dev/dev.feats
test =
out_file =
format = kaldi
xdim = 13
ydim = 1939
#ydim = 1909
label_mean = label_mean.feats
[arch]
num_hidden = 1024
# set it to zero if you want a regular LSTM
num_hidden_proj = 512
num_lstm_layer = 3

[train]
batch_size = 40
buckets = 100, 200, 300, 400, 500, 600, 700, 800
num_epoch = 12

# used only if method is truncated-bptt
truncate_len = 20

# gpu0, gpu1
context = gpu0

# bucketing, truncated-bptt
method = truncated-bptt
#method = bucketing

# checkpoint prefix
prefix = timit

learning_rate = 1
decay_factor = 2
decay_lower_bound = 1e-6

optimizer = speechSGD
momentum = 0.9

# set to 0 to disable gradient clipping
clip_gradient = 0

# uniform, normal, xavier
initializer = Uniform
init_scale = 0.05
weight_decay = 0.008

# show progress every how many batches
show_every = 1000