#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
/*
 * LSTM layer.
 */
source("nn/layers/sigmoid.dml") as sigmoid
source("nn/layers/tanh.dml") as tanh
forward = function(matrix[double] X, matrix[double] W, matrix[double] b,
                   boolean return_sequences, matrix[double] out0, matrix[double] c0)
    return (matrix[double] out, matrix[double] c) {
  /*
   * Computes the forward pass for an LSTM layer with M neurons.
   * The input data has N sequences of T timesteps, each with D features.
   *
   * In an LSTM, an internal cell state is maintained, additive
   * interactions operate over the cell state at each timestep, and
   * some amount of this cell state is exposed as output at each
   * timestep.  Additionally, the output of the previous timestep is fed
   * back in as an additional input at the current timestep.
   *
   * Reference:
   *  - Long Short-Term Memory, Hochreiter & Schmidhuber, 1997
   *    - http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
   *
   * Inputs:
   *  - X: Inputs, of shape (N, T*D).
   *  - W: Weights, of shape (D+M, 4M).
   *  - b: Biases, of shape (1, 4M).
   *  - return_sequences: Whether to return `out` at all timesteps,
   *      or just for the final timestep.
   *  - out0: Outputs from previous timestep, of shape (N, M).
   *      Note: This is *optional* and could just be an empty matrix.
   *  - c0: Initial cell state, of shape (N, M).
   *      Note: This is *optional* and could just be an empty matrix.
   *
   * Outputs:
   *  - out: If `return_sequences` is True, outputs for all timesteps,
   *      of shape (N, T*M).  Else, outputs for the final timestep, of
   *      shape (N, M).
   *  - c: Cell state for final timestep, of shape (N, M).
   */
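  # For reference, a sketch (in conventional notation, not a transcript of
  # the built-in operator's internals) of the LSTM cell computed at each
  # timestep t, using the i/f/o/g gate layout implied by the (D+M, 4M)
  # weight shape:
  #   ifog = cbind(x_t, out_{t-1}) %*% W + b   # (N, 4M) gate pre-activations
  #   i = sigmoid(ifog[, 1:M])                 # input gate
  #   f = sigmoid(ifog[, M+1:2M])              # forget gate
  #   o = sigmoid(ifog[, 2M+1:3M])             # output gate
  #   g = tanh(ifog[, 3M+1:4M])                # candidate cell update
  #   c_t = f * c_{t-1} + i * g                # additive cell-state update
  #   out_t = o * tanh(c_t)                    # exposed output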
  # Delegate to the built-in `lstm` operator, which performs the full
  # recurrence over all T timesteps and assigns both outputs.
  [out, c] = lstm(X, W, b, out0, c0, return_sequences)
}
backward = function(matrix[double] dout, matrix[double] dc,
                    matrix[double] X, matrix[double] W, matrix[double] b,
                    boolean given_sequences, matrix[double] out0, matrix[double] c0)
    return (matrix[double] dX, matrix[double] dW, matrix[double] db,
            matrix[double] dout0, matrix[double] dc0) {
  /*
   * Computes the backward pass for an LSTM layer with M neurons.
   *
   * Inputs:
   *  - dout: Gradient wrt `out`.  If `given_sequences` is `True`,
   *      contains gradients on outputs for all timesteps, of
   *      shape (N, T*M).  Else, contains the gradient on the output
   *      for the final timestep, of shape (N, M).
   *  - dc: Gradient wrt `c` (from later in time), of shape (N, M).
   *      This would come from later in time if the cell state was used
   *      downstream as the initial cell state for another LSTM layer.
   *      Typically, this would be used when a sequence was cut at
   *      timestep `T` and then continued in the next batch.  If `c`
   *      was not used downstream, then `dc` would be an empty matrix.
   *  - X: Inputs, of shape (N, T*D).
   *  - W: Weights, of shape (D+M, 4M).
   *  - b: Biases, of shape (1, 4M).
   *  - given_sequences: Whether `dout` is for all timesteps,
   *      or just for the final timestep.  This is based on whether
   *      `return_sequences` was true in the forward pass.
   *  - out0: Outputs from previous timestep, of shape (N, M).
   *      Note: This is *optional* and could just be an empty matrix.
   *  - c0: Initial cell state, of shape (N, M).
   *      Note: This is *optional* and could just be an empty matrix.
   *
   * Outputs:
   *  - dX: Gradient wrt `X`, of shape (N, T*D).
   *  - dW: Gradient wrt `W`, of shape (D+M, 4M).
   *  - db: Gradient wrt `b`, of shape (1, 4M).
   *  - dout0: Gradient wrt `out0`, of shape (N, M).
   *  - dc0: Gradient wrt `c0`, of shape (N, M).
   */
  # Delegate to the built-in `lstm_backward` operator.  No forward caches
  # are passed in, so the operator works directly from the original inputs.
  [dX, dW, db, dout0, dc0] = lstm_backward(X, W, b, out0, c0, given_sequences, dout, dc)
}
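# Example usage (a hedged sketch, not part of this layer's API): one SGD
# step with a loss on the final-timestep output.  `X`, `dout`, `lr`, and
# the loss computation are assumed to exist in the calling script; all
# names below are illustrative only.
#
#   [W, b, out0, c0] = init(N, D, M)
#   [out, c] = forward(X, W, b, FALSE, out0, c0)   # out: (N, M)
#   # ... compute `dout` (N, M), the gradient of the loss wrt `out` ...
#   dc = matrix(0, rows=N, cols=M)                 # `c` not used downstream
#   [dX, dW, db, dout0, dc0] = backward(dout, dc, X, W, b, FALSE, out0, c0)
#   W = W - lr * dW                                # vanilla SGD update
#   b = b - lr * db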
init = function(int N, int D, int M)
    return (matrix[double] W, matrix[double] b, matrix[double] out0, matrix[double] c0) {
  /*
   * Initialize the parameters of this layer.
   *
   * Note: This is just a convenience function, and parameters
   * may be initialized manually if needed.
   *
   * We use the Glorot uniform heuristic which limits the magnification
   * of inputs/gradients during forward/backward passes by scaling
   * uniform weights by a factor of sqrt(6/(fan_in + fan_out)).
   *  - http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
   *
   * Inputs:
   *  - N: Number of examples in batch.
   *  - D: Dimensionality of the input features (number of features).
   *  - M: Number of neurons in this layer.
   *
   * Outputs:
   *  - W: Weights, of shape (D+M, 4M).
   *  - b: Biases, of shape (1, 4M).
   *  - out0: Empty previous timestep output matrix, of shape (N, M).
   *  - c0: Empty initial cell state matrix, of shape (N, M).
   */
  fan_in = D+M
  fan_out = 4*M
  scale = sqrt(6/(fan_in+fan_out))
  W = rand(rows=D+M, cols=4*M, min=-scale, max=scale, pdf="uniform")
  b = matrix(0, rows=1, cols=4*M)
  out0 = matrix(0, rows=N, cols=M)
  c0 = matrix(0, rows=N, cols=M)
}
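# Worked example of the Glorot scale above (illustrative numbers only):
# with D=64 input features and M=128 neurons, fan_in = 64+128 = 192 and
# fan_out = 4*128 = 512, so scale = sqrt(6/704) ~= 0.0923 and weights are
# drawn uniformly from [-0.0923, 0.0923].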