#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
source("nn/layers/affine.dml") as affine
source("nn/layers/multi_attention.dml") as attention
source("nn/layers/dropout.dml") as dropout
source("nn/layers/batch_norm1d.dml") as batch_norm
source("nn/layers/tanh.dml") as tanh
source("nn/layers/gelu.dml") as gelu
linear_tensor_forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int B, int C)
return (matrix[double] out) {
/*
   * Helper function for computing a linear (affine) layer on a tensor input of shape (A, B*C),
   * by temporarily reshaping it to (A*B, C).
*/
A = nrow(X)
C_new = ncol(W)
out = affine::forward(matrix(X, rows=A*B, cols=C), W, b)
out = matrix(out, rows=A, cols=B*C_new)
}
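# Illustrative sketch (commented out, not part of the layer): for hidden states X of shape
# (B, T*D) and weights W of shape (D, D_new), linear_tensor_forward is equivalent to
#   X_flat = matrix(X, rows=B*T, cols=D)          # flatten tokens into rows
#   out_flat = affine::forward(X_flat, W, b)      # (B*T, D_new)
#   out = matrix(out_flat, rows=B, cols=T*D_new)  # restore tensor layout (B, T*D_new)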
linear_tensor_backward = function(matrix[double] dout, matrix[double] X, matrix[double] W, matrix[double] b, int B,
int C_out, int C_in)
return (matrix[double] dX, matrix[double] dW, matrix[double] db) {
/*
   * Helper function for the backward pass of a linear (affine) layer on a tensor input of
   * shape (A, B*C_in), with upstream gradient dout of shape (A, B*C_out).
*/
A = nrow(X)
[dX, dW, db] = affine::backward(matrix(dout, rows=A*B, cols=C_out), matrix(X, rows=A*B, cols=C_in), W, b)
dX = matrix(dX, rows=A, cols=B*C_in)
}
layer_norm_forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta, double epsilon, int B, int C)
return (matrix[double] out, matrix[double] cache_mean, matrix[double] cache_var, matrix[double] cache_norm) {
/*
   * Helper function for computing layer normalization via 1D batch norm on a tensor input, of shape (A, B*C)
*/
A = nrow(X)
batch_norm_input = t(matrix(X, rows=A*B, cols=C))
  # EMA matrices are unused, so zero-valued placeholder matrices are provided
emas_mat = matrix(0, rows=1, cols=A*B)
[batch_norm_out, unused1, unused2, cache_mean, cache_var, cache_norm] = batch_norm::forward(
batch_norm_input, t(gamma), t(beta), "train", emas_mat, emas_mat, 0.0, epsilon)
out = matrix(t(batch_norm_out), rows=A, cols=B*C)
}
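# Note: layer_norm_forward realizes layer norm with batch_norm1d by transposing the flattened
# input to shape (C, A*B), so every token column becomes a batch-norm "feature" that is
# normalized over its C embedding dimensions; gamma and beta are passed transposed so they
# scale/shift along the embedding dimension.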
layer_norm_backward = function(matrix[double] dout, matrix[double] cache_mean, matrix[double] cache_var,
matrix[double] cache_norm, matrix[double] X, matrix[double] gamma, matrix[double] beta, double epsilon, int B, int C)
return (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {
/*
   * Helper function for the backward pass of layer normalization via 1D batch norm on a tensor input, of shape (A, B*C)
*/
A = nrow(X)
batch_norm_input = t(matrix(X, rows=A*B, cols=C))
batch_norm_doutput = t(matrix(dout, rows=A*B, cols=C))
  # EMA matrices, updated EMA matrices, and the out matrix are unused, so placeholder matrices are provided
empty_mat = matrix(0, rows=1, cols=1)
[batch_norm_dX, unused1, unused2] = batch_norm::backward(
batch_norm_doutput,
empty_mat, empty_mat, empty_mat,
cache_mean, cache_var, cache_norm,
batch_norm_input, t(gamma), t(beta), "train", empty_mat, empty_mat, 0.0, epsilon)
dX = matrix(t(batch_norm_dX), rows=A, cols=B*C)
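  # Gradients for the affine layer-norm parameters: per embedding dimension, dgamma sums
  # dout * normalized input over all A*B token positions and dbeta sums dout, giving shape (1, C)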
dgamma = t(rowSums(batch_norm_doutput * cache_norm))
dbeta = t(rowSums(batch_norm_doutput))
}
forward = function(matrix[double] states,
int H, int T, int d, int I,
matrix[double] W_Q, matrix[double] b_Q,
matrix[double] W_K, matrix[double] b_K,
matrix[double] W_V, matrix[double] b_V,
matrix[double] W_context, matrix[double] b_context,
matrix[double] W_intermediate, matrix[double] b_intermediate,
matrix[double] W_out, matrix[double] b_out,
double dropout_p_attention,
double dropout_p_output,
double epsilon_ln,
matrix[double] gamma_ln1, matrix[double] beta_ln1,
matrix[double] gamma_ln2, matrix[double] beta_ln2,
string activation)
return (matrix[double] out_states, matrix[double] attention,
list[unknown] outputs,
matrix[double] dropout_mask_attention,
matrix[double] dropout_mask_output_1,
matrix[double] dropout_mask_output_2,
matrix[double] cache_mean_ln1, matrix[double] cache_var_ln1, matrix[double] cache_norm_ln1,
matrix[double] cache_mean_ln2, matrix[double] cache_var_ln2, matrix[double] cache_norm_ln2) {
/*
* Computes the forward pass for a layer of the BERT transformer architecture.
*
* Inputs (B: Batch size, T: Sequence length, D: Embedding length, H: Heads):
* - states: Hidden states, of shape (B, T*D).
* - H: Head count.
* - T: Sequence length.
* - d: Embedding length of single token per head with d*H = D.
   * - I: Intermediate embedding length.
* - W_Q: Weights for linear query layer, of shape (D, D).
* - b_Q: Biases for linear query layer, of shape (1, D).
* - W_K: Weights for linear key layer, of shape (D, D).
* - b_K: Biases for linear key layer, of shape (1, D).
* - W_V: Weights for linear value layer, of shape (D, D).
* - b_V: Biases for linear value layer, of shape (1, D).
* - W_context: Weights for linear output layer on context, of shape (D, D).
* - b_context: Biases for linear output layer on context, of shape (1, D).
* - W_intermediate: Weights for intermediate linear layer, of shape (D, I).
* - b_intermediate: Biases for intermediate linear layer, of shape (1, I).
   * - W_out: Weights for last linear output layer, of shape (I, D).
* - b_out: Biases for last linear output layer, of shape (1, D).
* - dropout_p_attention: Probability for dropout on attention.
* - dropout_p_output: Probability for dropout on output.
* - epsilon_ln: Epsilon value for layer norm.
* - gamma_ln1: Gamma params for layer norm 1, of shape (1, D).
* - beta_ln1: Beta params for layer norm 1, of shape (1, D).
* - gamma_ln2: Gamma params for layer norm 2, of shape (1, D).
* - beta_ln2: Beta params for layer norm 2, of shape (1, D).
* - activation: String specifying type of activation to use.
* Can be tanh or gelu.
*
* Outputs:
* - out_states: Token output states, of shape (B, T*D)
   * - attention: Attention values for keys & queries, of shape (B, H*T*T)
* - outputs: List of relevant outputs for backward pass with following
* order/content:
* -> 1: Output of linear query layer, of shape (B, T*D).
* -> 2: Output of linear key layer, of shape (B, T*D).
* -> 3: Output of linear value layer, of shape (B, T*D).
* -> 4: Output context of attention layer, of shape (B, T*D).
   *       -> 5: Output attention of attention layer, of shape (B, H*T*T).
* -> 6: Output of residual pass 1, of shape (B, T*D).
* -> 7: Output of layer norm 1, of shape (B, T*D).
* -> 8: Output of intermediate linear layer, of shape (B, T*I).
* -> 9: Output of activation layer, of shape (B, T*I).
* -> 10: Output of residual pass 2, of shape (B, T*D).
* - dropout_mask_attention: Dropout mask used on attention, of shape (B, H*T*T)
* - dropout_mask_output_1: Dropout mask used on attention output, of shape (B, T*D)
   * - dropout_mask_output_2: Dropout mask used on the output of the final linear layer, of shape (B, T*D)
* - cache_mean_ln1: Cached mean from layer norm 1, of shape (1, B*T)
   * - cache_var_ln1: Cached variance from layer norm 1, of shape (1, B*T)
   * - cache_norm_ln1: Cached normalized input from layer norm 1, of shape (D, B*T)
* - cache_mean_ln2: Cached mean from layer norm 2, of shape (1, B*T)
   * - cache_var_ln2: Cached variance from layer norm 2, of shape (1, B*T)
   * - cache_norm_ln2: Cached normalized input from layer norm 2, of shape (D, B*T)
*/
# Embedding dim
D = d * H
# Linear layers for Q, K, V
Q = linear_tensor_forward(states, W_Q, b_Q, T, D) # Shape (B, T*D)
K = linear_tensor_forward(states, W_K, b_K, T, D) # Shape (B, T*D)
V = linear_tensor_forward(states, W_V, b_V, T, D) # Shape (B, T*D)
# Multi-head self attention
[context, attention, dropout_mask_attention] = attention::forward(Q, K, V, H, T, d, dropout_p_attention)
# Shapes (B, T*D), (B, H*T*T), (B, H*T*T)
outputs = list(Q, K, V, context, attention)
# Linear layer on attention output (output layer)
out_states = linear_tensor_forward(context, W_context, b_context, T, D) # Shape (B, T*D)
# Dropout on output 1
dropout_mask_output_1 = matrix(0, 1, 1)
if (dropout_p_output > 0.0) {
[out_states, dropout_mask_output_1] = dropout::forward(out_states, dropout_p_output, -1)
}
# Residual pass 1
out_states = out_states + states # Shapes (B, T*D).
outputs = append(outputs, out_states)
# Layer norm 1 for each token
[out_states, cache_mean_ln1, cache_var_ln1, cache_norm_ln1] = layer_norm_forward(
out_states, gamma_ln1, beta_ln1, epsilon_ln, T, D)
outputs = append(outputs, out_states)
# Save out_states for residual pass
out_states_identity = out_states
# Linear layer of intermediate part
out_states = linear_tensor_forward(out_states, W_intermediate, b_intermediate, T, D) # Shape (B, T*I)
outputs = append(outputs, out_states)
# Activation
if (activation == "gelu") {
out_states = gelu::forward(out_states)
} else if (activation == "tanh") {
out_states = tanh::forward(out_states)
}
outputs = append(outputs, out_states)
# Final linear output layer
out_states = linear_tensor_forward(out_states, W_out, b_out, T, I) # Shape (B, T*D)
# Dropout on output 2
dropout_mask_output_2 = matrix(0, 1, 1)
if (dropout_p_output > 0.0) {
[out_states, dropout_mask_output_2] = dropout::forward(out_states, dropout_p_output, -1)
}
# Residual pass 2
out_states = out_states + out_states_identity
outputs = append(outputs, out_states)
# Layer norm 2 for each token
[out_states, cache_mean_ln2, cache_var_ln2, cache_norm_ln2] = layer_norm_forward(
out_states, gamma_ln2, beta_ln2, epsilon_ln, T, D)
}
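# Illustrative usage sketch (hypothetical dimensions, randomly initialized parameters;
# commented out so that sourcing this file has no side effects):
#   B = 2; T = 4; H = 3; d = 8; D = H * d; I = 4 * D
#   states = rand(rows=B, cols=T*D)
#   W_Q = rand(rows=D, cols=D); b_Q = matrix(0, rows=1, cols=D)
#   W_K = rand(rows=D, cols=D); b_K = matrix(0, rows=1, cols=D)
#   W_V = rand(rows=D, cols=D); b_V = matrix(0, rows=1, cols=D)
#   W_context = rand(rows=D, cols=D); b_context = matrix(0, rows=1, cols=D)
#   W_intermediate = rand(rows=D, cols=I); b_intermediate = matrix(0, rows=1, cols=I)
#   W_out = rand(rows=I, cols=D); b_out = matrix(0, rows=1, cols=D)
#   gamma_ln1 = matrix(1, rows=1, cols=D); beta_ln1 = matrix(0, rows=1, cols=D)
#   gamma_ln2 = matrix(1, rows=1, cols=D); beta_ln2 = matrix(0, rows=1, cols=D)
#   [out_states, attn, outs, dm_attn, dm_out1, dm_out2, cm1, cv1, cn1, cm2, cv2, cn2] = forward(
#     states, H, T, d, I, W_Q, b_Q, W_K, b_K, W_V, b_V, W_context, b_context,
#     W_intermediate, b_intermediate, W_out, b_out, 0.1, 0.1, 1e-12,
#     gamma_ln1, beta_ln1, gamma_ln2, beta_ln2, "gelu")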
backward = function(matrix[double] dout_states,
matrix[double] dropout_mask_attention,
matrix[double] dropout_mask_output_1,
matrix[double] dropout_mask_output_2,
matrix[double] cache_mean_ln1, matrix[double] cache_var_ln1, matrix[double] cache_norm_ln1,
matrix[double] cache_mean_ln2, matrix[double] cache_var_ln2, matrix[double] cache_norm_ln2,
list[unknown] outputs,
matrix[double] states,
int H, int T, int d, int I,
matrix[double] W_Q, matrix[double] b_Q,
matrix[double] W_K, matrix[double] b_K,
matrix[double] W_V, matrix[double] b_V,
matrix[double] W_context, matrix[double] b_context,
matrix[double] W_intermediate, matrix[double] b_intermediate,
matrix[double] W_out, matrix[double] b_out,
double dropout_p_attention,
double dropout_p_output,
double epsilon_ln,
matrix[double] gamma_ln1, matrix[double] beta_ln1,
matrix[double] gamma_ln2, matrix[double] beta_ln2,
string activation)
return (matrix[double] din_states,
matrix[double] dW_Q, matrix[double] db_Q,
matrix[double] dW_K, matrix[double] db_K,
matrix[double] dW_V, matrix[double] db_V,
matrix[double] dW_context, matrix[double] db_context,
matrix[double] dW_intermediate, matrix[double] db_intermediate,
matrix[double] dW_out, matrix[double] db_out,
matrix[double] dgamma_ln1, matrix[double] dbeta_ln1,
matrix[double] dgamma_ln2, matrix[double] dbeta_ln2) {
/*
* Computes the backward pass for a layer of the BERT transformer architecture.
*
* Inputs (B: Batch size, T: Sequence length, D: Embedding length, H: Heads):
* - dout_states: Gradients w.r.t. output states, of shape (B, T*D)
* - dropout_mask_attention: Dropout mask used on attention, of shape (B, H*T*T)
* - dropout_mask_output_1: Dropout mask used on attention output, of shape (B, T*D)
   * - dropout_mask_output_2: Dropout mask used on the output of the final linear layer, of shape (B, T*D)
* - cache_mean_ln1: Cached mean from layer norm 1, of shape (1, B*T)
   * - cache_var_ln1: Cached variance from layer norm 1, of shape (1, B*T)
   * - cache_norm_ln1: Cached normalized input from layer norm 1, of shape (D, B*T)
* - cache_mean_ln2: Cached mean from layer norm 2, of shape (1, B*T)
   * - cache_var_ln2: Cached variance from layer norm 2, of shape (1, B*T)
   * - cache_norm_ln2: Cached normalized input from layer norm 2, of shape (D, B*T)
* - outputs: list of relevant outputs from forward pass
* with the following order/content:
* -> 1: Output of linear query layer, of shape (B, T*D).
* -> 2: Output of linear key layer, of shape (B, T*D).
* -> 3: Output of linear value layer, of shape (B, T*D).
* -> 4: Output context of attention layer, of shape (B, T*D).
   *       -> 5: Output attention of attention layer, of shape (B, H*T*T).
* -> 6: Output of residual pass 1, of shape (B, T*D).
* -> 7: Output of layer norm 1, of shape (B, T*D).
* -> 8: Output of intermediate linear layer, of shape (B, T*I).
* -> 9: Output of activation layer, of shape (B, T*I).
* -> 10: Output of residual pass 2, of shape (B, T*D).
* - states: Hidden states, of shape (B, T*D).
* - H: Head count.
* - T: Sequence length.
* - d: Embedding length of single token per head with d*H = D.
   * - I: Intermediate embedding length.
* - W_Q: Weights for linear query layer, of shape (D, D).
* - b_Q: Biases for linear query layer, of shape (1, D).
* - W_K: Weights for linear key layer, of shape (D, D).
* - b_K: Biases for linear key layer, of shape (1, D).
* - W_V: Weights for linear value layer, of shape (D, D).
* - b_V: Biases for linear value layer, of shape (1, D).
* - W_context: Weights for linear output layer on context, of shape (D, D).
* - b_context: Biases for linear output layer on context, of shape (1, D).
* - W_intermediate: Weights for intermediate linear layer, of shape (D, I).
* - b_intermediate: Biases for intermediate linear layer, of shape (1, I).
   * - W_out: Weights for last linear output layer, of shape (I, D).
* - b_out: Biases for last linear output layer, of shape (1, D).
* - dropout_p_attention: Probability for dropout on attention.
* - dropout_p_output: Probability for dropout on output.
* - epsilon_ln: Epsilon value for layer norm.
* - gamma_ln1: Gamma params for layer norm 1, of shape (1, D).
* - beta_ln1: Beta params for layer norm 1, of shape (1, D).
* - gamma_ln2: Gamma params for layer norm 2, of shape (1, D).
* - beta_ln2: Beta params for layer norm 2, of shape (1, D).
* - activation: String specifying type of activation to use.
* Can be tanh or gelu.
*
* Outputs:
* - din_states: Gradients w.r.t. hidden input states, of shape (B, T*D).
   * - dW_Q: Gradients w.r.t. weights for linear query layer, of shape (D, D).
   * - db_Q: Gradients w.r.t. biases for linear query layer, of shape (1, D).
   * - dW_K: Gradients w.r.t. weights for linear key layer, of shape (D, D).
   * - db_K: Gradients w.r.t. biases for linear key layer, of shape (1, D).
   * - dW_V: Gradients w.r.t. weights for linear value layer, of shape (D, D).
   * - db_V: Gradients w.r.t. biases for linear value layer, of shape (1, D).
   * - dW_context: Gradients w.r.t. weights for linear output layer on context, of shape (D, D).
   * - db_context: Gradients w.r.t. biases for linear output layer on context, of shape (1, D).
   * - dW_intermediate: Gradients w.r.t. weights for intermediate linear layer, of shape (D, I).
   * - db_intermediate: Gradients w.r.t. biases for intermediate linear layer, of shape (1, I).
   * - dW_out: Gradients w.r.t. weights for last linear output layer, of shape (I, D).
   * - db_out: Gradients w.r.t. biases for last linear output layer, of shape (1, D).
   * - dgamma_ln1: Gradients w.r.t. gamma params for layer norm 1, of shape (1, D).
   * - dbeta_ln1: Gradients w.r.t. beta params for layer norm 1, of shape (1, D).
   * - dgamma_ln2: Gradients w.r.t. gamma params for layer norm 2, of shape (1, D).
   * - dbeta_ln2: Gradients w.r.t. beta params for layer norm 2, of shape (1, D).
*/
# Embedding dim
D = d * H
# Layer norm 2 for each token
[dout_states, dgamma_ln2, dbeta_ln2] = layer_norm_backward(
dout_states, cache_mean_ln2, cache_var_ln2, cache_norm_ln2, as.matrix(outputs[10]), gamma_ln2, beta_ln2, epsilon_ln, T, D)
# Save dout_states for residual pass
dout_states_identity_2 = dout_states
# Dropout on output 2
if (dropout_p_output > 0.0) {
dout_states = dropout::backward(dout_states, matrix(0, 1, 1), dropout_p_output, dropout_mask_output_2)
}
# Final linear output layer
[dout_states, dW_out, db_out] = linear_tensor_backward(dout_states, as.matrix(outputs[9]), W_out, b_out, T, D, I)
# Activation
if (activation == "gelu") {
dout_states = gelu::backward(dout_states, as.matrix(outputs[8]))
} else if (activation == "tanh") {
dout_states = tanh::backward(dout_states, as.matrix(outputs[8]))
}
# Linear layer of intermediate part
[dout_states, dW_intermediate, db_intermediate] = linear_tensor_backward(dout_states, as.matrix(outputs[7]), W_intermediate,
b_intermediate, T, I, D)
# Residual pass 2
dout_states = dout_states + dout_states_identity_2
# Layer norm 1 for each token
[dout_states, dgamma_ln1, dbeta_ln1] = layer_norm_backward(
dout_states, cache_mean_ln1, cache_var_ln1, cache_norm_ln1, as.matrix(outputs[6]), gamma_ln1, beta_ln1, epsilon_ln, T, D)
# Save dout_states for residual pass
dout_states_identity_1 = dout_states
# Dropout on output 1
if (dropout_p_output > 0.0) {
dout_states = dropout::backward(dout_states, matrix(0, 1, 1), dropout_p_output, dropout_mask_output_1)
}
# Linear layer on attention output (output layer)
[dcontext, dW_context, db_context] = linear_tensor_backward(dout_states, as.matrix(outputs[4]), W_context, b_context, T, D, D)
# Multi-head self attention
[dQ, dK, dV] = attention::backward(dcontext, dropout_mask_attention, as.matrix(outputs[5]), as.matrix(outputs[1]),
as.matrix(outputs[2]), as.matrix(outputs[3]), H, T, d, dropout_p_attention)
# Linear layers for Q, K, V
[dstates_Q, dW_Q, db_Q] = linear_tensor_backward(dQ, states, W_Q, b_Q, T, D, D)
[dstates_K, dW_K, db_K] = linear_tensor_backward(dK, states, W_K, b_K, T, D, D)
[dstates_V, dW_V, db_V] = linear_tensor_backward(dV, states, W_V, b_V, T, D, D)
# Add paths + residual pass 1
din_states = dstates_Q + dstates_K + dstates_V + dout_states_identity_1
}
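# Illustrative sketch (commented out): the caches and outputs list returned by forward are
# passed unchanged to backward together with the upstream gradient, e.g. continuing the
# hypothetical example above with dout = rand(rows=B, cols=T*D):
#   [din, dW_Q, db_Q, dW_K, db_K, dW_V, db_V, dW_c, db_c, dW_i, db_i, dW_o, db_o,
#     dgamma1, dbeta1, dgamma2, dbeta2] = backward(dout, dm_attn, dm_out1, dm_out2,
#     cm1, cv1, cn1, cm2, cv2, cn2, outs, states, H, T, d, I, W_Q, b_Q, W_K, b_K, W_V, b_V,
#     W_context, b_context, W_intermediate, b_intermediate, W_out, b_out,
#     0.1, 0.1, 1e-12, gamma_ln1, beta_ln1, gamma_ln2, beta_ln2, "gelu")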