| #------------------------------------------------------------- |
| # |
| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, |
| # software distributed under the License is distributed on an |
| # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| # KIND, either express or implied. See the License for the |
| # specific language governing permissions and limitations |
| # under the License. |
| # |
| #------------------------------------------------------------- |
| |
| /* |
| * Affine (fully-connected) layer. |
| */ |
| |
| forward = function(matrix[double] X, matrix[double] W, matrix[double] b) |
| return (matrix[double] out) { |
| /* |
| * Computes the forward pass for an affine (fully-connected) layer |
| * with M neurons. The input data has N examples, each with D |
| * features. |
| * |
| * Inputs: |
| * - X: Inputs, of shape (N, D). |
| * - W: Weights, of shape (D, M). |
| * - b: Biases, of shape (1, M). |
| * |
| * Outputs: |
| * - out: Outputs, of shape (N, M). |
| */ |
  out = X %*% W + b  # `b` (1, M) is broadcast across the N rows
| } |
| |
| backward = function(matrix[double] dout, matrix[double] X, |
| matrix[double] W, matrix[double] b) |
| return (matrix[double] dX, matrix[double] dW, matrix[double] db) { |
| /* |
 * Computes the backward pass for an affine (fully-connected) layer
| * with M neurons. |
| * |
| * Inputs: |
| * - dout: Gradient wrt `out` from upstream, of shape (N, M). |
| * - X: Inputs, of shape (N, D). |
| * - W: Weights, of shape (D, M). |
| * - b: Biases, of shape (1, M). |
| * |
| * Outputs: |
| * - dX: Gradient wrt `X`, of shape (N, D). |
| * - dW: Gradient wrt `W`, of shape (D, M). |
| * - db: Gradient wrt `b`, of shape (1, M). |
| */ |
  dX = dout %*% t(W)   # (N, M) %*% (M, D) => (N, D)
  dW = t(X) %*% dout   # (D, N) %*% (N, M) => (D, M)
  db = colSums(dout)   # sum over the N examples => (1, M)
| } |
| |
| init = function(int D, int M) |
| return (matrix[double] W, matrix[double] b) { |
| /* |
| * Initialize the parameters of this layer. |
| * |
| * Note: This is just a convenience function, and parameters |
| * may be initialized manually if needed. |
| * |
 * We use the heuristic of He et al., which limits the magnification
 * of inputs/gradients during forward/backward passes by scaling
 * unit-Gaussian weights by a factor of sqrt(2/n), where n is the
 * fan-in (here, D), under the assumption of ReLU neurons.
 *  - http://arxiv.org/abs/1502.01852
| * |
| * Inputs: |
| * - D: Dimensionality of the input features (number of features). |
| * - M: Number of neurons in this layer. |
| * |
| * Outputs: |
| * - W: Weights, of shape (D, M). |
| * - b: Biases, of shape (1, M). |
| */ |
| W = rand(rows=D, cols=M, pdf="normal") * sqrt(2.0/D) |
| b = matrix(0, rows=1, cols=M) |
| } |
| |