| #------------------------------------------------------------- |
| # |
| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, |
| # software distributed under the License is distributed on an |
| # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| # KIND, either express or implied. See the License for the |
| # specific language governing permissions and limitations |
| # under the License. |
| # |
| #------------------------------------------------------------- |
| |
| /* |
| * Various tests, not including gradient checks. |
| */ |
| source("scripts/nn/layers/batch_norm1d.dml") as batch_norm1d |
| source("scripts/nn/layers/batch_norm2d.dml") as batch_norm2d |
| source("scripts/nn/layers/conv2d.dml") as conv2d |
| source("scripts/nn/layers/conv2d_builtin.dml") as conv2d_builtin |
| source("scripts/nn/layers/conv2d_depthwise.dml") as conv2d_depthwise |
| source("scripts/nn/layers/conv2d_transpose.dml") as conv2d_transpose |
| source("scripts/nn/layers/conv2d_transpose_depthwise.dml") as conv2d_transpose_depthwise |
| source("scripts/nn/layers/cross_entropy_loss.dml") as cross_entropy_loss |
| source("scripts/nn/layers/cross_entropy_loss2d.dml") as cross_entropy_loss2d |
| source("scripts/nn/layers/max_pool2d.dml") as max_pool2d |
| source("scripts/nn/layers/max_pool2d_builtin.dml") as max_pool2d_builtin |
| source("scripts/nn/layers/tanh.dml") as tanh |
| source("scripts/nn/layers/softmax2d.dml") as softmax2d |
| source("scripts/nn/test/conv2d_simple.dml") as conv2d_simple |
| source("scripts/nn/test/max_pool2d_simple.dml") as max_pool2d_simple |
| source("scripts/nn/test/util.dml") as test_util |
| source("scripts/nn/util.dml") as util |
| source("scripts/nn/layers/sigmoid.dml") as sigmoid |
| source("scripts/nn/layers/elu.dml") as elu |
| |
| |
batch_norm1d = function() {
  /*
   * Test for the 1D batch normalization function.
   *
   * Runs a forward pass in 'train' mode over a small deterministic
   * input and compares the output element-wise against precomputed
   * expected values.
   */
  print("Testing the 1D batch normalization function.")

  # Deterministic test input: the sequence 1..16 as a 4x4 matrix
  N = 4  # Number of examples
  D = 4  # Number of features
  mode = 'train'  # execution mode
  mu = 0.9  # momentum of moving averages
  eps = 1e-5  # smoothing term
  X = matrix(seq(1,16), rows=N, cols=D)

  # Initialize layer parameters
  [gamma, beta, ema_mean, ema_var] = batch_norm1d::init(D)

  # Forward pass
  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =
      batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)

  # Compare every output element with the precomputed expected value
  expected = matrix("-1.34160721 -1.34160721 -1.34160733 -1.34160709
                     -0.44720244 -0.44720244 -0.44720244 -0.44720232
                     0.44720244 0.44720232 0.44720244 0.44720244
                     1.34160733 1.34160721 1.34160733 1.34160733", rows=1, cols=N*D)
  out_flat = matrix(out, rows=1, cols=N*D)
  for (idx in 1:length(out_flat)) {
    rel_error = test_util::check_rel_error(as.scalar(out_flat[1,idx]),
                                           as.scalar(expected[1,idx]), 1e-3, 1e-4)
  }
}
| |
batch_norm2d = function() {
  /*
   * Test for the 2D (spatial) batch normalization function.
   *
   * Runs a forward pass in 'train' mode on a fixed input of shape
   * (N=2, C=3, Hin=4, Win=5) and compares the output element-wise
   * against precomputed expected values.
   */
  print("Testing the 2D (spatial) batch normalization function.")

  # Generate data
  N = 2 # Number of examples
  C = 3 # num channels
  Hin = 4 # input height
  Win = 5 # input width
  mode = 'train' # execution mode
  mu = 0.9 # momentum of moving averages
  eps = 1e-5 # smoothing term
  # Fixed input; each group of Hin rows below is one channel, blank lines
  # separate channels, double blank lines separate examples.
  X = matrix("70 29 23 55 72
              42 98 68 48 39
              34 73 44 6 40
              74 18 18 53 53

              63 85 72 61 72
              32 36 23 29 63
              9 43 43 49 43
              31 43 89 94 50

              62 12 32 41 87
              25 48 99 52 61
              12 83 60 55 34
              30 42 68 88 51


              67 59 62 67 84
              8 76 24 19 57
              10 89 63 72 2
              59 56 16 15 70

              32 69 55 39 93
              84 36 4 30 40
              70 100 36 76 59
              69 15 40 24 34

              51 67 11 13 32
              66 85 55 85 38
              32 35 17 83 34
              55 58 52 0 99", rows=N, cols=C*Hin*Win)

  # Create layer
  [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)

  # Forward
  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =
      batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)

  # Equivalency check against precomputed expected outputs
  target = matrix("0.86215019 -0.76679718 -1.00517964 0.26619387 0.94161105
                   -0.25030172 1.97460198 0.78268933 -0.01191914 -0.36949289
                   -0.56814504 0.98134136 -0.17084086 -1.68059683 -0.32976246
                   1.02107191 -1.20383179 -1.20383179 0.18673301 0.18673301

                   0.50426388 1.41921711 0.87856293 0.42108631 0.87856293
                   -0.78498828 -0.61863315 -1.15928721 -0.90975463 0.50426388
                   -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167
                   -0.82657707 -0.32751167 1.58557224 1.79351616 -0.0363903

                   0.4607178 -1.49978399 -0.71558321 -0.36269283 1.44096887
                   -0.99005347 -0.08822262 1.91148913 0.06861746 0.42150795
                   -1.49978399 1.28412855 0.38229787 0.18624771 -0.63716316
                   -0.79400325 -0.32348287 0.69597805 1.48017895 0.0294075


                   0.74295878 0.42511559 0.54430676 0.74295878 1.41837597
                   -1.60113597 1.10053277 -0.96544927 -1.16410136 0.34565473
                   -1.52167511 1.61702824 0.5840373 0.94161105 -1.83951855
                   0.42511559 0.30592418 -1.28329265 -1.32302308 0.86215019

                   -0.78498828 0.75379658 0.17155361 -0.4938668 1.75192738
                   1.37762833 -0.61863315 -1.9494741 -0.86816585 -0.45227802
                   0.79538536 2.04304862 -0.61863315 1.04491806 0.33790874
                   0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072

                   0.0294075 0.65676796 -1.53899395 -1.46057391 -0.71558321
                   0.61755812 1.36254871 0.18624771 1.36254871 -0.48032296
                   -0.71558321 -0.59795308 -1.30373383 1.28412855 -0.63716316
                   0.18624771 0.30387771 0.06861746 -1.97030437 1.91148913",
                  rows=1, cols=N*C*Hin*Win)
  # Flatten to a row vector so a single loop can scan all elements
  out = matrix(out, rows=1, cols=N*C*Hin*Win)
  for (i in 1:length(out)) {
    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
                                           as.scalar(target[1,i]), 1e-3, 1e-4)
  }
}
| |
conv2d = function() {
  /*
   * Test for the 2D convolution functions.
   *
   * Verifies that the three conv2d implementations (im2col-based,
   * simple, and builtin) produce identical outputs on random input.
   */
  print("Testing the 2D convolution functions.")

  # Random test input
  N = 2  # num examples
  C = 3  # num channels
  Hin = 5  # input height
  Win = 5  # input width
  F = 2  # num filters
  Hf = 3  # filter height
  Wf = 3  # filter width
  stride = 1
  pad = 1
  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")

  # Shared layer parameters for all three implementations
  [W, b] = conv2d::init(F, C, Hf, Wf)

  # Run the forward pass through each implementation
  [out, Hout, Wout] = conv2d::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
  [out_simple, Hout_simple, Wout_simple] = conv2d_simple::forward(X, W, b, C, Hin, Win, Hf, Wf,
                                                                  stride, stride, pad, pad)
  [out_builtin, Hout_builtin, Wout_builtin] = conv2d_builtin::forward(X, W, b, C, Hin, Win, Hf, Wf,
                                                                      stride, stride, pad, pad)

  # All implementations must agree element-wise
  out = matrix(out, rows=1, cols=N*F*Hout*Wout)
  out_simple = matrix(out_simple, rows=1, cols=N*F*Hout*Wout)
  out_builtin = matrix(out_builtin, rows=1, cols=N*F*Hout*Wout)
  for (idx in 1:length(out)) {
    rel_error = test_util::check_rel_error(as.scalar(out[1,idx]),
                                           as.scalar(out_simple[1,idx]), 1e-10, 1e-12)
    rel_error = test_util::check_rel_error(as.scalar(out[1,idx]),
                                           as.scalar(out_builtin[1,idx]), 1e-10, 1e-12)
  }
}
| |
conv2d_depthwise = function() {
  /*
   * Test for the 2D depthwise convolution function.
   *
   * Runs a forward pass on a small deterministic input and compares
   * the result element-wise against precomputed expected values.
   */
  print("Testing the 2D depthwise convolution function.")

  # Generate data
  N = 2 # num examples
  C = 2 # num channels
  Hin = 3 # input height
  Win = 3 # input width
  M = 2 # num filters per input channel (i.e. depth multiplier)
  Hf = 3 # filter height
  Wf = 3 # filter width
  stride = 1
  pad = 1
  # Deterministic input scaled into [-1, 1)
  X = matrix(seq(1,N*C*Hin*Win), rows=N, cols=C*Hin*Win) / (N*C*Hin*Win) * 2 - 1 # normalized

  # Create layer (deterministic weights/biases so the output is reproducible)
  W = matrix(seq(1,C*M*Hf*Wf), rows=C, cols=M*Hf*Wf) / (C*M*Hf*Wf) * 2 - 1 # normalized
  b = matrix(seq(1,C*M), rows=C*M, cols=1) / (C*M)^2 # non-zero & non-one

  # Forward
  [out, Hout, Wout] = conv2d_depthwise::forward(X, W, b, Hin, Win, M, Hf, Wf, stride, stride,
                                                pad, pad)

  # Equivalency check against precomputed expected outputs
  target = matrix("2.13040113 3.20447516 2.16743827
                   3.30324078 4.94212961 3.30324078
                   2.16743827 3.20447516 2.13040113

                   0.52623457 0.85030866 0.67438275
                   1.11574078 1.75462961 1.2824074
                   0.89660496 1.35030866 0.97067899

                   -0.30015433 -0.42052469 -0.15200615
                   -0.15509261 -0.1828704 0.01157404
                   0.07021603 0.07947529 0.1442901

                   -0.90432101 -1.27469134 -0.64506173
                   -0.8425926 -1.12037039 -0.50925928
                   -0.20061731 -0.2746914 -0.01543214


                   -0.31404325 -0.62885809 -0.49922845
                   -0.86342597 -1.55787039 -1.19675934
                   -0.94367278 -1.62885797 -1.20293212

                   0.0817901 0.01697529 0.00771603
                   -0.05092596 -0.2453704 -0.21759261
                   -0.21450615 -0.48302469 -0.36265433

                   1.25540125 1.74614203 1.1813271
                   1.67824078 2.31712961 1.51157403
                   0.95910496 1.24614203 0.81095684

                   2.65123463 3.8919754 2.68827152
                   3.99074078 5.87962961 3.99074078
                   2.68827152 3.8919754 2.65123463", rows=N, cols=C*M*Hout*Wout)

  for (i in 1:nrow(out)) {
    for(j in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[i,j]),
                                             as.scalar(target[i,j]), 1e-3, 1e-4)
    }
  }
}
| |
conv2d_transpose = function() {
  /*
   * Test for the 2D transpose convolution function.
   *
   * Runs a forward pass on a small deterministic input and compares
   * the result element-wise against precomputed expected values.
   */
  print("Testing the 2D transpose convolution function.")

  # Generate data
  N = 2 # num examples
  C = 3 # num channels
  Hin = 2 # input height
  Win = 2 # input width
  F = 2 # num filters
  Hf = 3 # filter height
  Wf = 3 # filter width
  stride = 1
  pad = 0
  out_pad = 0 # padding added to output
  # Deterministic input scaled into [-1, 1)
  X = matrix(seq(1,N*C*Hin*Win), rows=N, cols=C*Hin*Win) / (N*C*Hin*Win) * 2 - 1 # normalized

  # Create layer (deterministic weights/biases so the output is reproducible)
  W = matrix(seq(1,C*F*Hf*Wf), rows=C, cols=F*Hf*Wf) / (C*F*Hf*Wf) * 2 - 1 # normalized
  b = matrix(seq(1,F), rows=F, cols=1) / F^2 # non-zero & non-one

  # Forward
  [out, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,
                                                pad, pad, out_pad, out_pad)

  # Equivalency check against precomputed expected outputs
  target = matrix("1.21296299 2.03703713 1.91666663 1.02777779
                   1.83333337 3.18518519 2.98148131 1.52777767
                   1.5 2.57407403 2.37037039 1.24999988
                   0.78703707 1.25925922 1.17592585 0.69444442

                   0.87962961 1.20370364 1.08333337 0.77777773
                   1.08333337 1.60185182 1.39814818 0.94444442
                   0.75 0.99074072 0.78703701 0.66666657
                   0.62037039 0.75925928 0.67592591 0.6111111


                   0.32407406 0.37037039 0.47222221 0.36111113
                   0.38888881 0.51851851 0.75925928 0.52777779
                   0.72222215 1.24074078 1.48148155 0.91666669
                   0.56481475 0.92592585 1.06481469 0.69444442

                   0.99074078 1.53703713 1.63888896 1.11111116
                   1.63888884 2.93518519 3.17592597 1.94444442
                   1.97222221 3.65740728 3.89814806 2.33333325
                   1.39814818 2.42592597 2.56481481 1.61111116", rows=N, cols=F*Hout*Wout)

  for (i in 1:nrow(out)) {
    for(j in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[i,j]),
                                             as.scalar(target[i,j]), 1e-3, 1e-4)
    }
  }
}
| |
conv2d_transpose_depthwise = function() {
  /*
   * Test for the 2D depthwise transpose convolution function.
   *
   * Runs a forward pass on a small deterministic input and compares
   * the result element-wise against precomputed expected values.
   */
  print("Testing the 2D depthwise transpose convolution function.")

  # Generate data
  N = 2 # num examples
  C = 4 # num channels
  Hin = 2 # input height
  Win = 2 # input width
  M = 2 # depth of each filter
  Hf = 3 # filter height
  Wf = 3 # filter width
  stride = 1
  pad = 0
  out_pad = 0 # padding added to output
  # Deterministic input scaled into [-1, 1)
  X = matrix(seq(1,N*C*Hin*Win), rows=N, cols=C*Hin*Win) / (N*C*Hin*Win) * 2 - 1 # normalized

  # Create layer (deterministic weights/biases so the output is reproducible)
  W = matrix(seq(1,C/M*M*Hf*Wf), rows=C/M, cols=M*Hf*Wf) / (C/M*M*Hf*Wf) * 2 - 1 # normalized
  b = matrix(seq(1,C/M), rows=C/M, cols=1) / (C/M)^2 # non-zero & non-one

  # Forward
  [out, Hout, Wout] = conv2d_transpose_depthwise::forward(X, W, b, C, Hin, Win, M, Hf, Wf,
                                                          stride, stride, pad, pad,
                                                          out_pad, out_pad)

  # Equivalency check against precomputed expected outputs
  target = matrix("1.44097221 2.45486116 2.28125 1.1875
                   2.1875 3.80555558 3.48611116 1.72916663
                   1.6875 2.84722233 2.52777767 1.27083325
                   0.80902779 1.24652779 1.10069442 0.625

                   0.37152776 0.24652773 0.18402778 0.35416669
                   0.21527778 -0.02777781 -0.12500003 0.22916666
                   0.04861115 -0.31944442 -0.41666669 0.10416666
                   0.32291669 0.20486113 0.1701389 0.375


                   0.05208334 -0.21180555 -0.16319445 0.02083334
                   -0.25694442 -0.8611111 -0.7361111 -0.27083331
                   -0.09027778 -0.4861111 -0.3611111 -0.0625
                   0.08680556 -0.08680557 -0.01041669 0.125

                   0.98263896 1.57986116 1.73958337 1.1875
                   1.77083337 3.30555558 3.65277791 2.22916675
                   2.27083325 4.34722233 4.69444466 2.77083349
                   1.60069442 2.87152767 3.05902767 1.875 ", rows=N, cols=C/M*Hout*Wout)

  for (i in 1:nrow(out)) {
    for(j in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[i,j]),
                                             as.scalar(target[i,j]), 1e-3, 1e-4)
    }
  }
}
| |
cross_entropy_loss = function() {
  /*
   * Test for the cross-entropy loss function.
   *
   * Here we make sure that the cross-entropy loss function does
   * not propagate `infinity` values in the case that a prediction is
   * exactly equal to 0.
   */
  print("Testing the cross-entropy loss function with zero-valued predictions.")

  # Generate data: all-zero predictions against random normalized targets
  N = 3 # num examples
  K = 10 # num targets
  pred = matrix(0, rows=N, cols=K)
  y = rand(rows=N, cols=K, min=0, max=1, pdf="uniform")
  y = y / rowSums(y) # normalized probs

  loss = cross_entropy_loss::forward(pred, y)

  # The loss must be finite even though log(0) is -infinity, i.e. the
  # implementation is expected to guard zero-valued predictions.
  inf = 1/0
  if (loss == inf) {
    print("ERROR: The cross-entropy loss function outputs infinity for all-zero predictions.")
  }
}
| |
cross_entropy_loss2d = function() {
  /*
   * Test for the 2D cross-entropy loss function.
   *
   * Evaluates the loss on fixed post-softmax predictions and one-hot
   * labels, and compares the result against a precomputed loss value.
   */
  print("Testing the 2D cross-entropy loss function.")

  # Generate data
  N = 2 # num examples
  C = 3 # num targets
  Hin = 3 # example height
  Win = 3 # example width
  loss_expected = 0.0770996  # precomputed reference loss for this input

  # pred data after the softmax
  pred = matrix("9.99909163e-01 4.99988675e-01 4.53958055e-05
                 9.99909163e-01 4.53958055e-05 4.53958055e-05
                 9.99909163e-01 4.53958055e-05 4.53958055e-05
                 4.53958055e-05 4.99988675e-01 4.53958055e-05
                 4.53958055e-05 9.99909163e-01 4.53958055e-05
                 4.53958055e-05 9.99909163e-01 4.53958055e-05
                 4.53958055e-05 2.26994507e-05 9.99909163e-01
                 4.53958055e-05 4.53958055e-05 9.99909163e-01
                 4.53958055e-05 4.53958055e-05 9.99909163e-01
                 9.99909163e-01 4.99988675e-01 4.53958055e-05
                 9.99909163e-01 4.53958055e-05 4.53958055e-05
                 9.99909163e-01 4.53958055e-05 4.53958055e-05
                 4.53958055e-05 4.99988675e-01 4.53958055e-05
                 4.53958055e-05 9.99909163e-01 4.53958055e-05
                 4.53958055e-05 9.99909163e-01 4.53958055e-05
                 4.53958055e-05 2.26994507e-05 9.99909163e-01
                 4.53958055e-05 4.53958055e-05 9.99909163e-01
                 4.53958055e-05 4.53958055e-05 9.99909163e-01", rows=N, cols=C*Hin*Win)
  # One-hot labels matching the dominant entries of pred
  y = matrix("1 0 0
              1 0 0
              1 0 0
              0 1 0
              0 1 0
              0 1 0
              0 0 1
              0 0 1
              0 0 1
              1 0 0
              1 0 0
              1 0 0
              0 1 0
              0 1 0
              0 1 0
              0 0 1
              0 0 1
              0 0 1", rows=N, cols=C*Hin*Win)

  loss = cross_entropy_loss2d::forward(pred, y, C)

  # Equivalency check
  rel_error = test_util::check_rel_error(loss, loss_expected, 1e-3, 1e-4)
}
| |
im2col = function() {
  /*
   * Test for the `im2col` and `col2im` functions.
   *
   * Pads a random image, rearranges its patches into columns, checks
   * the resulting number of columns, and then verifies that col2im
   * reconstructs the padded image exactly.
   */
  print("Testing the im2col and col2im functions.")

  # Random test image and convolution geometry
  C = 3  # num channels
  Hin = 5  # input height
  Win = 5  # input width
  Hf = 3  # filter height
  Wf = 3  # filter width
  stride = 2
  pad = (Hin * stride - Hin + Hf - stride) / 2
  Hout = as.integer(floor((Hin + 2*pad - Hf)/stride + 1))
  Wout = as.integer(floor((Win + 2*pad - Wf)/stride + 1))
  x = rand(rows=C, cols=Hin*Win)

  # Pad the image, then rearrange its patches into columns
  x_pad = util::pad_image(x, Hin, Win, pad, pad, 0)
  x_cols = util::im2col(x_pad, Hin+2*pad, Win+2*pad, Hf, Wf, stride, stride)

  # im2col should emit exactly one column per output position
  if (ncol(x_cols) != Hout*Wout) {
    print("ERROR: im2col does not yield the correct output size: "
          + ncol(x_cols)+" (actual) vs. "+Hout*Wout+" (correct).")
  }

  # Rearranging the columns back should recover the padded image
  x_pad2 = util::col2im(x_cols, C, Hin+2*pad, Win+2*pad, Hf, Wf, stride, stride, "none")
  equivalent = test_util::all_equal(x_pad, x_pad2)
  if (!equivalent) {
    print("ERROR: im2col and then col2im does not yield the original image.")
  }
}
| |
padding = function() {
  /*
   * Test for the `pad_image` and `unpad_image` functions.
   *
   * Pads a random image with zeros, verifies that ALL padded border
   * rows and columns (top, bottom, left, and right) are zero, then
   * unpads and checks that the original image is recovered.
   */
  print("Testing the padding and unpadding functions.")

  # Generate data
  C = 3 # num channels
  Hin = 5 # input height
  Win = 5 # input width
  pad = 3 # padding
  x = rand(rows=C, cols=Hin*Win)

  # Pad image
  x_pad = util::pad_image(x, Hin, Win, pad, pad, 0)

  # Check for padded rows & columns on all four borders.
  # (Previously only the top rows and left columns were checked,
  # leaving bottom/right padding unverified.)
  for (c in 1:C) {
    x_pad_slice = matrix(x_pad[c,], rows=Hin+2*pad, cols=Win+2*pad)
    for (i in 1:pad) {
      top_sum = sum(x_pad_slice[i,])
      bottom_sum = sum(x_pad_slice[Hin+2*pad-i+1,])
      left_sum = sum(x_pad_slice[,i])
      right_sum = sum(x_pad_slice[,Win+2*pad-i+1])
      if (top_sum != 0)
        print("ERROR: Padding was not applied to row " + i + ".")
      if (bottom_sum != 0)
        print("ERROR: Padding was not applied to row " + (Hin+2*pad-i+1) + ".")
      if (left_sum != 0)
        print("ERROR: Padding was not applied to column " + i + ".")
      if (right_sum != 0)
        print("ERROR: Padding was not applied to column " + (Win+2*pad-i+1) + ".")
    }
  }

  # Unpad image
  x1 = util::unpad_image(x_pad, Hin, Win, pad, pad)

  # Equivalency check: unpadding must restore the original image
  equivalent = test_util::all_equal(x, x1)
  if (!equivalent) {
    print("ERROR: Padding and then unpadding does not yield the original image.")
  }
}
| |
transpose_NCHW_to_CNHW = function() {
  /*
   * Test for `transpose_NCHW_to_CNHW` function.
   *
   * Transposes a sequentially-numbered tensor from NCHW to CNHW
   * layout and checks the result against the expected rearrangement.
   */
  print("Testing transpose_NCHW_to_CNHW function.")

  # Sequential input of shape (N=2, C=3, H=4, W=5)
  N = 2
  C = 3
  H = 4
  W = 5
  X = matrix(seq(1, N*C*H*W), rows=N, cols=C*H*W)

  out = util::transpose_NCHW_to_CNHW(X, C)

  # Expected: one row per channel, with both examples' H*W slices concatenated
  target =
      matrix("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
              61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
              21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
              81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
              41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
              101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120",
             rows=C, cols=N*H*W)

  # Element-wise equivalency check
  for (r in 1:nrow(out)) {
    for (c in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[r,c]),
                                             as.scalar(target[r,c]), 1e-10, 1e-12)
    }
  }
}
| |
max_pool2d = function() {
  /*
   * Test for the 2D max pooling functions.
   *
   * Cross-checks the three max pooling implementations (im2col-based,
   * simple, and builtin) on random data over a range of paddings, and
   * then checks all of them against known answers on small fixed inputs.
   */
  print("Testing the 2D max pooling functions.")

  # Generate data
  N = 2 # num examples
  C = 3 # num channels
  Hin = 8 # input height
  Win = 8 # input width
  Hf = 2 # filter height
  Wf = 2 # filter width
  stride = 2
  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")

  # Cross-check all three implementations for every padding combination
  for (padh in 0:3) {
    for (padw in 0:3) {
      print(" - Testing w/ padh="+padh+" & padw="+padw+".")
      #while(FALSE){} # force correct printing
      #print(" - Testing forward")
      [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, padh, padw)
      [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
                                                                          stride, stride,
                                                                          padh, padw)
      [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win,
                                                                              Hf, Wf,
                                                                              stride, stride,
                                                                              padh, padw)

      # Equivalency check: forward results must agree element-wise
      out = matrix(out, rows=1, cols=N*C*Hout*Wout)
      out_simple = matrix(out_simple, rows=1, cols=N*C*Hout*Wout)
      out_builtin = matrix(out_builtin, rows=1, cols=N*C*Hout*Wout)
      for (i in 1:length(out)) {
        rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
                                               as.scalar(out_simple[1,i]), 1e-10, 1e-12)
        rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
                                               as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
      }

      #print(" - Testing backward")
      dout = rand(rows=N, cols=C*Hout*Wout, pdf="normal")
      dX = max_pool2d::backward(dout, Hout, Wout, X, C, Hin, Win, Hf, Wf, stride, stride,
                                padh, padw)
      dX_simple = max_pool2d_simple::backward(dout, Hout_simple, Wout_simple, X, C, Hin, Win,
                                              Hf, Wf, stride, stride, padh, padw)
      dX_builtin = max_pool2d_builtin::backward(dout, Hout_builtin, Wout_builtin, X, C, Hin, Win,
                                                Hf, Wf, stride, stride, padh, padw)

      # Equivalency check: backward results must agree element-wise
      dX = matrix(dX, rows=1, cols=N*C*Hin*Win)
      dX_simple = matrix(dX_simple, rows=1, cols=N*C*Hin*Win)
      dX_builtin = matrix(dX_builtin, rows=1, cols=N*C*Hin*Win)
      for (i in 1:length(dX)) {
        rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
                                               as.scalar(dX_simple[1,i]), 1e-10, 1e-12)
        rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
                                               as.scalar(dX_builtin[1,i]), 1e-10, 1e-12)
      }
    }
  }

  # ---
  print(" - Testing for correct behavior against known answer w/ pad=0.")
  # generate data
  # -- channel 1
  #  1  2  3  4
  #  5  6  7  8
  #  9 10 11 12
  # 13 14 15 16
  # -- channel 2
  #  1  5  9 13
  #  2  6 10 14
  #  3  7 11 15
  #  4  8 12 16
  C = 2 # num channels
  Hin = 4 # input height
  Win = 4 # input width
  X = matrix(seq(1,16,1), rows=Hin, cols=Win)
  X = matrix(rbind(X, t(X)), rows=1, cols=C*Hin*Win) # C=2
  X = rbind(X, X) # n=2
  pad = 0

  # forward
  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
                                                                      stride, stride, pad, pad)
  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
                                                                          stride, stride, pad, pad)

  # equivalency check
  # -- channel 1
  #  6  8
  # 14 16
  # -- channel 2
  #  6 14
  #  8 16
  target = matrix("6 8 14 16 6 14 8 16", rows=1, cols=C*Hout*Wout)
  target = rbind(target, target) # n=2
  tmp = test_util::check_all_equal(out, target)
  tmp = test_util::check_all_equal(out_simple, target)
  tmp = test_util::check_all_equal(out_builtin, target)

  print(" - Testing for correct behavior against known answer w/ pad=1.")
  # generate data
  # -- channel 1
  # 0  0  0  0  0 0
  # 0  1  2  3  4 0
  # 0  5  6  7  8 0
  # 0  9 10 11 12 0
  # 0 13 14 15 16 0
  # 0  0  0  0  0 0
  # -- channel 2
  # 0 0  0  0  0 0
  # 0 1  5  9 13 0
  # 0 2  6 10 14 0
  # 0 3  7 11 15 0
  # 0 4  8 12 16 0
  # 0 0  0  0  0 0
  pad = 1

  # forward
  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
                                                                      stride, stride, pad, pad)
  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
                                                                          stride, stride, pad, pad)

  # equivalency check
  # -- channel 1
  #  1  3  4
  #  9 11 12
  # 13 15 16
  # -- channel 2
  # 1  9 13
  # 3 11 15
  # 4 12 16
  target = matrix("1 3 4 9 11 12 13 15 16 1 9 13 3 11 15 4 12 16", rows=1, cols=C*Hout*Wout)
  target = rbind(target, target) # n=2
  tmp = test_util::check_all_equal(out, target)
  tmp = test_util::check_all_equal(out_simple, target)
  tmp = test_util::check_all_equal(out_builtin, target)

  print(" - Testing for correct behavior against known answer w/ all negative matrix w/ pad=0.")
  # generate data
  # -- channel 1
  #  -1  -2  -3  -4
  #  -5  -6  -7  -8
  #  -9 -10 -11 -12
  # -13 -14 -15 -16
  # -- channel 2
  # -1 -5  -9 -13
  # -2 -6 -10 -14
  # -3 -7 -11 -15
  # -4 -8 -12 -16
  X = X * -1
  pad = 0

  # forward
  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
                                                                      stride, stride, pad, pad)
  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
                                                                          stride, stride, pad, pad)

  # equivalency check
  # -- channel 1
  # -1  -3
  # -9 -11
  # -- channel 2
  # -1 -9
  # -3 -11
  target = matrix("-1 -3 -9 -11 -1 -9 -3 -11", rows=1, cols=C*Hout*Wout)
  target = rbind(target, target) # n=2
  tmp = test_util::check_all_equal(out, target)
  tmp = test_util::check_all_equal(out_simple, target)
  tmp = test_util::check_all_equal(out_builtin, target)


  print(" - Testing for correct behavior against known answer w/ all negative matrix w/ pad=1.")
  # generate data
  # NOTE: the padded image is drawn with 0s below, but the expected values
  # further down show the padded cells never win the max over negative
  # inputs, i.e. the implementations treat padding as -infinity.
  # -- channel 1
  # 0   0   0   0   0 0
  # 0  -1  -2  -3  -4 0
  # 0  -5  -6  -7  -8 0
  # 0  -9 -10 -11 -12 0
  # 0 -13 -14 -15 -16 0
  # 0   0   0   0   0 0
  # -- channel 2
  # 0  0  0   0   0 0
  # 0 -1 -5  -9 -13 0
  # 0 -2 -6 -10 -14 0
  # 0 -3 -7 -11 -15 0
  # 0 -4 -8 -12 -16 0
  # 0  0  0   0   0 0
  pad = 1

  # forward
  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
                                                                      stride, stride, pad, pad)
  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
                                                                          stride, stride, pad, pad)

  # equivalency check
  # (padding acts as -infinity, so each window's max is its largest real value)
  # -- channel 1
  #  -1  -2  -4
  #  -5  -6  -8
  # -13 -14 -16
  # -- channel 2
  # -1 -5 -13
  # -2 -6 -14
  # -4 -8 -16
  target = matrix("-1 -2 -4 -5 -6 -8 -13 -14 -16 -1 -5 -13 -2 -6 -14 -4 -8 -16",
                  rows=1, cols=C*Hout*Wout)
  target = rbind(target, target) # n=2
  tmp = test_util::check_all_equal(out, target)
  tmp = test_util::check_all_equal(out_simple, target)
  tmp = test_util::check_all_equal(out_builtin, target)
}
| |
tanh = function() {
  /*
   * Test for the `tanh` forward function.
   *
   * Compares the layer's output against a direct evaluation of the
   * exponential definition of tanh.
   */
  print("Testing the tanh forward function.")

  # Random test data
  N = 2  # num examples
  C = 3  # num channels
  X = rand(rows=N, cols=C, pdf="normal")

  out = tanh::forward(X)
  # Reference: tanh(x) = (e^x - e^-x) / (e^x + e^-x)
  out_ref = (exp(X) - exp(-X)) / (exp(X) + exp(-X))

  # Element-wise equivalency check
  for (r in 1:nrow(out)) {
    for (c in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[r,c]), as.scalar(out_ref[r,c]),
                                             1e-10, 1e-12)
    }
  }
}
| |
compare_tanh_builtin_forward_with_old = function() {
  /*
   * Test that the `tanh` forward function matches the old
   * sigmoid-based implementation, tanh(x) = 2*sigmoid(2x) - 1.
   */
  # Distinct message so this test is distinguishable from the plain
  # tanh forward test in the output log.
  print("Testing the tanh forward function against the old sigmoid-based implementation.")

  # Generate data
  N = 2 # num examples
  C = 3 # num channels
  X = rand(rows=N, cols=C, pdf="normal")

  out = tanh::forward(X)

  # Old implementation: tanh(x) = 2*sigmoid(2x) - 1
  sigma2X = sigmoid::forward(2*X)
  out_ref = 2*sigma2X - 1

  # Equivalency check
  for (i in 1:nrow(out)) {
    for (j in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[i,j]), as.scalar(out_ref[i,j]),
                                             1e-10, 1e-12)
    }
  }
}
| |
compare_tanh_builtin_backward_with_old = function() {
  /*
   * Test for the `tanh` backward function.
   *
   * Compares the layer's gradient against the old sigmoid-based
   * formulation: tanh(x) = 2*sigmoid(2x) - 1, dX = (1 - tanh(x)^2) * dout.
   */
  print("Testing the tanh backward function.")

  # Random test data and upstream gradient
  N = 2  # num examples
  C = 3  # num channels
  X = rand(rows=N, cols=C, pdf="normal")
  dout = rand(rows=N, cols=C, pdf="normal")

  # Reference gradient via the old sigmoid-based tanh
  sigma2X = sigmoid::forward(2*X)
  tanh_X = 2*sigma2X - 1
  out_ref = (1-tanh_X^2) * dout

  out = tanh::backward(dout, X)

  # Element-wise equivalency check
  for (r in 1:nrow(out)) {
    for (c in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[r,c]), as.scalar(out_ref[r,c]),
                                             1e-10, 1e-12)
    }
  }
}
| |
threshold = function() {
  /*
   * Test for the threshold function.
   *
   * Applies a 0.5 threshold to a small matrix and checks the resulting
   * indicator matrix against the expected 0/1 pattern.
   */
  print("Testing the threshold function.")

  # Test input and the expected indicator matrix for thresh = 0.5
  X = matrix("0.31 0.24 0.87
              0.45 0.66 0.65
              0.24 0.91 0.13", rows=3, cols=3)
  thresh = 0.5
  target_matrix = matrix("0.0 0.0 1.0
                          0.0 1.0 1.0
                          0.0 1.0 0.0", rows=3, cols=3)

  # Apply the threshold and verify the result
  indicator_matrix = util::threshold(X, thresh)
  out = test_util::check_all_equal(indicator_matrix, target_matrix)
}
| |
top_k_row = function() {
  /*
   * Test for the top_k_row function.
   *
   * Extracts the top-k values (and their column indices) of one row
   * of a matrix and checks them against known expected results.
   */
  print("Testing the top_k_row function.")

  #Generate data
  X = matrix("2 3 2 1 19 20 31 12 3 4 5 60 9 2
              3 6 15 18 6 12 1 17 3 0 4 19 1 6", rows=2, cols=14)
  r = 2  # row to examine
  k = 3  # number of top entries to extract
  expected_values = matrix("19
                            18
                            17", rows=1, cols=3)
  expected_indices = matrix("12
                             4
                             8", rows=1, cols=3)

  # Test the top k entries for row r.
  # (Previously the call hard-coded the literals 2 and 3, leaving the
  # r and k variables above unused.)
  [values, indices] = util::top_k_row(X, r, k)
  check_values = test_util::check_all_equal(values, expected_values)
  check_indices = test_util::check_all_equal(indices, expected_indices)
}
| |
top_k = function() {
  /*
   * Test for the top_k function.
   *
   * Checks the top-1, top-2, and top-all values and indices of each
   * row of a small probability matrix against known expected results.
   */
  print("Testing the top_k function.")

  # Test input: two rows of four values each
  X = matrix("0.1 0.3 0.2 0.4
              0.1 0.3 0.3 0.2", rows=2, cols=4)

  # test top_1
  print(" - Testing top_1.")
  expected_values_top1 = matrix("0.4
                                 0.3", rows=2, cols=1)
  expected_indices_top1 = matrix("4
                                  2", rows=2, cols=1)
  [values_top1, indices_top1] = util::top_k(X, 1)
  check_values_top1 = test_util::check_all_equal(values_top1, expected_values_top1)
  check_indices_top1 = test_util::check_all_equal(indices_top1, expected_indices_top1)

  # test top_2
  print(" - Testing top_2.")
  expected_values_top2 = matrix("0.4 0.3
                                 0.3 0.3", rows=2, cols=2)
  expected_indices_top2 = matrix("4 2
                                  2 3", rows=2, cols=2)
  [values_top2, indices_top2] = util::top_k(X, 2)
  check_values_top2 = test_util::check_all_equal(values_top2, expected_values_top2)
  check_indices_top2 = test_util::check_all_equal(indices_top2, expected_indices_top2)

  # test top_All
  print(" - Testing top_All.")
  expected_values_topAll = matrix("0.4 0.3 0.2 0.1
                                   0.3 0.3 0.2 0.1", rows=2, cols=4)
  expected_indices_topAll = matrix("4 2 3 1
                                    2 3 4 1", rows=2, cols=4)
  [values_topAll, indices_topAll] = util::top_k(X, 4)
  check_values_topAll = test_util::check_all_equal(values_topAll, expected_values_topAll)
  check_indices_topAll = test_util::check_all_equal(indices_topAll, expected_indices_topAll)
}
| |
top_k2d = function() {
  /*
   * Test for the top_k2d function.
   *
   * Extracts the top-k values (and their channel indices) along the
   * channel axis of a (2, 3, 3, 4) tensor and checks them against
   * known expected results of shape (2, k=2, 3, 4).
   */
  print("Testing the top_k2d function.")
  # Generate data, of shape (2, 3, 3, 4)
  k = 2
  X = matrix("0.1 0.4 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.7 0.3 0.2

              0.2 0.5 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.8 0.3 0.2

              0.3 0.4 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.2 0.3 0.2

              0.1 0.4 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.7 0.3 0.2

              0.2 0.5 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.8 0.3 0.2

              0.3 0.4 0.4 0.5
              0.4 0.1 0.6 0.1
              0.7 0.2 0.3 0.2", rows=2, cols=3*3*4)

  # Expected top-k values per spatial position, largest channel first
  expected_values = matrix("0.3 0.5 0.4 0.5
                            0.4 0.1 0.6 0.1
                            0.7 0.8 0.3 0.2

                            0.2 0.4 0.4 0.5
                            0.4 0.1 0.6 0.1
                            0.7 0.7 0.3 0.2

                            0.3 0.5 0.4 0.5
                            0.4 0.1 0.6 0.1
                            0.7 0.8 0.3 0.2

                            0.2 0.4 0.4 0.5
                            0.4 0.1 0.6 0.1
                            0.7 0.7 0.3 0.2", rows=2, cols=2*3*4)

  # Expected 1-based channel indices corresponding to expected_values
  expected_indices = matrix("3 2 1 1
                             1 1 1 1
                             1 2 1 1

                             2 1 2 2
                             2 2 2 2
                             2 1 2 2

                             3 2 1 1
                             1 1 1 1
                             1 2 1 1

                             2 1 2 2
                             2 2 2 2
                             2 1 2 2", rows=2, cols=24)

  [values, indices] = util::top_k2d(X, k, 3, 3, 4)

  # Equivalency check
  check_values = test_util::check_all_equal(values, expected_values)
  check_indices = test_util::check_all_equal(indices, expected_indices)
}
| |
softmax2d = function() {
  /*
   * Test for 2D softmax function.
   *
   * Applies the channel-wise softmax to a fixed (N=2, C=3, 3x3) input
   * and compares the resulting probabilities element-wise against
   * precomputed expected values.
   */
  print("Testing the 2D softmax function.")

  N = 2 # num example
  C = 3 # num class
  Hin = 3 # example height
  Win = 3 # example width
  X = matrix("10.0 10.0 0.0
              10.0 0.0 0.0
              10.0 0.0 0.0
              0.0 10.0 0.0
              0.0 10.0 0.0
              0.0 10.0 0.0
              0.0 0.0 10.0
              0.0 0.0 10.0
              0.0 0.0 10.0
              10.0 10.0 0.0
              10.0 0.0 0.0
              10.0 0.0 0.0
              0.0 10.0 0.0
              0.0 10.0 0.0
              0.0 10.0 0.0
              0.0 0.0 10.0
              0.0 0.0 10.0
              0.0 0.0 10.0", rows=N, cols=C*Hin*Win)

  # Precomputed softmax probabilities for the input above
  probs_expected = matrix("9.99909163e-01 4.99988675e-01 4.53958055e-05
                           9.99909163e-01 4.53958055e-05 4.53958055e-05
                           9.99909163e-01 4.53958055e-05 4.53958055e-05
                           4.53958055e-05 4.99988675e-01 4.53958055e-05
                           4.53958055e-05 9.99909163e-01 4.53958055e-05
                           4.53958055e-05 9.99909163e-01 4.53958055e-05
                           4.53958055e-05 2.26994507e-05 9.99909163e-01
                           4.53958055e-05 4.53958055e-05 9.99909163e-01
                           4.53958055e-05 4.53958055e-05 9.99909163e-01
                           9.99909163e-01 4.99988675e-01 4.53958055e-05
                           9.99909163e-01 4.53958055e-05 4.53958055e-05
                           9.99909163e-01 4.53958055e-05 4.53958055e-05
                           4.53958055e-05 4.99988675e-01 4.53958055e-05
                           4.53958055e-05 9.99909163e-01 4.53958055e-05
                           4.53958055e-05 9.99909163e-01 4.53958055e-05
                           4.53958055e-05 2.26994507e-05 9.99909163e-01
                           4.53958055e-05 4.53958055e-05 9.99909163e-01
                           4.53958055e-05 4.53958055e-05 9.99909163e-01", rows=N, cols=C*Hin*Win)
  probs = softmax2d::forward(X, C)

  # Equivalency check
  for (i in 1:nrow(probs)) {
    for (j in 1:ncol(probs)) {
      rel_error = test_util::check_rel_error(as.scalar(probs[i,j]), as.scalar(probs_expected[i,j]),
                                             1e-5, 1e-6)
    }
  }
}
| |
elu = function() {
  /*
   * Test for ELU function.
   *
   * Checks both the forward and backward passes (alpha = 1) against
   * precomputed expected values.
   */
  print("Testing ELU function.")

  # Fixed test input
  X = matrix("0.3923 -0.2236 -0.3195 -1.2050 1.0445 -0.6332 0.5731 0.5409 -0.3919 -1.0427", rows = 10, cols = 1)

  print(" - Testing forward")
  out = elu::forward(X, 1)
  out_ref = matrix("0.3923 -0.2003 -0.2735 -0.7003 1.0445 -0.4691 0.5731 0.5409 -0.3242 -0.6475", rows = 10, cols = 1)
  # Element-wise comparison against the expected forward output
  for (r in 1:nrow(out)) {
    for (c in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[r,c]),
                                             as.scalar(out_ref[r,c]), 1e-3, 1e-3)
    }
  }

  print(" - Testing backward")
  out = elu::backward(X, X, 1)
  out_ref = matrix("0.3923 -0.1788 -0.2321 -0.3611 1.0445 -0.3362 0.5731 0.5409 -0.2648 -0.3676", rows = 10, cols = 1)
  # Element-wise comparison against the expected gradient
  for (r in 1:nrow(out)) {
    for (c in 1:ncol(out)) {
      rel_error = test_util::check_rel_error(as.scalar(out[r,c]),
                                             as.scalar(out_ref[r,c]), 1e-3, 1e-3)
    }
  }
}