Merge pull request #866 from apache/dev

Merge Dev branch into master
diff --git a/.github/workflows/macOS.yaml b/.github/workflows/macOS.yaml
index d38ce32..6b5fde2 100644
--- a/.github/workflows/macOS.yaml
+++ b/.github/workflows/macOS.yaml
@@ -33,6 +33,10 @@
       - name: install-build-dependencies
         run: |
          brew install protobuf swig opencv glog lmdb numpy
+         wget https://github.com/oneapi-src/oneTBB/releases/download/v2020.1/tbb-2020.1-mac.tgz 
+         mkdir /tmp/tbb
+         tar zxvf tbb-2020.1-mac.tgz -C /tmp/tbb --strip-components=1
+         mv /tmp/tbb/lib/lib* /usr/local/lib/
          pip3 install numpy && wget https://github.com/oneapi-src/oneDNN/releases/download/v1.2/dnnl_mac_1.2.0_cpu_tbb.tgz -P /tmp
          tar zxf /tmp/dnnl_mac_1.2.0_cpu_tbb.tgz -C /tmp
       - name: configure
@@ -48,7 +52,6 @@
           LD_LIBRARY_PATH: /usr/local/opt/openblas/lib:/tmp/dnnl_mac_1.2.0_cpu_tbb/lib:$LD_LIBRARY_PATH
       - name: C++ test
         run: |
-         brew install tbb
          install_name_tool -change libdnnl.1.dylib /tmp/dnnl_mac_1.2.0_cpu_tbb/lib/libdnnl.1.dylib /Users/runner/work/singa/singa/build/lib/libsinga.dylib
          install_name_tool -change libdnnl.1.dylib /tmp/dnnl_mac_1.2.0_cpu_tbb/lib/libdnnl.1.dylib build/bin/test_singa
          build/bin/test_singa
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6a151f7..1d8201f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -118,6 +118,8 @@
     #ADD_SUBDIRECTORY(lib/cnmem)
     #LIST(APPEND SINGA_LINKER_LIBS cnmem)
     SET(global_cuda_objs "")
+    # Add support for CUDA fp16
+    SET(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} --gpu-architecture=compute_75")
 ENDIF()
 
 # TODO(wangwei) detect the ev lib
diff --git a/LICENSE b/LICENSE
index e968ba3..f448e8b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -297,6 +297,7 @@
 =====================================================================
 SINGA bundles the following under MIT license:
 cmake/ThirdParty/FindOpenCL.cmake
+include/half.hpp
 
 Copyright (c) 2010-2016 Institute for Microelectronics,
                         Institute for Analysis and Scientific Computing, TU Wien.
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index a4257a6..60c4327 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -67,7 +67,7 @@
 
 FIND_PACKAGE(Glog)
 IF(GLOG_FOUND)
-    #MESSAGE(STATUS "GLOG FOUND at ${GLOG_INCLUDE_DIR}")
+    MESSAGE(STATUS "Found GLOG at ${GLOG_INCLUDE_DIR}")
     #ADD_DEFINITIONS("-DUSE_GLOG")
     SET(USE_GLOG TRUE)
     LIST(APPEND SINGA_LINKER_LIBS ${GLOG_LIBRARIES})
diff --git a/examples/cifar_distributed_cnn/README.md b/examples/cifar_distributed_cnn/README.md
new file mode 100644
index 0000000..4af7916
--- /dev/null
+++ b/examples/cifar_distributed_cnn/README.md
@@ -0,0 +1,46 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Image Classification using Convolutional Neural Networks
+
+Examples inside this folder show how to train CNN models using 
+SINGA for image classification.
+
+* `data` includes the scripts for preprocessing image datasets.
+  Currently, MNIST, CIFAR10 and CIFAR100 are included.
+
+* `model` includes the CNN model construction code; each model is defined as
+  a subclass of `Module` that wraps its neural network operations, so the
+  computational graph can be enabled to optimize memory usage and efficiency
+  (see the sketch at the end of this file).
+
+* `autograd` includes the code to train CNN models by calling the
+  [neural network operations](../../python/singa/autograd.py) imperatively;
+  no computational graph is created.
+
+* `train_cnn.py` is the training script, which controls the training flow by
+  performing back-propagation and SGD updates.
+
+* `train_multiprocess.py` is the script for distributed training on a single
+  node with multiple GPUs; it uses Python's multiprocessing module and NCCL.
+
+* `train_mpi.py` is the script for distributed training (among multiple nodes) 
+  using MPI and NCCL for communication.
+
+* `benchmark.py` tests the training throughput using `ResNet50` as the workload.
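+
+The snippet below is a minimal sketch of this `Module`/computational-graph
+workflow, adapted from `benchmark.py` in this folder; the batch size, input
+shape and ResNet-50 model are illustrative placeholders.
+
+```python
+import numpy as np
+from singa import device, opt, tensor
+from model import resnet   # model/ directory in this folder
+
+dev = device.create_cuda_gpu_on(0)                 # assumes one CUDA GPU
+sgd = opt.SGD(lr=0.1, momentum=0.9)
+
+tx = tensor.Tensor((32, 3, 224, 224), dev)         # dummy input batch
+ty = tensor.Tensor((32,), dev, tensor.int32)       # dummy labels
+tx.copy_from_numpy(np.random.randn(32, 3, 224, 224).astype(np.float32))
+ty.copy_from_numpy(np.random.randint(0, 1000, 32, dtype=np.int32))
+
+model = resnet.resnet50(num_channels=3, num_classes=1000)
+model.train()                                      # switch to training mode
+model.set_optimizer(sgd)
+model.compile([tx], is_train=True, use_graph=True, sequential=False)
+model(tx, ty, dist_option='fp32', spars=None)      # one training iteration
+```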
diff --git a/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py b/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py
new file mode 100755
index 0000000..b5e51ad
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from resnet_cifar10 import *
+import multiprocessing
+import sys
+
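+# Usage sketch (assumed invocation: the only argument is the number of GPUs/processes):
+#   python3 cifar10_multiprocess.py 2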
+if __name__ == '__main__':
+
+    # Generate a NCCL ID to be used for collective communication
+    nccl_id = singa.NcclIdHolder()
+
+    # number of GPUs to be used
+    world_size = int(sys.argv[1])
+
+    # Test the experimental asynchronous training with partial parameter updates
+    partial_update = True
+
+    process = []
+    for local_rank in range(0, world_size):
+        process.append(
+            multiprocessing.Process(target=train_cifar10,
+                                    args=(True, local_rank, world_size, nccl_id,
+                                          partial_update)))
+
+    for p in process:
+        p.start()
diff --git a/examples/cifar_distributed_cnn/autograd/mnist_cnn.py b/examples/cifar_distributed_cnn/autograd/mnist_cnn.py
new file mode 100644
index 0000000..16752ce
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/mnist_cnn.py
@@ -0,0 +1,304 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from singa import singa_wrap as singa
+from singa import autograd
+from singa import layer
+from singa import tensor
+from singa import device
+from singa import opt
+import numpy as np
+import os
+import sys
+import gzip
+import codecs
+import time
+
+
+class CNN:
+
+    def __init__(self):
+        self.conv1 = layer.Conv2d(1, 20, 5, padding=0)
+        self.conv2 = layer.Conv2d(20, 50, 5, padding=0)
+        self.linear1 = layer.Linear(4 * 4 * 50, 500)
+        self.linear2 = layer.Linear(500, 10)
+        self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
+        self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
+        self.relu1 = layer.ReLU()
+        self.relu2 = layer.ReLU()
+        self.relu3 = layer.ReLU()
+        self.flatten = layer.Flatten()
+
+    def forward(self, x):
+        y = self.conv1(x)
+        y = self.relu1(y)
+        y = self.pooling1(y)
+        y = self.conv2(y)
+        y = self.relu2(y)
+        y = self.pooling2(y)
+        y = self.flatten(y)
+        y = self.linear1(y)
+        y = self.relu3(y)
+        y = self.linear2(y)
+        return y
+
+
+def check_dataset_exist(dirpath):
+    if not os.path.exists(dirpath):
+        print(
+            'The MNIST dataset does not exist. Please download the mnist dataset using download_mnist.py (e.g. python3 download_mnist.py)'
+        )
+        sys.exit(0)
+    return dirpath
+
+
+def load_dataset():
+    train_x_path = '/tmp/train-images-idx3-ubyte.gz'
+    train_y_path = '/tmp/train-labels-idx1-ubyte.gz'
+    valid_x_path = '/tmp/t10k-images-idx3-ubyte.gz'
+    valid_y_path = '/tmp/t10k-labels-idx1-ubyte.gz'
+
+    train_x = read_image_file(check_dataset_exist(train_x_path)).astype(
+        np.float32)
+    train_y = read_label_file(check_dataset_exist(train_y_path)).astype(
+        np.float32)
+    valid_x = read_image_file(check_dataset_exist(valid_x_path)).astype(
+        np.float32)
+    valid_y = read_label_file(check_dataset_exist(valid_y_path)).astype(
+        np.float32)
+    return train_x, train_y, valid_x, valid_y
+
+
+def read_label_file(path):
+    with gzip.open(path, 'rb') as f:
+        data = f.read()
+        assert get_int(data[:4]) == 2049
+        length = get_int(data[4:8])
+        parsed = np.frombuffer(data, dtype=np.uint8, offset=8).reshape((length))
+        return parsed
+
+
+def get_int(b):
+    return int(codecs.encode(b, 'hex'), 16)
+
+
+def read_image_file(path):
+    with gzip.open(path, 'rb') as f:
+        data = f.read()
+        assert get_int(data[:4]) == 2051
+        length = get_int(data[4:8])
+        num_rows = get_int(data[8:12])
+        num_cols = get_int(data[12:16])
+        parsed = np.frombuffer(data, dtype=np.uint8, offset=16).reshape(
+            (length, 1, num_rows, num_cols))
+        return parsed
+
+
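+# Convert integer labels to one-hot float vectors, e.g.
+# to_categorical(np.array([1, 3]), 4) -> [[0., 1., 0., 0.], [0., 0., 0., 1.]]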
+def to_categorical(y, num_classes):
+    y = np.array(y, dtype="int")
+    n = y.shape[0]
+    categorical = np.zeros((n, num_classes))
+    categorical[np.arange(n), y] = 1
+    categorical = categorical.astype(np.float32)
+    return categorical
+
+
+def accuracy(pred, target):
+    y = np.argmax(pred, axis=1)
+    t = np.argmax(target, axis=1)
+    a = y == t
+    return np.array(a, "int").sum()
+
+
+# Function to all-reduce the numpy accuracy and loss across multiple devices
+def reduce_variable(variable, dist_opt, reducer):
+    reducer.copy_from_numpy(variable)
+    dist_opt.all_reduce(reducer.data)
+    dist_opt.wait()
+    output = tensor.to_numpy(reducer)
+    return output
+
+
+# Function to synchronize the initial model parameters (SINGA tensors) across devices
+def synchronize(tensor, dist_opt):
+    dist_opt.all_reduce(tensor.data)
+    dist_opt.wait()
+    tensor /= dist_opt.world_size
+
+
+# Data augmentation
+def augmentation(x, batch_size):
+    xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
+    for data_num in range(0, batch_size):
+        offset = np.random.randint(8, size=2)
+        x[data_num, :, :, :] = xpad[data_num, :, offset[0]:offset[0] + 28,
+                                    offset[1]:offset[1] + 28]
+        if_flip = np.random.randint(2)
+        if (if_flip):
+            x[data_num, :, :, :] = x[data_num, :, :, ::-1]
+    return x
+
+
+# Data partition
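+# e.g. 60,000 training samples with world_size = 4 gives 15,000 samples per rank;
+# rank 1 receives samples 15,000-29,999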
+def data_partition(dataset_x, dataset_y, global_rank, world_size):
+    data_per_rank = dataset_x.shape[0] // world_size
+    idx_start = global_rank * data_per_rank
+    idx_end = (global_rank + 1) * data_per_rank
+    return dataset_x[idx_start:idx_end], dataset_y[idx_start:idx_end]
+
+
+def train_mnist_cnn(DIST=False,
+                    local_rank=None,
+                    world_size=None,
+                    nccl_id=None,
+                    spars=0,
+                    topK=False,
+                    corr=True):
+
+    # Define the hyperparameters for the MNIST CNN
+    max_epoch = 10
+    batch_size = 64
+    sgd = opt.SGD(lr=0.005, momentum=0.9, weight_decay=1e-5)
+
+    # Prepare training and validation data
+    train_x, train_y, test_x, test_y = load_dataset()
+    IMG_SIZE = 28
+    num_classes = 10
+    train_y = to_categorical(train_y, num_classes)
+    test_y = to_categorical(test_y, num_classes)
+
+    # Normalization
+    train_x = train_x / 255
+    test_x = test_x / 255
+
+    if DIST:
+        # For distributed GPU training
+        sgd = opt.DistOpt(sgd,
+                          nccl_id=nccl_id,
+                          local_rank=local_rank,
+                          world_size=world_size)
+        dev = device.create_cuda_gpu_on(sgd.local_rank)
+
+        # Dataset partition for distributed training
+        train_x, train_y = data_partition(train_x, train_y, sgd.global_rank,
+                                          sgd.world_size)
+        test_x, test_y = data_partition(test_x, test_y, sgd.global_rank,
+                                        sgd.world_size)
+        world_size = sgd.world_size
+    else:
+        # For single GPU
+        dev = device.create_cuda_gpu()
+        world_size = 1
+
+    # Create model
+    model = CNN()
+
+    tx = tensor.Tensor((batch_size, 1, IMG_SIZE, IMG_SIZE), dev, tensor.float32)
+    ty = tensor.Tensor((batch_size, num_classes), dev, tensor.int32)
+    num_train_batch = train_x.shape[0] // batch_size
+    num_test_batch = test_x.shape[0] // batch_size
+    idx = np.arange(train_x.shape[0], dtype=np.int32)
+
+    if DIST:
+        # Synchronize the initial parameters
+        autograd.training = True
+        x = np.random.randn(batch_size, 1, IMG_SIZE,
+                            IMG_SIZE).astype(np.float32)
+        y = np.zeros(shape=(batch_size, num_classes), dtype=np.int32)
+        tx.copy_from_numpy(x)
+        ty.copy_from_numpy(y)
+        out = model.forward(tx)
+        loss = autograd.softmax_cross_entropy(out, ty)
+        for p, g in autograd.backward(loss):
+            synchronize(p, sgd)
+
+    # Training and evaluation loop
+    for epoch in range(max_epoch):
+        start_time = time.time()
+        np.random.shuffle(idx)
+
+        if ((DIST == False) or (sgd.global_rank == 0)):
+            print('Starting Epoch %d:' % (epoch))
+
+        # Training phase
+        autograd.training = True
+        train_correct = np.zeros(shape=[1], dtype=np.float32)
+        test_correct = np.zeros(shape=[1], dtype=np.float32)
+        train_loss = np.zeros(shape=[1], dtype=np.float32)
+
+        for b in range(num_train_batch):
+            x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
+            x = augmentation(x, batch_size)
+            y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
+            tx.copy_from_numpy(x)
+            ty.copy_from_numpy(y)
+            out = model.forward(tx)
+            loss = autograd.softmax_cross_entropy(out, ty)
+            train_correct += accuracy(tensor.to_numpy(out), y)
+            train_loss += tensor.to_numpy(loss)[0]
+            if DIST:
+                if (spars == 0):
+                    sgd.backward_and_update(loss, threshold=50000)
+                else:
+                    sgd.backward_and_sparse_update(loss,
+                                                   spars=spars,
+                                                   topK=topK,
+                                                   corr=corr)
+            else:
+                sgd(loss)
+
+        if DIST:
+            # Reduce the training accuracy and loss from multiple devices
+            reducer = tensor.Tensor((1,), dev, tensor.float32)
+            train_correct = reduce_variable(train_correct, sgd, reducer)
+            train_loss = reduce_variable(train_loss, sgd, reducer)
+
+        # Output the training loss and accuracy
+        if ((DIST == False) or (sgd.global_rank == 0)):
+            print('Training loss = %f, training accuracy = %f' %
+                  (train_loss, train_correct /
+                   (num_train_batch * batch_size * world_size)),
+                  flush=True)
+
+        # Evaluation phase
+        autograd.training = False
+        for b in range(num_test_batch):
+            x = test_x[b * batch_size:(b + 1) * batch_size]
+            y = test_y[b * batch_size:(b + 1) * batch_size]
+            tx.copy_from_numpy(x)
+            ty.copy_from_numpy(y)
+            out_test = model.forward(tx)
+            test_correct += accuracy(tensor.to_numpy(out_test), y)
+
+        if DIST:
+            # Reduce the evaluation accuracy from multiple devices
+            test_correct = reduce_variable(test_correct, sgd, reducer)
+
+        # Output the evaluation accuracy
+        if ((DIST == False) or (sgd.global_rank == 0)):
+            print('Evaluation accuracy = %f, Elapsed Time = %fs' %
+                  (test_correct / (num_test_batch * batch_size * world_size),
+                   time.time() - start_time),
+                  flush=True)
+
+
+if __name__ == '__main__':
+
+    DIST = False
+    train_mnist_cnn(DIST=DIST)
diff --git a/examples/cifar_distributed_cnn/autograd/mnist_dist.py b/examples/cifar_distributed_cnn/autograd/mnist_dist.py
new file mode 100644
index 0000000..3586127
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/mnist_dist.py
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from mnist_cnn import *
+
+if __name__ == '__main__':
+
+    DIST = True
+    train_mnist_cnn(DIST=DIST)
diff --git a/examples/cifar_distributed_cnn/autograd/mnist_multiprocess.py b/examples/cifar_distributed_cnn/autograd/mnist_multiprocess.py
new file mode 100644
index 0000000..f51344f
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/mnist_multiprocess.py
@@ -0,0 +1,39 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from mnist_cnn import *
+import multiprocessing
+import sys
+
+if __name__ == '__main__':
+
+    # Generate a NCCL ID to be used for collective communication
+    nccl_id = singa.NcclIdHolder()
+
+    # Number of GPUs to be used
+    world_size = int(sys.argv[1])
+
+    process = []
+    for local_rank in range(0, world_size):
+        process.append(
+            multiprocessing.Process(target=train_mnist_cnn,
+                                    args=(True, local_rank, world_size, nccl_id)))
+
+    for p in process:
+        p.start()
diff --git a/examples/cifar_distributed_cnn/autograd/sparsification_mnist.py b/examples/cifar_distributed_cnn/autograd/sparsification_mnist.py
new file mode 100644
index 0000000..315605a
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/sparsification_mnist.py
@@ -0,0 +1,45 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from mnist_cnn import *
+import multiprocessing
+import sys
+
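+# Usage sketch (assumed invocation: the only argument is the number of GPUs/processes):
+#   python3 sparsification_mnist.py 2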
+if __name__ == '__main__':
+
+    # Generate a NCCL ID to be used for collective communication
+    nccl_id = singa.NcclIdHolder()
+
+    # Number of GPUs to be used
+    world_size = int(sys.argv[1])
+
+    # Use sparsification with parameters
+    topK = False  # when False, sparsification uses a constant absolute threshold
+    corr = True  # if True, use the locally accumulated gradient for error correction
+    sparsThreshold = 0.05  # The constant absolute threshold for sparsification
+
+    process = []
+    for local_rank in range(0, world_size):
+        process.append(
+            multiprocessing.Process(target=train_mnist_cnn,
+                                    args=(True, local_rank, world_size, nccl_id,
+                                          sparsThreshold, topK, corr)))
+
+    for p in process:
+        p.start()
diff --git a/examples/cifar_distributed_cnn/autograd/xceptionnet.py b/examples/cifar_distributed_cnn/autograd/xceptionnet.py
new file mode 100644
index 0000000..357e47d
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/xceptionnet.py
@@ -0,0 +1,303 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+from singa import autograd
+from singa import tensor
+from singa import device
+from singa import layer
+from singa import opt
+
+import numpy as np
+from tqdm import trange
+
+# the code is modified from
+# https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
+
+
+class Block(layer.Layer):
+
+    def __init__(self,
+                 in_filters,
+                 out_filters,
+                 reps,
+                 strides=1,
+                 padding=0,
+                 start_with_relu=True,
+                 grow_first=True):
+        super(Block, self).__init__()
+
+        if out_filters != in_filters or strides != 1:
+            self.skip = layer.Conv2d(in_filters,
+                                     out_filters,
+                                     1,
+                                     stride=strides,
+                                     padding=padding,
+                                     bias=False)
+            self.skipbn = layer.BatchNorm2d(out_filters)
+        else:
+            self.skip = None
+
+        self.layers = []
+
+        filters = in_filters
+        if grow_first:
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(in_filters,
+                                      out_filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(out_filters))
+            filters = out_filters
+
+        for i in range(reps - 1):
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(filters,
+                                      filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(filters))
+
+        if not grow_first:
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(in_filters,
+                                      out_filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(out_filters))
+
+        if not start_with_relu:
+            self.layers = self.layers[1:]
+        else:
+            self.layers[0] = layer.ReLU()
+
+        if strides != 1:
+            self.layers.append(layer.MaxPool2d(3, strides, padding + 1))
+
+        self.register_layers(*self.layers)
+
+        self.add = layer.Add()
+
+    def forward(self, x):
+        y = self.layers[0](x)
+        # rename the loop variable so it does not shadow the imported `layer` module
+        for lyr in self.layers[1:]:
+            if isinstance(y, tuple):
+                y = y[0]
+            y = lyr(y)
+
+        if self.skip is not None:
+            skip = self.skip(x)
+            skip = self.skipbn(skip)
+        else:
+            skip = x
+        y = self.add(y, skip)
+        return y
+
+
+__all__ = ['Xception']
+
+
+class Xception(layer.Layer):
+    """
+    Xception optimized for the ImageNet dataset, as specified in
+    https://arxiv.org/pdf/1610.02357.pdf
+    """
+
+    def __init__(self, num_classes=1000):
+        """ Constructor
+        Args:
+            num_classes: number of classes
+        """
+        super(Xception, self).__init__()
+        self.num_classes = num_classes
+
+        self.conv1 = layer.Conv2d(3, 32, 3, 2, 0, bias=False)
+        self.bn1 = layer.BatchNorm2d(32)
+        self.relu1 = layer.ReLU()
+
+        self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
+        self.bn2 = layer.BatchNorm2d(64)
+        self.relu2 = layer.ReLU()
+        # do relu here
+
+        self.block1 = Block(64,
+                            128,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=False,
+                            grow_first=True)
+        self.block2 = Block(128,
+                            256,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block3 = Block(256,
+                            728,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=True,
+                            grow_first=True)
+
+        self.block4 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block5 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block6 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block7 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+
+        self.block8 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block9 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block10 = Block(728,
+                             728,
+                             3,
+                             1,
+                             start_with_relu=True,
+                             grow_first=True)
+        self.block11 = Block(728,
+                             728,
+                             3,
+                             1,
+                             start_with_relu=True,
+                             grow_first=True)
+
+        self.block12 = Block(728,
+                             1024,
+                             2,
+                             2,
+                             start_with_relu=True,
+                             grow_first=False)
+
+        self.conv3 = layer.SeparableConv2d(1024, 1536, 3, 1, 1)
+        self.bn3 = layer.BatchNorm2d(1536)
+        self.relu3 = layer.ReLU()
+
+        # do relu here
+        self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
+        self.bn4 = layer.BatchNorm2d(2048)
+
+        self.relu4 = layer.ReLU()
+        self.globalpooling = layer.MaxPool2d(10, 1)
+        self.flatten = layer.Flatten()
+        self.fc = layer.Linear(2048, num_classes)
+
+    def features(self, input):
+        x = self.conv1(input)
+        x = self.bn1(x)
+        x = self.relu1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.relu2(x)
+
+        x = self.block1(x)
+        x = self.block2(x)
+        x = self.block3(x)
+        x = self.block4(x)
+        x = self.block5(x)
+        x = self.block6(x)
+        x = self.block7(x)
+        x = self.block8(x)
+        x = self.block9(x)
+        x = self.block10(x)
+        x = self.block11(x)
+        x = self.block12(x)
+
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.relu3(x)
+
+        x = self.conv4(x)
+        x = self.bn4(x)
+        return x
+
+    def logits(self, features):
+        x = self.relu4(features)
+        x = self.globalpooling(x)
+        x = self.flatten(x)
+        x = self.fc(x)
+        return x
+
+    def forward(self, input):
+        x = self.features(input)
+        x = self.logits(x)
+        return x
+
+
+if __name__ == '__main__':
+    model = Xception(num_classes=1000)
+    print('Start initialization............')
+    dev = device.create_cuda_gpu_on(0)
+    #dev = device.create_cuda_gpu()
+
+    niters = 20
+    batch_size = 16
+    IMG_SIZE = 299
+    sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
+
+    tx = tensor.Tensor((batch_size, 3, IMG_SIZE, IMG_SIZE), dev)
+    ty = tensor.Tensor((batch_size,), dev, tensor.int32)
+    autograd.training = True
+    x = np.random.randn(batch_size, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
+    y = np.random.randint(0, 1000, batch_size, dtype=np.int32)
+    tx.copy_from_numpy(x)
+    ty.copy_from_numpy(y)
+
+    with trange(niters) as t:
+        for _ in t:
+            x = model(tx)
+            loss = autograd.softmax_cross_entropy(x, ty)
+            sgd(loss)
diff --git a/examples/cifar_distributed_cnn/benchmark.py b/examples/cifar_distributed_cnn/benchmark.py
new file mode 100644
index 0000000..6f9ef52
--- /dev/null
+++ b/examples/cifar_distributed_cnn/benchmark.py
@@ -0,0 +1,122 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# the code is modified from
+# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
+
+from singa import opt
+# import opt
+from singa import device
+from singa import tensor
+
+import argparse
+import time
+import numpy as np
+from tqdm import trange
+
+
+def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
+
+    # Define the hyperparameters for the ResNet training
+    niters = 100
+    batch_size = 32
+    sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
+
+    IMG_SIZE = 224
+
+    # For distributed training, sequential has better throughput in the current version
+    if DIST == True:
+        sgd = opt.DistOpt(sgd)
+        world_size = sgd.world_size
+        local_rank = sgd.local_rank
+        global_rank = sgd.global_rank
+        sequential = True
+    else:
+        local_rank = 0
+        world_size = 1
+        global_rank = 0
+        sequential = False
+
+    dev = device.create_cuda_gpu_on(local_rank)
+
+    tx = tensor.Tensor((batch_size, 3, IMG_SIZE, IMG_SIZE), dev)
+    ty = tensor.Tensor((batch_size,), dev, tensor.int32)
+    x = np.random.randn(batch_size, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
+    y = np.random.randint(0, 1000, batch_size, dtype=np.int32)
+    tx.copy_from_numpy(x)
+    ty.copy_from_numpy(y)
+
+    dev.SetVerbosity(verbosity)
+    dev.SetSkipIteration(5)
+
+    # Construct the model
+    from model import resnet
+    model = resnet.resnet50(num_channels=3, num_classes=1000)
+
+    model.train()
+    model.set_optimizer(sgd)
+    model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
+
+    # Train model
+    dev.Sync()
+    start = time.time()
+    with trange(niters) as t:
+        for _ in t:
+            model(tx, ty, dist_option='fp32', spars=None)
+
+    dev.Sync()
+    end = time.time()
+    titer = (end - start) / float(niters)
+    throughput = float(niters * batch_size * world_size) / (end - start)
+    if global_rank == 0:
+        print("\nThroughput = {} images per second".format(throughput), flush=True)
+        print("Total time = {}".format(end - start), flush=True)
+        print("Time per iteration = {}".format(titer), flush=True)
+        dev.PrintTimeProfiling()
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(
+        description='Throughput test using Resnet 50')
+    parser.add_argument('--dist',
+                        '--enable-dist',
+                        default=False,
+                        action='store_true',
+                        help='enable distributed training',
+                        dest='DIST')
+    parser.add_argument('--no-graph',
+                        '--disable-graph',
+                        default=True,
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('--verbosity',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    train_resnet(DIST=args.DIST,
+                 graph=args.graph,
+                 sequential=False,
+                 verbosity=args.verbosity)
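+
+# Example invocations (a sketch using the flags defined above; a distributed run
+# additionally assumes an MPI launcher such as mpiexec is available):
+#   python3 benchmark.py                       # single GPU, graph enabled
+#   python3 benchmark.py --no-graph            # single GPU, graph disabled
+#   mpiexec -n 2 python3 benchmark.py --dist   # distributed training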
diff --git a/examples/cifar_distributed_cnn/data/cifar10.py b/examples/cifar_distributed_cnn/data/cifar10.py
new file mode 100644
index 0000000..3b83ad7
--- /dev/null
+++ b/examples/cifar_distributed_cnn/data/cifar10.py
@@ -0,0 +1,91 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
+
+import numpy as np
+import os
+import sys
+
+
+def load_dataset(filepath):
+    with open(filepath, 'rb') as fd:
+        try:
+            cifar10 = pickle.load(fd, encoding='latin1')
+        except TypeError:
+            cifar10 = pickle.load(fd)
+    image = cifar10['data'].astype(dtype=np.uint8)
+    image = image.reshape((-1, 3, 32, 32))
+    label = np.asarray(cifar10['labels'], dtype=np.uint8)
+    label = label.reshape(label.size, 1)
+    return image, label
+
+
+def load_train_data(dir_path='/tmp/cifar-10-batches-py', num_batches=5):
+    labels = []
+    batchsize = 10000
+    images = np.empty((num_batches * batchsize, 3, 32, 32), dtype=np.uint8)
+    for did in range(1, num_batches + 1):
+        fname_train_data = dir_path + "/data_batch_{}".format(did)
+        image, label = load_dataset(check_dataset_exist(fname_train_data))
+        images[(did - 1) * batchsize:did * batchsize] = image
+        labels.extend(label)
+    images = np.array(images, dtype=np.float32)
+    labels = np.array(labels, dtype=np.int32)
+    return images, labels
+
+
+def load_test_data(dir_path='/tmp/cifar-10-batches-py'):
+    images, labels = load_dataset(check_dataset_exist(dir_path + "/test_batch"))
+    return np.array(images, dtype=np.float32), np.array(labels, dtype=np.int32)
+
+
+def check_dataset_exist(dirpath):
+    if not os.path.exists(dirpath):
+        print(
+            'Please download the cifar10 dataset using python data/download_cifar10.py'
+        )
+        sys.exit(0)
+    return dirpath
+
+
+def normalize(train_x, val_x):
+    mean = [0.4914, 0.4822, 0.4465]
+    std = [0.2023, 0.1994, 0.2010]
+    train_x /= 255
+    val_x /= 255
+    for ch in range(0, 3):
+        train_x[:, ch, :, :] -= mean[ch]
+        train_x[:, ch, :, :] /= std[ch]
+        val_x[:, ch, :, :] -= mean[ch]
+        val_x[:, ch, :, :] /= std[ch]
+    return train_x, val_x
+
+def load():
+    train_x, train_y = load_train_data()
+    val_x, val_y = load_test_data()
+    train_x, val_x = normalize(train_x, val_x)
+    train_y = train_y.flatten()
+    val_y = val_y.flatten()
+    return train_x, train_y, val_x, val_y
diff --git a/examples/cifar_distributed_cnn/data/cifar100.py b/examples/cifar_distributed_cnn/data/cifar100.py
new file mode 100644
index 0000000..88b943f
--- /dev/null
+++ b/examples/cifar_distributed_cnn/data/cifar100.py
@@ -0,0 +1,81 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+try:
+    import pickle
+except ImportError:
+    import cPickle as pickle
+
+import numpy as np
+import os
+import sys
+
+
+def load_dataset(filepath):
+    with open(filepath, 'rb') as fd:
+        try:
+            cifar100 = pickle.load(fd, encoding='latin1')
+        except TypeError:
+            cifar100 = pickle.load(fd)
+    image = cifar100['data'].astype(dtype=np.uint8)
+    image = image.reshape((-1, 3, 32, 32))
+    label = np.asarray(cifar100['fine_labels'], dtype=np.uint8)
+    label = label.reshape(label.size, 1)
+    return image, label
+
+
+def load_train_data(dir_path='/tmp/cifar-100-python'):
+    images, labels = load_dataset(check_dataset_exist(dir_path + "/train"))
+    return np.array(images, dtype=np.float32), np.array(labels, dtype=np.int32)
+
+
+def load_test_data(dir_path='/tmp/cifar-100-python'):
+    images, labels = load_dataset(check_dataset_exist(dir_path + "/test"))
+    return np.array(images, dtype=np.float32), np.array(labels, dtype=np.int32)
+
+
+def check_dataset_exist(dirpath):
+    if not os.path.exists(dirpath):
+        print(
+            'Please download the cifar100 dataset using python data/download_cifar100.py'
+        )
+        sys.exit(0)
+    return dirpath
+
+
+def normalize(train_x, val_x):
+    mean = [0.4914, 0.4822, 0.4465]
+    std = [0.2023, 0.1994, 0.2010]
+    train_x /= 255
+    val_x /= 255
+    for ch in range(0, 3):
+        train_x[:, ch, :, :] -= mean[ch]
+        train_x[:, ch, :, :] /= std[ch]
+        val_x[:, ch, :, :] -= mean[ch]
+        val_x[:, ch, :, :] /= std[ch]
+    return train_x, val_x
+
+
+def load():
+    train_x, train_y = load_train_data()
+    val_x, val_y = load_test_data()
+    train_x, val_x = normalize(train_x, val_x)
+    train_y = train_y.flatten()
+    val_y = val_y.flatten()
+    return train_x, train_y, val_x, val_y
diff --git a/examples/cifar_distributed_cnn/data/download_cifar10.py b/examples/cifar_distributed_cnn/data/download_cifar10.py
new file mode 100644
index 0000000..a010b2e
--- /dev/null
+++ b/examples/cifar_distributed_cnn/data/download_cifar10.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#     http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+
+from __future__ import print_function
+from future import standard_library
+standard_library.install_aliases()
+import urllib.request, urllib.parse, urllib.error
+import tarfile
+import os
+import sys
+
+
+def extract_tarfile(filepath):
+    if os.path.exists(filepath):
+        print('The tar file already exists. Extracting it now...')
+        with tarfile.open(filepath, 'r') as f:
+            f.extractall('/tmp/')
+        print('Finished!')
+        sys.exit(0)
+
+
+def do_download(dirpath, gzfile, url):
+    print('Downloading CIFAR from %s' % (url))
+    urllib.request.urlretrieve(url, gzfile)
+    extract_tarfile(gzfile)
+    print('Finished!')
+
+
+if __name__ == '__main__':
+    dirpath = '/tmp/'
+    gzfile = dirpath + 'cifar-10-python.tar.gz'
+    url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
+    do_download(dirpath, gzfile, url)
diff --git a/examples/cifar_distributed_cnn/data/download_mnist.py b/examples/cifar_distributed_cnn/data/download_mnist.py
new file mode 100644
index 0000000..65acb0e
--- /dev/null
+++ b/examples/cifar_distributed_cnn/data/download_mnist.py
@@ -0,0 +1,49 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import urllib.request
+
+
+def check_exist_or_download(url):
+
+    download_dir = '/tmp/'
+    name = url.rsplit('/', 1)[-1]
+    filename = os.path.join(download_dir, name)
+
+    if not os.path.isfile(filename):
+        print("Downloading %s" % url)
+        urllib.request.urlretrieve(url, filename)
+    else:
+        print("Already Downloaded: %s" % url)
+
+
+if __name__ == '__main__':
+
+    # URLs of the MNIST dataset files
+    train_x_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
+    train_y_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
+    valid_x_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
+    valid_y_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
+
+    # Download the MNIST dataset files
+    check_exist_or_download(train_x_url)
+    check_exist_or_download(train_y_url)
+    check_exist_or_download(valid_x_url)
+    check_exist_or_download(valid_y_url)
diff --git a/examples/cifar_distributed_cnn/data/mnist.py b/examples/cifar_distributed_cnn/data/mnist.py
new file mode 100644
index 0000000..9cd1a84
--- /dev/null
+++ b/examples/cifar_distributed_cnn/data/mnist.py
@@ -0,0 +1,91 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import numpy as np
+import os
+import sys
+import gzip
+import codecs
+
+
+def check_dataset_exist(dirpath):
+    if not os.path.exists(dirpath):
+        print(
+            'The MNIST dataset does not exist. Please download the mnist dataset using python data/download_mnist.py'
+        )
+        sys.exit(0)
+    return dirpath
+
+
+def load_dataset():
+    train_x_path = '/tmp/train-images-idx3-ubyte.gz'
+    train_y_path = '/tmp/train-labels-idx1-ubyte.gz'
+    valid_x_path = '/tmp/t10k-images-idx3-ubyte.gz'
+    valid_y_path = '/tmp/t10k-labels-idx1-ubyte.gz'
+
+    train_x = read_image_file(check_dataset_exist(train_x_path)).astype(
+        np.float32)
+    train_y = read_label_file(check_dataset_exist(train_y_path)).astype(
+        np.float32)
+    valid_x = read_image_file(check_dataset_exist(valid_x_path)).astype(
+        np.float32)
+    valid_y = read_label_file(check_dataset_exist(valid_y_path)).astype(
+        np.float32)
+    return train_x, train_y, valid_x, valid_y
+
+
+def read_label_file(path):
+    with gzip.open(path, 'rb') as f:
+        data = f.read()
+        assert get_int(data[:4]) == 2049
+        length = get_int(data[4:8])
+        parsed = np.frombuffer(data, dtype=np.uint8, offset=8).reshape((length))
+        return parsed
+
+
+def get_int(b):
+    return int(codecs.encode(b, 'hex'), 16)
+
+
+def read_image_file(path):
+    with gzip.open(path, 'rb') as f:
+        data = f.read()
+        assert get_int(data[:4]) == 2051
+        length = get_int(data[4:8])
+        num_rows = get_int(data[8:12])
+        num_cols = get_int(data[12:16])
+        parsed = np.frombuffer(data, dtype=np.uint8, offset=16).reshape(
+            (length, 1, num_rows, num_cols))
+        return parsed
+
+
+def normalize(train_x, val_x):
+    train_x /= 255
+    val_x /= 255
+    return train_x, val_x
+
+
+def load():
+    train_x, train_y, val_x, val_y = load_dataset()
+    train_x, val_x = normalize(train_x, val_x)
+    train_x = train_x.astype(np.float32)
+    val_x = val_x.astype(np.float32)
+    train_y = train_y.astype(np.int32)
+    val_y = val_y.astype(np.int32)
+    return train_x, train_y, val_x, val_y
diff --git a/examples/cifar_distributed_cnn/model/alexnet.py b/examples/cifar_distributed_cnn/model/alexnet.py
new file mode 100644
index 0000000..cad7b1e
--- /dev/null
+++ b/examples/cifar_distributed_cnn/model/alexnet.py
@@ -0,0 +1,119 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from singa import layer
+from singa import model
+
+
+class AlexNet(model.Model):
+
+    def __init__(self, num_classes=10, num_channels=1):
+        super(AlexNet, self).__init__()
+        self.num_classes = num_classes
+        self.input_size = 224
+        self.dimension = 4
+        self.conv1 = layer.Conv2d(num_channels, 64, 11, stride=4, padding=2)
+        self.conv2 = layer.Conv2d(64, 192, 5, padding=2)
+        self.conv3 = layer.Conv2d(192, 384, 3, padding=1)
+        self.conv4 = layer.Conv2d(384, 256, 3, padding=1)
+        self.conv5 = layer.Conv2d(256, 256, 3, padding=1)
+        self.linear1 = layer.Linear(4096)
+        self.linear2 = layer.Linear(4096)
+        self.linear3 = layer.Linear(num_classes)
+        self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
+        self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
+        self.pooling3 = layer.MaxPool2d(2, 2, padding=0)
+        self.avg_pooling1 = layer.AvgPool2d(3, 2, padding=0)
+        self.relu1 = layer.ReLU()
+        self.relu2 = layer.ReLU()
+        self.relu3 = layer.ReLU()
+        self.relu4 = layer.ReLU()
+        self.relu5 = layer.ReLU()
+        self.relu6 = layer.ReLU()
+        self.relu7 = layer.ReLU()
+        self.flatten = layer.Flatten()
+        self.dropout1 = layer.Dropout()
+        self.dropout2 = layer.Dropout()
+        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
+
+    def forward(self, x):
+        y = self.conv1(x)
+        y = self.relu1(y)
+        y = self.pooling1(y)
+        y = self.conv2(y)
+        y = self.relu2(y)
+        y = self.pooling2(y)
+        y = self.conv3(y)
+        y = self.relu3(y)
+        y = self.conv4(y)
+        y = self.relu4(y)
+        y = self.conv5(y)
+        y = self.relu5(y)
+        y = self.pooling3(y)
+        y = self.avg_pooling1(y)
+        y = self.flatten(y)
+        y = self.dropout1(y)
+        y = self.linear1(y)
+        y = self.relu6(y)
+        y = self.dropout2(y)
+        y = self.linear2(y)
+        y = self.relu7(y)
+        y = self.linear3(y)
+        return y
+
+    def train_one_batch(self, x, y, dist_option, spars):
+        out = self.forward(x)
+        loss = self.softmax_cross_entropy(out, y)
+
+        if dist_option == 'plain':
+            self.optimizer(loss)
+        elif dist_option == 'half':
+            self.optimizer.backward_and_update_half(loss)
+        elif dist_option == 'partialUpdate':
+            self.optimizer.backward_and_partial_update(loss)
+        elif dist_option == 'sparseTopK':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=True,
+                                                      spars=spars)
+        elif dist_option == 'sparseThreshold':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=False,
+                                                      spars=spars)
+        return out, loss
+
+    def set_optimizer(self, optimizer):
+        self.optimizer = optimizer
+
+
+def create_model(pretrained=False, **kwargs):
+    """Constructs a AlexNet model.
+
+    Args:
+        pretrained (bool): If True, returns a pre-trained model.
+    
+    Returns:
+        The created AlexNet model.
+    
+    """
+    model = AlexNet(**kwargs)
+
+    return model
+
+
+__all__ = ['AlexNet', 'create_model']
diff --git a/examples/cifar_distributed_cnn/model/resnet.py b/examples/cifar_distributed_cnn/model/resnet.py
new file mode 100644
index 0000000..28b5f99
--- /dev/null
+++ b/examples/cifar_distributed_cnn/model/resnet.py
@@ -0,0 +1,300 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# the code is modified from
+# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
+
+from singa import layer
+from singa import model
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution with padding"""
+    return layer.Conv2d(
+        in_planes,
+        out_planes,
+        3,
+        stride=stride,
+        padding=1,
+        bias=False,
+    )
+
+
+class BasicBlock(layer.Layer):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(BasicBlock, self).__init__()
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = layer.BatchNorm2d(planes)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = layer.BatchNorm2d(planes)
+        self.relu1 = layer.ReLU()
+        self.add = layer.Add()
+        self.relu2 = layer.ReLU()
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu1(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out = self.add(out, residual)
+        out = self.relu2(out)
+
+        return out
+
+
+class Bottleneck(layer.Layer):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(Bottleneck, self).__init__()
+        self.conv1 = layer.Conv2d(inplanes, planes, 1, bias=False)
+        self.bn1 = layer.BatchNorm2d(planes)
+        self.relu1 = layer.ReLU()
+        self.conv2 = layer.Conv2d(planes,
+                                  planes,
+                                  3,
+                                  stride=stride,
+                                  padding=1,
+                                  bias=False)
+        self.bn2 = layer.BatchNorm2d(planes)
+        self.relu2 = layer.ReLU()
+        self.conv3 = layer.Conv2d(planes,
+                                  planes * self.expansion,
+                                  1,
+                                  bias=False)
+        self.bn3 = layer.BatchNorm2d(planes * self.expansion)
+
+        self.add = layer.Add()
+        self.relu3 = layer.ReLU()
+
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu1(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.relu2(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out = self.add(out, residual)
+        out = self.relu3(out)
+
+        return out
+
+
+__all__ = [
+    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
+]
+
+
+class ResNet(model.Model):
+
+    def __init__(self, block, layers, num_classes=10, num_channels=3):
+        self.inplanes = 64
+        super(ResNet, self).__init__()
+        self.num_classes = num_classes
+        self.input_size = 224
+        self.dimension = 4
+        self.conv1 = layer.Conv2d(num_channels,
+                                  64,
+                                  7,
+                                  stride=2,
+                                  padding=3,
+                                  bias=False)
+        self.bn1 = layer.BatchNorm2d(64)
+        self.relu = layer.ReLU()
+        self.maxpool = layer.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1, layers1 = self._make_layer(block, 64, layers[0])
+        self.layer2, layers2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3, layers3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4, layers4 = self._make_layer(block, 512, layers[3], stride=2)
+        self.avgpool = layer.AvgPool2d(7, stride=1)
+        self.flatten = layer.Flatten()
+        self.fc = layer.Linear(num_classes)
+        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
+
+        self.register_layers(*layers1, *layers2, *layers3, *layers4)
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            conv = layer.Conv2d(
+                self.inplanes,
+                planes * block.expansion,
+                1,
+                stride=stride,
+                bias=False,
+            )
+            bn = layer.BatchNorm2d(planes * block.expansion)
+
+            def _downsample(x):
+                return bn(conv(x))
+
+            downsample = _downsample
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        def forward(x):
+            for layer in layers:
+                x = layer(x)
+            return x
+
+        return forward, layers
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        x = self.flatten(x)
+        x = self.fc(x)
+
+        return x
+
+    def train_one_batch(self, x, y, dist_option, spars):
+        out = self.forward(x)
+        loss = self.softmax_cross_entropy(out, y)
+
+        if dist_option == 'plain':
+            self.optimizer(loss)
+        elif dist_option == 'half':
+            self.optimizer.backward_and_update_half(loss)
+        elif dist_option == 'partialUpdate':
+            self.optimizer.backward_and_partial_update(loss)
+        elif dist_option == 'sparseTopK':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=True,
+                                                      spars=spars)
+        elif dist_option == 'sparseThreshold':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=False,
+                                                      spars=spars)
+        return out, loss
+
+    def set_optimizer(self, optimizer):
+        self.optimizer = optimizer
+
+
+def resnet18(pretrained=False, **kwargs):
+    """Constructs a ResNet-18 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+    
+    Returns:
+        The created ResNet-18 model.
+    """
+    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
+
+    return model
+
+
+def resnet34(pretrained=False, **kwargs):
+    """Constructs a ResNet-34 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-34 model.
+    """
+    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
+
+    return model
+
+
+def resnet50(pretrained=False, **kwargs):
+    """Constructs a ResNet-50 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-50 model.
+    """
+    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
+
+    return model
+
+
+def resnet101(pretrained=False, **kwargs):
+    """Constructs a ResNet-101 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-101 model.
+    """
+    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
+
+    return model
+
+
+def resnet152(pretrained=False, **kwargs):
+    """Constructs a ResNet-152 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-152 model.
+    """
+    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
+
+    return model
+
+
+__all__ = [
+    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
+]
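
Reference sketch (not part of the patch): how these constructors are driven by the training scripts added later in this PR. It reuses only calls that appear in `train_cnn.py` below (`device.create_cuda_gpu_on`, `opt.SGD`, `set_optimizer`, `compile`, and the `model(tx, ty, ...)` training call) and assumes a CUDA GPU is available; shapes and hyperparameters are illustrative.

```python
import numpy as np
from singa import device, opt, tensor
from model import resnet  # run from the example directory, as train_cnn.py does

dev = device.create_cuda_gpu_on(0)  # assumes one CUDA device
net = resnet.resnet18(num_channels=3, num_classes=10)

# Placeholder CIFAR-10-like batch, sized to the model's expected 224x224 input
tx = tensor.Tensor((8, 3, net.input_size, net.input_size), dev, tensor.float32)
ty = tensor.Tensor((8,), dev, tensor.int32)
tx.copy_from_numpy(
    np.random.rand(8, 3, net.input_size, net.input_size).astype(np.float32))
ty.copy_from_numpy(np.random.randint(0, 10, 8).astype(np.int32))

net.set_optimizer(opt.SGD(lr=0.005, momentum=0.9, weight_decay=1e-5))
net.compile([tx], is_train=True, use_graph=True, sequential=False)
net.train()
out, loss = net(tx, ty, 'plain', None)  # one training step via train_one_batch
```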
diff --git a/examples/cifar_distributed_cnn/model/xceptionnet.py b/examples/cifar_distributed_cnn/model/xceptionnet.py
new file mode 100644
index 0000000..34440ab
--- /dev/null
+++ b/examples/cifar_distributed_cnn/model/xceptionnet.py
@@ -0,0 +1,311 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+# the code is modified from
+# https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
+
+from singa import layer
+from singa import model
+
+
+class Block(layer.Layer):
+
+    def __init__(self,
+                 in_filters,
+                 out_filters,
+                 reps,
+                 strides=1,
+                 padding=0,
+                 start_with_relu=True,
+                 grow_first=True):
+        super(Block, self).__init__()
+
+        if out_filters != in_filters or strides != 1:
+            self.skip = layer.Conv2d(in_filters,
+                                     out_filters,
+                                     1,
+                                     stride=strides,
+                                     padding=padding,
+                                     bias=False)
+            self.skipbn = layer.BatchNorm2d(out_filters)
+        else:
+            self.skip = None
+
+        self.layers = []
+
+        filters = in_filters
+        if grow_first:
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(in_filters,
+                                      out_filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(out_filters))
+            filters = out_filters
+
+        for i in range(reps - 1):
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(filters,
+                                      filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(filters))
+
+        if not grow_first:
+            self.layers.append(layer.ReLU())
+            self.layers.append(
+                layer.SeparableConv2d(in_filters,
+                                      out_filters,
+                                      3,
+                                      stride=1,
+                                      padding=1,
+                                      bias=False))
+            self.layers.append(layer.BatchNorm2d(out_filters))
+
+        if not start_with_relu:
+            self.layers = self.layers[1:]
+        else:
+            self.layers[0] = layer.ReLU()
+
+        if strides != 1:
+            self.layers.append(layer.MaxPool2d(3, strides, padding + 1))
+
+        self.register_layers(*self.layers)
+
+        self.add = layer.Add()
+
+    def forward(self, x):
+        y = self.layers[0](x)
+        for layer in self.layers[1:]:
+            if isinstance(y, tuple):
+                y = y[0]
+            y = layer(y)
+
+        if self.skip is not None:
+            skip = self.skip(x)
+            skip = self.skipbn(skip)
+        else:
+            skip = x
+        y = self.add(y, skip)
+        return y
+
+
+class Xception(model.Model):
+    """
+    Xception optimized for the ImageNet dataset, as specified in
+    https://arxiv.org/pdf/1610.02357.pdf
+    """
+
+    def __init__(self, num_classes=10, num_channels=3):
+        """ Constructor
+        Args:
+            num_classes: number of classes
+        """
+        super(Xception, self).__init__()
+        self.num_classes = num_classes
+        self.input_size = 299
+        self.dimension = 4
+
+        self.conv1 = layer.Conv2d(num_channels, 32, 3, 2, 0, bias=False)
+        self.bn1 = layer.BatchNorm2d(32)
+        self.relu1 = layer.ReLU()
+
+        self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
+        self.bn2 = layer.BatchNorm2d(64)
+        self.relu2 = layer.ReLU()
+        # do relu here
+
+        self.block1 = Block(64,
+                            128,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=False,
+                            grow_first=True)
+        self.block2 = Block(128,
+                            256,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block3 = Block(256,
+                            728,
+                            2,
+                            2,
+                            padding=0,
+                            start_with_relu=True,
+                            grow_first=True)
+
+        self.block4 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block5 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block6 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block7 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+
+        self.block8 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block9 = Block(728,
+                            728,
+                            3,
+                            1,
+                            start_with_relu=True,
+                            grow_first=True)
+        self.block10 = Block(728,
+                             728,
+                             3,
+                             1,
+                             start_with_relu=True,
+                             grow_first=True)
+        self.block11 = Block(728,
+                             728,
+                             3,
+                             1,
+                             start_with_relu=True,
+                             grow_first=True)
+
+        self.block12 = Block(728,
+                             1024,
+                             2,
+                             2,
+                             start_with_relu=True,
+                             grow_first=False)
+
+        self.conv3 = layer.SeparableConv2d(1024, 1536, 3, 1, 1)
+        self.bn3 = layer.BatchNorm2d(1536)
+        self.relu3 = layer.ReLU()
+
+        # do relu here
+        self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
+        self.bn4 = layer.BatchNorm2d(2048)
+
+        self.relu4 = layer.ReLU()
+        self.globalpooling = layer.MaxPool2d(10, 1)
+        self.flatten = layer.Flatten()
+        self.fc = layer.Linear(num_classes)
+
+        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
+
+    def features(self, input):
+        x = self.conv1(input)
+        x = self.bn1(x)
+        x = self.relu1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.relu2(x)
+
+        x = self.block1(x)
+        x = self.block2(x)
+        x = self.block3(x)
+        x = self.block4(x)
+        x = self.block5(x)
+        x = self.block6(x)
+        x = self.block7(x)
+        x = self.block8(x)
+        x = self.block9(x)
+        x = self.block10(x)
+        x = self.block11(x)
+        x = self.block12(x)
+
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.relu3(x)
+
+        x = self.conv4(x)
+        x = self.bn4(x)
+        return x
+
+    def logits(self, features):
+        x = self.relu4(features)
+        x = self.globalpooling(x)
+        x = self.flatten(x)
+        x = self.fc(x)
+        return x
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.logits(x)
+        return x
+
+    def train_one_batch(self, x, y, dist_option, spars):
+        out = self.forward(x)
+        loss = self.softmax_cross_entropy(out, y)
+        if dist_option == 'plain':
+            self.optimizer(loss)
+        elif dist_option == 'half':
+            self.optimizer.backward_and_update_half(loss)
+        elif dist_option == 'partialUpdate':
+            self.optimizer.backward_and_partial_update(loss)
+        elif dist_option == 'sparseTopK':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=True,
+                                                      spars=spars)
+        elif dist_option == 'sparseThreshold':
+            self.optimizer.backward_and_sparse_update(loss,
+                                                      topK=False,
+                                                      spars=spars)
+        return out, loss
+
+    def set_optimizer(self, optimizer):
+        self.optimizer = optimizer
+
+
+def create_model(pretrained=False, **kwargs):
+    """Constructs a Xceptionnet model.
+
+    Args:
+        pretrained (bool): If True, returns a pre-trained model.
+
+    Returns:
+        The created Xceptionnet model.
+    """
+    model = Xception(**kwargs)
+
+    return model
+
+
+__all__ = ['Xception', 'create_model']
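
For orientation (not part of the patch): `train_cnn.py` below instantiates this model through `create_model(num_channels=..., num_classes=...)` and reads the `input_size` and `dimension` attributes to size the input tensor, which is why 32x32 CIFAR batches are passed through `resize_dataset` before being fed in. A minimal sketch:

```python
from model import xceptionnet  # run from the example directory

net = xceptionnet.create_model(num_channels=3, num_classes=10)
assert net.input_size == 299  # CIFAR's 32x32 images are resized to 299x299 by resize_dataset
assert net.dimension == 4     # run() builds a 4-D (NCHW) input tensor for this model
```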
diff --git a/examples/cifar_distributed_cnn/train_cnn.py b/examples/cifar_distributed_cnn/train_cnn.py
new file mode 100644
index 0000000..26e0403
--- /dev/null
+++ b/examples/cifar_distributed_cnn/train_cnn.py
@@ -0,0 +1,319 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from singa import singa_wrap as singa
+from singa import device
+from singa import tensor
+from singa import opt
+# import opt
+import numpy as np
+import time
+import argparse
+from PIL import Image
+
+import warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+np_dtype = {"float32": np.float32}
+
+singa_dtype = {"float32": tensor.float32}
+
+
+# Data augmentation
+def augmentation(x, batch_size):
+    xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
+    for data_num in range(0, batch_size):
+        offset = np.random.randint(8, size=2)
+        x[data_num, :, :, :] = xpad[data_num, :,
+                                    offset[0]:offset[0] + x.shape[2],
+                                    offset[1]:offset[1] + x.shape[2]]
+        if_flip = np.random.randint(2)
+        if (if_flip):
+            x[data_num, :, :, :] = x[data_num, :, :, ::-1]
+    return x
+
+
+# Calculate accuracy
+def accuracy(pred, target):
+    # y is network output to be compared with ground truth (int)
+    y = np.argmax(pred, axis=1)
+    a = y == target
+    correct = np.array(a, "int").sum()
+    return correct
+
+
+# Data partition according to the rank
+def partition(global_rank, world_size, train_x, train_y, val_x, val_y):
+    # Partition training data
+    data_per_rank = train_x.shape[0] // world_size
+    idx_start = global_rank * data_per_rank
+    idx_end = (global_rank + 1) * data_per_rank
+    train_x = train_x[idx_start:idx_end]
+    train_y = train_y[idx_start:idx_end]
+
+    # Partition evaluation data
+    data_per_rank = val_x.shape[0] // world_size
+    idx_start = global_rank * data_per_rank
+    idx_end = (global_rank + 1) * data_per_rank
+    val_x = val_x[idx_start:idx_end]
+    val_y = val_y[idx_start:idx_end]
+    return train_x, train_y, val_x, val_y
+
+
+# Function to all reduce NUMPY accuracy and loss from multiple devices
+def reduce_variable(variable, dist_opt, reducer):
+    reducer.copy_from_numpy(variable)
+    dist_opt.all_reduce(reducer.data)
+    dist_opt.wait()
+    output = tensor.to_numpy(reducer)
+    return output
+
+
+def resize_dataset(x, image_size):
+    num_data = x.shape[0]
+    dim = x.shape[1]
+    X = np.zeros(shape=(num_data, dim, image_size, image_size),
+                 dtype=np.float32)
+    for n in range(0, num_data):
+        for d in range(0, dim):
+            X[n, d, :, :] = np.array(Image.fromarray(x[n, d, :, :]).resize(
+                (image_size, image_size), Image.BILINEAR),
+                                     dtype=np.float32)
+    return X
+
+
+def run(global_rank,
+        world_size,
+        local_rank,
+        max_epoch,
+        batch_size,
+        model,
+        data,
+        sgd,
+        graph,
+        verbosity,
+        dist_option='plain',
+        spars=None,
+        precision='float32'):
+    dev = device.create_cuda_gpu_on(local_rank)
+    dev.SetRandSeed(0)
+    np.random.seed(0)
+
+    if data == 'cifar10':
+        from data import cifar10
+        train_x, train_y, val_x, val_y = cifar10.load()
+    elif data == 'cifar100':
+        from data import cifar100
+        train_x, train_y, val_x, val_y = cifar100.load()
+    elif data == 'mnist':
+        from data import mnist
+        train_x, train_y, val_x, val_y = mnist.load()
+
+
+    num_channels = train_x.shape[1]
+    image_size = train_x.shape[2]
+    data_size = np.prod(train_x.shape[1:train_x.ndim]).item()
+    num_classes = (np.max(train_y) + 1).item()
+
+    if model == 'resnet':
+        from model import resnet
+        model = resnet.resnet50(num_channels=num_channels,
+                                num_classes=num_classes)
+    elif model == 'xceptionnet':
+        from model import xceptionnet
+        model = xceptionnet.create_model(num_channels=num_channels,
+                                         num_classes=num_classes)
+    elif model == 'cnn':
+        from model import cnn
+        model = cnn.create_model(num_channels=num_channels,
+                                 num_classes=num_classes)
+    elif model == 'alexnet':
+        from model import alexnet
+        model = alexnet.create_model(num_channels=num_channels,
+                                     num_classes=num_classes)
+    elif model == 'mlp':
+        import os, sys, inspect
+        current = os.path.dirname(
+            os.path.abspath(inspect.getfile(inspect.currentframe())))
+        parent = os.path.dirname(current)
+        sys.path.insert(0, parent)
+        from mlp import model
+        model = model.create_model(data_size=data_size,
+                                    num_classes=num_classes)
+
+    # For distributed training, sequential has better performance
+    if hasattr(sgd, "communicator"):
+        DIST = True
+        sequential = True
+    else:
+        DIST = False
+        sequential = False
+
+    if DIST:
+        train_x, train_y, val_x, val_y = partition(global_rank, world_size,
+                                                   train_x, train_y, val_x,
+                                                   val_y)
+
+    if model.dimension == 4:
+        tx = tensor.Tensor(
+            (batch_size, num_channels, model.input_size, model.input_size), dev,
+            singa_dtype[precision])
+    elif model.dimension == 2:
+        tx = tensor.Tensor((batch_size, data_size), dev, singa_dtype[precision])
+        train_x = np.reshape(train_x, (train_x.shape[0], -1))
+        val_x = np.reshape(val_x, (val_x.shape[0], -1))
+
+    ty = tensor.Tensor((batch_size,), dev, tensor.int32)
+    num_train_batch = train_x.shape[0] // batch_size
+    idx = np.arange(train_x.shape[0], dtype=np.int32)
+
+    # Attach model to graph
+    model.set_optimizer(sgd)
+    model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
+    dev.SetVerbosity(verbosity)
+    model.train()
+
+    b = 0
+    x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
+    if model.dimension == 4:
+        x = augmentation(x, batch_size)
+        if (image_size != model.input_size):
+            x = resize_dataset(x, model.input_size)
+    x = x.astype(np_dtype[precision])
+    y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
+
+    # Copy the patch data into input tensors
+    tx.copy_from_numpy(x)
+    ty.copy_from_numpy(y)
+
+    niters = 100
+
+    # check dataset shape correctness
+    if global_rank == 0:
+        print("Check the shape of dataset:")
+        print(tx.shape)
+        print(ty.shape)
+
+    # Training and evaluation loop
+    # for epoch in range(max_epoch):
+    #     start_time = time.time()
+    #     np.random.shuffle(idx)
+    #
+    #     if global_rank == 0:
+    #         print('Starting Epoch %d:' % (epoch))
+
+    # Training phase
+    dev.Sync()
+    start = time.time()
+
+    for b in range(niters):
+        # Generate the patch data in this iteration
+        # Train the model
+        model(tx, ty, dist_option, spars)
+
+    dev.Sync()
+    end = time.time()
+    titer = (end - start) / float(niters)
+    throughput = float(niters * batch_size * world_size) / (end - start)
+
+    if global_rank == 0:
+        print("Throughput = {} per second".format(throughput), flush=True)
+        print("TotalTime={}".format(end - start), flush=True)
+        print("Total={}".format(titer), flush=True)
+        print("world_size={}".format(world_size), flush=True)
+        print("batch_size={}".format(batch_size), flush=True)
+        print("model.input_size={}".format(model.input_size), flush=True)
+        print("num_channels={}".format(num_channels), flush=True)
+        print("num_classes={}".format(num_classes), flush=True)
+        print("data_size={}".format(data_size), flush=True)
+        print("image_size={}".format(image_size), flush=True)
+
+    dev.PrintTimeProfiling()
+
+
+if __name__ == '__main__':
+    # Use argparse to get command config: max_epoch, model, data, etc., for single gpu training
+    parser = argparse.ArgumentParser(
+        description='Training using the autograd and graph.')
+    parser.add_argument(
+        'model',
+        choices=['cnn', 'resnet', 'xceptionnet', 'mlp', 'alexnet'],
+        default='cnn')
+    parser.add_argument('data',
+                        choices=['mnist', 'cifar10', 'cifar100'],
+                        default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
+    parser.add_argument('-m',
+                        '--max-epoch',
+                        default=10,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    parser.add_argument('-b',
+                        '--batch-size',
+                        default=32,
+                        type=int,
+                        help='batch size',
+                        dest='batch_size')
+    parser.add_argument('-l',
+                        '--learning-rate',
+                        default=0.005,
+                        type=float,
+                        help='initial learning rate',
+                        dest='lr')
+    # Determine which gpu to use
+    parser.add_argument('-i',
+                        '--device-id',
+                        default=0,
+                        type=int,
+                        help='which GPU to use',
+                        dest='device_id')
+    parser.add_argument('-g',
+                        '--disable-graph',
+                        default=True,
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('-v',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
+    run(0,
+        1,
+        args.device_id,
+        args.max_epoch,
+        args.batch_size,
+        args.model,
+        args.data,
+        sgd,
+        args.graph,
+        args.verbosity,
+        precision=args.precision)
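
Editor's note (not part of the patch): unlike `examples/cnn/train_cnn.py`, this variant caches a single batch and times `niters = 100` graph executions over it (the epoch loop above is commented out), so it reports throughput rather than accuracy. A hedged sketch of what a real per-epoch loop would look like here, using only names already defined in `run()` above:

```python
# Sketch only: replace the fixed 100-iteration benchmark with per-epoch training.
for epoch in range(max_epoch):
    np.random.shuffle(idx)
    for b in range(num_train_batch):
        x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
        if model.dimension == 4:
            x = augmentation(x, batch_size)
            if image_size != model.input_size:
                x = resize_dataset(x, model.input_size)
        x = x.astype(np_dtype[precision])
        y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
        tx.copy_from_numpy(x)
        ty.copy_from_numpy(y)
        out, loss = model(tx, ty, dist_option, spars)
```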
diff --git a/examples/cifar_distributed_cnn/train_mpi.py b/examples/cifar_distributed_cnn/train_mpi.py
new file mode 100644
index 0000000..dc9151b
--- /dev/null
+++ b/examples/cifar_distributed_cnn/train_mpi.py
@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+from singa import singa_wrap as singa
+from singa import opt
+# import opt
+from singa import tensor
+import argparse
+import train_cnn
+import warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+singa_dtype = {"float32": tensor.float32}
+
+if __name__ == '__main__':
+    # Use argparse to get command config: max_epoch, model, data, etc., for single gpu training
+    parser = argparse.ArgumentParser(
+        description='Training using the autograd and graph.')
+    parser.add_argument('model',
+                        choices=['cnn', 'resnet', 'xceptionnet', 'mlp'],
+                        default='cnn')
+    parser.add_argument('data', choices=['mnist', 'cifar10', 'cifar100'], default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
+    parser.add_argument('-m',
+                        '--max-epoch',
+                        default=10,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    parser.add_argument('-b',
+                        '--batch-size',
+                        default=32,
+                        type=int,
+                        help='batch size',
+                        dest='batch_size')
+    parser.add_argument('-l',
+                        '--learning-rate',
+                        default=0.005,
+                        type=float,
+                        help='initial learning rate',
+                        dest='lr')
+    parser.add_argument('-d',
+                        '--dist-option',
+                        default='plain',
+                        choices=['plain', 'half', 'partialUpdate', 'sparseTopK', 'sparseThreshold'],
+                        help='distributed training options',
+                        dest='dist_option')  # currently partialUpdate supports graph=False only
+    parser.add_argument('-s',
+                        '--sparsification',
+                        default=0.05,
+                        type=float,
+                        help='the sparsity parameter used for sparsification, between 0 and 1',
+                        dest='spars')
+    parser.add_argument('-g',
+                        '--disable-graph',
+                        default=True,
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('-v',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
+    sgd = opt.DistOpt(sgd)
+
+    train_cnn.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
+              args.batch_size, args.model, args.data, sgd, args.graph,
+              args.verbosity, args.dist_option, args.spars, args.precision)
diff --git a/examples/cifar_distributed_cnn/train_multiprocess.py b/examples/cifar_distributed_cnn/train_multiprocess.py
new file mode 100644
index 0000000..5a09adc
--- /dev/null
+++ b/examples/cifar_distributed_cnn/train_multiprocess.py
@@ -0,0 +1,115 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+from singa import singa_wrap as singa
+from singa import opt
+# import opt
+from singa import tensor
+import argparse
+import train_cnn
+import multiprocessing
+
+import warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
+
+def run(args, local_rank, world_size, nccl_id):
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
+    sgd = opt.DistOpt(sgd, nccl_id=nccl_id, local_rank=local_rank, world_size=world_size)
+    train_cnn.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
+              args.batch_size, args.model, args.data, sgd, args.graph,
+              args.verbosity, args.dist_option, args.spars, args.precision)
+
+
+if __name__ == '__main__':
+    # Use argparse to get command config: max_epoch, model, data, etc., for single gpu training
+    parser = argparse.ArgumentParser(
+        description='Training using the autograd and graph.')
+    parser.add_argument('model',
+                        choices=['resnet', 'xceptionnet', 'cnn', 'mlp'],
+                        default='cnn')
+    parser.add_argument('data', choices=['cifar10', 'cifar100', 'mnist'], default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
+    parser.add_argument('-m',
+                        '--max-epoch',
+                        default=10,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    parser.add_argument('-b',
+                        '--batch-size',
+                        default=32,
+                        type=int,
+                        help='batch size',
+                        dest='batch_size')
+    parser.add_argument('-l',
+                        '--learning-rate',
+                        default=0.005,
+                        type=float,
+                        help='initial learning rate',
+                        dest='lr')
+    parser.add_argument('-w',
+                        '--world-size',
+                        default=2,
+                        type=int,
+                        help='number of gpus to be used',
+                        dest='world_size')
+    parser.add_argument('-d',
+                        '--dist-option',
+                        default='plain',
+                        choices=['plain', 'half', 'partialUpdate', 'sparseTopK', 'sparseThreshold'],
+                        help='distributed training options',
+                        dest='dist_option')  # currently partialUpdate supports graph=False only
+    parser.add_argument('-s',
+                        '--sparsification',
+                        default=0.05,
+                        type=float,
+                        help='the sparsity parameter used for sparsification, between 0 and 1',
+                        dest='spars')
+    parser.add_argument('-g',
+                        '--disable-graph',
+                        default=True,
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('-v',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    # Generate a NCCL ID to be used for collective communication
+    nccl_id = singa.NcclIdHolder()
+
+    process = []
+    for local_rank in range(0, args.world_size):
+        process.append(
+            multiprocessing.Process(target=run,
+                                    args=(args, local_rank, args.world_size, nccl_id)))
+
+    for p in process:
+        p.start()
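
Editor's note (not part of the patch): the launcher above starts one worker process per GPU and returns immediately; the non-daemon workers keep running until training finishes. If the launcher should block until all workers are done, they can be joined with the standard `multiprocessing` API, e.g. appended after the start loop:

```python
    # Optional: wait for every worker to finish before the launcher exits.
    for p in process:
        p.join()
```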
diff --git a/examples/cnn/autograd/mnist_cnn.py b/examples/cnn/autograd/mnist_cnn.py
index ff2e1dc..16752ce 100644
--- a/examples/cnn/autograd/mnist_cnn.py
+++ b/examples/cnn/autograd/mnist_cnn.py
@@ -126,7 +126,7 @@
     return np.array(a, "int").sum()
 
 
-# Function to all reduce NUMPY Accuracy and Loss from Multiple Devices
+# Function to all reduce NUMPY accuracy and loss from multiple devices
 def reduce_variable(variable, dist_opt, reducer):
     reducer.copy_from_numpy(variable)
     dist_opt.all_reduce(reducer.data)
@@ -171,7 +171,7 @@
                     topK=False,
                     corr=True):
 
-    # Define the hypermeters good for the mnist_cnn
+    # Define the hyperparameters for the mnist_cnn
     max_epoch = 10
     batch_size = 64
     sgd = opt.SGD(lr=0.005, momentum=0.9, weight_decay=1e-5)
@@ -188,12 +188,13 @@
     test_x = test_x / 255
 
     if DIST:
-        # For Distributed GPU Training
+        # For distributed GPU training
         sgd = opt.DistOpt(sgd,
                           nccl_id=nccl_id,
                           local_rank=local_rank,
                           world_size=world_size)
         dev = device.create_cuda_gpu_on(sgd.local_rank)
+
         # Dataset partition for distributed training
         train_x, train_y = data_partition(train_x, train_y, sgd.global_rank,
                                           sgd.world_size)
@@ -201,11 +202,11 @@
                                         sgd.world_size)
         world_size = sgd.world_size
     else:
-        # For Single GPU
+        # For single GPU
         dev = device.create_cuda_gpu()
         world_size = 1
 
-    # create model
+    # Create model
     model = CNN()
 
     tx = tensor.Tensor((batch_size, 1, IMG_SIZE, IMG_SIZE), dev, tensor.float32)
@@ -227,7 +228,7 @@
         for p, g in autograd.backward(loss):
             synchronize(p, sgd)
 
-    # Training and Evaulation Loop
+    # Training and evaluation loop
     for epoch in range(max_epoch):
         start_time = time.time()
         np.random.shuffle(idx)
@@ -235,7 +236,7 @@
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Starting Epoch %d:' % (epoch))
 
-        # Training Phase
+        # Training phase
         autograd.training = True
         train_correct = np.zeros(shape=[1], dtype=np.float32)
         test_correct = np.zeros(shape=[1], dtype=np.float32)
@@ -263,19 +264,19 @@
                 sgd(loss)
 
         if DIST:
-            # Reduce the Evaluation Accuracy and Loss from Multiple Devices
+            # Reduce the evaluation accuracy and loss from multiple devices
             reducer = tensor.Tensor((1,), dev, tensor.float32)
             train_correct = reduce_variable(train_correct, sgd, reducer)
             train_loss = reduce_variable(train_loss, sgd, reducer)
 
-        # Output the Training Loss and Accuracy
+        # Output the training loss and accuracy
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Training loss = %f, training accuracy = %f' %
                   (train_loss, train_correct /
                    (num_train_batch * batch_size * world_size)),
                   flush=True)
 
-        # Evaluation Phase
+        # Evaluation phase
         autograd.training = False
         for b in range(num_test_batch):
             x = test_x[b * batch_size:(b + 1) * batch_size]
@@ -286,10 +287,10 @@
             test_correct += accuracy(tensor.to_numpy(out_test), y)
 
         if DIST:
-            # Reduce the Evaulation Accuracy from Multiple Devices
+            # Reduce the evaluation accuracy from multiple devices
             test_correct = reduce_variable(test_correct, sgd, reducer)
 
-        # Output the Evaluation Accuracy
+        # Output the evaluation accuracy
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Evaluation accuracy = %f, Elapsed Time = %fs' %
                   (test_correct / (num_test_batch * batch_size * world_size),
diff --git a/examples/cnn/autograd/mnist_multiprocess.py b/examples/cnn/autograd/mnist_multiprocess.py
index f5c2763..f51344f 100644
--- a/examples/cnn/autograd/mnist_multiprocess.py
+++ b/examples/cnn/autograd/mnist_multiprocess.py
@@ -26,7 +26,7 @@
     # Generate a NCCL ID to be used for collective communication
     nccl_id = singa.NcclIdHolder()
 
-    # number of GPUs to be used
+    # Number of GPUs to be used
     world_size = int(sys.argv[1])
 
     process = []
diff --git a/examples/cnn/autograd/resnet_cifar10.py b/examples/cnn/autograd/resnet_cifar10.py
index 14005bc..3c6876f 100644
--- a/examples/cnn/autograd/resnet_cifar10.py
+++ b/examples/cnn/autograd/resnet_cifar10.py
@@ -129,7 +129,7 @@
     return categorical
 
 
-# Function to all reduce NUMPY Accuracy and Loss from Multiple Devices
+# Function to all reduce NUMPY accuracy and loss from multiple devices
 def reduce_variable(variable, dist_opt, reducer):
     reducer.copy_from_numpy(variable)
     dist_opt.all_reduce(reducer.data)
@@ -159,7 +159,7 @@
                   nccl_id=None,
                   partial_update=False):
 
-    # Define the hypermeters good for the train_cifar10
+    # Define the hyperparameters for the train_cifar10
     sgd = opt.SGD(lr=0.005, momentum=0.9, weight_decay=1e-5)
     max_epoch = 5
     batch_size = 32
@@ -171,12 +171,13 @@
     num_classes = 10
 
     if DIST:
-        # For Distributed GPU Training
+        # For distributed GPU training
         sgd = opt.DistOpt(sgd,
                           nccl_id=nccl_id,
                           local_rank=local_rank,
                           world_size=world_size)
         dev = device.create_cuda_gpu_on(sgd.local_rank)
+
         # Dataset partition for distributed training
         train_x, train_y = data_partition(train_x, train_y, sgd.global_rank,
                                           sgd.world_size)
@@ -184,7 +185,7 @@
                                         sgd.world_size)
         world_size = sgd.world_size
     else:
-        # For Single GPU
+        # For single GPU
         dev = device.create_cuda_gpu()
         world_size = 1
 
@@ -219,7 +220,7 @@
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Starting Epoch %d:' % (epoch))
 
-        #Training Phase
+        # Training phase
         autograd.training = True
         train_correct = np.zeros(shape=[1], dtype=np.float32)
         test_correct = np.zeros(shape=[1], dtype=np.float32)
@@ -244,12 +245,12 @@
                 sgd.backward_and_partial_update(loss)
 
         if DIST:
-            # Reduce the Evaluation Accuracy and Loss from Multiple Devices
+            # Reduce the evaluation accuracy and loss from multiple devices
             reducer = tensor.Tensor((1,), dev, tensor.float32)
             train_correct = reduce_variable(train_correct, sgd, reducer)
             train_loss = reduce_variable(train_loss, sgd, reducer)
 
-        # Output the Training Loss and Accuracy
+        # Output the training loss and accuracy
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Training loss = %f, training accuracy = %f' %
                   (train_loss, train_correct /
@@ -257,11 +258,11 @@
                   flush=True)
 
         if partial_update:
-            # sychronize parameters before evaluation phase
+            # Synchronize parameters before evaluation phase
             for p in param:
                 synchronize(p, sgd)
 
-        #Evaulation Phase
+        # Evaluation phase
         autograd.training = False
         for b in range(num_test_batch):
             x = test_x[b * batch_size:(b + 1) * batch_size]
@@ -274,10 +275,10 @@
                                      to_categorical(y, num_classes))
 
         if DIST:
-            # Reduce the Evaulation Accuracy from Multiple Devices
+            # Reduce the evaluation accuracy from multiple devices
             test_correct = reduce_variable(test_correct, sgd, reducer)
 
-        # Output the Evaluation Accuracy
+        # Output the evaluation accuracy
         if ((DIST == False) or (sgd.global_rank == 0)):
             print('Evaluation accuracy = %f, Elapsed Time = %fs' %
                   (test_correct / (num_test_batch * batch_size * world_size),
diff --git a/examples/cnn/autograd/sparsification_mnist.py b/examples/cnn/autograd/sparsification_mnist.py
index cc9b585..315605a 100644
--- a/examples/cnn/autograd/sparsification_mnist.py
+++ b/examples/cnn/autograd/sparsification_mnist.py
@@ -26,7 +26,7 @@
     # Generate a NCCL ID to be used for collective communication
     nccl_id = singa.NcclIdHolder()
 
-    # number of GPUs to be used
+    # Number of GPUs to be used
     world_size = int(sys.argv[1])
 
     # Use sparsification with parameters
diff --git a/examples/cnn/benchmark.py b/examples/cnn/benchmark.py
index a182139..9f69fee 100644
--- a/examples/cnn/benchmark.py
+++ b/examples/cnn/benchmark.py
@@ -32,7 +32,7 @@
 
 def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
 
-    # Define the hypermeters good for the train_resnet
+    # Define the hyperparameters for the train_resnet
     niters = 100
     batch_size = 32
     sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
@@ -64,7 +64,7 @@
     dev.SetVerbosity(verbosity)
     dev.SetSkipIteration(5)
 
-    # construct the model
+    # Construct the model
     from model import resnet
     model = resnet.resnet50(num_channels=3, num_classes=1000)
 
@@ -72,7 +72,7 @@
     model.set_optimizer(sgd)
     model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
 
-    # train model
+    # Train model
     dev.Sync()
     start = time.time()
     with trange(niters) as t:
@@ -84,7 +84,7 @@
     titer = (end - start) / float(niters)
     throughput = float(niters * batch_size * world_size) / (end - start)
     if global_rank == 0:
-        print("Throughput = {} per second".format(throughput), flush=True)
+        print("\nThroughput = {} per second".format(throughput), flush=True)
         print("TotalTime={}".format(end - start), flush=True)
         print("Total={}".format(titer), flush=True)
         dev.PrintTimeProfiling()
diff --git a/examples/cnn/model/alexnet.py b/examples/cnn/model/alexnet.py
index 988596e..cad7b1e 100644
--- a/examples/cnn/model/alexnet.py
+++ b/examples/cnn/model/alexnet.py
@@ -81,9 +81,9 @@
         out = self.forward(x)
         loss = self.softmax_cross_entropy(out, y)
 
-        if dist_option == 'fp32':
+        if dist_option == 'plain':
             self.optimizer(loss)
-        elif dist_option == 'fp16':
+        elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
             self.optimizer.backward_and_partial_update(loss)
@@ -105,7 +105,11 @@
     """Constructs a AlexNet model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained
+        pretrained (bool): If True, returns a pre-trained model.
+    
+    Returns:
+        The created AlexNet model.
+    
     """
     model = AlexNet(**kwargs)
 
diff --git a/examples/cnn/model/cnn.py b/examples/cnn/model/cnn.py
index 28ecd6c..3877e83 100644
--- a/examples/cnn/model/cnn.py
+++ b/examples/cnn/model/cnn.py
@@ -53,9 +53,9 @@
         out = self.forward(x)
         loss = self.softmax_cross_entropy(out, y)
 
-        if dist_option == 'fp32':
+        if dist_option == 'plain':
             self.optimizer(loss)
-        elif dist_option == 'fp16':
+        elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
             self.optimizer.backward_and_partial_update(loss)
@@ -77,7 +77,10 @@
     """Constructs a CNN model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained
+        pretrained (bool): If True, returns a pre-trained model.
+
+    Returns:
+        The created CNN model.
     """
     model = CNN(**kwargs)
 
diff --git a/examples/cnn/model/resnet.py b/examples/cnn/model/resnet.py
index 2b2a7fd..28b5f99 100644
--- a/examples/cnn/model/resnet.py
+++ b/examples/cnn/model/resnet.py
@@ -205,9 +205,9 @@
         out = self.forward(x)
         loss = self.softmax_cross_entropy(out, y)
 
-        if dist_option == 'fp32':
+        if dist_option == 'plain':
             self.optimizer(loss)
-        elif dist_option == 'fp16':
+        elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
             self.optimizer.backward_and_partial_update(loss)
@@ -229,7 +229,10 @@
     """Constructs a ResNet-18 model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+    
+    Returns:
+        The created ResNet-18 model.
     """
     model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
 
@@ -240,7 +243,10 @@
     """Constructs a ResNet-34 model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-34 model.
     """
     model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
 
@@ -251,7 +257,10 @@
     """Constructs a ResNet-50 model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-50 model.
     """
     model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
 
@@ -262,7 +271,10 @@
     """Constructs a ResNet-101 model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-101 model.
     """
     model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
 
@@ -273,7 +285,10 @@
     """Constructs a ResNet-152 model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        pretrained (bool): If True, returns a model pre-trained on ImageNet.
+
+    Returns:
+        The created ResNet-152 model.
     """
     model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
 
diff --git a/examples/cnn/model/xceptionnet.py b/examples/cnn/model/xceptionnet.py
index 524e3f6..34440ab 100644
--- a/examples/cnn/model/xceptionnet.py
+++ b/examples/cnn/model/xceptionnet.py
@@ -274,9 +274,9 @@
     def train_one_batch(self, x, y, dist_option, spars):
         out = self.forward(x)
         loss = self.softmax_cross_entropy(out, y)
-        if dist_option == 'fp32':
+        if dist_option == 'plain':
             self.optimizer(loss)
-        elif dist_option == 'fp16':
+        elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
             self.optimizer.backward_and_partial_update(loss)
@@ -298,7 +298,10 @@
     """Constructs a Xceptionnet model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained
+        pretrained (bool): If True, returns a pre-trained model.
+
+    Returns:
+        The created Xceptionnet model.
     """
     model = Xception(**kwargs)
 
diff --git a/examples/cnn/train_cnn.py b/examples/cnn/train_cnn.py
index e4fd962..bcccc51 100644
--- a/examples/cnn/train_cnn.py
+++ b/examples/cnn/train_cnn.py
@@ -26,8 +26,12 @@
 import argparse
 from PIL import Image
 
+np_dtype = {"float16": np.float16, "float32": np.float32}
 
-# Data Augmentation
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
+
+
+# Data augmentation
 def augmentation(x, batch_size):
     xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
     for data_num in range(0, batch_size):
@@ -41,13 +45,12 @@
     return x
 
 
-# Calculate Accuracy
+# Calculate accuracy
 def accuracy(pred, target):
     # y is network output to be compared with ground truth (int)
     y = np.argmax(pred, axis=1)
     a = y == target
     correct = np.array(a, "int").sum()
-    # print(correct)
     return correct
 
 
@@ -59,6 +62,7 @@
     idx_end = (global_rank + 1) * data_per_rank
     train_x = train_x[idx_start:idx_end]
     train_y = train_y[idx_start:idx_end]
+
     # Partition evaluation data
     data_per_rank = val_x.shape[0] // world_size
     idx_start = global_rank * data_per_rank
@@ -68,7 +72,7 @@
     return train_x, train_y, val_x, val_y
 
 
-# Function to all reduce NUMPY Accuracy and Loss from Multiple Devices
+# Function to all reduce NUMPY accuracy and loss from multiple devices
 def reduce_variable(variable, dist_opt, reducer):
     reducer.copy_from_numpy(variable)
     dist_opt.all_reduce(reducer.data)
@@ -100,8 +104,9 @@
         sgd,
         graph,
         verbosity,
-        dist_option='fp32',
-        spars=None):
+        dist_option='plain',
+        spars=None,
+        precision='float32'):
     dev = device.create_cuda_gpu_on(local_rank)
     dev.SetRandSeed(0)
     np.random.seed(0)
@@ -116,11 +121,11 @@
         from data import mnist
         train_x, train_y, val_x, val_y = mnist.load()
 
+
     num_channels = train_x.shape[1]
     image_size = train_x.shape[2]
     data_size = np.prod(train_x.shape[1:train_x.ndim]).item()
     num_classes = (np.max(train_y) + 1).item()
-    #print(num_classes)
 
     if model == 'resnet':
         from model import resnet
@@ -148,7 +153,7 @@
         model = model.create_model(data_size=data_size,
                                     num_classes=num_classes)
 
-    # For distributed training, sequential gives better performance
+    # For distributed training, sequential has better performance
     if hasattr(sgd, "communicator"):
         DIST = True
         sequential = True
@@ -171,9 +176,9 @@
     if model.dimension == 4:
         tx = tensor.Tensor(
             (batch_size, num_channels, model.input_size, model.input_size), dev,
-            tensor.float32)
+            singa_dtype[precision])
     elif model.dimension == 2:
-        tx = tensor.Tensor((batch_size, data_size), dev, tensor.float32)
+        tx = tensor.Tensor((batch_size, data_size), dev, singa_dtype[precision])
         np.reshape(train_x, (train_x.shape[0], -1))
         np.reshape(val_x, (val_x.shape[0], -1))
 
@@ -182,12 +187,12 @@
     num_val_batch = val_x.shape[0] // batch_size
     idx = np.arange(train_x.shape[0], dtype=np.int32)
 
-    # attached model to graph
+    # Attach model to graph
     model.set_optimizer(sgd)
     model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
     dev.SetVerbosity(verbosity)
 
-    # Training and Evaluation Loop
+    # Training and evaluation loop
     for epoch in range(max_epoch):
         start_time = time.time()
         np.random.shuffle(idx)
@@ -195,7 +200,7 @@
         if global_rank == 0:
             print('Starting Epoch %d:' % (epoch))
 
-        # Training Phase
+        # Training phase
         train_correct = np.zeros(shape=[1], dtype=np.float32)
         test_correct = np.zeros(shape=[1], dtype=np.float32)
         train_loss = np.zeros(shape=[1], dtype=np.float32)
@@ -208,6 +213,7 @@
                 x = augmentation(x, batch_size)
                 if (image_size != model.input_size):
                     x = resize_dataset(x, model.input_size)
+            x = x.astype(np_dtype[precision])
             y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
 
             # Copy the patch data into input tensors
@@ -220,7 +226,7 @@
             train_loss += tensor.to_numpy(loss)[0]
 
         if DIST:
-            # Reduce the Evaluation Accuracy and Loss from Multiple Devices
+            # Reduce the evaluation accuracy and loss from multiple devices
             reducer = tensor.Tensor((1,), dev, tensor.float32)
             train_correct = reduce_variable(train_correct, sgd, reducer)
             train_loss = reduce_variable(train_loss, sgd, reducer)
@@ -231,13 +237,14 @@
                    (num_train_batch * batch_size * world_size)),
                   flush=True)
 
-        # Evaluation Phase
+        # Evaluation phase
         model.eval()
         for b in range(num_val_batch):
             x = val_x[b * batch_size:(b + 1) * batch_size]
             if model.dimension == 4:
                 if (image_size != model.input_size):
                     x = resize_dataset(x, model.input_size)
+            x = x.astype(np_dtype[precision])
             y = val_y[b * batch_size:(b + 1) * batch_size]
             tx.copy_from_numpy(x)
             ty.copy_from_numpy(y)
@@ -245,10 +252,10 @@
             test_correct += accuracy(tensor.to_numpy(out_test), y)
 
         if DIST:
-            # Reduce the Evaulation Accuracy from Multiple Devices
+            # Reduce the evaluation accuracy from multiple devices
             test_correct = reduce_variable(test_correct, sgd, reducer)
 
-        # Output the Evaluation Accuracy
+        # Output the evaluation accuracy
         if global_rank == 0:
             print('Evaluation accuracy = %f, Elapsed Time = %fs' %
                   (test_correct / (num_val_batch * batch_size * world_size),
@@ -259,15 +266,20 @@
 
 
 if __name__ == '__main__':
-    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    # Use argparse to get command config: max_epoch, model, data, etc., for single GPU training
     parser = argparse.ArgumentParser(
         description='Training using the autograd and graph.')
-    parser.add_argument('model',
-                        choices=['resnet', 'xceptionnet', 'cnn', 'mlp', 'alexnet'],
-                        default='cnn')
+    parser.add_argument(
+        'model',
+        choices=['cnn', 'resnet', 'xceptionnet', 'mlp', 'alexnet'],
+        default='cnn')
     parser.add_argument('data',
-                        choices=['cifar10', 'cifar100', 'mnist'],
+                        choices=['mnist', 'cifar10', 'cifar100'],
                         default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
     parser.add_argument('-m',
                         '--max-epoch',
                         default=10,
@@ -286,7 +298,7 @@
                         type=float,
                         help='initial learning rate',
                         dest='lr')
-    # determine which gpu to use
+    # Determine which gpu to use
     parser.add_argument('-i',
                         '--device-id',
                         default=0,
@@ -308,6 +320,15 @@
 
     args = parser.parse_args()
 
-    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
-    run(0, 1, args.device_id, args.max_epoch, args.batch_size, args.model,
-        args.data, sgd, args.graph, args.verbosity)
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
+    run(0,
+        1,
+        args.device_id,
+        args.max_epoch,
+        args.batch_size,
+        args.model,
+        args.data,
+        sgd,
+        args.graph,
+        args.verbosity,
+        precision=args.precision)
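The new `-p` flag threads one precision string through the whole script: `np_dtype[precision]` casts the host-side NumPy batches before they are copied to the device, while `singa_dtype[precision]` sets the dtype of the input tensors and of the SGD optimizer. A small, self-contained sketch of the NumPy side of that pattern (the SINGA tensor side is exactly as in the diff above):

    import numpy as np

    # Mirrors the np_dtype dict added to train_cnn.py.
    np_dtype = {"float16": np.float16, "float32": np.float32}

    def prepare_batch(batch, precision="float32"):
        # Cast the host-side batch once per iteration so it matches the
        # dtype of the device-side input tensor.
        return batch.astype(np_dtype[precision])

    x = np.random.rand(4, 3, 32, 32)           # toy CIFAR-like batch
    print(prepare_batch(x, "float16").dtype)   # float16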
diff --git a/examples/cnn/train_mpi.py b/examples/cnn/train_mpi.py
index fd78b12..563d4b2 100644
--- a/examples/cnn/train_mpi.py
+++ b/examples/cnn/train_mpi.py
@@ -20,17 +20,24 @@
 
 from singa import singa_wrap as singa
 from singa import opt
+from singa import tensor
 import argparse
 import train_cnn
 
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
+
 if __name__ == '__main__':
-    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    # Use argparse to get command config: max_epoch, model, data, etc., for single GPU training
     parser = argparse.ArgumentParser(
         description='Training using the autograd and graph.')
     parser.add_argument('model',
-                        choices=['resnet', 'xceptionnet', 'cnn', 'mlp'],
+                        choices=['cnn', 'resnet', 'xceptionnet', 'mlp'],
                         default='cnn')
-    parser.add_argument('data', choices=['cifar10', 'cifar100', 'mnist'], default='mnist')
+    parser.add_argument('data', choices=['mnist', 'cifar10', 'cifar100'], default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
     parser.add_argument('-m',
                         '--max-epoch',
                         default=10,
@@ -51,8 +58,8 @@
                         dest='lr')
     parser.add_argument('-d',
                         '--dist-option',
-                        default='fp32',
-                        choices=['fp32','fp16','partialUpdate','sparseTopK','sparseThreshold'],
+                        default='plain',
+                        choices=['plain','half','partialUpdate','sparseTopK','sparseThreshold'],
                         help='distibuted training options',
                         dest='dist_option')  # currently partialUpdate support graph=False only
     parser.add_argument('-s',
@@ -76,9 +83,9 @@
 
     args = parser.parse_args()
 
-    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
     sgd = opt.DistOpt(sgd)
 
     train_cnn.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
               args.batch_size, args.model, args.data, sgd, args.graph,
-              args.verbosity, args.dist_option, args.spars)
+              args.verbosity, args.dist_option, args.spars, args.precision)
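As context for the renamed `--dist-option` choices above: 'plain' is the old 'fp32' path (a normal backward pass and update) and 'half' is the old 'fp16' path (an update with half-precision gradients); the remaining options are unchanged. A minimal, self-contained sketch of the same dispatch, with a hypothetical `ToyOptimizer` standing in for the SINGA distributed optimizer:

    # Sketch only: ToyOptimizer is hypothetical; the real scripts call the
    # SINGA optimizer methods shown in the model diffs above.
    class ToyOptimizer:
        def __call__(self, loss):
            print("plain update (old 'fp32') on loss", loss)

        def backward_and_update_half(self, loss):
            print("half-precision update (old 'fp16') on loss", loss)

    def apply_update(optimizer, loss, dist_option="plain"):
        if dist_option == "plain":       # was 'fp32'
            optimizer(loss)
        elif dist_option == "half":      # was 'fp16'
            optimizer.backward_and_update_half(loss)
        else:
            raise ValueError("unsupported dist_option: %s" % dist_option)

    apply_update(ToyOptimizer(), loss=0.7, dist_option="half")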
diff --git a/examples/cnn/train_multiprocess.py b/examples/cnn/train_multiprocess.py
index 9972ddd..182dd35 100644
--- a/examples/cnn/train_multiprocess.py
+++ b/examples/cnn/train_multiprocess.py
@@ -20,26 +20,33 @@
 
 from singa import singa_wrap as singa
 from singa import opt
+from singa import tensor
 import argparse
 import train_cnn
 import multiprocessing
 
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
+
 def run(args, local_rank, world_size, nccl_id):
-    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5, dtype=singa_dtype[args.precision])
     sgd = opt.DistOpt(sgd, nccl_id=nccl_id, local_rank=local_rank, world_size=world_size)
     train_cnn.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
               args.batch_size, args.model, args.data, sgd, args.graph,
-              args.verbosity, args.dist_option, args.spars)
+              args.verbosity, args.dist_option, args.spars, args.precision)
 
 
 if __name__ == '__main__':
-    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    # Use argparse to get command config: max_epoch, model, data, etc., for single GPU training
     parser = argparse.ArgumentParser(
         description='Training using the autograd and graph.')
     parser.add_argument('model',
                         choices=['resnet', 'xceptionnet', 'cnn', 'mlp'],
                         default='cnn')
     parser.add_argument('data', choices=['cifar10', 'cifar100', 'mnist'], default='mnist')
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
     parser.add_argument('-m',
                         '--max-epoch',
                         default=10,
@@ -66,8 +73,8 @@
                         dest='world_size')
     parser.add_argument('-d',
                         '--dist-option',
-                        default='fp32',
-                        choices=['fp32','fp16','partialUpdate','sparseTopK','sparseThreshold'],
+                        default='plain',
+                        choices=['plain','half','partialUpdate','sparseTopK','sparseThreshold'],
                         help='distibuted training options',
                         dest='dist_option') # currently partialUpdate support graph=False only
     parser.add_argument('-s',
diff --git a/examples/largedataset_cnn/README.md b/examples/largedataset_cnn/README.md
new file mode 100644
index 0000000..915d361
--- /dev/null
+++ b/examples/largedataset_cnn/README.md
@@ -0,0 +1,59 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Image Classification using Convolutional Neural Networks with a dataset from the filesystem
+
+Examples inside this folder show how to train CNN models using SINGA for image classification.
+
+The training script reads the dataset from the filesystem as defined by `process_data.py`. Hence, users can modify `process_data.py`
+to suit their preferred dataset format.
+
+In the current setting, 'classes.txt' contains one class name per line. For example, if it is a food dataset containing three classes, 'classes.txt' may read like this:
+
+    Fish_and_chips
+    bagel_and_croissant
+    bak_kut_teh
+
+The directory 'Data/' then contains one folder per class, where each folder name matches a class name in 'classes.txt'. Image file names do not matter, but each image must be placed inside the folder of the class it belongs to. For the example above, the folder structure may look like this:
+
+    Data/
+        Fish_and_chips/
+            fish1.jpg
+            fish2.jpg
+            ...
+            chip1.jpg
+            ...
+        bagel_and_croissant/
+            bagel.jpg
+            ...
+            croissant1.jpg
+            croissant2.jpg
+            ...
+        bak_kut_teh/
+            photo.jpg
+            photo2.jpg
+            ...             
+
+Before running the code, the `model` folder in `examples/cnn` should be copied to this directory.
+
+* `train_largedata.py` is the training script, which controls the training flow by
+  performing backpropagation and SGD updates.
+
+* `train_mpi.py` is the script for distributed training (among multiple nodes) 
+  using MPI and NCCL for communication.
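To make the layout described in this README concrete: the label of a class is simply the zero-based line number of its name in `classes.txt`, and every image inside the matching folder under `Data/` receives that integer label. A minimal, self-contained sketch of that mapping (the bundled `process_data.py` shown next implements the full version, including shuffling and the 80/20 train/validation split):

    import os

    def list_labeled_images(data_root, classes_file):
        # A folder's label is the line number of its name in classes.txt.
        with open(classes_file, "r", encoding="utf-8") as f:
            classes = [line.strip() for line in f if line.strip()]
        paths, labels = [], []
        for class_name in os.listdir(data_root):
            class_dir = os.path.join(data_root, class_name)
            if not os.path.isdir(class_dir) or class_name not in classes:
                continue
            for fname in os.listdir(class_dir):
                paths.append(os.path.join(class_dir, fname))
                labels.append(classes.index(class_name))
        return paths, labels

    # Example call with the hypothetical paths from the README:
    # paths, labels = list_labeled_images("Data", "classes.txt")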
diff --git a/examples/largedataset_cnn/process_data.py b/examples/largedataset_cnn/process_data.py
new file mode 100644
index 0000000..7d92d01
--- /dev/null
+++ b/examples/largedataset_cnn/process_data.py
@@ -0,0 +1,98 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import imghdr
+import numpy as np
+from PIL import Image
+
+def paths_to_images(paths, image_size):
+    num_images=len(paths)
+    im = np.zeros((num_images,3,image_size,image_size), dtype=np.float32)
+
+    for i in range(num_images):
+        temp = np.array(Image.open(paths[i]).convert('RGB').resize((image_size, image_size), Image.BILINEAR))
+        temp = np.moveaxis(temp,-1,0)
+        im[i] = temp
+
+    im /= 255
+
+    return im
+
+
+def process_data(dataset_root, classes):
+    # load class names
+    with open(classes, 'r', encoding='utf-8') as f:
+        classes = f.readlines()
+        classes = list(map(lambda x: x.strip(), classes))
+
+    # make input_paths and labels
+    input_paths, labels = [], []
+    for class_name in os.listdir(dataset_root):
+        class_root = os.path.join(dataset_root, class_name)
+        class_id = classes.index(class_name)
+        for path in os.listdir(class_root):
+            path = os.path.join(class_root, path)
+            if imghdr.what(path) is None:
+                # this is not an image file
+                continue
+            input_paths.append(path)
+            labels.append(class_id)
+
+    # convert to numpy array
+    input_paths = np.array(input_paths)
+    labels = np.array(labels, dtype=np.int32)
+
+    # shuffle dataset
+    np.random.seed(0)
+    perm = np.random.permutation(len(input_paths))
+    input_paths = input_paths[perm]
+    labels = labels[perm]
+
+    # split dataset for training and validation
+    border = int(len(input_paths) * 0.8)
+    train_labels = labels[:border]
+    val_labels = labels[border:]
+    train_input_paths = input_paths[:border]
+    val_input_paths = input_paths[border:]
+
+
+    print("Training on %d images and labels" % (len(train_input_paths)))
+    print("Validation on %d images and labels" % (len(val_input_paths)))
+
+    return train_input_paths, train_labels, val_input_paths, val_labels
+
+def loaddata():
+    dataset_root = '/Dataset/Data/'
+    classes = '/Dataset/classes.txt'
+    return process_data(dataset_root, classes)
+
+if __name__ == '__main__':
+
+    # test script in main
+    train_input_paths, train_labels, val_input_paths, val_labels = loaddata()
+
+    print(train_input_paths.shape)
+    print(train_labels.shape)
+    print(val_input_paths.shape)
+    print(val_labels.shape)
+
+    a=paths_to_images(paths=train_input_paths[0:5], image_size=299)
+    print(a)
+    print(a.shape)
diff --git a/examples/largedataset_cnn/train_largedata.py b/examples/largedataset_cnn/train_largedata.py
new file mode 100644
index 0000000..2a79675
--- /dev/null
+++ b/examples/largedataset_cnn/train_largedata.py
@@ -0,0 +1,300 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from singa import singa_wrap as singa
+from singa import device
+from singa import tensor
+from singa import opt
+import numpy as np
+import time
+import argparse
+from PIL import Image
+import process_data
+
+# Data Augmentation
+def augmentation(x, batch_size):
+    xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
+    for data_num in range(0, batch_size):
+        offset = np.random.randint(8, size=2)
+        x[data_num, :, :, :] = xpad[data_num, :,
+                                    offset[0]:offset[0] + x.shape[2],
+                                    offset[1]:offset[1] + x.shape[2]]
+        if_flip = np.random.randint(2)
+        if (if_flip):
+            x[data_num, :, :, :] = x[data_num, :, :, ::-1]
+    return x
+
+
+# Calculate Accuracy
+def accuracy(pred, target):
+    # y is network output to be compared with ground truth (int)
+    y = np.argmax(pred, axis=1)
+    a = y == target
+    correct = np.array(a, "int").sum()
+    # print(correct)
+    return correct
+
+
+# Data partition according to the rank
+def partition(global_rank, world_size, train_x, train_y, val_x, val_y):
+    # Partition training data
+    data_per_rank = train_x.shape[0] // world_size
+    idx_start = global_rank * data_per_rank
+    idx_end = (global_rank + 1) * data_per_rank
+    train_x = train_x[idx_start:idx_end]
+    train_y = train_y[idx_start:idx_end]
+    # Partition evaluation data
+    data_per_rank = val_x.shape[0] // world_size
+    idx_start = global_rank * data_per_rank
+    idx_end = (global_rank + 1) * data_per_rank
+    val_x = val_x[idx_start:idx_end]
+    val_y = val_y[idx_start:idx_end]
+    return train_x, train_y, val_x, val_y
+
+
+# Function to all reduce NUMPY Accuracy and Loss from Multiple Devices
+def reduce_variable(variable, dist_opt, reducer):
+    reducer.copy_from_numpy(variable)
+    dist_opt.all_reduce(reducer.data)
+    dist_opt.wait()
+    output = tensor.to_numpy(reducer)
+    return output
+
+
+def resize_dataset(x, image_size):
+    num_data = x.shape[0]
+    dim = x.shape[1]
+    X = np.zeros(shape=(num_data, dim, image_size, image_size),
+                 dtype=np.float32)
+    for n in range(0, num_data):
+        for d in range(0, dim):
+            X[n, d, :, :] = np.array(Image.fromarray(x[n, d, :, :]).resize(
+                (image_size, image_size), Image.BILINEAR),
+                                     dtype=np.float32)
+    return X
+
+
+def run(global_rank,
+        world_size,
+        local_rank,
+        max_epoch,
+        batch_size,
+        model,
+        data,
+        sgd,
+        graph,
+        verbosity,
+        dist_option='fp32',
+        spars=None):
+    dev = device.create_cuda_gpu_on(local_rank)
+    dev.SetRandSeed(0)
+    np.random.seed(0)
+
+
+    train_x, train_y, val_x, val_y = process_data.loaddata()
+
+    num_channels = 3
+    num_classes = (np.max(train_y) + 1).item()
+    print(num_classes)
+
+    if model == 'resnet':
+        from model import resnet
+        model = resnet.resnet50(num_channels=num_channels,
+                                num_classes=num_classes)
+    elif model == 'xceptionnet':
+        from model import xceptionnet
+        model = xceptionnet.create_model(num_channels=num_channels,
+                                         num_classes=num_classes)
+    elif model == 'cnn':
+        from model import cnn
+        model = cnn.create_model(num_channels=num_channels,
+                                 num_classes=num_classes)
+    elif model == 'alexnet':
+        from model import alexnet
+        model = alexnet.create_model(num_channels=num_channels,
+                                     num_classes=num_classes)
+
+    # For distributed training, sequential gives better performance
+    if hasattr(sgd, "communicator"):
+        DIST = True
+        sequential = True
+    else:
+        DIST = False
+        sequential = False
+
+    if DIST:
+        train_x, train_y, val_x, val_y = partition(global_rank, world_size,
+                                                   train_x, train_y, val_x,
+                                                   val_y)
+    '''
+    # check dataset shape correctness
+    if global_rank == 0:
+        print("Check the shape of dataset:")
+        print(train_x.shape)
+        print(train_y.shape)
+    '''
+
+    if model.dimension == 4:
+        tx = tensor.Tensor(
+            (batch_size, num_channels, model.input_size, model.input_size), dev,
+            tensor.float32)
+    elif model.dimension == 2:
+        tx = tensor.Tensor((batch_size, data_size), dev, tensor.float32)
+        np.reshape(train_x, (train_x.shape[0], -1))
+        np.reshape(val_x, (val_x.shape[0], -1))
+
+    ty = tensor.Tensor((batch_size,), dev, tensor.int32)
+    num_train_batch = train_x.shape[0] // batch_size
+    num_val_batch = val_x.shape[0] // batch_size
+    idx = np.arange(train_x.shape[0], dtype=np.int32)
+
+    # attached model to graph
+    model.set_optimizer(sgd)
+    model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
+    dev.SetVerbosity(verbosity)
+
+    checkpointpath="checkpoint.zip"
+
+    import os
+    if os.path.exists(checkpointpath):
+        model.load_states(fpath=checkpointpath)
+
+    # Training and Evaluation Loop
+    for epoch in range(max_epoch):
+        start_time = time.time()
+        np.random.shuffle(idx)
+
+        if global_rank == 0:
+            print('Starting Epoch %d:' % (epoch))
+
+        # Training Phase
+        train_correct = np.zeros(shape=[1], dtype=np.float32)
+        test_correct = np.zeros(shape=[1], dtype=np.float32)
+        train_loss = np.zeros(shape=[1], dtype=np.float32)
+
+        model.train()
+        for b in range(num_train_batch):
+            # Generate the patch data in this iteration
+            x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
+            x = process_data.paths_to_images(x,model.input_size)
+            if model.dimension == 4:
+                x = augmentation(x, batch_size)
+            y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
+
+            # Copy the patch data into input tensors
+            tx.copy_from_numpy(x)
+            ty.copy_from_numpy(y)
+
+            # Train the model
+            out, loss = model(tx, ty, dist_option, spars)
+            train_correct += accuracy(tensor.to_numpy(out), y)
+            train_loss += tensor.to_numpy(loss)[0]
+
+        if DIST:
+            # Reduce the Evaluation Accuracy and Loss from Multiple Devices
+            reducer = tensor.Tensor((1,), dev, tensor.float32)
+            train_correct = reduce_variable(train_correct, sgd, reducer)
+            train_loss = reduce_variable(train_loss, sgd, reducer)
+
+        if global_rank == 0:
+            print('Training loss = %f, training accuracy = %f' %
+                  (train_loss, train_correct /
+                   (num_train_batch * batch_size * world_size)),
+                  flush=True)
+
+        # Evaluation Phase
+        model.eval()
+        for b in range(num_val_batch):
+            x = val_x[b * batch_size:(b + 1) * batch_size]
+            x = process_data.paths_to_images(x,model.input_size)
+            y = val_y[b * batch_size:(b + 1) * batch_size]
+            tx.copy_from_numpy(x)
+            ty.copy_from_numpy(y)
+            out_test = model(tx)
+            test_correct += accuracy(tensor.to_numpy(out_test), y)
+
+        if DIST:
+            # Reduce the Evaluation Accuracy from Multiple Devices
+            test_correct = reduce_variable(test_correct, sgd, reducer)
+
+        # Output the Evaluation Accuracy
+        if global_rank == 0:
+            print('Evaluation accuracy = %f, Elapsed Time = %fs' %
+                  (test_correct / (num_val_batch * batch_size * world_size),
+                   time.time() - start_time),
+                  flush=True)
+
+    dev.PrintTimeProfiling()
+
+    if global_rank == 0:
+        if os.path.exists(checkpointpath):
+            os.remove(checkpointpath)
+        model.save_states(checkpointpath)
+
+
+if __name__ == '__main__':
+    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    parser = argparse.ArgumentParser(
+        description='Training using the autograd and graph.')
+    parser.add_argument('model',
+                        choices=['resnet', 'xceptionnet', 'cnn', 'alexnet'],
+                        default='cnn')
+    parser.add_argument('--epoch',
+                        '--max-epoch',
+                        default=10,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    parser.add_argument('--bs',
+                        '--batch-size',
+                        default=64,
+                        type=int,
+                        help='batch size',
+                        dest='batch_size')
+    parser.add_argument('--lr',
+                        '--learning-rate',
+                        default=0.005,
+                        type=float,
+                        help='initial learning rate',
+                        dest='lr')
+    # determine which gpu to use
+    parser.add_argument('--id',
+                        '--device-id',
+                        default=0,
+                        type=int,
+                        help='which GPU to use',
+                        dest='device_id')
+    parser.add_argument('--no-graph',
+                        '--disable-graph',
+                        default='True',
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('--verbosity',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
+    run(0, 1, args.device_id, args.max_epoch, args.batch_size, args.model,
+        "no", sgd, args.graph, args.verbosity)
diff --git a/examples/largedataset_cnn/train_mpi.py b/examples/largedataset_cnn/train_mpi.py
new file mode 100644
index 0000000..28d260b
--- /dev/null
+++ b/examples/largedataset_cnn/train_mpi.py
@@ -0,0 +1,83 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+from singa import singa_wrap as singa
+from singa import opt
+import argparse
+import train_largedata
+
+if __name__ == '__main__':
+    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    parser = argparse.ArgumentParser(
+        description='Training using the autograd and graph.')
+    parser.add_argument('model',
+                        choices=['resnet', 'xceptionnet', 'cnn', 'mlp'],
+                        default='cnn')
+    parser.add_argument('--epoch',
+                        '--max-epoch',
+                        default=10,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    parser.add_argument('--bs',
+                        '--batch-size',
+                        default=64,
+                        type=int,
+                        help='batch size',
+                        dest='batch_size')
+    parser.add_argument('--lr',
+                        '--learning-rate',
+                        default=0.005,
+                        type=float,
+                        help='initial learning rate',
+                        dest='lr')
+    parser.add_argument('--op',
+                        '--option',
+                        default='fp32',
+                        choices=['fp32','fp16','partialUpdate','sparseTopK','sparseThreshold'],
+                        help='distributed training options',
+                        dest='dist_option')  # currently partialUpdate supports graph=False only
+    parser.add_argument('--spars',
+                        '--sparsification',
+                        default='0.05',
+                        type=float,
+                        help='the sparsity parameter used for sparsification, between 0 and 1',
+                        dest='spars')
+    parser.add_argument('--no-graph',
+                        '--disable-graph',
+                        default='True',
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('--verbosity',
+                        '--log-verbosity',
+                        default=0,
+                        type=int,
+                        help='logging verbosity',
+                        dest='verbosity')
+
+    args = parser.parse_args()
+
+    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
+    sgd = opt.DistOpt(sgd)
+
+    train_largedata.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
+              args.batch_size, args.model, "no", sgd, args.graph,
+              args.verbosity, args.dist_option, args.spars)
diff --git a/examples/mlp/model.py b/examples/mlp/model.py
index ab6a0bf..9a6f3a7 100644
--- a/examples/mlp/model.py
+++ b/examples/mlp/model.py
@@ -20,6 +20,14 @@
 from singa import layer
 from singa import model
 from singa import tensor
+from singa import opt
+from singa import device
+import argparse
+import numpy as np
+
+np_dtype = {"float16": np.float16, "float32": np.float32}
+
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
 
 
 class MLP(model.Model):
@@ -44,9 +52,9 @@
         out = self.forward(x)
         loss = self.softmax_cross_entropy(out, y)
 
-        if dist_option == 'fp32':
+        if dist_option == 'plain':
             self.optimizer(loss)
-        elif dist_option == 'fp16':
+        elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
             self.optimizer.backward_and_partial_update(loss)
@@ -68,7 +76,10 @@
     """Constructs a CNN model.
 
     Args:
-        pretrained (bool): If True, returns a model pre-trained
+        pretrained (bool): If True, returns a pre-trained model.
+    
+    Returns:
+        The created MLP model.
     """
     model = MLP(**kwargs)
 
@@ -78,34 +89,56 @@
 __all__ = ['MLP', 'create_model']
 
 if __name__ == "__main__":
+    np.random.seed(0)
 
-    import numpy as np
-    from singa import opt
-    from singa import device
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
+    parser.add_argument('-g',
+                        '--disable-graph',
+                        default='True',
+                        action='store_false',
+                        help='disable graph',
+                        dest='graph')
+    parser.add_argument('-m',
+                        '--max-epoch',
+                        default=1001,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    args = parser.parse_args()
 
     # generate the boundary
     f = lambda x: (5 * x + 1)
     bd_x = np.linspace(-1.0, 1, 200)
     bd_y = f(bd_x)
+
     # generate the training data
     x = np.random.uniform(-1, 1, 400)
     y = f(x) + 2 * np.random.randn(len(x))
+
+    # choose one precision
+    precision = singa_dtype[args.precision]
+    np_precision = np_dtype[args.precision]
+
     # convert training data to 2d space
     label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)
-    data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
+    data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np_precision)
 
     dev = device.create_cuda_gpu_on(0)
-    sgd = opt.SGD(0.05)
-    tx = tensor.Tensor((400, 2), dev, tensor.float32)
+    sgd = opt.SGD(0.1, 0.9, 1e-5, dtype=singa_dtype[args.precision])
+    tx = tensor.Tensor((400, 2), dev, precision)
     ty = tensor.Tensor((400,), dev, tensor.int32)
     model = MLP(data_size=2, perceptron_size=3, num_classes=2)
 
-    # attached model to graph
+    # attach model to graph
     model.set_optimizer(sgd)
-    model.compile([tx], is_train=True, use_graph=True, sequential=False)
+    model.compile([tx], is_train=True, use_graph=args.graph, sequential=True)
     model.train()
 
-    for i in range(1001):
+    for i in range(args.max_epoch):
         tx.copy_from_numpy(data)
         ty.copy_from_numpy(label)
         out, loss = model(tx, ty, 'fp32', spars=None)
diff --git a/examples/mlp/native.py b/examples/mlp/native.py
index 00f4c0d..a82ec3b 100644
--- a/examples/mlp/native.py
+++ b/examples/mlp/native.py
@@ -22,8 +22,28 @@
 from singa import autograd
 from singa import opt
 import numpy as np
+from singa import device
+import argparse
+
+np_dtype = {"float16": np.float16, "float32": np.float32}
+
+singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
 
 if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-p',
+                        choices=['float32', 'float16'],
+                        default='float32',
+                        dest='precision')
+    parser.add_argument('-m',
+                        '--max-epoch',
+                        default=1001,
+                        type=int,
+                        help='maximum epochs',
+                        dest='max_epoch')
+    args = parser.parse_args()
+
+    np.random.seed(0)
 
     autograd.training = True
 
@@ -33,9 +53,11 @@
     f = lambda x: (5 * x + 1)
     bd_x = np.linspace(-1.0, 1, 200)
     bd_y = f(bd_x)
+
     # generate the training data
     x = np.random.uniform(-1, 1, 400)
     y = f(x) + 2 * np.random.randn(len(x))
+
     # convert training data to 2d space
     label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)])
     data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
@@ -44,12 +66,12 @@
         """
         Converts a class vector (integers) to binary class matrix.
 
-        Args
+        Args:
             y: class vector to be converted into a matrix
                 (integers from 0 to num_classes).
             num_classes: total number of classes.
 
-        Return
+        Returns:
             A binary matrix representation of the input.
         """
         y = np.array(y, dtype="int")
@@ -62,22 +84,47 @@
     print("train_data_shape:", data.shape)
     print("train_label_shape:", label.shape)
 
-    inputs = Tensor(data=data)
-    target = Tensor(data=label)
+    precision = singa_dtype[args.precision]
+    np_precision = np_dtype[args.precision]
 
-    w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True)
-    w0.gaussian(0.0, 0.1)
-    b0 = Tensor(shape=(3,), requires_grad=True, stores_grad=True)
+    dev = device.create_cuda_gpu()
+
+    inputs = Tensor(data=data, device=dev)
+    target = Tensor(data=label, device=dev)
+
+    inputs = inputs.as_type(precision)
+    target = target.as_type(tensor.int32)
+
+    w0_np = np.random.normal(0, 0.1, (2, 3)).astype(np_precision)
+    w0 = Tensor(data=w0_np,
+                device=dev,
+                dtype=precision,
+                requires_grad=True,
+                stores_grad=True)
+    b0 = Tensor(shape=(3,),
+                device=dev,
+                dtype=precision,
+                requires_grad=True,
+                stores_grad=True)
     b0.set_value(0.0)
 
-    w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True)
-    w1.gaussian(0.0, 0.1)
-    b1 = Tensor(shape=(2,), requires_grad=True, stores_grad=True)
+    w1_np = np.random.normal(0, 0.1, (3, 2)).astype(np_precision)
+    w1 = Tensor(data=w1_np,
+                device=dev,
+                dtype=precision,
+                requires_grad=True,
+                stores_grad=True)
+    b1 = Tensor(shape=(2,),
+                device=dev,
+                dtype=precision,
+                requires_grad=True,
+                stores_grad=True)
     b1.set_value(0.0)
 
-    sgd = opt.SGD(0.05)
+    sgd = opt.SGD(0.05, 0.8)
+
     # training process
-    for i in range(1001):
+    for i in range(args.max_epoch):
         x = autograd.matmul(inputs, w0)
         x = autograd.add_bias(x, b0)
         x = autograd.relu(x)
@@ -87,4 +134,4 @@
         sgd(loss)
 
         if i % 100 == 0:
-            print("training loss = ", tensor.to_numpy(loss)[0])
+            print("%d, training loss = " % i, tensor.to_numpy(loss)[0])
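The rewritten loop above still implements the same two-layer MLP by hand: relu(x @ w0 + b0) @ w1 + b1 followed by softmax cross-entropy, only now on GPU tensors of the chosen precision. For reference, a NumPy-only sketch of that forward computation under the float16 setting (shapes follow the diff; this is an illustration, not the SINGA API):

    import numpy as np

    def mlp_forward(x, w0, b0, w1, b1):
        # Two-layer MLP: matmul + bias + ReLU, then matmul + bias -> logits.
        h = np.maximum(x @ w0 + b0, 0.0)
        return h @ w1 + b1

    rng = np.random.default_rng(0)
    x = rng.uniform(-1, 1, (400, 2)).astype(np.float16)
    w0 = rng.normal(0, 0.1, (2, 3)).astype(np.float16)
    w1 = rng.normal(0, 0.1, (3, 2)).astype(np.float16)
    logits = mlp_forward(x, w0, np.zeros(3, np.float16), w1, np.zeros(2, np.float16))
    print(logits.shape)   # (400, 2)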
diff --git a/include/half.hpp b/include/half.hpp
new file mode 100644
index 0000000..69f2ffd
--- /dev/null
+++ b/include/half.hpp
@@ -0,0 +1,4575 @@
+// half - IEEE 754-based half-precision floating-point library.

+//

+// Copyright (c) 2012-2019 Christian Rau <rauy@users.sourceforge.net>

+//

+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation 

+// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, 

+// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the 

+// Software is furnished to do so, subject to the following conditions:

+//

+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

+//

+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 

+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 

+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 

+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+

+// Version 2.1.0

+

+/// \file

+/// Main header file for half-precision functionality.

+

+#ifndef HALF_HALF_HPP

+#define HALF_HALF_HPP

+

+#define HALF_GCC_VERSION (__GNUC__*100+__GNUC_MINOR__)

+

+#if defined(__INTEL_COMPILER)

+	#define HALF_ICC_VERSION __INTEL_COMPILER

+#elif defined(__ICC)

+	#define HALF_ICC_VERSION __ICC

+#elif defined(__ICL)

+	#define HALF_ICC_VERSION __ICL

+#else

+	#define HALF_ICC_VERSION 0

+#endif

+

+// check C++11 language features

+#if defined(__clang__)										// clang

+	#if __has_feature(cxx_static_assert) && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)

+		#define HALF_ENABLE_CPP11_STATIC_ASSERT 1

+	#endif

+	#if __has_feature(cxx_constexpr) && !defined(HALF_ENABLE_CPP11_CONSTEXPR)

+		#define HALF_ENABLE_CPP11_CONSTEXPR 1

+	#endif

+	#if __has_feature(cxx_noexcept) && !defined(HALF_ENABLE_CPP11_NOEXCEPT)

+		#define HALF_ENABLE_CPP11_NOEXCEPT 1

+	#endif

+	#if __has_feature(cxx_user_literals) && !defined(HALF_ENABLE_CPP11_USER_LITERALS)

+		#define HALF_ENABLE_CPP11_USER_LITERALS 1

+	#endif

+	#if __has_feature(cxx_thread_local) && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)

+		#define HALF_ENABLE_CPP11_THREAD_LOCAL 1

+	#endif

+	#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && !defined(HALF_ENABLE_CPP11_LONG_LONG)

+		#define HALF_ENABLE_CPP11_LONG_LONG 1

+	#endif

+#elif HALF_ICC_VERSION && defined(__INTEL_CXX11_MODE__)		// Intel C++

+	#if HALF_ICC_VERSION >= 1500 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)

+		#define HALF_ENABLE_CPP11_THREAD_LOCAL 1

+	#endif

+	#if HALF_ICC_VERSION >= 1500 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)

+		#define HALF_ENABLE_CPP11_USER_LITERALS 1

+	#endif

+	#if HALF_ICC_VERSION >= 1400 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)

+		#define HALF_ENABLE_CPP11_CONSTEXPR 1

+	#endif

+	#if HALF_ICC_VERSION >= 1400 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)

+		#define HALF_ENABLE_CPP11_NOEXCEPT 1

+	#endif

+	#if HALF_ICC_VERSION >= 1110 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)

+		#define HALF_ENABLE_CPP11_STATIC_ASSERT 1

+	#endif

+	#if HALF_ICC_VERSION >= 1110 && !defined(HALF_ENABLE_CPP11_LONG_LONG)

+		#define HALF_ENABLE_CPP11_LONG_LONG 1

+	#endif

+#elif defined(__GNUC__)										// gcc

+	#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L

+		#if HALF_GCC_VERSION >= 408 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)

+			#define HALF_ENABLE_CPP11_THREAD_LOCAL 1

+		#endif

+		#if HALF_GCC_VERSION >= 407 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)

+			#define HALF_ENABLE_CPP11_USER_LITERALS 1

+		#endif

+		#if HALF_GCC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)

+			#define HALF_ENABLE_CPP11_CONSTEXPR 1

+		#endif

+		#if HALF_GCC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)

+			#define HALF_ENABLE_CPP11_NOEXCEPT 1

+		#endif

+		#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)

+			#define HALF_ENABLE_CPP11_STATIC_ASSERT 1

+		#endif

+		#if !defined(HALF_ENABLE_CPP11_LONG_LONG)

+			#define HALF_ENABLE_CPP11_LONG_LONG 1

+		#endif

+	#endif

+	#define HALF_TWOS_COMPLEMENT_INT 1

+#elif defined(_MSC_VER)										// Visual C++

+	#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)

+		#define HALF_ENABLE_CPP11_THREAD_LOCAL 1

+	#endif

+	#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)

+		#define HALF_ENABLE_CPP11_USER_LITERALS 1

+	#endif

+	#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)

+		#define HALF_ENABLE_CPP11_CONSTEXPR 1

+	#endif

+	#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)

+		#define HALF_ENABLE_CPP11_NOEXCEPT 1

+	#endif

+	#if _MSC_VER >= 1600 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)

+		#define HALF_ENABLE_CPP11_STATIC_ASSERT 1

+	#endif

+	#if _MSC_VER >= 1310 && !defined(HALF_ENABLE_CPP11_LONG_LONG)

+		#define HALF_ENABLE_CPP11_LONG_LONG 1

+	#endif

+	#define HALF_TWOS_COMPLEMENT_INT 1

+	#define HALF_POP_WARNINGS 1

+	#pragma warning(push)

+	#pragma warning(disable : 4099 4127 4146)	//struct vs class, constant in if, negative unsigned

+#endif

+

+// check C++11 library features

+#include <utility>

+#if defined(_LIBCPP_VERSION)								// libc++

+	#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103

+		#ifndef HALF_ENABLE_CPP11_TYPE_TRAITS

+			#define HALF_ENABLE_CPP11_TYPE_TRAITS 1

+		#endif

+		#ifndef HALF_ENABLE_CPP11_CSTDINT

+			#define HALF_ENABLE_CPP11_CSTDINT 1

+		#endif

+		#ifndef HALF_ENABLE_CPP11_CMATH

+			#define HALF_ENABLE_CPP11_CMATH 1

+		#endif

+		#ifndef HALF_ENABLE_CPP11_HASH

+			#define HALF_ENABLE_CPP11_HASH 1

+		#endif

+		#ifndef HALF_ENABLE_CPP11_CFENV

+			#define HALF_ENABLE_CPP11_CFENV 1

+		#endif

+	#endif

+#elif defined(__GLIBCXX__)									// libstdc++

+	#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103

+		#ifdef __clang__

+			#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)

+				#define HALF_ENABLE_CPP11_TYPE_TRAITS 1

+			#endif

+			#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CSTDINT)

+				#define HALF_ENABLE_CPP11_CSTDINT 1

+			#endif

+			#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CMATH)

+				#define HALF_ENABLE_CPP11_CMATH 1

+			#endif

+			#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_HASH)

+				#define HALF_ENABLE_CPP11_HASH 1

+			#endif

+			#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CFENV)

+				#define HALF_ENABLE_CPP11_CFENV 1

+			#endif

+		#else

+			#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)

+				#define HALF_ENABLE_CPP11_TYPE_TRAITS 1

+			#endif

+			#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CSTDINT)

+				#define HALF_ENABLE_CPP11_CSTDINT 1

+			#endif

+			#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CMATH)

+				#define HALF_ENABLE_CPP11_CMATH 1

+			#endif

+			#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_HASH)

+				#define HALF_ENABLE_CPP11_HASH 1

+			#endif

+			#if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CFENV)

+				#define HALF_ENABLE_CPP11_CFENV 1

+			#endif

+		#endif

+	#endif

+#elif defined(_CPPLIB_VER)									// Dinkumware/Visual C++

+	#if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)

+		#define HALF_ENABLE_CPP11_TYPE_TRAITS 1

+	#endif

+	#if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_CSTDINT)

+			#define HALF_ENABLE_CPP11_CSTDINT 1

+	#endif

+	#if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_HASH)

+		#define HALF_ENABLE_CPP11_HASH 1

+	#endif

+	#if _CPPLIB_VER >= 610 && !defined(HALF_ENABLE_CPP11_CMATH)

+		#define HALF_ENABLE_CPP11_CMATH 1

+	#endif

+	#if _CPPLIB_VER >= 610 && !defined(HALF_ENABLE_CPP11_CFENV)

+		#define HALF_ENABLE_CPP11_CFENV 1

+	#endif

+#endif

+#undef HALF_GCC_VERSION

+#undef HALF_ICC_VERSION

+

+// any error throwing C++ exceptions?

+#if defined(HALF_ERRHANDLING_THROW_INVALID) || defined(HALF_ERRHANDLING_THROW_DIVBYZERO) || defined(HALF_ERRHANDLING_THROW_OVERFLOW) || defined(HALF_ERRHANDLING_THROW_UNDERFLOW) || defined(HALF_ERRHANDLING_THROW_INEXACT)

+#define HALF_ERRHANDLING_THROWS 1

+#endif

+

+// any error handling enabled?

+#define HALF_ERRHANDLING	(HALF_ERRHANDLING_FLAGS||HALF_ERRHANDLING_ERRNO||HALF_ERRHANDLING_FENV||HALF_ERRHANDLING_THROWS)

+

+#if HALF_ERRHANDLING

+	#define HALF_UNUSED_NOERR(name) name

+#else

+	#define HALF_UNUSED_NOERR(name)

+#endif

+

+// support constexpr

+#if HALF_ENABLE_CPP11_CONSTEXPR

+	#define HALF_CONSTEXPR				constexpr

+	#define HALF_CONSTEXPR_CONST		constexpr

+	#if HALF_ERRHANDLING

+		#define HALF_CONSTEXPR_NOERR

+	#else

+		#define HALF_CONSTEXPR_NOERR	constexpr

+	#endif

+#else

+	#define HALF_CONSTEXPR

+	#define HALF_CONSTEXPR_CONST		const

+	#define HALF_CONSTEXPR_NOERR

+#endif

+

+// support noexcept

+#if HALF_ENABLE_CPP11_NOEXCEPT

+	#define HALF_NOEXCEPT	noexcept

+	#define HALF_NOTHROW	noexcept

+#else

+	#define HALF_NOEXCEPT

+	#define HALF_NOTHROW	throw()

+#endif

+

+// support thread storage

+#if HALF_ENABLE_CPP11_THREAD_LOCAL

+	#define HALF_THREAD_LOCAL	thread_local

+#else

+	#define HALF_THREAD_LOCAL	static

+#endif

+

+#include <utility>

+#include <algorithm>

+#include <istream>

+#include <ostream>

+#include <limits>

+#include <stdexcept>

+#include <climits>

+#include <cmath>

+#include <cstring>

+#include <cstdlib>

+#if HALF_ENABLE_CPP11_TYPE_TRAITS

+	#include <type_traits>

+#endif

+#if HALF_ENABLE_CPP11_CSTDINT

+	#include <cstdint>

+#endif

+#if HALF_ERRHANDLING_ERRNO

+	#include <cerrno>

+#endif

+#if HALF_ENABLE_CPP11_CFENV

+	#include <cfenv>

+#endif

+#if HALF_ENABLE_CPP11_HASH

+	#include <functional>

+#endif

+#if HALF_ENABLE_F16C_INTRINSICS

+	#include <immintrin.h>

+#endif

+

+

+#ifndef HALF_ENABLE_F16C_INTRINSICS

+	/// Enable F16C intruction set intrinsics.

+	/// Defining this to 1 enables the use of [F16C compiler intrinsics](https://en.wikipedia.org/wiki/F16C) for converting between 

+	/// half-precision and single-precision values which may result in improved performance. This will not perform additional checks 

+	/// for support of the F16C instruction set, so an appropriate target platform is required when enabling this feature.

+	///

+	/// Unless predefined it will be enabled automatically when the `__F16C__` symbol is defined, which some compilers do on supporting platforms.

+	#define HALF_ENABLE_F16C_INTRINSICS __F16C__

+#endif

+

+#ifdef HALF_DOXYGEN_ONLY

+/// Type for internal floating-point computations.

+/// This can be predefined to a built-in floating-point type (`float`, `double` or `long double`) to override the internal 

+/// half-precision implementation to use this type for computing arithmetic operations and mathematical function (if available). 

+/// This can result in improved performance for arithmetic operators and mathematical functions but might cause results to 

+/// deviate from the specified half-precision rounding mode and inhibits proper detection of half-precision exceptions.

+#define HALF_ARITHMETIC_TYPE (undefined)

+

+/// Enable internal exception flags.

+/// Defining this to 1 causes operations on half-precision values to raise internal floating-point exception flags according to 

+/// the IEEE 754 standard. These can then be cleared and checked with clearexcept(), testexcept().

+#define HALF_ERRHANDLING_FLAGS	0

+

+/// Enable exception propagation to `errno`.

+/// Defining this to 1 causes operations on half-precision values to propagate floating-point exceptions to 

+/// [errno](https://en.cppreference.com/w/cpp/error/errno) from `<cerrno>`. Specifically this will propagate domain errors as 

+/// [EDOM](https://en.cppreference.com/w/cpp/error/errno_macros) and pole, overflow and underflow errors as 

+/// [ERANGE](https://en.cppreference.com/w/cpp/error/errno_macros). Inexact errors won't be propagated.

+#define HALF_ERRHANDLING_ERRNO	0

+

+/// Enable exception propagation to built-in floating-point platform.

+/// Defining this to 1 causes operations on half-precision values to propagate floating-point exceptions to the built-in 

+/// single- and double-precision implementation's exception flags using the 

+/// [C++11 floating-point environment control](https://en.cppreference.com/w/cpp/numeric/fenv) from `<cfenv>`. However, this 

+/// does not work in reverse and single- or double-precision exceptions will not raise the corresponding half-precision 

+/// exception flags, nor will explicitly clearing flags clear the corresponding built-in flags.

+#define HALF_ERRHANDLING_FENV	0

+

+/// Throw C++ exception on domain errors.

+/// Defining this to a string literal causes operations on half-precision values to throw a 

+/// [std::domain_error](https://en.cppreference.com/w/cpp/error/domain_error) with the specified message on domain errors.

+#define HALF_ERRHANDLING_THROW_INVALID		(undefined)

+

+/// Throw C++ exception on pole errors.

+/// Defining this to a string literal causes operations on half-precision values to throw a 

+/// [std::domain_error](https://en.cppreference.com/w/cpp/error/domain_error) with the specified message on pole errors.

+#define HALF_ERRHANDLING_THROW_DIVBYZERO	(undefined)

+

+/// Throw C++ exception on overflow errors.

+/// Defining this to a string literal causes operations on half-precision values to throw a 

+/// [std::overflow_error](https://en.cppreference.com/w/cpp/error/overflow_error) with the specified message on overflows.

+#define HALF_ERRHANDLING_THROW_OVERFLOW		(undefined)

+

+/// Throw C++ exception on underflow errors.

+/// Defining this to a string literal causes operations on half-precision values to throw a 

+/// [std::underflow_error](https://en.cppreference.com/w/cpp/error/underflow_error) with the specified message on underflows.

+#define HALF_ERRHANDLING_THROW_UNDERFLOW	(undefined)

+

+/// Throw C++ exception on rounding errors.

+/// Defining this to 1 causes operations on half-precision values to throw a 

+/// [std::range_error](https://en.cppreference.com/w/cpp/error/range_error) with the specified message on general rounding errors.

+#define HALF_ERRHANDLING_THROW_INEXACT		(undefined)

+#endif

+

+#ifndef HALF_ERRHANDLING_OVERFLOW_TO_INEXACT

+/// Raise INEXACT exception on overflow.

+/// Defining this to 1 (default) causes overflow errors to automatically raise inexact exceptions in addition.

+/// These will be raised after any possible handling of the underflow exception.

+#define HALF_ERRHANDLING_OVERFLOW_TO_INEXACT	1

+#endif

+

+#ifndef HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT

+/// Raise INEXACT exception on underflow.

+/// Defining this to 1 (default) causes underflow errors to automatically raise inexact exceptions in addition.

+/// These will be raised after any possible handling of the underflow exception.

+///

+/// **Note:** This will actually cause underflow (and the accompanying inexact) exceptions to be raised *only* when the result 

+/// is inexact, while if disabled bare underflow errors will be raised for *any* (possibly exact) subnormal result.

+#define HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT	1

+#endif

+

+/// Default rounding mode.

+/// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and more precise types 

+/// (unless using half_cast() and specifying the rounding mode directly) as well as in arithmetic operations and mathematical 

+/// functions. It can be redefined (before including half.hpp) to one of the standard rounding modes using their respective 

+/// constants or the equivalent values of 

+/// [std::float_round_style](https://en.cppreference.com/w/cpp/types/numeric_limits/float_round_style):

+///

+/// `std::float_round_style`         | value | rounding

+/// ---------------------------------|-------|-------------------------

+/// `std::round_indeterminate`       | -1    | fastest

+/// `std::round_toward_zero`         | 0     | toward zero

+/// `std::round_to_nearest`          | 1     | to nearest (default)

+/// `std::round_toward_infinity`     | 2     | toward positive infinity

+/// `std::round_toward_neg_infinity` | 3     | toward negative infinity

+///

+/// By default this is set to `1` (`std::round_to_nearest`), which rounds results to the nearest representable value. It can even 

+/// be set to [std::numeric_limits<float>::round_style](https://en.cppreference.com/w/cpp/types/numeric_limits/round_style) to synchronize 

+/// the rounding mode with that of the built-in single-precision implementation (which is likely `std::round_to_nearest`, though).

+#ifndef HALF_ROUND_STYLE

+	#define HALF_ROUND_STYLE	1		// = std::round_to_nearest

+#endif

+

+/// Value signaling overflow.

+/// In correspondence with `HUGE_VAL[F|L]` from `<cmath>` this symbol expands to a positive value signaling the overflow of an 

+/// operation, in particular it just evaluates to positive infinity.

+///

+/// **See also:** Documentation for [HUGE_VAL](https://en.cppreference.com/w/cpp/numeric/math/HUGE_VAL)

+#define HUGE_VALH	std::numeric_limits<half_float::half>::infinity()

+

+/// Fast half-precision fma function.

+/// This symbol is defined if the fma() function generally executes as fast as, or faster than, a separate 

+/// half-precision multiplication followed by an addition, which is always the case.

+///

+/// **See also:** Documentation for [FP_FAST_FMA](https://en.cppreference.com/w/cpp/numeric/math/fma)

+#define FP_FAST_FMAH	1

+

+///	Half rounding mode.

+/// In correspondence with `FLT_ROUNDS` from `<cfloat>` this symbol expands to the rounding mode used for 

+/// half-precision operations. It is an alias for [HALF_ROUND_STYLE](\ref HALF_ROUND_STYLE).

+///

+/// **See also:** Documentation for [FLT_ROUNDS](https://en.cppreference.com/w/cpp/types/climits/FLT_ROUNDS)

+#define HLF_ROUNDS	HALF_ROUND_STYLE

+

+#ifndef FP_ILOGB0

+	#define FP_ILOGB0		INT_MIN

+#endif

+#ifndef FP_ILOGBNAN

+	#define FP_ILOGBNAN		INT_MAX

+#endif

+#ifndef FP_SUBNORMAL

+	#define FP_SUBNORMAL	0

+#endif

+#ifndef FP_ZERO

+	#define FP_ZERO			1

+#endif

+#ifndef FP_NAN

+	#define FP_NAN			2

+#endif

+#ifndef FP_INFINITE

+	#define FP_INFINITE		3

+#endif

+#ifndef FP_NORMAL

+	#define FP_NORMAL		4

+#endif

+

+#if !HALF_ENABLE_CPP11_CFENV && !defined(FE_ALL_EXCEPT)

+	#define FE_INVALID		0x10

+	#define FE_DIVBYZERO	0x08

+	#define FE_OVERFLOW		0x04

+	#define FE_UNDERFLOW	0x02

+	#define FE_INEXACT		0x01

+	#define FE_ALL_EXCEPT	(FE_INVALID|FE_DIVBYZERO|FE_OVERFLOW|FE_UNDERFLOW|FE_INEXACT)

+#endif

+

+

+/// Main namespace for half-precision functionality.

+/// This namespace contains all the functionality provided by the library.

+namespace half_float

+{

+	class half;

+

+#if HALF_ENABLE_CPP11_USER_LITERALS

+	/// Library-defined half-precision literals.

+	/// Import this namespace to enable half-precision floating-point literals:

+	/// ~~~~{.cpp}

+	/// using namespace half_float::literal;

+	/// half_float::half h = 4.2_h;

+	/// ~~~~

+	namespace literal

+	{

+		half operator "" _h(long double);

+	}

+#endif

+

+	/// \internal

+	/// \brief Implementation details.

+	namespace detail

+	{

+	#if HALF_ENABLE_CPP11_TYPE_TRAITS

+		/// Conditional type.

+		template<bool B,typename T,typename F> struct conditional : std::conditional<B,T,F> {};

+

+		/// Helper for tag dispatching.

+		template<bool B> struct bool_type : std::integral_constant<bool,B> {};

+		using std::true_type;

+		using std::false_type;

+

+		/// Type traits for floating-point types.

+		template<typename T> struct is_float : std::is_floating_point<T> {};

+	#else

+		/// Conditional type.

+		template<bool,typename T,typename> struct conditional { typedef T type; };

+		template<typename T,typename F> struct conditional<false,T,F> { typedef F type; };

+

+		/// Helper for tag dispatching.

+		template<bool> struct bool_type {};

+		typedef bool_type<true> true_type;

+		typedef bool_type<false> false_type;

+

+		/// Type traits for floating-point types.

+		template<typename> struct is_float : false_type {};

+		template<typename T> struct is_float<const T> : is_float<T> {};

+		template<typename T> struct is_float<volatile T> : is_float<T> {};

+		template<typename T> struct is_float<const volatile T> : is_float<T> {};

+		template<> struct is_float<float> : true_type {};

+		template<> struct is_float<double> : true_type {};

+		template<> struct is_float<long double> : true_type {};

+	#endif

+

+		/// Type traits for floating-point bits.

+		template<typename T> struct bits { typedef unsigned char type; };

+		template<typename T> struct bits<const T> : bits<T> {};

+		template<typename T> struct bits<volatile T> : bits<T> {};

+		template<typename T> struct bits<const volatile T> : bits<T> {};

+

+	#if HALF_ENABLE_CPP11_CSTDINT

+		/// Unsigned integer of (at least) 16 bits width.

+		typedef std::uint_least16_t uint16;

+

+		/// Fastest unsigned integer of (at least) 32 bits width.

+		typedef std::uint_fast32_t uint32;

+

+		/// Fastest signed integer of (at least) 32 bits width.

+		typedef std::int_fast32_t int32;

+

+		/// Unsigned integer of (at least) 32 bits width.

+		template<> struct bits<float> { typedef std::uint_least32_t type; };

+

+		/// Unsigned integer of (at least) 64 bits width.

+		template<> struct bits<double> { typedef std::uint_least64_t type; };

+	#else

+		/// Unsigned integer of (at least) 16 bits width.

+		typedef unsigned short uint16;

+

+		/// Fastest unsigned integer of (at least) 32 bits width.

+		typedef unsigned long uint32;

+

+		/// Fastest signed integer of (at least) 32 bits width.

+		typedef long int32;

+

+		/// Unsigned integer of (at least) 32 bits width.

+		template<> struct bits<float> : conditional<std::numeric_limits<unsigned int>::digits>=32,unsigned int,unsigned long> {};

+

+		#if HALF_ENABLE_CPP11_LONG_LONG

+			/// Unsigned integer of (at least) 64 bits width.

+			template<> struct bits<double> : conditional<std::numeric_limits<unsigned long>::digits>=64,unsigned long,unsigned long long> {};

+		#else

+			/// Unsigned integer of (at least) 64 bits width.

+			template<> struct bits<double> { typedef unsigned long type; };

+		#endif

+	#endif

+

+	#ifdef HALF_ARITHMETIC_TYPE

+		/// Type to use for arithmetic computations and mathematical functions internally.

+		typedef HALF_ARITHMETIC_TYPE internal_t;

+	#endif

+

+		/// Tag type for binary construction.

+		struct binary_t {};

+

+		/// Tag for binary construction.

+		HALF_CONSTEXPR_CONST binary_t binary = binary_t();

+

+		/// \name Implementation defined classification and arithmetic

+		/// \{

+

+		/// Check for infinity.

+		/// \tparam T argument type (builtin floating-point type)

+		/// \param arg value to query

+		/// \retval true if infinity

+		/// \retval false else

+		template<typename T> bool builtin_isinf(T arg)

+		{

+		#if HALF_ENABLE_CPP11_CMATH

+			return std::isinf(arg);

+		#elif defined(_MSC_VER)

+			return !::_finite(static_cast<double>(arg)) && !::_isnan(static_cast<double>(arg));

+		#else

+			return arg == std::numeric_limits<T>::infinity() || arg == -std::numeric_limits<T>::infinity();

+		#endif

+		}

+

+		/// Check for NaN.

+		/// \tparam T argument type (builtin floating-point type)

+		/// \param arg value to query

+		/// \retval true if not a number

+		/// \retval false else

+		template<typename T> bool builtin_isnan(T arg)

+		{

+		#if HALF_ENABLE_CPP11_CMATH

+			return std::isnan(arg);

+		#elif defined(_MSC_VER)

+			return ::_isnan(static_cast<double>(arg)) != 0;

+		#else

+			return arg != arg;

+		#endif

+		}

+

+		/// Check sign.

+		/// \tparam T argument type (builtin floating-point type)

+		/// \param arg value to query

+		/// \retval true if signbit set

+		/// \retval false else

+		template<typename T> bool builtin_signbit(T arg)

+		{

+		#if HALF_ENABLE_CPP11_CMATH

+			return std::signbit(arg);

+		#else

+			return arg < T() || (arg == T() && T(1)/arg < T());

+		#endif

+		}

+

+		/// Platform-independent sign mask.

+		/// \param arg integer value in two's complement

+		/// \retval -1 if \a arg negative

+		/// \retval 0 if \a arg positive

+		inline uint32 sign_mask(uint32 arg)

+		{

+			static const int N = std::numeric_limits<uint32>::digits - 1;

+		#if HALF_TWOS_COMPLEMENT_INT

+			return static_cast<int32>(arg) >> N;

+		#else

+			return -((arg>>N)&1);

+		#endif

+		}

+

+		/// Platform-independent arithmetic right shift.

+		/// \param arg integer value in two's complement

+		/// \param i shift amount (at most 31)

+		/// \return \a arg right shifted by \a i bits with possible sign extension

+		inline uint32 arithmetic_shift(uint32 arg, int i)

+		{

+		#if HALF_TWOS_COMPLEMENT_INT

+			return static_cast<int32>(arg) >> i;

+		#else

+			return static_cast<int32>(arg)/(static_cast<int32>(1)<<i) - ((arg>>(std::numeric_limits<uint32>::digits-1))&1);

+		#endif

+		}

+

+		/// \}

+		/// \name Error handling

+		/// \{

+

+		/// Internal exception flags.

+		/// \return reference to global exception flags

+		inline int& errflags() { HALF_THREAD_LOCAL int flags = 0; return flags; }

+

+		/// Raise floating-point exception.

+		/// \param flags exceptions to raise

+		/// \param cond condition to raise exceptions for

+		inline void raise(int HALF_UNUSED_NOERR(flags), bool HALF_UNUSED_NOERR(cond) = true)

+		{

+		#if HALF_ERRHANDLING

+			if(!cond)

+				return;

+		#if HALF_ERRHANDLING_FLAGS

+			errflags() |= flags;

+		#endif

+		#if HALF_ERRHANDLING_ERRNO

+			if(flags & FE_INVALID)

+				errno = EDOM;

+			else if(flags & (FE_DIVBYZERO|FE_OVERFLOW|FE_UNDERFLOW))

+				errno = ERANGE;

+		#endif

+		#if HALF_ERRHANDLING_FENV && HALF_ENABLE_CPP11_CFENV

+			std::feraiseexcept(flags);

+		#endif

+		#ifdef HALF_ERRHANDLING_THROW_INVALID

+			if(flags & FE_INVALID)

+				throw std::domain_error(HALF_ERRHANDLING_THROW_INVALID);

+		#endif

+		#ifdef HALF_ERRHANDLING_THROW_DIVBYZERO

+			if(flags & FE_DIVBYZERO)

+				throw std::domain_error(HALF_ERRHANDLING_THROW_DIVBYZERO);

+		#endif

+		#ifdef HALF_ERRHANDLING_THROW_OVERFLOW

+			if(flags & FE_OVERFLOW)

+				throw std::overflow_error(HALF_ERRHANDLING_THROW_OVERFLOW);

+		#endif

+		#ifdef HALF_ERRHANDLING_THROW_UNDERFLOW

+			if(flags & FE_UNDERFLOW)

+				throw std::underflow_error(HALF_ERRHANDLING_THROW_UNDERFLOW);

+		#endif

+		#ifdef HALF_ERRHANDLING_THROW_INEXACT

+			if(flags & FE_INEXACT)

+				throw std::range_error(HALF_ERRHANDLING_THROW_INEXACT);

+		#endif

+		#if HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT

+			if((flags & FE_UNDERFLOW) && !(flags & FE_INEXACT))

+				raise(FE_INEXACT);

+		#endif

+		#if HALF_ERRHANDLING_OVERFLOW_TO_INEXACT

+			if((flags & FE_OVERFLOW) && !(flags & FE_INEXACT))

+				raise(FE_INEXACT);

+		#endif

+		#endif

+		}

+

+		/// Check and signal for any NaN.

+		/// \param x first half-precision value to check

+		/// \param y second half-precision value to check

+		/// \retval true if either \a x or \a y is NaN

+		/// \retval false else

+		/// \exception FE_INVALID if \a x or \a y is NaN

+		inline HALF_CONSTEXPR_NOERR bool compsignal(unsigned int x, unsigned int y)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_INVALID, (x&0x7FFF)>0x7C00 || (y&0x7FFF)>0x7C00);

+		#endif

+			return (x&0x7FFF) > 0x7C00 || (y&0x7FFF) > 0x7C00;

+		}
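
+

+		// Bit-pattern conventions used by these helpers (IEEE 754 binary16, annotation only):

+		// 0x8000 is the sign bit, 0x7C00 the exponent field (infinity when the mantissa is zero),

+		// (x&0x7FFF) > 0x7C00 marks a NaN, and 0x200 is the most significant mantissa bit, set for

+		// quiet NaNs and clear for signaling NaNs.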

+

+		/// Signal and silence signaling NaN.

+		/// \param nan half-precision NaN value

+		/// \return quiet NaN

+		/// \exception FE_INVALID if \a nan is signaling NaN

+		inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int nan)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_INVALID, !(nan&0x200));

+		#endif

+			return nan | 0x200;

+		}

+

+		/// Signal and silence signaling NaNs.

+		/// \param x first half-precision value to check

+		/// \param y second half-precision value to check

+		/// \return quiet NaN

+		/// \exception FE_INVALID if \a x or \a y is signaling NaN

+		inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int x, unsigned int y)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_INVALID, ((x&0x7FFF)>0x7C00 && !(x&0x200)) || ((y&0x7FFF)>0x7C00 && !(y&0x200)));

+		#endif

+			return ((x&0x7FFF)>0x7C00) ? (x|0x200) : (y|0x200);

+		}

+

+		/// Signal and silence signaling NaNs.

+		/// \param x first half-precision value to check

+		/// \param y second half-precision value to check

+		/// \param z third half-precision value to check

+		/// \return quiet NaN

+		/// \exception FE_INVALID if \a x, \a y or \a z is signaling NaN

+		inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int x, unsigned int y, unsigned int z)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_INVALID, ((x&0x7FFF)>0x7C00 && !(x&0x200)) || ((y&0x7FFF)>0x7C00 && !(y&0x200)) || ((z&0x7FFF)>0x7C00 && !(z&0x200)));

+		#endif

+			return ((x&0x7FFF)>0x7C00) ? (x|0x200) : ((y&0x7FFF)>0x7C00) ? (y|0x200) : (z|0x200);

+		}

+

+		/// Select value or signaling NaN.

+		/// \param x preferred half-precision value

+		/// \param y ignored half-precision value except for signaling NaN

+		/// \return \a y if signaling NaN, \a x otherwise

+		/// \exception FE_INVALID if \a y is signaling NaN

+		inline HALF_CONSTEXPR_NOERR unsigned int select(unsigned int x, unsigned int HALF_UNUSED_NOERR(y))

+		{

+		#if HALF_ERRHANDLING

+			return (((y&0x7FFF)>0x7C00) && !(y&0x200)) ? signal(y) : x;

+		#else

+			return x;

+		#endif

+		}

+

+		/// Raise domain error and return NaN.

+		/// \return quiet NaN

+		/// \exception FE_INVALID

+		inline HALF_CONSTEXPR_NOERR unsigned int invalid()

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_INVALID);

+		#endif

+			return 0x7FFF;

+		}

+

+		/// Raise pole error and return infinity.

+		/// \param sign half-precision value with sign bit only

+		/// \return half-precision infinity with sign of \a sign

+		/// \exception FE_DIVBYZERO

+		inline HALF_CONSTEXPR_NOERR unsigned int pole(unsigned int sign = 0)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_DIVBYZERO);

+		#endif

+			return sign | 0x7C00;

+		}

+

+		/// Check value for underflow.

+		/// \param arg non-zero half-precision value to check

+		/// \return \a arg

+		/// \exception FE_UNDERFLOW if arg is subnormal

+		inline HALF_CONSTEXPR_NOERR unsigned int check_underflow(unsigned int arg)

+		{

+		#if HALF_ERRHANDLING && !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT

+			raise(FE_UNDERFLOW, !(arg&0x7C00));

+		#endif

+			return arg;

+		}

+

+		/// \}

+		/// \name Conversion and rounding

+		/// \{

+

+		/// Half-precision overflow.

+		/// \tparam R rounding mode to use

+		/// \param sign half-precision value with sign bit only

+		/// \return rounded overflowing half-precision value

+		/// \exception FE_OVERFLOW

+		template<std::float_round_style R> HALF_CONSTEXPR_NOERR unsigned int overflow(unsigned int sign = 0)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_OVERFLOW);

+		#endif

+			return	(R==std::round_toward_infinity) ? (sign+0x7C00-(sign>>15)) :

+					(R==std::round_toward_neg_infinity) ? (sign+0x7BFF+(sign>>15)) :

+					(R==std::round_toward_zero) ? (sign|0x7BFF) :

+					(sign|0x7C00);

+		}

+

+		/// Half-precision underflow.

+		/// \tparam R rounding mode to use

+		/// \param sign half-precision value with sign bit only

+		/// \return rounded underflowing half-precision value

+		/// \exception FE_UNDERFLOW

+		template<std::float_round_style R> HALF_CONSTEXPR_NOERR unsigned int underflow(unsigned int sign = 0)

+		{

+		#if HALF_ERRHANDLING

+			raise(FE_UNDERFLOW);

+		#endif

+			return	(R==std::round_toward_infinity) ? (sign+1-(sign>>15)) :

+					(R==std::round_toward_neg_infinity) ? (sign+(sign>>15)) :

+					sign;

+		}

+

+		/// Round half-precision number.

+		/// \tparam R rounding mode to use

+		/// \tparam I `true` to always raise INEXACT exception, `false` to raise only for rounded results

+		/// \param value finite half-precision number to round

+		/// \param g guard bit (most significant discarded bit)

+		/// \param s sticky bit (or of all but the most significant discarded bits)

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded or \a I is `true`

+		template<std::float_round_style R,bool I> HALF_CONSTEXPR_NOERR unsigned int rounded(unsigned int value, int g, int s)

+		{

+		#if HALF_ERRHANDLING

+			value +=	(R==std::round_to_nearest) ? (g&(s|value)) :

+						(R==std::round_toward_infinity) ? (~(value>>15)&(g|s)) :

+						(R==std::round_toward_neg_infinity) ? ((value>>15)&(g|s)) : 0;

+			if((value&0x7C00) == 0x7C00)

+				raise(FE_OVERFLOW);

+			else if(value & 0x7C00)

+				raise(FE_INEXACT, I || (g|s)!=0);

+			else

+				raise(FE_UNDERFLOW, !(HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT) || I || (g|s)!=0);

+			return value;

+		#else

+			return	(R==std::round_to_nearest) ? (value+(g&(s|value))) :

+					(R==std::round_toward_infinity) ? (value+(~(value>>15)&(g|s))) :

+					(R==std::round_toward_neg_infinity) ? (value+((value>>15)&(g|s))) :

+					value;

+		#endif

+		}
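
+

+		// Worked example (annotation only): with R = std::round_to_nearest the guard bit g and

+		// sticky bit s implement round-to-nearest-even via value += g & (s|value). For instance

+		// rounded<std::round_to_nearest,false>(0x3C00, 1, 0) keeps 0x3C00 (tie, already even),

+		// rounded<std::round_to_nearest,false>(0x3C01, 1, 0) yields 0x3C02 (tie, rounds to even) and

+		// rounded<std::round_to_nearest,false>(0x3C00, 1, 1) yields 0x3C01 (above the tie, rounds up).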

+

+		/// Round half-precision number to nearest integer value.

+		/// \tparam R rounding mode to use

+		/// \tparam E `true` for round to even, `false` for round away from zero

+		/// \tparam I `true` to raise INEXACT exception (if inexact), `false` to never raise it

+		/// \param value half-precision value to round

+		/// \return half-precision bits for nearest integral value

+		/// \exception FE_INVALID for signaling NaN

+		/// \exception FE_INEXACT if value had to be rounded and \a I is `true`

+		template<std::float_round_style R,bool E,bool I> unsigned int integral(unsigned int value)

+		{

+			unsigned int abs = value & 0x7FFF;
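
+			// Threshold annotation (illustrative): 0x3C00 encodes 1.0, so smaller magnitudes round

+			// to 0 or +/-1; 0x6400 encodes 1024 = 2^10, at or above which every finite half is

+			// already integral because the 10-bit mantissa carries no fractional bits there.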

+			if(abs < 0x3C00)

+			{

+				raise(FE_INEXACT, I);

+				return ((R==std::round_to_nearest) ? (0x3C00&-static_cast<unsigned>(abs>=(0x3800+E))) :

+						(R==std::round_toward_infinity) ? (0x3C00&-(~(value>>15)&(abs!=0))) :

+						(R==std::round_toward_neg_infinity) ? (0x3C00&-static_cast<unsigned>(value>0x8000)) :

+						0) | (value&0x8000);

+			}

+			if(abs >= 0x6400)

+				return (abs>0x7C00) ? signal(value) : value;

+			unsigned int exp = 25 - (abs>>10), mask = (1<<exp) - 1;

+			raise(FE_INEXACT, I && (value&mask));

+			return ((	(R==std::round_to_nearest) ? ((1<<(exp-1))-(~(value>>exp)&E)) :

+						(R==std::round_toward_infinity) ? (mask&((value>>15)-1)) :

+						(R==std::round_toward_neg_infinity) ? (mask&-(value>>15)) :

+						0) + value) & ~mask;

+		}

+

+		/// Convert fixed point to half-precision floating-point.

+		/// \tparam R rounding mode to use

+		/// \tparam F number of fractional bits (at least 11)

+		/// \tparam S `true` for signed, `false` for unsigned

+		/// \tparam N `true` for additional normalization step, `false` if already normalized to 1.F

+		/// \tparam I `true` to always raise INEXACT exception, `false` to raise only for rounded results

+		/// \param m mantissa in Q1.F fixed point format

+		/// \param exp exponent

+		/// \param sign half-precision value with sign bit only

+		/// \param s sticky bit (or of all but the most significant already discarded bits)

+		/// \return value converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded or \a I is `true`

+		template<std::float_round_style R,unsigned int F,bool S,bool N,bool I> unsigned int fixed2half(uint32 m, int exp = 14, unsigned int sign = 0, int s = 0)

+		{

+			if(S)

+			{

+				uint32 msign = sign_mask(m);

+				m = (m^msign) - msign;

+				sign = msign & 0x8000;

+			}

+			if(N)

+				for(; m<(static_cast<uint32>(1)<<F) && exp; m<<=1,--exp) ;

+			else if(exp < 0)

+				return rounded<R,I>(sign+(m>>(F-10-exp)), (m>>(F-11-exp))&1, s|((m&((static_cast<uint32>(1)<<(F-11-exp))-1))!=0));

+			return rounded<R,I>(sign+(exp<<10)+(m>>(F-10)), (m>>(F-11))&1, s|((m&((static_cast<uint32>(1)<<(F-11))-1))!=0));

+		}

+

+		/// Convert IEEE single-precision to half-precision.

+		/// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).

+		/// \tparam R rounding mode to use

+		/// \param value single-precision value to convert

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R> unsigned int float2half_impl(float value, true_type)

+		{

+		#if HALF_ENABLE_F16C_INTRINSICS

+			return _mm_cvtsi128_si32(_mm_cvtps_ph(_mm_set_ss(value),

+				(R==std::round_to_nearest) ? _MM_FROUND_TO_NEAREST_INT :

+				(R==std::round_toward_zero) ? _MM_FROUND_TO_ZERO :

+				(R==std::round_toward_infinity) ? _MM_FROUND_TO_POS_INF :

+				(R==std::round_toward_neg_infinity) ? _MM_FROUND_TO_NEG_INF :

+				_MM_FROUND_CUR_DIRECTION));

+		#else

+			bits<float>::type fbits;

+			std::memcpy(&fbits, &value, sizeof(float));

+		#if 1
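
+			// Single-precision bit-pattern thresholds used below (annotation only):

+			// 0x7F800000 = infinity/NaN, 0x47800000 = 2^16 = 65536.0f (smallest overflowing value),

+			// 0x38800000 = 2^-14 (smallest normal half), 0x33000000 = 2^-25 (below this any nonzero

+			// value underflows to zero or the smallest subnormal, depending on the rounding mode).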

+			unsigned int sign = (fbits>>16) & 0x8000;

+			fbits &= 0x7FFFFFFF;

+			if(fbits >= 0x7F800000)

+				return sign | 0x7C00 | ((fbits>0x7F800000) ? (0x200|((fbits>>13)&0x3FF)) : 0);

+			if(fbits >= 0x47800000)

+				return overflow<R>(sign);

+			if(fbits >= 0x38800000)

+				return rounded<R,false>(sign|(((fbits>>23)-112)<<10)|((fbits>>13)&0x3FF), (fbits>>12)&1, (fbits&0xFFF)!=0);

+			if(fbits >= 0x33000000)

+			{

+				int i = 125 - (fbits>>23);

+				fbits = (fbits&0x7FFFFF) | 0x800000;

+				return rounded<R,false>(sign|(fbits>>(i+1)), (fbits>>i)&1, (fbits&((static_cast<uint32>(1)<<i)-1))!=0);

+			}

+			if(fbits != 0)

+				return underflow<R>(sign);

+			return sign;

+		#else

+			static const uint16 base_table[512] = {

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 

+				0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 

+				0x0200, 0x0400, 0x0800, 0x0C00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, 

+				0x4000, 0x4400, 0x4800, 0x4C00, 0x5000, 0x5400, 0x5800, 0x5C00, 0x6000, 0x6400, 0x6800, 0x6C00, 0x7000, 0x7400, 0x7800, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 

+				0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7C00, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 

+				0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001, 0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, 

+				0x8200, 0x8400, 0x8800, 0x8C00, 0x9000, 0x9400, 0x9800, 0x9C00, 0xA000, 0xA400, 0xA800, 0xAC00, 0xB000, 0xB400, 0xB800, 0xBC00, 

+				0xC000, 0xC400, 0xC800, 0xCC00, 0xD000, 0xD400, 0xD800, 0xDC00, 0xE000, 0xE400, 0xE800, 0xEC00, 0xF000, 0xF400, 0xF800, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 

+				0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFC00 };

+			static const unsigned char shift_table[256] = {

+				24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 

+				25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 

+				25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 

+				25, 25, 25, 25, 25, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 

+				13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 

+				24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 

+				24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 

+				24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13 };

+			int sexp = fbits >> 23, exp = sexp & 0xFF, i = shift_table[exp];

+			fbits &= 0x7FFFFF;

+			uint32 m = (fbits|((exp!=0)<<23)) & -static_cast<uint32>(exp!=0xFF);

+			return rounded<R,false>(base_table[sexp]+(fbits>>i), (m>>(i-1))&1, (((static_cast<uint32>(1)<<(i-1))-1)&m)!=0);

+		#endif

+		#endif

+		}

+

+		/// Convert IEEE double-precision to half-precision.

+		/// \tparam R rounding mode to use

+		/// \param value double-precision value to convert

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R> unsigned int float2half_impl(double value, true_type)

+		{

+		#if HALF_ENABLE_F16C_INTRINSICS

+			if(R == std::round_indeterminate)

+				return _mm_cvtsi128_si32(_mm_cvtps_ph(_mm_cvtpd_ps(_mm_set_sd(value)), _MM_FROUND_CUR_DIRECTION));

+		#endif

+			bits<double>::type dbits;

+			std::memcpy(&dbits, &value, sizeof(double));
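
+			// Double-precision high-word thresholds used below (annotation only): 0x7FF00000 marks

+			// infinity/NaN, 0x40F00000 = 2^16 (smallest overflowing value), 0x3F100000 = 2^-14

+			// (smallest normal half) and 0x3E600000 = 2^-25 (below this any nonzero value underflows).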

+			uint32 hi = dbits >> 32, lo = dbits & 0xFFFFFFFF;

+			unsigned int sign = (hi>>16) & 0x8000;

+			hi &= 0x7FFFFFFF;

+			if(hi >= 0x7FF00000)

+				return sign | 0x7C00 | ((dbits&0xFFFFFFFFFFFFF) ? (0x200|((hi>>10)&0x3FF)) : 0);

+			if(hi >= 0x40F00000)

+				return overflow<R>(sign);

+			if(hi >= 0x3F100000)

+				return rounded<R,false>(sign|(((hi>>20)-1008)<<10)|((hi>>10)&0x3FF), (hi>>9)&1, ((hi&0x1FF)|lo)!=0);

+			if(hi >= 0x3E600000)

+			{

+				int i = 1018 - (hi>>20);

+				hi = (hi&0xFFFFF) | 0x100000;

+				return rounded<R,false>(sign|(hi>>(i+1)), (hi>>i)&1, ((hi&((static_cast<uint32>(1)<<i)-1))|lo)!=0);

+			}

+			if((hi|lo) != 0)

+				return underflow<R>(sign);

+			return sign;

+		}

+

+		/// Convert non-IEEE floating-point to half-precision.

+		/// \tparam R rounding mode to use

+		/// \tparam T source type (builtin floating-point type)

+		/// \param value floating-point value to convert

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R,typename T> unsigned int float2half_impl(T value, ...)

+		{

+			unsigned int hbits = static_cast<unsigned>(builtin_signbit(value)) << 15;

+			if(value == T())

+				return hbits;

+			if(builtin_isnan(value))

+				return hbits | 0x7FFF;

+			if(builtin_isinf(value))

+				return hbits | 0x7C00;

+			int exp;

+			std::frexp(value, &exp);

+			if(exp > 16)

+				return overflow<R>(hbits);

+			if(exp < -13)

+				value = std::ldexp(value, 25);

+			else

+			{

+				value = std::ldexp(value, 12-exp);

+				hbits |= ((exp+13)<<10);

+			}

+			T ival, frac = std::modf(value, &ival);

+			int m = std::abs(static_cast<int>(ival));

+			return rounded<R,false>(hbits+(m>>1), m&1, frac!=T());

+		}

+

+		/// Convert floating-point to half-precision.

+		/// \tparam R rounding mode to use

+		/// \tparam T source type (builtin floating-point type)

+		/// \param value floating-point value to convert

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R,typename T> unsigned int float2half(T value)

+		{

+			return float2half_impl<R>(value, bool_type<std::numeric_limits<T>::is_iec559&&sizeof(typename bits<T>::type)==sizeof(T)>());

+		}

+

+		/// Convert integer to half-precision floating-point.

+		/// \tparam R rounding mode to use

+		/// \tparam T type to convert (builtin integer type)

+		/// \param value integral value to convert

+		/// \return rounded half-precision value

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R,typename T> unsigned int int2half(T value)

+		{

+			unsigned int bits = static_cast<unsigned>(value<0) << 15;

+			if(!value)

+				return bits;

+			if(bits)

+				value = -value;

+			if(value > 0xFFFF)

+				return overflow<R>(bits);

+			unsigned int m = static_cast<unsigned int>(value), exp = 24;

+			for(; m<0x400; m<<=1,--exp) ;

+			for(; m>0x7FF; m>>=1,++exp) ;

+			bits |= (exp<<10) + m;

+			return (exp>24) ? rounded<R,false>(bits, (value>>(exp-25))&1, (((1<<(exp-25))-1)&value)!=0) : bits;

+		}
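
+

+		// Worked example (annotation only): int2half<std::round_to_nearest>(3) normalizes m = 3 to

+		// 0x600 with exp = 15, giving bits (15<<10) + 0x600 = 0x4200, the half-precision encoding

+		// of 3.0; since exp <= 24 the value is exact and the rounding step is skipped.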

+

+		/// Convert half-precision to IEEE single-precision.

+		/// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).

+		/// \param value half-precision value to convert

+		/// \return single-precision value

+		inline float half2float_impl(unsigned int value, float, true_type)

+		{

+		#if HALF_ENABLE_F16C_INTRINSICS

+			return _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(value)));

+		#else

+		#if 0

+			bits<float>::type fbits = static_cast<bits<float>::type>(value&0x8000) << 16;

+			int abs = value & 0x7FFF;

+			if(abs)

+			{

+				fbits |= 0x38000000 << static_cast<unsigned>(abs>=0x7C00);

+				for(; abs<0x400; abs<<=1,fbits-=0x800000) ;

+				fbits += static_cast<bits<float>::type>(abs) << 13;

+			}

+		#else
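
+			// Table-driven variant (annotation only): the top six bits of the half value (sign and

+			// exponent) index exponent_table and offset_table, and the low ten mantissa bits plus

+			// that offset index mantissa_table; the sum of the two looked-up entries is the

+			// single-precision bit pattern. The offset of 0 or 1024 selects the subnormal or normal

+			// half of mantissa_table.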

+			static const bits<float>::type mantissa_table[2048] = {

+				0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, 

+				0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000, 

+				0x36000000, 0x36040000, 0x36080000, 0x360C0000, 0x36100000, 0x36140000, 0x36180000, 0x361C0000, 0x36200000, 0x36240000, 0x36280000, 0x362C0000, 0x36300000, 0x36340000, 0x36380000, 0x363C0000, 

+				0x36400000, 0x36440000, 0x36480000, 0x364C0000, 0x36500000, 0x36540000, 0x36580000, 0x365C0000, 0x36600000, 0x36640000, 0x36680000, 0x366C0000, 0x36700000, 0x36740000, 0x36780000, 0x367C0000, 

+				0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368A0000, 0x368C0000, 0x368E0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369A0000, 0x369C0000, 0x369E0000, 

+				0x36A00000, 0x36A20000, 0x36A40000, 0x36A60000, 0x36A80000, 0x36AA0000, 0x36AC0000, 0x36AE0000, 0x36B00000, 0x36B20000, 0x36B40000, 0x36B60000, 0x36B80000, 0x36BA0000, 0x36BC0000, 0x36BE0000, 

+				0x36C00000, 0x36C20000, 0x36C40000, 0x36C60000, 0x36C80000, 0x36CA0000, 0x36CC0000, 0x36CE0000, 0x36D00000, 0x36D20000, 0x36D40000, 0x36D60000, 0x36D80000, 0x36DA0000, 0x36DC0000, 0x36DE0000, 

+				0x36E00000, 0x36E20000, 0x36E40000, 0x36E60000, 0x36E80000, 0x36EA0000, 0x36EC0000, 0x36EE0000, 0x36F00000, 0x36F20000, 0x36F40000, 0x36F60000, 0x36F80000, 0x36FA0000, 0x36FC0000, 0x36FE0000, 

+				0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370A0000, 0x370B0000, 0x370C0000, 0x370D0000, 0x370E0000, 0x370F0000, 

+				0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371A0000, 0x371B0000, 0x371C0000, 0x371D0000, 0x371E0000, 0x371F0000, 

+				0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, 0x37280000, 0x37290000, 0x372A0000, 0x372B0000, 0x372C0000, 0x372D0000, 0x372E0000, 0x372F0000, 

+				0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000, 0x373A0000, 0x373B0000, 0x373C0000, 0x373D0000, 0x373E0000, 0x373F0000, 

+				0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374A0000, 0x374B0000, 0x374C0000, 0x374D0000, 0x374E0000, 0x374F0000, 

+				0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375A0000, 0x375B0000, 0x375C0000, 0x375D0000, 0x375E0000, 0x375F0000, 

+				0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000, 0x376A0000, 0x376B0000, 0x376C0000, 0x376D0000, 0x376E0000, 0x376F0000, 

+				0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377A0000, 0x377B0000, 0x377C0000, 0x377D0000, 0x377E0000, 0x377F0000, 

+				0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, 

+				0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378A0000, 0x378A8000, 0x378B0000, 0x378B8000, 0x378C0000, 0x378C8000, 0x378D0000, 0x378D8000, 0x378E0000, 0x378E8000, 0x378F0000, 0x378F8000, 

+				0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000, 

+				0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379A0000, 0x379A8000, 0x379B0000, 0x379B8000, 0x379C0000, 0x379C8000, 0x379D0000, 0x379D8000, 0x379E0000, 0x379E8000, 0x379F0000, 0x379F8000, 

+				0x37A00000, 0x37A08000, 0x37A10000, 0x37A18000, 0x37A20000, 0x37A28000, 0x37A30000, 0x37A38000, 0x37A40000, 0x37A48000, 0x37A50000, 0x37A58000, 0x37A60000, 0x37A68000, 0x37A70000, 0x37A78000, 

+				0x37A80000, 0x37A88000, 0x37A90000, 0x37A98000, 0x37AA0000, 0x37AA8000, 0x37AB0000, 0x37AB8000, 0x37AC0000, 0x37AC8000, 0x37AD0000, 0x37AD8000, 0x37AE0000, 0x37AE8000, 0x37AF0000, 0x37AF8000, 

+				0x37B00000, 0x37B08000, 0x37B10000, 0x37B18000, 0x37B20000, 0x37B28000, 0x37B30000, 0x37B38000, 0x37B40000, 0x37B48000, 0x37B50000, 0x37B58000, 0x37B60000, 0x37B68000, 0x37B70000, 0x37B78000, 

+				0x37B80000, 0x37B88000, 0x37B90000, 0x37B98000, 0x37BA0000, 0x37BA8000, 0x37BB0000, 0x37BB8000, 0x37BC0000, 0x37BC8000, 0x37BD0000, 0x37BD8000, 0x37BE0000, 0x37BE8000, 0x37BF0000, 0x37BF8000, 

+				0x37C00000, 0x37C08000, 0x37C10000, 0x37C18000, 0x37C20000, 0x37C28000, 0x37C30000, 0x37C38000, 0x37C40000, 0x37C48000, 0x37C50000, 0x37C58000, 0x37C60000, 0x37C68000, 0x37C70000, 0x37C78000, 

+				0x37C80000, 0x37C88000, 0x37C90000, 0x37C98000, 0x37CA0000, 0x37CA8000, 0x37CB0000, 0x37CB8000, 0x37CC0000, 0x37CC8000, 0x37CD0000, 0x37CD8000, 0x37CE0000, 0x37CE8000, 0x37CF0000, 0x37CF8000, 

+				0x37D00000, 0x37D08000, 0x37D10000, 0x37D18000, 0x37D20000, 0x37D28000, 0x37D30000, 0x37D38000, 0x37D40000, 0x37D48000, 0x37D50000, 0x37D58000, 0x37D60000, 0x37D68000, 0x37D70000, 0x37D78000, 

+				0x37D80000, 0x37D88000, 0x37D90000, 0x37D98000, 0x37DA0000, 0x37DA8000, 0x37DB0000, 0x37DB8000, 0x37DC0000, 0x37DC8000, 0x37DD0000, 0x37DD8000, 0x37DE0000, 0x37DE8000, 0x37DF0000, 0x37DF8000, 

+				0x37E00000, 0x37E08000, 0x37E10000, 0x37E18000, 0x37E20000, 0x37E28000, 0x37E30000, 0x37E38000, 0x37E40000, 0x37E48000, 0x37E50000, 0x37E58000, 0x37E60000, 0x37E68000, 0x37E70000, 0x37E78000, 

+				0x37E80000, 0x37E88000, 0x37E90000, 0x37E98000, 0x37EA0000, 0x37EA8000, 0x37EB0000, 0x37EB8000, 0x37EC0000, 0x37EC8000, 0x37ED0000, 0x37ED8000, 0x37EE0000, 0x37EE8000, 0x37EF0000, 0x37EF8000, 

+				0x37F00000, 0x37F08000, 0x37F10000, 0x37F18000, 0x37F20000, 0x37F28000, 0x37F30000, 0x37F38000, 0x37F40000, 0x37F48000, 0x37F50000, 0x37F58000, 0x37F60000, 0x37F68000, 0x37F70000, 0x37F78000, 

+				0x37F80000, 0x37F88000, 0x37F90000, 0x37F98000, 0x37FA0000, 0x37FA8000, 0x37FB0000, 0x37FB8000, 0x37FC0000, 0x37FC8000, 0x37FD0000, 0x37FD8000, 0x37FE0000, 0x37FE8000, 0x37FF0000, 0x37FF8000, 

+				0x38000000, 0x38004000, 0x38008000, 0x3800C000, 0x38010000, 0x38014000, 0x38018000, 0x3801C000, 0x38020000, 0x38024000, 0x38028000, 0x3802C000, 0x38030000, 0x38034000, 0x38038000, 0x3803C000, 

+				0x38040000, 0x38044000, 0x38048000, 0x3804C000, 0x38050000, 0x38054000, 0x38058000, 0x3805C000, 0x38060000, 0x38064000, 0x38068000, 0x3806C000, 0x38070000, 0x38074000, 0x38078000, 0x3807C000, 

+				0x38080000, 0x38084000, 0x38088000, 0x3808C000, 0x38090000, 0x38094000, 0x38098000, 0x3809C000, 0x380A0000, 0x380A4000, 0x380A8000, 0x380AC000, 0x380B0000, 0x380B4000, 0x380B8000, 0x380BC000, 

+				0x380C0000, 0x380C4000, 0x380C8000, 0x380CC000, 0x380D0000, 0x380D4000, 0x380D8000, 0x380DC000, 0x380E0000, 0x380E4000, 0x380E8000, 0x380EC000, 0x380F0000, 0x380F4000, 0x380F8000, 0x380FC000, 

+				0x38100000, 0x38104000, 0x38108000, 0x3810C000, 0x38110000, 0x38114000, 0x38118000, 0x3811C000, 0x38120000, 0x38124000, 0x38128000, 0x3812C000, 0x38130000, 0x38134000, 0x38138000, 0x3813C000, 

+				0x38140000, 0x38144000, 0x38148000, 0x3814C000, 0x38150000, 0x38154000, 0x38158000, 0x3815C000, 0x38160000, 0x38164000, 0x38168000, 0x3816C000, 0x38170000, 0x38174000, 0x38178000, 0x3817C000, 

+				0x38180000, 0x38184000, 0x38188000, 0x3818C000, 0x38190000, 0x38194000, 0x38198000, 0x3819C000, 0x381A0000, 0x381A4000, 0x381A8000, 0x381AC000, 0x381B0000, 0x381B4000, 0x381B8000, 0x381BC000, 

+				0x381C0000, 0x381C4000, 0x381C8000, 0x381CC000, 0x381D0000, 0x381D4000, 0x381D8000, 0x381DC000, 0x381E0000, 0x381E4000, 0x381E8000, 0x381EC000, 0x381F0000, 0x381F4000, 0x381F8000, 0x381FC000, 

+				0x38200000, 0x38204000, 0x38208000, 0x3820C000, 0x38210000, 0x38214000, 0x38218000, 0x3821C000, 0x38220000, 0x38224000, 0x38228000, 0x3822C000, 0x38230000, 0x38234000, 0x38238000, 0x3823C000, 

+				0x38240000, 0x38244000, 0x38248000, 0x3824C000, 0x38250000, 0x38254000, 0x38258000, 0x3825C000, 0x38260000, 0x38264000, 0x38268000, 0x3826C000, 0x38270000, 0x38274000, 0x38278000, 0x3827C000, 

+				0x38280000, 0x38284000, 0x38288000, 0x3828C000, 0x38290000, 0x38294000, 0x38298000, 0x3829C000, 0x382A0000, 0x382A4000, 0x382A8000, 0x382AC000, 0x382B0000, 0x382B4000, 0x382B8000, 0x382BC000, 

+				0x382C0000, 0x382C4000, 0x382C8000, 0x382CC000, 0x382D0000, 0x382D4000, 0x382D8000, 0x382DC000, 0x382E0000, 0x382E4000, 0x382E8000, 0x382EC000, 0x382F0000, 0x382F4000, 0x382F8000, 0x382FC000, 

+				0x38300000, 0x38304000, 0x38308000, 0x3830C000, 0x38310000, 0x38314000, 0x38318000, 0x3831C000, 0x38320000, 0x38324000, 0x38328000, 0x3832C000, 0x38330000, 0x38334000, 0x38338000, 0x3833C000, 

+				0x38340000, 0x38344000, 0x38348000, 0x3834C000, 0x38350000, 0x38354000, 0x38358000, 0x3835C000, 0x38360000, 0x38364000, 0x38368000, 0x3836C000, 0x38370000, 0x38374000, 0x38378000, 0x3837C000, 

+				0x38380000, 0x38384000, 0x38388000, 0x3838C000, 0x38390000, 0x38394000, 0x38398000, 0x3839C000, 0x383A0000, 0x383A4000, 0x383A8000, 0x383AC000, 0x383B0000, 0x383B4000, 0x383B8000, 0x383BC000, 

+				0x383C0000, 0x383C4000, 0x383C8000, 0x383CC000, 0x383D0000, 0x383D4000, 0x383D8000, 0x383DC000, 0x383E0000, 0x383E4000, 0x383E8000, 0x383EC000, 0x383F0000, 0x383F4000, 0x383F8000, 0x383FC000, 

+				0x38400000, 0x38404000, 0x38408000, 0x3840C000, 0x38410000, 0x38414000, 0x38418000, 0x3841C000, 0x38420000, 0x38424000, 0x38428000, 0x3842C000, 0x38430000, 0x38434000, 0x38438000, 0x3843C000, 

+				0x38440000, 0x38444000, 0x38448000, 0x3844C000, 0x38450000, 0x38454000, 0x38458000, 0x3845C000, 0x38460000, 0x38464000, 0x38468000, 0x3846C000, 0x38470000, 0x38474000, 0x38478000, 0x3847C000, 

+				0x38480000, 0x38484000, 0x38488000, 0x3848C000, 0x38490000, 0x38494000, 0x38498000, 0x3849C000, 0x384A0000, 0x384A4000, 0x384A8000, 0x384AC000, 0x384B0000, 0x384B4000, 0x384B8000, 0x384BC000, 

+				0x384C0000, 0x384C4000, 0x384C8000, 0x384CC000, 0x384D0000, 0x384D4000, 0x384D8000, 0x384DC000, 0x384E0000, 0x384E4000, 0x384E8000, 0x384EC000, 0x384F0000, 0x384F4000, 0x384F8000, 0x384FC000, 

+				0x38500000, 0x38504000, 0x38508000, 0x3850C000, 0x38510000, 0x38514000, 0x38518000, 0x3851C000, 0x38520000, 0x38524000, 0x38528000, 0x3852C000, 0x38530000, 0x38534000, 0x38538000, 0x3853C000, 

+				0x38540000, 0x38544000, 0x38548000, 0x3854C000, 0x38550000, 0x38554000, 0x38558000, 0x3855C000, 0x38560000, 0x38564000, 0x38568000, 0x3856C000, 0x38570000, 0x38574000, 0x38578000, 0x3857C000, 

+				0x38580000, 0x38584000, 0x38588000, 0x3858C000, 0x38590000, 0x38594000, 0x38598000, 0x3859C000, 0x385A0000, 0x385A4000, 0x385A8000, 0x385AC000, 0x385B0000, 0x385B4000, 0x385B8000, 0x385BC000, 

+				0x385C0000, 0x385C4000, 0x385C8000, 0x385CC000, 0x385D0000, 0x385D4000, 0x385D8000, 0x385DC000, 0x385E0000, 0x385E4000, 0x385E8000, 0x385EC000, 0x385F0000, 0x385F4000, 0x385F8000, 0x385FC000, 

+				0x38600000, 0x38604000, 0x38608000, 0x3860C000, 0x38610000, 0x38614000, 0x38618000, 0x3861C000, 0x38620000, 0x38624000, 0x38628000, 0x3862C000, 0x38630000, 0x38634000, 0x38638000, 0x3863C000, 

+				0x38640000, 0x38644000, 0x38648000, 0x3864C000, 0x38650000, 0x38654000, 0x38658000, 0x3865C000, 0x38660000, 0x38664000, 0x38668000, 0x3866C000, 0x38670000, 0x38674000, 0x38678000, 0x3867C000, 

+				0x38680000, 0x38684000, 0x38688000, 0x3868C000, 0x38690000, 0x38694000, 0x38698000, 0x3869C000, 0x386A0000, 0x386A4000, 0x386A8000, 0x386AC000, 0x386B0000, 0x386B4000, 0x386B8000, 0x386BC000, 

+				0x386C0000, 0x386C4000, 0x386C8000, 0x386CC000, 0x386D0000, 0x386D4000, 0x386D8000, 0x386DC000, 0x386E0000, 0x386E4000, 0x386E8000, 0x386EC000, 0x386F0000, 0x386F4000, 0x386F8000, 0x386FC000, 

+				0x38700000, 0x38704000, 0x38708000, 0x3870C000, 0x38710000, 0x38714000, 0x38718000, 0x3871C000, 0x38720000, 0x38724000, 0x38728000, 0x3872C000, 0x38730000, 0x38734000, 0x38738000, 0x3873C000, 

+				0x38740000, 0x38744000, 0x38748000, 0x3874C000, 0x38750000, 0x38754000, 0x38758000, 0x3875C000, 0x38760000, 0x38764000, 0x38768000, 0x3876C000, 0x38770000, 0x38774000, 0x38778000, 0x3877C000, 

+				0x38780000, 0x38784000, 0x38788000, 0x3878C000, 0x38790000, 0x38794000, 0x38798000, 0x3879C000, 0x387A0000, 0x387A4000, 0x387A8000, 0x387AC000, 0x387B0000, 0x387B4000, 0x387B8000, 0x387BC000, 

+				0x387C0000, 0x387C4000, 0x387C8000, 0x387CC000, 0x387D0000, 0x387D4000, 0x387D8000, 0x387DC000, 0x387E0000, 0x387E4000, 0x387E8000, 0x387EC000, 0x387F0000, 0x387F4000, 0x387F8000, 0x387FC000, 

+				0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800A000, 0x3800C000, 0x3800E000, 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801A000, 0x3801C000, 0x3801E000, 

+				0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802A000, 0x3802C000, 0x3802E000, 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803A000, 0x3803C000, 0x3803E000, 

+				0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804A000, 0x3804C000, 0x3804E000, 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805A000, 0x3805C000, 0x3805E000, 

+				0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806A000, 0x3806C000, 0x3806E000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807A000, 0x3807C000, 0x3807E000, 

+				0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808A000, 0x3808C000, 0x3808E000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809A000, 0x3809C000, 0x3809E000, 

+				0x380A0000, 0x380A2000, 0x380A4000, 0x380A6000, 0x380A8000, 0x380AA000, 0x380AC000, 0x380AE000, 0x380B0000, 0x380B2000, 0x380B4000, 0x380B6000, 0x380B8000, 0x380BA000, 0x380BC000, 0x380BE000, 

+				0x380C0000, 0x380C2000, 0x380C4000, 0x380C6000, 0x380C8000, 0x380CA000, 0x380CC000, 0x380CE000, 0x380D0000, 0x380D2000, 0x380D4000, 0x380D6000, 0x380D8000, 0x380DA000, 0x380DC000, 0x380DE000, 

+				0x380E0000, 0x380E2000, 0x380E4000, 0x380E6000, 0x380E8000, 0x380EA000, 0x380EC000, 0x380EE000, 0x380F0000, 0x380F2000, 0x380F4000, 0x380F6000, 0x380F8000, 0x380FA000, 0x380FC000, 0x380FE000, 

+				0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810A000, 0x3810C000, 0x3810E000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811A000, 0x3811C000, 0x3811E000, 

+				0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812A000, 0x3812C000, 0x3812E000, 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813A000, 0x3813C000, 0x3813E000, 

+				0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814A000, 0x3814C000, 0x3814E000, 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815A000, 0x3815C000, 0x3815E000, 

+				0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816A000, 0x3816C000, 0x3816E000, 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817A000, 0x3817C000, 0x3817E000, 

+				0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818A000, 0x3818C000, 0x3818E000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819A000, 0x3819C000, 0x3819E000, 

+				0x381A0000, 0x381A2000, 0x381A4000, 0x381A6000, 0x381A8000, 0x381AA000, 0x381AC000, 0x381AE000, 0x381B0000, 0x381B2000, 0x381B4000, 0x381B6000, 0x381B8000, 0x381BA000, 0x381BC000, 0x381BE000, 

+				0x381C0000, 0x381C2000, 0x381C4000, 0x381C6000, 0x381C8000, 0x381CA000, 0x381CC000, 0x381CE000, 0x381D0000, 0x381D2000, 0x381D4000, 0x381D6000, 0x381D8000, 0x381DA000, 0x381DC000, 0x381DE000, 

+				0x381E0000, 0x381E2000, 0x381E4000, 0x381E6000, 0x381E8000, 0x381EA000, 0x381EC000, 0x381EE000, 0x381F0000, 0x381F2000, 0x381F4000, 0x381F6000, 0x381F8000, 0x381FA000, 0x381FC000, 0x381FE000, 

+				0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820A000, 0x3820C000, 0x3820E000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821A000, 0x3821C000, 0x3821E000, 

+				0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822A000, 0x3822C000, 0x3822E000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823A000, 0x3823C000, 0x3823E000, 

+				0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824A000, 0x3824C000, 0x3824E000, 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825A000, 0x3825C000, 0x3825E000, 

+				0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826A000, 0x3826C000, 0x3826E000, 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827A000, 0x3827C000, 0x3827E000, 

+				0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828A000, 0x3828C000, 0x3828E000, 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829A000, 0x3829C000, 0x3829E000, 

+				0x382A0000, 0x382A2000, 0x382A4000, 0x382A6000, 0x382A8000, 0x382AA000, 0x382AC000, 0x382AE000, 0x382B0000, 0x382B2000, 0x382B4000, 0x382B6000, 0x382B8000, 0x382BA000, 0x382BC000, 0x382BE000, 

+				0x382C0000, 0x382C2000, 0x382C4000, 0x382C6000, 0x382C8000, 0x382CA000, 0x382CC000, 0x382CE000, 0x382D0000, 0x382D2000, 0x382D4000, 0x382D6000, 0x382D8000, 0x382DA000, 0x382DC000, 0x382DE000, 

+				0x382E0000, 0x382E2000, 0x382E4000, 0x382E6000, 0x382E8000, 0x382EA000, 0x382EC000, 0x382EE000, 0x382F0000, 0x382F2000, 0x382F4000, 0x382F6000, 0x382F8000, 0x382FA000, 0x382FC000, 0x382FE000, 

+				0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830A000, 0x3830C000, 0x3830E000, 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831A000, 0x3831C000, 0x3831E000, 

+				0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832A000, 0x3832C000, 0x3832E000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833A000, 0x3833C000, 0x3833E000, 

+				0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834A000, 0x3834C000, 0x3834E000, 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835A000, 0x3835C000, 0x3835E000, 

+				0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836A000, 0x3836C000, 0x3836E000, 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837A000, 0x3837C000, 0x3837E000, 

+				0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838A000, 0x3838C000, 0x3838E000, 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839A000, 0x3839C000, 0x3839E000, 

+				0x383A0000, 0x383A2000, 0x383A4000, 0x383A6000, 0x383A8000, 0x383AA000, 0x383AC000, 0x383AE000, 0x383B0000, 0x383B2000, 0x383B4000, 0x383B6000, 0x383B8000, 0x383BA000, 0x383BC000, 0x383BE000, 

+				0x383C0000, 0x383C2000, 0x383C4000, 0x383C6000, 0x383C8000, 0x383CA000, 0x383CC000, 0x383CE000, 0x383D0000, 0x383D2000, 0x383D4000, 0x383D6000, 0x383D8000, 0x383DA000, 0x383DC000, 0x383DE000, 

+				0x383E0000, 0x383E2000, 0x383E4000, 0x383E6000, 0x383E8000, 0x383EA000, 0x383EC000, 0x383EE000, 0x383F0000, 0x383F2000, 0x383F4000, 0x383F6000, 0x383F8000, 0x383FA000, 0x383FC000, 0x383FE000, 

+				0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840A000, 0x3840C000, 0x3840E000, 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841A000, 0x3841C000, 0x3841E000, 

+				0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842A000, 0x3842C000, 0x3842E000, 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843A000, 0x3843C000, 0x3843E000, 

+				0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844A000, 0x3844C000, 0x3844E000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845A000, 0x3845C000, 0x3845E000, 

+				0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846A000, 0x3846C000, 0x3846E000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847A000, 0x3847C000, 0x3847E000, 

+				0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848A000, 0x3848C000, 0x3848E000, 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849A000, 0x3849C000, 0x3849E000, 

+				0x384A0000, 0x384A2000, 0x384A4000, 0x384A6000, 0x384A8000, 0x384AA000, 0x384AC000, 0x384AE000, 0x384B0000, 0x384B2000, 0x384B4000, 0x384B6000, 0x384B8000, 0x384BA000, 0x384BC000, 0x384BE000, 

+				0x384C0000, 0x384C2000, 0x384C4000, 0x384C6000, 0x384C8000, 0x384CA000, 0x384CC000, 0x384CE000, 0x384D0000, 0x384D2000, 0x384D4000, 0x384D6000, 0x384D8000, 0x384DA000, 0x384DC000, 0x384DE000, 

+				0x384E0000, 0x384E2000, 0x384E4000, 0x384E6000, 0x384E8000, 0x384EA000, 0x384EC000, 0x384EE000, 0x384F0000, 0x384F2000, 0x384F4000, 0x384F6000, 0x384F8000, 0x384FA000, 0x384FC000, 0x384FE000, 

+				0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850A000, 0x3850C000, 0x3850E000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851A000, 0x3851C000, 0x3851E000, 

+				0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852A000, 0x3852C000, 0x3852E000, 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853A000, 0x3853C000, 0x3853E000, 

+				0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854A000, 0x3854C000, 0x3854E000, 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855A000, 0x3855C000, 0x3855E000, 

+				0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856A000, 0x3856C000, 0x3856E000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857A000, 0x3857C000, 0x3857E000, 

+				0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858A000, 0x3858C000, 0x3858E000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859A000, 0x3859C000, 0x3859E000, 

+				0x385A0000, 0x385A2000, 0x385A4000, 0x385A6000, 0x385A8000, 0x385AA000, 0x385AC000, 0x385AE000, 0x385B0000, 0x385B2000, 0x385B4000, 0x385B6000, 0x385B8000, 0x385BA000, 0x385BC000, 0x385BE000, 

+				0x385C0000, 0x385C2000, 0x385C4000, 0x385C6000, 0x385C8000, 0x385CA000, 0x385CC000, 0x385CE000, 0x385D0000, 0x385D2000, 0x385D4000, 0x385D6000, 0x385D8000, 0x385DA000, 0x385DC000, 0x385DE000, 

+				0x385E0000, 0x385E2000, 0x385E4000, 0x385E6000, 0x385E8000, 0x385EA000, 0x385EC000, 0x385EE000, 0x385F0000, 0x385F2000, 0x385F4000, 0x385F6000, 0x385F8000, 0x385FA000, 0x385FC000, 0x385FE000, 

+				0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860A000, 0x3860C000, 0x3860E000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861A000, 0x3861C000, 0x3861E000, 

+				0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862A000, 0x3862C000, 0x3862E000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863A000, 0x3863C000, 0x3863E000, 

+				0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864A000, 0x3864C000, 0x3864E000, 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865A000, 0x3865C000, 0x3865E000, 

+				0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866A000, 0x3866C000, 0x3866E000, 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867A000, 0x3867C000, 0x3867E000, 

+				0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868A000, 0x3868C000, 0x3868E000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869A000, 0x3869C000, 0x3869E000, 

+				0x386A0000, 0x386A2000, 0x386A4000, 0x386A6000, 0x386A8000, 0x386AA000, 0x386AC000, 0x386AE000, 0x386B0000, 0x386B2000, 0x386B4000, 0x386B6000, 0x386B8000, 0x386BA000, 0x386BC000, 0x386BE000, 

+				0x386C0000, 0x386C2000, 0x386C4000, 0x386C6000, 0x386C8000, 0x386CA000, 0x386CC000, 0x386CE000, 0x386D0000, 0x386D2000, 0x386D4000, 0x386D6000, 0x386D8000, 0x386DA000, 0x386DC000, 0x386DE000, 

+				0x386E0000, 0x386E2000, 0x386E4000, 0x386E6000, 0x386E8000, 0x386EA000, 0x386EC000, 0x386EE000, 0x386F0000, 0x386F2000, 0x386F4000, 0x386F6000, 0x386F8000, 0x386FA000, 0x386FC000, 0x386FE000, 

+				0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870A000, 0x3870C000, 0x3870E000, 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871A000, 0x3871C000, 0x3871E000, 

+				0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872A000, 0x3872C000, 0x3872E000, 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873A000, 0x3873C000, 0x3873E000, 

+				0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874A000, 0x3874C000, 0x3874E000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875A000, 0x3875C000, 0x3875E000, 

+				0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876A000, 0x3876C000, 0x3876E000, 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877A000, 0x3877C000, 0x3877E000, 

+				0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878A000, 0x3878C000, 0x3878E000, 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879A000, 0x3879C000, 0x3879E000, 

+				0x387A0000, 0x387A2000, 0x387A4000, 0x387A6000, 0x387A8000, 0x387AA000, 0x387AC000, 0x387AE000, 0x387B0000, 0x387B2000, 0x387B4000, 0x387B6000, 0x387B8000, 0x387BA000, 0x387BC000, 0x387BE000, 

+				0x387C0000, 0x387C2000, 0x387C4000, 0x387C6000, 0x387C8000, 0x387CA000, 0x387CC000, 0x387CE000, 0x387D0000, 0x387D2000, 0x387D4000, 0x387D6000, 0x387D8000, 0x387DA000, 0x387DC000, 0x387DE000, 

+				0x387E0000, 0x387E2000, 0x387E4000, 0x387E6000, 0x387E8000, 0x387EA000, 0x387EC000, 0x387EE000, 0x387F0000, 0x387F2000, 0x387F4000, 0x387F6000, 0x387F8000, 0x387FA000, 0x387FC000, 0x387FE000 };

+			static const bits<float>::type exponent_table[64] = {

+				0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000, 

+				0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0A000000, 0x0A800000, 0x0B000000, 0x0B800000, 0x0C000000, 0x0C800000, 0x0D000000, 0x0D800000, 0x0E000000, 0x0E800000, 0x0F000000, 0x47800000, 

+				0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, 

+				0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8A000000, 0x8A800000, 0x8B000000, 0x8B800000, 0x8C000000, 0x8C800000, 0x8D000000, 0x8D800000, 0x8E000000, 0x8E800000, 0x8F000000, 0xC7800000 };

+			static const unsigned short offset_table[64] = {

+				0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 

+				0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 };

+			bits<float>::type fbits = mantissa_table[offset_table[value>>10]+(value&0x3FF)] + exponent_table[value>>10];

+		#endif

+			float out;

+			std::memcpy(&out, &fbits, sizeof(float));

+			return out;

+		#endif

+		}

+

+		/// Convert half-precision to IEEE double-precision.

+		/// \param value half-precision value to convert

+		/// \return double-precision value

+		inline double half2float_impl(unsigned int value, double, true_type)

+		{

+		#if HALF_ENABLE_F16C_INTRINSICS

+			return _mm_cvtsd_f64(_mm_cvtps_pd(_mm_cvtph_ps(_mm_cvtsi32_si128(value))));

+		#else

+			uint32 hi = static_cast<uint32>(value&0x8000) << 16;

+			unsigned int abs = value & 0x7FFF;

+			if(abs)

+			{

+				hi |= 0x3F000000 << static_cast<unsigned>(abs>=0x7C00);

+				for(; abs<0x400; abs<<=1,hi-=0x100000) ;

+				hi += static_cast<uint32>(abs) << 10;

+			}

+			bits<double>::type dbits = static_cast<bits<double>::type>(hi) << 32;

+			double out;

+			std::memcpy(&out, &dbits, sizeof(double));

+			return out;

+		#endif

+		}

+

+		/// Convert half-precision to non-IEEE floating-point.

+		/// \tparam T type to convert to (builtin floating-point type)

+		/// \param value half-precision value to convert

+		/// \return floating-point value

+		template<typename T> T half2float_impl(unsigned int value, T, ...)

+		{

+			T out;

+			unsigned int abs = value & 0x7FFF;

+			if(abs > 0x7C00)

+				out = (std::numeric_limits<T>::has_signaling_NaN && !(abs&0x200)) ? std::numeric_limits<T>::signaling_NaN() :

+					std::numeric_limits<T>::has_quiet_NaN ? std::numeric_limits<T>::quiet_NaN() : T();

+			else if(abs == 0x7C00)

+				out = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();

+			else if(abs > 0x3FF)

+				out = std::ldexp(static_cast<T>((abs&0x3FF)|0x400), (abs>>10)-25);

+			else

+				out = std::ldexp(static_cast<T>(abs), -24);

+			return (value&0x8000) ? -out : out;

+		}

+

+		/// Convert half-precision to floating-point.

+		/// \tparam T type to convert to (builtin floating-point type)

+		/// \param value half-precision value to convert

+		/// \return floating-point value

+		template<typename T> T half2float(unsigned int value)

+		{

+			return half2float_impl(value, T(), bool_type<std::numeric_limits<T>::is_iec559&&sizeof(typename bits<T>::type)==sizeof(T)>());

+		}

+

+		/// Convert half-precision floating-point to integer.

+		/// \tparam R rounding mode to use

+		/// \tparam E `true` for round to even, `false` for round away from zero

+		/// \tparam I `true` to raise INEXACT exception (if inexact), `false` to never raise it

+		/// \tparam T type to convert to (builtin integer type with at least 16 bits of precision, excluding any implicit sign bits)

+		/// \param value half-precision value to convert

+		/// \return rounded integer value

+		/// \exception FE_INVALID if value is not representable in type \a T

+		/// \exception FE_INEXACT if value had to be rounded and \a I is `true`

+		template<std::float_round_style R,bool E,bool I,typename T> T half2int(unsigned int value)

+		{

+			unsigned int abs = value & 0x7FFF;

+			if(abs >= 0x7C00)

+			{

+				raise(FE_INVALID);

+				return (value&0x8000) ? std::numeric_limits<T>::min() : std::numeric_limits<T>::max();

+			}

+			if(abs < 0x3800)

+			{

+				raise(FE_INEXACT, I);

+				return	(R==std::round_toward_infinity) ? T(~(value>>15)&(abs!=0)) :

+						(R==std::round_toward_neg_infinity) ? -T(value>0x8000) :

+						T();

+			}

+			int exp = 25 - (abs>>10);

+			unsigned int m = (value&0x3FF) | 0x400;

+			int32 i = static_cast<int32>((exp<=0) ? (m<<-exp) : ((m+(

+				(R==std::round_to_nearest) ? ((1<<(exp-1))-(~(m>>exp)&E)) :

+				(R==std::round_toward_infinity) ? (((1<<exp)-1)&((value>>15)-1)) :

+				(R==std::round_toward_neg_infinity) ? (((1<<exp)-1)&-(value>>15)) : 0))>>exp));

+			if((!std::numeric_limits<T>::is_signed && (value&0x8000)) || (std::numeric_limits<T>::digits<16 &&

+				((value&0x8000) ? (-i<std::numeric_limits<T>::min()) : (i>std::numeric_limits<T>::max()))))

+				raise(FE_INVALID);

+			else if(I && exp > 0 && (m&((1<<exp)-1)))

+				raise(FE_INEXACT);

+			return static_cast<T>((value&0x8000) ? -i : i);

+		}
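+
+		// Illustrative sketch (not part of the upstream header): with R == std::round_to_nearest
+		// and E == true, ties round to even, e.g.
+		//   half2int<std::round_to_nearest,true,true,int>(0x4100)   // 2.5 -> 2
+		//   half2int<std::round_to_nearest,true,true,int>(0x4300)   // 3.5 -> 4
+		// while any |value| < 0.5 (abs < 0x3800) collapses to 0 under round-to-nearest.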

+

+		/// \}

+		/// \name Mathematics

+		/// \{

+

+		/// Upper part of 64-bit multiplication.

+		/// \tparam R rounding mode to use

+		/// \param x first factor

+		/// \param y second factor

+		/// \return upper 32 bit of \a x * \a y

+		template<std::float_round_style R> uint32 mulhi(uint32 x, uint32 y)

+		{

+			uint32 xy = (x>>16) * (y&0xFFFF), yx = (x&0xFFFF) * (y>>16), c = (xy&0xFFFF) + (yx&0xFFFF) + (((x&0xFFFF)*(y&0xFFFF))>>16);

+			return (x>>16)*(y>>16) + (xy>>16) + (yx>>16) + (c>>16) +

+				((R==std::round_to_nearest) ? ((c>>15)&1) : (R==std::round_toward_infinity) ? ((c&0xFFFF)!=0) : 0);

+		}

+

+		/// 64-bit multiplication.

+		/// \param x first factor

+		/// \param y second factor

+		/// \return upper 32 bit of \a x * \a y rounded to nearest

+		inline uint32 multiply64(uint32 x, uint32 y)

+		{

+		#if HALF_ENABLE_CPP11_LONG_LONG

+			return static_cast<uint32>((static_cast<unsigned long long>(x)*static_cast<unsigned long long>(y)+0x80000000)>>32);

+		#else

+			return mulhi<std::round_to_nearest>(x, y);

+		#endif

+		}

+

+		/// 64-bit division.

+		/// \param x upper 32 bit of dividend

+		/// \param y divisor

+		/// \param s variable to store sticky bit for rounding

+		/// \return (\a x << 32) / \a y

+		inline uint32 divide64(uint32 x, uint32 y, int &s)

+		{

+		#if HALF_ENABLE_CPP11_LONG_LONG

+			unsigned long long xx = static_cast<unsigned long long>(x) << 32;

+			return s = (xx%y!=0), static_cast<uint32>(xx/y);

+		#else

+			y >>= 1;

+			uint32 rem = x, div = 0;

+			for(unsigned int i=0; i<32; ++i)

+			{

+				div <<= 1;

+				if(rem >= y)

+				{

+					rem -= y;

+					div |= 1;

+				}

+				rem <<= 1;

+			}

+			return s = rem > 1, div;

+		#endif

+		}

+

+		/// Half precision positive modulus.

+		/// \tparam Q `true` to compute full quotient, `false` else

+		/// \tparam R `true` to compute signed remainder, `false` for positive remainder

+		/// \param x first operand as positive finite half-precision value

+		/// \param y second operand as positive finite half-precision value

+		/// \param quo address to store the quotient at, `nullptr` if \a Q is `false`

+		/// \return modulus of \a x / \a y

+		template<bool Q,bool R> unsigned int mod(unsigned int x, unsigned int y, int *quo = NULL)

+		{

+			unsigned int q = 0;

+			if(x > y)

+			{

+				int absx = x, absy = y, expx = 0, expy = 0;

+				for(; absx<0x400; absx<<=1,--expx) ;

+				for(; absy<0x400; absy<<=1,--expy) ;

+				expx += absx >> 10;

+				expy += absy >> 10;

+				int mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;

+				for(int d=expx-expy; d; --d)

+				{

+					if(!Q && mx == my)

+						return 0;

+					if(mx >= my)

+					{

+						mx -= my;

+						q += Q;

+					}

+					mx <<= 1;

+					q <<= static_cast<int>(Q);

+				}

+				if(!Q && mx == my)

+					return 0;

+				if(mx >= my)

+				{

+					mx -= my;

+					++q;

+				}

+				if(Q)

+				{

+					q &= (1<<(std::numeric_limits<int>::digits-1)) - 1;

+					if(!mx)

+						return *quo = q, 0;

+				}

+				for(; mx<0x400; mx<<=1,--expy) ;

+				x = (expy>0) ? ((expy<<10)|(mx&0x3FF)) : (mx>>(1-expy));

+			}

+			if(R)

+			{

+				unsigned int a, b;

+				if(y < 0x800)

+				{

+					a = (x<0x400) ? (x<<1) : (x+0x400);

+					b = y;

+				}

+				else

+				{

+					a = x;

+					b = y - 0x400;

+				}

+				if(a > b || (a == b && (q&1)))

+				{

+					int exp = (y>>10) + (y<=0x3FF), d = exp - (x>>10) - (x<=0x3FF);

+					int m = (((y&0x3FF)|((y>0x3FF)<<10))<<1) - (((x&0x3FF)|((x>0x3FF)<<10))<<(1-d));

+					for(; m<0x800 && exp>1; m<<=1,--exp) ;

+					x = 0x8000 + ((exp-1)<<10) + (m>>1);

+					q += Q;

+				}

+			}

+			if(Q)

+				*quo = q;

+			return x;

+		}

+

+		/// Fixed point square root.

+		/// \tparam F number of fractional bits

+		/// \param r radicand in Q1.F fixed point format

+		/// \param exp exponent

+		/// \return square root as Q1.F/2

+		template<unsigned int F> uint32 sqrt(uint32 &r, int &exp)

+		{

+			int i = exp & 1;

+			r <<= i;

+			exp = (exp-i) / 2;

+			uint32 m = 0;

+			for(uint32 bit=static_cast<uint32>(1)<<F; bit; bit>>=2)

+			{

+				if(r < m+bit)

+					m >>= 1;

+				else

+				{

+					r -= m + bit;

+					m = (m>>1) + bit;

+				}

+			}

+			return m;

+		}
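+
+		// Illustrative sketch of the contract (not part of the upstream header): for F == 30,
+		// a radicand of 4.0 is passed as r == 0x40000000 (1.0 in Q1.30) with exp == 2; the loop
+		// returns 0x8000 (1.0 in Q1.15) and leaves exp == 1, i.e. sqrt(4.0) == 2.0, while the
+		// remainder left in r serves as a sticky bit for rounding.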

+

+		/// Fixed point binary exponential.

+		/// This uses the BKM algorithm in E-mode.

+		/// \param m exponent in [0,1) as Q0.31

+		/// \param n number of iterations (at most 32)

+		/// \return 2 ^ \a m as Q1.31

+		inline uint32 exp2(uint32 m, unsigned int n = 32)

+		{

+			static const uint32 logs[] = {

+				0x80000000, 0x4AE00D1D, 0x2934F098, 0x15C01A3A, 0x0B31FB7D, 0x05AEB4DD, 0x02DCF2D1, 0x016FE50B,

+				0x00B84E23, 0x005C3E10, 0x002E24CA, 0x001713D6, 0x000B8A47, 0x0005C53B, 0x0002E2A3, 0x00017153,

+				0x0000B8AA, 0x00005C55, 0x00002E2B, 0x00001715, 0x00000B8B, 0x000005C5, 0x000002E3, 0x00000171,

+				0x000000B9, 0x0000005C, 0x0000002E, 0x00000017, 0x0000000C, 0x00000006, 0x00000003, 0x00000001 };

+			if(!m)

+				return 0x80000000;

+			uint32 mx = 0x80000000, my = 0;

+			for(unsigned int i=1; i<n; ++i)

+			{

+				uint32 mz = my + logs[i];

+				if(mz <= m)

+				{

+					my = mz;

+					mx += mx >> i;

+				}

+			}

+			return mx;

+		}
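+
+		// Illustrative sketch (not part of the upstream header): 0.5 in Q0.31 is 0x40000000,
+		// so exp2(0x40000000) yields roughly 0xB504F334, i.e. sqrt(2) ~ 1.4142 in Q1.31.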

+

+		/// Fixed point binary logarithm.

+		/// This uses the BKM algorithm in L-mode.

+		/// \param m mantissa in [1,2) as Q1.30

+		/// \param n number of iterations (at most 32)

+		/// \return log2(\a m) as Q0.31

+		inline uint32 log2(uint32 m, unsigned int n = 32)

+		{

+			static const uint32 logs[] = {

+				0x80000000, 0x4AE00D1D, 0x2934F098, 0x15C01A3A, 0x0B31FB7D, 0x05AEB4DD, 0x02DCF2D1, 0x016FE50B,

+				0x00B84E23, 0x005C3E10, 0x002E24CA, 0x001713D6, 0x000B8A47, 0x0005C53B, 0x0002E2A3, 0x00017153,

+				0x0000B8AA, 0x00005C55, 0x00002E2B, 0x00001715, 0x00000B8B, 0x000005C5, 0x000002E3, 0x00000171,

+				0x000000B9, 0x0000005C, 0x0000002E, 0x00000017, 0x0000000C, 0x00000006, 0x00000003, 0x00000001 };

+			if(m == 0x40000000)

+				return 0;

+			uint32 mx = 0x40000000, my = 0;

+			for(unsigned int i=1; i<n; ++i)

+			{

+				uint32 mz = mx + (mx>>i);

+				if(mz <= m)

+				{

+					mx = mz;

+					my += logs[i];

+				}

+			}

+			return my;

+		}
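+
+		// Illustrative sketch (not part of the upstream header): 1.5 in Q1.30 is 0x60000000,
+		// so log2(0x60000000) yields 0x4AE00D1D, i.e. log2(1.5) ~ 0.585 in Q0.31
+		// (the same constant as logs[1], since logs[i] holds log2(1 + 2^-i)).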

+

+		/// Fixed point sine and cosine.

+		/// This uses the CORDIC algorithm in rotation mode.

+		/// \param mz angle in [-pi/2,pi/2] as Q1.30

+		/// \param n number of iterations (at most 31)

+		/// \return sine and cosine of \a mz as Q1.30

+		inline std::pair<uint32,uint32> sincos(uint32 mz, unsigned int n = 31)

+		{

+			static const uint32 angles[] = {

+				0x3243F6A9, 0x1DAC6705, 0x0FADBAFD, 0x07F56EA7, 0x03FEAB77, 0x01FFD55C, 0x00FFFAAB, 0x007FFF55,

+				0x003FFFEB, 0x001FFFFD, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000,

+				0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080,

+				0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 };

+			uint32 mx = 0x26DD3B6A, my = 0;

+			for(unsigned int i=0; i<n; ++i)

+			{

+				uint32 sign = sign_mask(mz);

+				uint32 tx = mx - (arithmetic_shift(my, i)^sign) + sign;

+				uint32 ty = my + (arithmetic_shift(mx, i)^sign) - sign;

+				mx = tx; my = ty; mz -= (angles[i]^sign) - sign;

+			}

+			return std::make_pair(my, mx);

+		}
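+
+		// Note (illustrative): the start value 0x26DD3B6A is ~0.6072529 in Q1.30, the usual
+		// CORDIC gain compensation 1/K, so the rotated vector comes out with magnitude ~1.0
+		// and sincos(0) returns (~0, ~0x40000000), i.e. sin 0 and cos 1 in Q1.30.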

+

+		/// Fixed point arc tangent.

+		/// This uses the CORDIC algorithm in vectoring mode.

+		/// \param my y coordinate as Q0.30

+		/// \param mx x coordinate as Q0.30

+		/// \param n number of iterations (at most 31)

+		/// \return arc tangent of \a my / \a mx as Q1.30

+		inline uint32 atan2(uint32 my, uint32 mx, unsigned int n = 31)

+		{

+			static const uint32 angles[] = {

+				0x3243F6A9, 0x1DAC6705, 0x0FADBAFD, 0x07F56EA7, 0x03FEAB77, 0x01FFD55C, 0x00FFFAAB, 0x007FFF55,

+				0x003FFFEB, 0x001FFFFD, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000,

+				0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080,

+				0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 };

+			uint32 mz = 0;

+			for(unsigned int i=0; i<n; ++i)

+			{

+				uint32 sign = sign_mask(my);

+				uint32 tx = mx + (arithmetic_shift(my, i)^sign) - sign;

+				uint32 ty = my - (arithmetic_shift(mx, i)^sign) + sign;

+				mx = tx; my = ty; mz += (angles[i]^sign) - sign;

+			}

+			return mz;

+		}

+

+		/// Reduce argument for trigonometric functions.

+		/// \param abs half-precision floating-point value

+		/// \param k value to take quarter period

+		/// \return \a abs reduced to [-pi/4,pi/4] as Q0.30

+		inline uint32 angle_arg(unsigned int abs, int &k)

+		{

+			uint32 m = (abs&0x3FF) | ((abs>0x3FF)<<10);

+			int exp = (abs>>10) + (abs<=0x3FF) - 15;

+			if(abs < 0x3A48)

+				return k = 0, m << (exp+20);

+		#if HALF_ENABLE_CPP11_LONG_LONG

+			unsigned long long y = m * 0xA2F9836E4E442, mask = (1ULL<<(62-exp)) - 1, yi = (y+(mask>>1)) & ~mask, f = y - yi;

+			uint32 sign = -static_cast<uint32>(f>>63);

+			k = static_cast<int>(yi>>(62-exp));

+			return (multiply64(static_cast<uint32>((sign ? -f : f)>>(31-exp)), 0xC90FDAA2)^sign) - sign;

+		#else

+			uint32 yh = m*0xA2F98 + mulhi<std::round_toward_zero>(m, 0x36E4E442), yl = (m*0x36E4E442) & 0xFFFFFFFF;

+			uint32 mask = (static_cast<uint32>(1)<<(30-exp)) - 1, yi = (yh+(mask>>1)) & ~mask, sign = -static_cast<uint32>(yi>yh);

+			k = static_cast<int>(yi>>(30-exp));

+			uint32 fh = (yh^sign) + (yi^~sign) - ~sign, fl = (yl^sign) - sign;

+			return (multiply64((exp>-1) ? (((fh<<(1+exp))&0xFFFFFFFF)|((fl&0xFFFFFFFF)>>(31-exp))) : fh, 0xC90FDAA2)^sign) - sign;

+		#endif

+		}

+

+		/// Get arguments for atan2 function.

+		/// \param abs half-precision floating-point value

+		/// \return \a abs and sqrt(1 - \a abs^2) as Q0.30

+		inline std::pair<uint32,uint32> atan2_args(unsigned int abs)

+		{

+			int exp = -15;

+			for(; abs<0x400; abs<<=1,--exp) ;

+			exp += abs >> 10;

+			uint32 my = ((abs&0x3FF)|0x400) << 5, r = my * my;

+			int rexp = 2 * exp;

+			r = 0x40000000 - ((rexp>-31) ? ((r>>-rexp)|((r&((static_cast<uint32>(1)<<-rexp)-1))!=0)) : 1);

+			for(rexp=0; r<0x40000000; r<<=1,--rexp) ;

+			uint32 mx = sqrt<30>(r, rexp);

+			int d = exp - rexp;

+			if(d < 0)

+				return std::make_pair((d<-14) ? ((my>>(-d-14))+((my>>(-d-15))&1)) : (my<<(14+d)), (mx<<14)+(r<<13)/mx);

+			if(d > 0)

+				return std::make_pair(my<<14, (d>14) ? ((mx>>(d-14))+((mx>>(d-15))&1)) : ((d==14) ? mx : ((mx<<(14-d))+(r<<(13-d))/mx)));

+			return std::make_pair(my<<13, (mx<<13)+(r<<12)/mx);

+		}

+

+		/// Get exponentials for hyperbolic computation

+		/// \param abs half-precision floating-point value

+		/// \param exp variable to take unbiased exponent of larger result

+		/// \param n number of BKM iterations (at most 32)

+		/// \return exp(\a abs) and exp(-\a abs) as Q1.31 with the same exponent

+		inline std::pair<uint32,uint32> hyperbolic_args(unsigned int abs, int &exp, unsigned int n = 32)

+		{

+			uint32 mx = detail::multiply64(static_cast<uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29), my;

+			int e = (abs>>10) + (abs<=0x3FF);

+			if(e < 14)

+			{

+				exp = 0;

+				mx >>= 14 - e;

+			}

+			else

+			{

+				exp = mx >> (45-e);

+				mx = (mx<<(e-14)) & 0x7FFFFFFF;

+			}

+			mx = exp2(mx, n);

+			int d = exp << 1, s;

+			if(mx > 0x80000000)

+			{

+				my = divide64(0x80000000, mx, s);

+				my |= s;

+				++d;

+			}

+			else

+				my = mx;

+			return std::make_pair(mx, (d<31) ? ((my>>d)|((my&((static_cast<uint32>(1)<<d)-1))!=0)) : 1);

+		}

+

+		/// Postprocessing for binary exponential.

+		/// \tparam R rounding mode to use

+		/// \tparam I `true` to always raise INEXACT exception, `false` to raise only for rounded results

+		/// \param m mantissa as Q1.31

+		/// \param exp absolute value of unbiased exponent

+		/// \param esign sign of actual exponent

+		/// \param sign sign bit of result

+		/// \return value converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded or \a I is `true`

+		template<std::float_round_style R,bool I> unsigned int exp2_post(uint32 m, int exp, bool esign, unsigned int sign = 0)

+		{

+			int s = 0;

+			if(esign)

+			{

+				if(m > 0x80000000)

+				{

+					m = divide64(0x80000000, m, s);

+					++exp;

+				}

+				if(exp > 25)

+					return underflow<R>(sign);

+				else if(exp == 25)

+					return rounded<R,I>(sign, 1, (m&0x7FFFFFFF)!=0);

+				exp = -exp;

+			}

+			else if(exp > 15)

+				return overflow<R>(sign);

+			return fixed2half<R,31,false,false,I>(m, exp+14, sign, s);

+		}

+

+		/// Postprocessing for binary logarithm.

+		/// \tparam R rounding mode to use

+		/// \tparam L logarithm for base transformation as Q1.31

+		/// \param m fractional part of logarithm as Q0.31

+		/// \param ilog signed integer part of logarithm

+		/// \param exp biased exponent of result

+		/// \param sign sign bit of result

+		/// \return value base-transformed and converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if no other exception occurred

+		template<std::float_round_style R,uint32 L> unsigned int log2_post(uint32 m, int ilog, int exp, unsigned int sign = 0)

+		{

+			uint32 msign = sign_mask(ilog);

+			m = (((static_cast<uint32>(ilog)<<27)+(m>>4))^msign) - msign;

+			if(!m)

+				return 0;

+			for(; m<0x80000000; m<<=1,--exp) ;

+			int i = m >= L, s;

+			exp += i;

+			m >>= 1 + i;

+			sign ^= msign & 0x8000;

+			if(exp < -11)

+				return underflow<R>(sign);

+			m = divide64(m, L, s);

+			return fixed2half<R,30,false,false,true>(m, exp, sign, 1);

+		}

+

+		/// Hypotenuse square root and postprocessing.

+		/// \tparam R rounding mode to use

+		/// \param r mantissa as Q2.30

+		/// \param exp unbiased exponent

+		/// \return square root converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if value had to be rounded

+		template<std::float_round_style R> unsigned int hypot_post(uint32 r, int exp)

+		{

+			int i = r >> 31;

+			if((exp+=i) > 46)

+				return overflow<R>();

+			if(exp < -34)

+				return underflow<R>();

+			r = (r>>i) | (r&i);

+			uint32 m = sqrt<30>(r, exp+=15);

+			return fixed2half<R,15,false,false,false>(m, exp-1, 0, r!=0);

+		}

+

+		/// Division and postprocessing for tangents.

+		/// \tparam R rounding mode to use

+		/// \param my dividend as Q1.31

+		/// \param mx divisor as Q1.31

+		/// \param exp biased exponent of result

+		/// \param sign sign bit of result

+		/// \return quotient converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if no other exception occurred

+		template<std::float_round_style R> unsigned int tangent_post(uint32 my, uint32 mx, int exp, unsigned int sign = 0)

+		{

+			int i = my >= mx, s;

+			exp += i;

+			if(exp > 29)

+				return overflow<R>(sign);

+			if(exp < -11)

+				return underflow<R>(sign);

+			uint32 m = divide64(my>>(i+1), mx, s);

+			return fixed2half<R,30,false,false,true>(m, exp, sign, s);

+		}

+

+		/// Area function and postprocessing.

+		/// This computes the value directly in Q2.30 using the representation `asinh|acosh(x) = log(x+sqrt(x^2+|-1))`.

+		/// \tparam R rounding mode to use

+		/// \tparam S `true` for asinh, `false` for acosh

+		/// \param arg half-precision argument

+		/// \return asinh|acosh(\a arg) converted to half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if no other exception occurred

+		template<std::float_round_style R,bool S> unsigned int area(unsigned int arg)

+		{

+			int abs = arg & 0x7FFF, expx = (abs>>10) + (abs<=0x3FF) - 15, expy = -15, ilog, i;

+			uint32 mx = static_cast<uint32>((abs&0x3FF)|((abs>0x3FF)<<10)) << 20, my, r;

+			for(; abs<0x400; abs<<=1,--expy) ;

+			expy += abs >> 10;

+			r = ((abs&0x3FF)|0x400) << 5;

+			r *= r;

+			i = r >> 31;

+			expy = 2*expy + i;

+			r >>= i;

+			if(S)

+			{

+				if(expy < 0)

+				{

+					r = 0x40000000 + ((expy>-30) ? ((r>>-expy)|((r&((static_cast<uint32>(1)<<-expy)-1))!=0)) : 1);

+					expy = 0;

+				}

+				else

+				{

+					r += 0x40000000 >> expy;

+					i = r >> 31;

+					r = (r>>i) | (r&i);

+					expy += i;

+				}

+			}

+			else

+			{

+				r -= 0x40000000 >> expy;

+				for(; r<0x40000000; r<<=1,--expy) ;

+			}

+			my = sqrt<30>(r, expy);

+			my = (my<<15) + (r<<14)/my;

+			if(S)

+			{

+				mx >>= expy - expx;

+				ilog = expy;

+			}

+			else

+			{

+				my >>= expx - expy;

+				ilog = expx;

+			}

+			my += mx;

+			i = my >> 31;

+			static const int G = S && (R==std::round_to_nearest);

+			return log2_post<R,0xB8AA3B2A>(log2(my>>i, 26+S+G)+(G<<3), ilog+i, 17, arg&(static_cast<unsigned>(S)<<15));

+		}

+

+		/// Class for 1.31 unsigned floating-point computation

+		struct f31

+		{

+			/// Constructor.

+			/// \param mant mantissa as 1.31

+			/// \param e exponent

+			HALF_CONSTEXPR f31(uint32 mant, int e) : m(mant), exp(e) {}

+

+			/// Constructor.

+			/// \param abs unsigned half-precision value

+			f31(unsigned int abs) : exp(-15)

+			{

+				for(; abs<0x400; abs<<=1,--exp) ;

+				m = static_cast<uint32>((abs&0x3FF)|0x400) << 21;

+				exp += (abs>>10);

+			}

+

+			/// Addition operator.

+			/// \param a first operand

+			/// \param b second operand

+			/// \return \a a + \a b

+			friend f31 operator+(f31 a, f31 b)

+			{

+				if(b.exp > a.exp)

+					std::swap(a, b);

+				int d = a.exp - b.exp;

+				uint32 m = a.m + ((d<32) ? (b.m>>d) : 0);

+				int i = (m&0xFFFFFFFF) < a.m;

+				return f31(((m+i)>>i)|0x80000000, a.exp+i);

+			}

+

+			/// Subtraction operator.

+			/// \param a first operand

+			/// \param b second operand

+			/// \return \a a - \a b

+			friend f31 operator-(f31 a, f31 b)

+			{

+				int d = a.exp - b.exp, exp = a.exp;

+				uint32 m = a.m - ((d<32) ? (b.m>>d) : 0);

+				if(!m)

+					return f31(0, -32);

+				for(; m<0x80000000; m<<=1,--exp) ;

+				return f31(m, exp);

+			}

+

+			/// Multiplication operator.

+			/// \param a first operand

+			/// \param b second operand

+			/// \return \a a * \a b

+			friend f31 operator*(f31 a, f31 b)

+			{

+				uint32 m = multiply64(a.m, b.m);

+				int i = m >> 31;

+				return f31(m<<(1-i), a.exp + b.exp + i);

+			}

+

+			/// Division operator.

+			/// \param a first operand

+			/// \param b second operand

+			/// \return \a a / \a b

+			friend f31 operator/(f31 a, f31 b)

+			{

+				int i = a.m >= b.m, s;

+				uint32 m = divide64((a.m+i)>>i, b.m, s);

+				return f31(m, a.exp - b.exp + i - 1);

+			}

+

+			uint32 m;			///< mantissa as 1.31.

+			int exp;			///< exponent.

+		};
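+
+		// Illustrative reading (not part of the upstream header): an f31 represents
+		// (m / 2^31) * 2^exp, so f31(0x80000000, 0) is 1.0 and f31(0xC0000000, 0) is 1.5;
+		// e.g. f31(0xC0000000, 0) * f31(0x80000000, 1) gives m == 0xC0000000, exp == 1,
+		// i.e. 3.0. The half-bit-pattern constructor normalizes subnormal inputs first.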

+

+		/// Error function and postprocessing.

+		/// This computes the value directly in Q1.31 using the approximations given 

+		/// [here](https://en.wikipedia.org/wiki/Error_function#Approximation_with_elementary_functions).

+		/// \tparam R rounding mode to use

+		/// \tparam C `true` for complementary error function, `false` else

+		/// \param arg half-precision function argument

+		/// \return approximated value of error function in half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if no other exception occurred

+		template<std::float_round_style R,bool C> unsigned int erf(unsigned int arg)

+		{

+			unsigned int abs = arg & 0x7FFF, sign = arg & 0x8000;

+			f31 x(abs), x2 = x * x * f31(0xB8AA3B29, 0), t = f31(0x80000000, 0) / (f31(0x80000000, 0)+f31(0xA7BA054A, -2)*x), t2 = t * t;

+			f31 e = ((f31(0x87DC2213, 0)*t2+f31(0xB5F0E2AE, 0))*t2+f31(0x82790637, -2)-(f31(0xBA00E2B8, 0)*t2+f31(0x91A98E62, -2))*t) * t /

+					((x2.exp<0) ? f31(exp2((x2.exp>-32) ? (x2.m>>-x2.exp) : 0, 30), 0) : f31(exp2((x2.m<<x2.exp)&0x7FFFFFFF, 22), x2.m>>(31-x2.exp)));

+			return (!C || sign) ? fixed2half<R,31,false,true,true>(0x80000000-(e.m>>(C-e.exp)), 14+C, sign&(C-1U)) :

+					(e.exp<-25) ? underflow<R>() : fixed2half<R,30,false,false,true>(e.m>>1, e.exp+14, 0, e.m&1);

+		}

+

+		/// Gamma function and postprocessing.

+		/// This approximates the value of either the gamma function or its logarithm directly in Q1.31.

+		/// \tparam R rounding mode to use

+		/// \tparam L `true` for logarithm of gamma function, `false` for gamma function

+		/// \param arg half-precision floating-point value

+		/// \return lgamma/tgamma(\a arg) in half-precision

+		/// \exception FE_OVERFLOW on overflows

+		/// \exception FE_UNDERFLOW on underflows

+		/// \exception FE_INEXACT if \a arg is not a positive integer

+		template<std::float_round_style R,bool L> unsigned int gamma(unsigned int arg)

+		{

+/*			static const double p[] ={ 2.50662827563479526904, 225.525584619175212544, -268.295973841304927459, 80.9030806934622512966, -5.00757863970517583837, 0.0114684895434781459556 };

+			double t = arg + 4.65, s = p[0];

+			for(unsigned int i=0; i<5; ++i)

+				s += p[i+1] / (arg+i);

+			return std::log(s) + (arg-0.5)*std::log(t) - t;

+*/			static const f31 pi(0xC90FDAA2, 1), lbe(0xB8AA3B29, 0);

+			unsigned int abs = arg & 0x7FFF, sign = arg & 0x8000;

+			bool bsign = sign != 0;

+			f31 z(abs), x = sign ? (z+f31(0x80000000, 0)) : z, t = x + f31(0x94CCCCCD, 2), s =

+				f31(0xA06C9901, 1) + f31(0xBBE654E2, -7)/(x+f31(0x80000000, 2)) + f31(0xA1CE6098, 6)/(x+f31(0x80000000, 1))

+				+ f31(0xE1868CB7, 7)/x - f31(0x8625E279, 8)/(x+f31(0x80000000, 0)) - f31(0xA03E158F, 2)/(x+f31(0xC0000000, 1));

+			int i = (s.exp>=2) + (s.exp>=4) + (s.exp>=8) + (s.exp>=16);

+			s = f31((static_cast<uint32>(s.exp)<<(31-i))+(log2(s.m>>1, 28)>>i), i) / lbe;

+			if(x.exp != -1 || x.m != 0x80000000)

+			{

+				i = (t.exp>=2) + (t.exp>=4) + (t.exp>=8);

+				f31 l = f31((static_cast<uint32>(t.exp)<<(31-i))+(log2(t.m>>1, 30)>>i), i) / lbe;

+				s = (x.exp<-1) ? (s-(f31(0x80000000, -1)-x)*l) : (s+(x-f31(0x80000000, -1))*l);

+			}

+			s = x.exp ? (s-t) : (t-s);

+			if(bsign)

+			{

+				if(z.exp >= 0)

+				{

+					sign &= (L|((z.m>>(31-z.exp))&1)) - 1;

+					for(z=f31((z.m<<(1+z.exp))&0xFFFFFFFF, -1); z.m<0x80000000; z.m<<=1,--z.exp) ;

+				}

+				if(z.exp == -1)

+					z = f31(0x80000000, 0) - z;

+				if(z.exp < -1)

+				{

+					z = z * pi;

+					z.m = sincos(z.m>>(1-z.exp), 30).first;

+					for(z.exp=1; z.m<0x80000000; z.m<<=1,--z.exp) ;

+				}

+				else

+					z = f31(0x80000000, 0);

+			}

+			if(L)

+			{

+				if(bsign)

+				{

+					f31 l(0x92868247, 0);

+					if(z.exp < 0)

+					{

+						uint32 m = log2((z.m+1)>>1, 27);

+						z = f31(-((static_cast<uint32>(z.exp)<<26)+(m>>5)), 5);

+						for(; z.m<0x80000000; z.m<<=1,--z.exp) ;

+						l = l + z / lbe;

+					}

+					sign = static_cast<unsigned>(x.exp&&(l.exp<s.exp||(l.exp==s.exp&&l.m<s.m))) << 15;

+					s = sign ? (s-l) : x.exp ? (l-s) : (l+s);

+				}

+				else

+				{

+					sign = static_cast<unsigned>(x.exp==0) << 15;

+					if(s.exp < -24)

+						return underflow<R>(sign);

+					if(s.exp > 15)

+						return overflow<R>(sign);

+				}

+			}

+			else

+			{

+				s = s * lbe;

+				uint32 m;

+				if(s.exp < 0)

+				{

+					m = s.m >> -s.exp;

+					s.exp = 0;

+				}

+				else

+				{

+					m = (s.m<<s.exp) & 0x7FFFFFFF;

+					s.exp = (s.m>>(31-s.exp));

+				}

+				s.m = exp2(m, 27);

+				if(!x.exp)

+					s = f31(0x80000000, 0) / s;

+				if(bsign)

+				{

+					if(z.exp < 0)

+						s = s * z;

+					s = pi / s;

+					if(s.exp < -24)

+						return underflow<R>(sign);

+				}

+				else if(z.exp > 0 && !(z.m&((1<<(31-z.exp))-1)))

+					return ((s.exp+14)<<10) + (s.m>>21);

+				if(s.exp > 15)

+					return overflow<R>(sign);

+			}

+			return fixed2half<R,31,false,false,true>(s.m, s.exp+14, sign);

+		}

+		/// \}

+

+		template<typename,typename,std::float_round_style> struct half_caster;

+	}

+

+	/// Half-precision floating-point type.

+	/// This class implements an IEEE-conformant half-precision floating-point type with the usual arithmetic 
+	/// operators and conversions. It is implicitly convertible to single-precision floating-point, which causes arithmetic 
+	/// expressions and functions with mixed-type operands to be evaluated in the most precise operand type.
+	///
+	/// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and 
+	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which 
+	/// means it can be copied with raw binary copies in a standard-conformant way. A few more words about the actual size 
+	/// of the type are in order. Although the half represents an IEEE 16-bit format, it does not necessarily have to be 
+	/// exactly 16 bits in size. On any reasonable implementation, however, the binary representation will most probably not 
+	/// involve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit IEEE 
+	/// number, even if this is not strictly guaranteed by the standard. Even then it only has an actual size of 16 bits if 
+	/// the C++ implementation supports an unsigned integer type of exactly 16 bits width, which should be the case on 
+	/// nearly any reasonable platform.
+	///
+	/// So if your C++ implementation is not totally exotic and does not impose special alignment requirements, it is a 
+	/// reasonable assumption that the data of a half consists of just the 2 bytes of the underlying IEEE representation.

+	class half

+	{

+	public:

+		/// \name Construction and assignment

+		/// \{

+

+		/// Default constructor.

+		/// This initializes the half to 0. Although this does not match the builtin types' default-initialization semantics 

+		/// and may be less efficient than no initialization, it is needed to provide proper value-initialization semantics.

+		HALF_CONSTEXPR half() HALF_NOEXCEPT : data_() {}

+

+		/// Conversion constructor.

+		/// \param rhs float to convert

+		/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+		explicit half(float rhs) : data_(static_cast<detail::uint16>(detail::float2half<round_style>(rhs))) {}

+	

+		/// Conversion to single-precision.

+		/// \return single precision value representing expression value

+		operator float() const { return detail::half2float<float>(data_); }

+

+		/// Assignment operator.

+		/// \param rhs single-precision value to copy from

+		/// \return reference to this half

+		/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+		half& operator=(float rhs) { data_ = static_cast<detail::uint16>(detail::float2half<round_style>(rhs)); return *this; }

+

+		/// \}

+		/// \name Arithmetic updates

+		/// \{

+

+		/// Arithmetic assignment.


+		/// \param rhs half expression to add

+		/// \return reference to this half

+		/// \exception FE_... according to operator+(half,half)

+		half& operator+=(half rhs) { return *this = *this + rhs; }

+

+		/// Arithmetic assignment.


+		/// \param rhs half expression to subtract

+		/// \return reference to this half

+		/// \exception FE_... according to operator-(half,half)

+		half& operator-=(half rhs) { return *this = *this - rhs; }

+

+		/// Arithmetic assignment.


+		/// \param rhs half expression to multiply with

+		/// \return reference to this half

+		/// \exception FE_... according to operator*(half,half)

+		half& operator*=(half rhs) { return *this = *this * rhs; }

+

+		/// Arithmetic assignment.


+		/// \param rhs half expression to divide by

+		/// \return reference to this half

+		/// \exception FE_... according to operator/(half,half)

+		half& operator/=(half rhs) { return *this = *this / rhs; }

+

+		/// Arithmetic assignment.

+		/// \param rhs single-precision value to add

+		/// \return reference to this half

+		/// \exception FE_... according to operator=()

+		half& operator+=(float rhs) { return *this = *this + rhs; }

+

+		/// Arithmetic assignment.

+		/// \param rhs single-precision value to subtract

+		/// \return reference to this half

+		/// \exception FE_... according to operator=()

+		half& operator-=(float rhs) { return *this = *this - rhs; }

+

+		/// Arithmetic assignment.

+		/// \param rhs single-precision value to multiply with

+		/// \return reference to this half

+		/// \exception FE_... according to operator=()

+		half& operator*=(float rhs) { return *this = *this * rhs; }

+

+		/// Arithmetic assignment.

+		/// \param rhs single-precision value to divide by

+		/// \return reference to this half

+		/// \exception FE_... according to operator=()

+		half& operator/=(float rhs) { return *this = *this / rhs; }

+

+		/// \}

+		/// \name Increment and decrement

+		/// \{

+

+		/// Prefix increment.

+		/// \return incremented half value

+		/// \exception FE_... according to operator+(half,half)

+		half& operator++() { return *this = *this + half(detail::binary, 0x3C00); }

+

+		/// Prefix decrement.

+		/// \return decremented half value

+		/// \exception FE_... according to operator-(half,half)

+		half& operator--() { return *this = *this + half(detail::binary, 0xBC00); }

+

+		/// Postfix increment.

+		/// \return non-incremented half value

+		/// \exception FE_... according to operator+(half,half)

+		half operator++(int) { half out(*this); ++*this; return out; }

+

+		/// Postfix decrement.

+		/// \return non-decremented half value

+		/// \exception FE_... according to operator-(half,half)

+		half operator--(int) { half out(*this); --*this; return out; }

+		/// \}

+	

+	private:

+		/// Rounding mode to use

+		static const std::float_round_style round_style = (std::float_round_style)(HALF_ROUND_STYLE);

+

+		/// Constructor.

+		/// \param bits binary representation to set half to

+		HALF_CONSTEXPR half(detail::binary_t, unsigned int bits) HALF_NOEXCEPT : data_(static_cast<detail::uint16>(bits)) {}

+

+		/// Internal binary representation

+		detail::uint16 data_;

+

+	#ifndef HALF_DOXYGEN_ONLY

+		friend HALF_CONSTEXPR_NOERR bool operator==(half, half);

+		friend HALF_CONSTEXPR_NOERR bool operator!=(half, half);

+		friend HALF_CONSTEXPR_NOERR bool operator<(half, half);

+		friend HALF_CONSTEXPR_NOERR bool operator>(half, half);

+		friend HALF_CONSTEXPR_NOERR bool operator<=(half, half);

+		friend HALF_CONSTEXPR_NOERR bool operator>=(half, half);

+		friend HALF_CONSTEXPR half operator-(half);

+		friend half operator+(half, half);

+		friend half operator-(half, half);

+		friend half operator*(half, half);

+		friend half operator/(half, half);

+		template<typename charT,typename traits> friend std::basic_ostream<charT,traits>& operator<<(std::basic_ostream<charT,traits>&, half);

+		template<typename charT,typename traits> friend std::basic_istream<charT,traits>& operator>>(std::basic_istream<charT,traits>&, half&);

+		friend HALF_CONSTEXPR half fabs(half);

+		friend half fmod(half, half);

+		friend half remainder(half, half);

+		friend half remquo(half, half, int*);

+		friend half fma(half, half, half);

+		friend HALF_CONSTEXPR_NOERR half fmax(half, half);

+		friend HALF_CONSTEXPR_NOERR half fmin(half, half);

+		friend half fdim(half, half);

+		friend half nanh(const char*);

+		friend half exp(half);

+		friend half exp2(half);

+		friend half expm1(half);

+		friend half log(half);

+		friend half log10(half);

+		friend half log2(half);

+		friend half log1p(half);

+		friend half sqrt(half);

+		friend half cbrt(half);

+		friend half hypot(half, half);

+		friend half hypot(half, half, half);

+		friend half pow(half, half);

+		friend void sincos(half, half*, half*);

+		friend half sin(half);

+		friend half cos(half);

+		friend half tan(half);

+		friend half asin(half);

+		friend half acos(half);

+		friend half atan(half);

+		friend half atan2(half, half);

+		friend half sinh(half);

+		friend half cosh(half);

+		friend half tanh(half);

+		friend half asinh(half);

+		friend half acosh(half);

+		friend half atanh(half);

+		friend half erf(half);

+		friend half erfc(half);

+		friend half lgamma(half);

+		friend half tgamma(half);

+		friend half ceil(half);

+		friend half floor(half);

+		friend half trunc(half);

+		friend half round(half);

+		friend long lround(half);

+		friend half rint(half);

+		friend long lrint(half);

+		friend half nearbyint(half);

+	#ifdef HALF_ENABLE_CPP11_LONG_LONG

+		friend long long llround(half);

+		friend long long llrint(half);

+	#endif

+		friend half frexp(half, int*);

+		friend half scalbln(half, long);

+		friend half modf(half, half*);

+		friend int ilogb(half);

+		friend half logb(half);

+		friend half nextafter(half, half);

+		friend half nexttoward(half, long double);

+		friend HALF_CONSTEXPR half copysign(half, half);

+		friend HALF_CONSTEXPR int fpclassify(half);

+		friend HALF_CONSTEXPR bool isfinite(half);

+		friend HALF_CONSTEXPR bool isinf(half);

+		friend HALF_CONSTEXPR bool isnan(half);

+		friend HALF_CONSTEXPR bool isnormal(half);

+		friend HALF_CONSTEXPR bool signbit(half);

+		friend HALF_CONSTEXPR bool isgreater(half, half);

+		friend HALF_CONSTEXPR bool isgreaterequal(half, half);

+		friend HALF_CONSTEXPR bool isless(half, half);

+		friend HALF_CONSTEXPR bool islessequal(half, half);

+		friend HALF_CONSTEXPR bool islessgreater(half, half);

+		template<typename,typename,std::float_round_style> friend struct detail::half_caster;

+		friend class std::numeric_limits<half>;

+	#if HALF_ENABLE_CPP11_HASH

+		friend struct std::hash<half>;

+	#endif

+	#if HALF_ENABLE_CPP11_USER_LITERALS

+		friend half literal::operator "" _h(long double);

+	#endif

+	#endif

+	};
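+
+	// Minimal usage sketch (illustrative only): every operation rounds according to
+	// half::round_style, and mixed expressions with float promote to float via the
+	// implicit conversion above.
+	//
+	//   half_float::half a(3.5f), b(0.25f);
+	//   half_float::half c = a * b;   // 0.875, exactly representable in half precision
+	//   float f = a + 1.0f;           // evaluated in single precision -> 4.5f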

+

+#if HALF_ENABLE_CPP11_USER_LITERALS

+	namespace literal

+	{

+		/// Half literal.

+		/// While this returns a properly rounded half-precision value, half literals can unfortunately not be constant 

+		/// expressions due to rather involved conversions. So don't expect this to be a compile-time constant without involving 

+		/// conversion operations at runtime. It is a convenience feature, not a performance optimization.

+		/// \param value literal value

+		/// \return half with the given value (possibly rounded)

+		/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+		inline half operator "" _h(long double value) { return half(detail::binary, detail::float2half<half::round_style>(value)); }

+	}

+#endif
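+
+	// Example (illustrative, assuming user-defined literals are available):
+	//
+	//   using namespace half_float::literal;
+	//   half_float::half pi = 3.14159265358979_h;   // rounded to half precision at runtime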

+

+	namespace detail

+	{

+		/// Helper class for half casts.

+		/// This class template has to be specialized for all valid cast arguments to define an appropriate static 

+		/// `cast` member function and a corresponding `type` member denoting its return type.

+		/// \tparam T destination type

+		/// \tparam U source type

+		/// \tparam R rounding mode to use

+		template<typename T,typename U,std::float_round_style R=(std::float_round_style)(HALF_ROUND_STYLE)> struct half_caster {};

+		template<typename U,std::float_round_style R> struct half_caster<half,U,R>

+		{

+		#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS

+			static_assert(std::is_arithmetic<U>::value, "half_cast from non-arithmetic type unsupported");

+		#endif

+

+			static half cast(U arg) { return cast_impl(arg, is_float<U>()); };

+

+		private:

+			static half cast_impl(U arg, true_type) { return half(binary, float2half<R>(arg)); }

+			static half cast_impl(U arg, false_type) { return half(binary, int2half<R>(arg)); }

+		};

+		template<typename T,std::float_round_style R> struct half_caster<T,half,R>

+		{

+		#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS

+			static_assert(std::is_arithmetic<T>::value, "half_cast to non-arithmetic type unsupported");

+		#endif

+

+			static T cast(half arg) { return cast_impl(arg, is_float<T>()); }

+

+		private:

+			static T cast_impl(half arg, true_type) { return half2float<T>(arg.data_); }

+			static T cast_impl(half arg, false_type) { return half2int<R,true,true,T>(arg.data_); }

+		};

+		template<std::float_round_style R> struct half_caster<half,half,R>

+		{

+			static half cast(half arg) { return arg; }

+		};

+	}

+}

+

+/// Extensions to the C++ standard library.

+namespace std

+{

+	/// Numeric limits for half-precision floats.

+	/// **See also:** Documentation for [std::numeric_limits](https://en.cppreference.com/w/cpp/types/numeric_limits)

+	template<> class numeric_limits<half_float::half>

+	{

+	public:

+		/// Is template specialization.

+		static HALF_CONSTEXPR_CONST bool is_specialized = true;

+

+		/// Supports signed values.

+		static HALF_CONSTEXPR_CONST bool is_signed = true;

+

+		/// Is not an integer type.

+		static HALF_CONSTEXPR_CONST bool is_integer = false;

+

+		/// Is not exact.

+		static HALF_CONSTEXPR_CONST bool is_exact = false;

+

+		/// Doesn't provide modulo arithmetic.

+		static HALF_CONSTEXPR_CONST bool is_modulo = false;

+

+		/// Has a finite set of values.

+		static HALF_CONSTEXPR_CONST bool is_bounded = true;

+

+		/// IEEE conformant.

+		static HALF_CONSTEXPR_CONST bool is_iec559 = true;

+

+		/// Supports infinity.

+		static HALF_CONSTEXPR_CONST bool has_infinity = true;

+

+		/// Supports quiet NaNs.

+		static HALF_CONSTEXPR_CONST bool has_quiet_NaN = true;

+

+		/// Supports signaling NaNs.

+		static HALF_CONSTEXPR_CONST bool has_signaling_NaN = true;

+

+		/// Supports subnormal values.

+		static HALF_CONSTEXPR_CONST float_denorm_style has_denorm = denorm_present;

+

+		/// Does not detect denormalization loss.

+		static HALF_CONSTEXPR_CONST bool has_denorm_loss = false;

+

+	#if HALF_ERRHANDLING_THROWS

+		static HALF_CONSTEXPR_CONST bool traps = true;

+	#else

+		/// Traps only if [HALF_ERRHANDLING_THROW_...](\ref HALF_ERRHANDLING_THROW_INVALID) is activated.

+		static HALF_CONSTEXPR_CONST bool traps = false;

+	#endif

+

+		/// Does not detect tininess before rounding.

+		static HALF_CONSTEXPR_CONST bool tinyness_before = false;

+

+		/// Rounding mode.

+		static HALF_CONSTEXPR_CONST float_round_style round_style = half_float::half::round_style;

+

+		/// Significant digits.

+		static HALF_CONSTEXPR_CONST int digits = 11;

+

+		/// Significant decimal digits.

+		static HALF_CONSTEXPR_CONST int digits10 = 3;

+

+		/// Required decimal digits to represent all possible values.

+		static HALF_CONSTEXPR_CONST int max_digits10 = 5;

+

+		/// Number base.

+		static HALF_CONSTEXPR_CONST int radix = 2;

+

+		/// One more than smallest exponent.

+		static HALF_CONSTEXPR_CONST int min_exponent = -13;

+

+		/// Smallest normalized representable power of 10.

+		static HALF_CONSTEXPR_CONST int min_exponent10 = -4;

+

+		/// One more than largest exponent.

+		static HALF_CONSTEXPR_CONST int max_exponent = 16;

+

+		/// Largest finitely representable power of 10.

+		static HALF_CONSTEXPR_CONST int max_exponent10 = 4;

+

+		/// Smallest positive normal value.

+		static HALF_CONSTEXPR half_float::half min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0400); }

+

+		/// Smallest finite value.

+		static HALF_CONSTEXPR half_float::half lowest() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0xFBFF); }

+

+		/// Largest finite value.

+		static HALF_CONSTEXPR half_float::half max() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7BFF); }

+

+		/// Difference between 1 and next representable value.

+		static HALF_CONSTEXPR half_float::half epsilon() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x1400); }

+

+		/// Maximum rounding error in ULP (units in the last place).

+		static HALF_CONSTEXPR half_float::half round_error() HALF_NOTHROW

+			{ return half_float::half(half_float::detail::binary, (round_style==std::round_to_nearest) ? 0x3800 : 0x3C00); }

+

+		/// Positive infinity.

+		static HALF_CONSTEXPR half_float::half infinity() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7C00); }

+

+		/// Quiet NaN.

+		static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); }

+

+		/// Signaling NaN.

+		static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); }

+

+		/// Smallest positive subnormal value.

+		static HALF_CONSTEXPR half_float::half denorm_min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0001); }

+	};
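+
+	// For reference (implied by the binary16 constants above, illustrative only):
+	//   numeric_limits<half_float::half>::max()         == 65504         (0x7BFF)
+	//   numeric_limits<half_float::half>::min()         == 2^-14         (~6.104e-05, 0x0400)
+	//   numeric_limits<half_float::half>::denorm_min()  == 2^-24         (~5.960e-08, 0x0001)
+	//   numeric_limits<half_float::half>::epsilon()     == 2^-10         (0.0009765625, 0x1400)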

+

+#if HALF_ENABLE_CPP11_HASH

+	/// Hash function for half-precision floats.

+	/// This is only defined if C++11 `std::hash` is supported and enabled.

+	///

+	/// **See also:** Documentation for [std::hash](https://en.cppreference.com/w/cpp/utility/hash)

+	template<> struct hash<half_float::half>

+	{

+		/// Type of function argument.

+		typedef half_float::half argument_type;

+

+		/// Function return type.

+		typedef size_t result_type;

+

+		/// Compute hash function.

+		/// \param arg half to hash

+		/// \return hash value

+		result_type operator()(argument_type arg) const { return hash<half_float::detail::uint16>()(arg.data_&-static_cast<unsigned>(arg.data_!=0x8000)); }

+	};
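+
+	// Note (illustrative): the mask `-static_cast<unsigned>(arg.data_!=0x8000)` turns the bit
+	// pattern of -0 (0x8000) into 0x0000 before hashing, so +0 and -0, which compare equal,
+	// also hash to the same value.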

+#endif

+}

+

+namespace half_float

+{

+	/// \anchor compop

+	/// \name Comparison operators

+	/// \{

+

+	/// Comparison for equality.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if operands equal

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator==(half x, half y)

+	{

+		return !detail::compsignal(x.data_, y.data_) && (x.data_==y.data_ || !((x.data_|y.data_)&0x7FFF));

+	}

+

+	/// Comparison for inequality.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if operands not equal

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator!=(half x, half y)

+	{

+		return detail::compsignal(x.data_, y.data_) || (x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF));

+	}

+

+	/// Comparison for less than.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x less than \a y

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator<(half x, half y)

+	{

+		return !detail::compsignal(x.data_, y.data_) &&

+			((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) < ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));

+	}
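+
+	// Note (illustrative): the expression `(d^(0x8000|(0x8000-(d>>15))))+(d>>15)` used here and
+	// in the remaining comparisons maps the sign-magnitude half encoding to a monotonically
+	// increasing unsigned key (negative values are complemented, positive values are offset by
+	// 0x8000), with +0 and -0 mapping to the same key.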

+

+	/// Comparison for greater than.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x greater than \a y

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator>(half x, half y)

+	{

+		return !detail::compsignal(x.data_, y.data_) &&

+			((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) > ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));

+	}

+

+	/// Comparison for less equal.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x less equal \a y

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator<=(half x, half y)

+	{

+		return !detail::compsignal(x.data_, y.data_) &&

+			((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) <= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));

+	}

+

+	/// Comparison for greater equal.

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x greater equal \a y

+	/// \retval false else

+	/// \exception FE_INVALID if \a x or \a y is NaN

+	inline HALF_CONSTEXPR_NOERR bool operator>=(half x, half y)

+	{

+		return !detail::compsignal(x.data_, y.data_) &&

+			((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) >= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));

+	}

+

+	/// \}

+	/// \anchor arithmetics

+	/// \name Arithmetic operators

+	/// \{

+

+	/// Identity.

+	/// \param arg operand

+	/// \return unchanged operand

+	inline HALF_CONSTEXPR half operator+(half arg) { return arg; }

+

+	/// Negation.

+	/// \param arg operand

+	/// \return negated operand

+	inline HALF_CONSTEXPR half operator-(half arg) { return half(detail::binary, arg.data_^0x8000); }

+

+	/// Addition.

+	/// This operation is exact to rounding for all rounding modes.

+	/// \param x left operand

+	/// \param y right operand

+	/// \return sum of half expressions

+	/// \exception FE_INVALID if \a x and \a y are infinities with different signs or signaling NaNs

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half operator+(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)+detail::half2float<detail::internal_t>(y.data_)));

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF;

+		bool sub = ((x.data_^y.data_)&0x8000) != 0;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) : (absy!=0x7C00) ? x.data_ :

+										(sub && absx==0x7C00) ? detail::invalid() : y.data_);

+		if(!absx)

+			return absy ? y : half(detail::binary, (half::round_style==std::round_toward_neg_infinity) ? (x.data_|y.data_) : (x.data_&y.data_));

+		if(!absy)

+			return x;

+		unsigned int sign = ((sub && absy>absx) ? y.data_ : x.data_) & 0x8000;

+		if(absy > absx)

+			std::swap(absx, absy);

+		int exp = (absx>>10) + (absx<=0x3FF), d = exp - (absy>>10) - (absy<=0x3FF), mx = ((absx&0x3FF)|((absx>0x3FF)<<10)) << 3, my;

+		if(d < 13)

+		{

+			my = ((absy&0x3FF)|((absy>0x3FF)<<10)) << 3;

+			my = (my>>d) | ((my&((1<<d)-1))!=0);

+		}

+		else

+			my = 1;

+		if(sub)

+		{

+			if(!(mx-=my))

+				return half(detail::binary, static_cast<unsigned>(half::round_style==std::round_toward_neg_infinity)<<15);

+			for(; mx<0x2000 && exp>1; mx<<=1,--exp) ;

+		}

+		else

+		{

+			mx += my;

+			int i = mx >> 14;

+			if((exp+=i) > 30)

+				return half(detail::binary, detail::overflow<half::round_style>(sign));

+			mx = (mx>>i) | (mx&i);

+		}

+		return half(detail::binary, detail::rounded<half::round_style,false>(sign+((exp-1)<<10)+(mx>>3), (mx>>2)&1, (mx&0x3)!=0));

+	#endif

+	}

+

+	/// Subtraction.

+	/// This operation is exact to rounding for all rounding modes.

+	/// \param x left operand

+	/// \param y right operand

+	/// \return difference of half expressions

+	/// \exception FE_INVALID if \a x and \a y are infinities with equal signs or signaling NaNs

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half operator-(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)-detail::half2float<detail::internal_t>(y.data_)));

+	#else

+		return x + -y;

+	#endif

+	}

+

+	/// Multiplication.

+	/// This operation is exact to rounding for all rounding modes.

+	/// \param x left operand

+	/// \param y right operand

+	/// \return product of half expressions

+	/// \exception FE_INVALID if multiplying 0 with infinity or if \a x or \a y is signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half operator*(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)*detail::half2float<detail::internal_t>(y.data_)));

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = -16;

+		unsigned int sign = (x.data_^y.data_) & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										((absx==0x7C00 && !absy)||(absy==0x7C00 && !absx)) ? detail::invalid() : (sign|0x7C00));

+		if(!absx || !absy)

+			return half(detail::binary, sign);

+		for(; absx<0x400; absx<<=1,--exp) ;

+		for(; absy<0x400; absy<<=1,--exp) ;

+		detail::uint32 m = static_cast<detail::uint32>((absx&0x3FF)|0x400) * static_cast<detail::uint32>((absy&0x3FF)|0x400);

+		int i = m >> 21, s = m & i;

+		exp += (absx>>10) + (absy>>10) + i;

+		if(exp > 29)

+			return half(detail::binary, detail::overflow<half::round_style>(sign));

+		else if(exp < -11)

+			return half(detail::binary, detail::underflow<half::round_style>(sign));

+		return half(detail::binary, detail::fixed2half<half::round_style,20,false,false,false>(m>>i, exp, sign, s));

+	#endif

+	}

+

+	/// Division.

+	/// This operation is exact to rounding for all rounding modes.

+	/// \param x left operand

+	/// \param y right operand

+	/// \return quotient of half expressions

+	/// \exception FE_INVALID if dividing 0s or infinities with each other or if \a x or \a y is signaling NaN

+	/// \exception FE_DIVBYZERO if dividing finite value by 0

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half operator/(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)/detail::half2float<detail::internal_t>(y.data_)));

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = 14;

+		unsigned int sign = (x.data_^y.data_) & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										(absx==absy) ? detail::invalid() : (sign|((absx==0x7C00) ? 0x7C00 : 0)));

+		if(!absx)

+			return half(detail::binary, absy ? sign : detail::invalid());

+		if(!absy)

+			return half(detail::binary, detail::pole(sign));

+		for(; absx<0x400; absx<<=1,--exp) ;

+		for(; absy<0x400; absy<<=1,++exp) ;

+		detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;

+		int i = mx < my;

+		exp += (absx>>10) - (absy>>10) - i;

+		if(exp > 29)

+			return half(detail::binary, detail::overflow<half::round_style>(sign));

+		else if(exp < -11)

+			return half(detail::binary, detail::underflow<half::round_style>(sign));

+		mx <<= 12 + i;

+		my <<= 1;

+		return half(detail::binary, detail::fixed2half<half::round_style,11,false,false,false>(mx/my, exp, sign, mx%my!=0));

+	#endif

+	}

+

+	/// \}

+	/// \anchor streaming

+	/// \name Input and output

+	/// \{

+

+	/// Output operator.

+	///	This uses the built-in functionality for streaming out floating-point numbers.

+	/// \param out output stream to write into

+	/// \param arg half expression to write

+	/// \return reference to output stream

+	template<typename charT,typename traits> std::basic_ostream<charT,traits>& operator<<(std::basic_ostream<charT,traits> &out, half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return out << detail::half2float<detail::internal_t>(arg.data_);

+	#else

+		return out << detail::half2float<float>(arg.data_);

+	#endif

+	}

+

+	/// Input operator.

+	///	This uses the built-in functionality for streaming in floating-point numbers, specifically double precision floating 

+	/// point numbers (unless overridden with [HALF_ARITHMETIC_TYPE](\ref HALF_ARITHMETIC_TYPE)). So the input string is first 

+	/// rounded to double precision using the underlying platform's current floating-point rounding mode before being rounded 

+	/// to half-precision using the library's half-precision rounding mode.

+	/// \param in input stream to read from

+	/// \param arg half to read into

+	/// \return reference to input stream

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	template<typename charT,typename traits> std::basic_istream<charT,traits>& operator>>(std::basic_istream<charT,traits> &in, half &arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		detail::internal_t f;

+	#else

+		double f;

+	#endif

+		if(in >> f)

+			arg.data_ = detail::float2half<half::round_style>(f);

+		return in;

+	}
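+	// Illustrative I/O sketch (assumes <sstream> and <iostream> in addition to this header):
+	//
+	//   std::istringstream in("0.333");
+	//   half_float::half h;
+	//   in >> h;                  // parsed as double by default, then rounded to half precision
+	//   std::cout << h << '\n';   // written out through the float conversion shown above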

+

+	/// \}

+	/// \anchor basic

+	/// \name Basic mathematical operations

+	/// \{

+

+	/// Absolute value.

+	/// **See also:** Documentation for [std::fabs](https://en.cppreference.com/w/cpp/numeric/math/fabs).

+	/// \param arg operand

+	/// \return absolute value of \a arg

+	inline HALF_CONSTEXPR half fabs(half arg) { return half(detail::binary, arg.data_&0x7FFF); }

+

+	/// Absolute value.

+	/// **See also:** Documentation for [std::abs](https://en.cppreference.com/w/cpp/numeric/math/fabs).

+	/// \param arg operand

+	/// \return absolute value of \a arg

+	inline HALF_CONSTEXPR half abs(half arg) { return fabs(arg); }

+

+	/// Remainder of division.

+	/// **See also:** Documentation for [std::fmod](https://en.cppreference.com/w/cpp/numeric/math/fmod).

+	/// \param x first operand

+	/// \param y second operand

+	/// \return remainder of floating-point division.

+	/// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN

+	inline half fmod(half x, half y)

+	{

+		unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, sign = x.data_ & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										(absx==0x7C00) ? detail::invalid() : x.data_);

+		if(!absy)

+			return half(detail::binary, detail::invalid());

+		if(!absx)

+			return x;

+		if(absx == absy)

+			return half(detail::binary, sign);

+		return half(detail::binary, sign|detail::mod<false,false>(absx, absy));

+	}

+

+	/// Remainder of division.

+	/// **See also:** Documentation for [std::remainder](https://en.cppreference.com/w/cpp/numeric/math/remainder).

+	/// \param x first operand

+	/// \param y second operand

+	/// \return remainder of floating-point division.

+	/// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN

+	inline half remainder(half x, half y)

+	{

+		unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, sign = x.data_ & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										(absx==0x7C00) ? detail::invalid() : x.data_);

+		if(!absy)

+			return half(detail::binary, detail::invalid());

+		if(absx == absy)

+			return half(detail::binary, sign);

+		return half(detail::binary, sign^detail::mod<false,true>(absx, absy));

+	}

+

+	/// Remainder of division.

+	/// **See also:** Documentation for [std::remquo](https://en.cppreference.com/w/cpp/numeric/math/remquo).

+	/// \param x first operand

+	/// \param y second operand

+	/// \param quo address to store some bits of quotient at

+	/// \return remainder of floating-point division.

+	/// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN

+	inline half remquo(half x, half y, int *quo)

+	{

+		unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, value = x.data_ & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										(absx==0x7C00) ? detail::invalid() : (*quo = 0, x.data_));

+		if(!absy)

+			return half(detail::binary, detail::invalid());

+		bool qsign = ((value^y.data_)&0x8000) != 0;

+		int q = 1;

+		if(absx != absy)

+			value ^= detail::mod<true, true>(absx, absy, &q);

+		return *quo = qsign ? -q : q, half(detail::binary, value);

+	}
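+	// Illustrative sketch of the three remainder flavours (assuming half_float::half as above):
+	//
+	//   half_float::half x(7.0f), y(3.0f);
+	//   half_float::half m = fmod(x, y);        // 1.0, same sign as x
+	//   half_float::half r = remainder(x, y);   // 1.0, quotient rounded to nearest integer
+	//   int q;
+	//   half_float::half s = remquo(x, y, &q);  // remainder plus low-order quotient bits in q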

+

+	/// Fused multiply add.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::fma](https://en.cppreference.com/w/cpp/numeric/math/fma).

+	/// \param x first operand

+	/// \param y second operand

+	/// \param z third operand

+	/// \return ( \a x * \a y ) + \a z rounded as one operation.

+	/// \exception FE_INVALID according to operator*() and operator+() unless any argument is a quiet NaN and no argument is a signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding the final addition

+	inline half fma(half x, half y, half z)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_), fz = detail::half2float<detail::internal_t>(z.data_);

+		#if HALF_ENABLE_CPP11_CMATH && FP_FAST_FMA

+			return half(detail::binary, detail::float2half<half::round_style>(std::fma(fx, fy, fz)));

+		#else

+			return half(detail::binary, detail::float2half<half::round_style>(fx*fy+fz));

+		#endif

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, absz = z.data_ & 0x7FFF, exp = -15;

+		unsigned int sign = (x.data_^y.data_) & 0x8000;

+		bool sub = ((sign^z.data_)&0x8000) != 0;

+		if(absx >= 0x7C00 || absy >= 0x7C00 || absz >= 0x7C00)

+			return	(absx>0x7C00 || absy>0x7C00 || absz>0x7C00) ? half(detail::binary, detail::signal(x.data_, y.data_, z.data_)) :

+					(absx==0x7C00) ? half(detail::binary, (!absy || (sub && absz==0x7C00)) ? detail::invalid() : (sign|0x7C00)) :

+					(absy==0x7C00) ? half(detail::binary, (!absx || (sub && absz==0x7C00)) ? detail::invalid() : (sign|0x7C00)) : z;

+		if(!absx || !absy)

+			return absz ? z : half(detail::binary, (half::round_style==std::round_toward_neg_infinity) ? (z.data_|sign) : (z.data_&sign));

+		for(; absx<0x400; absx<<=1,--exp) ;

+		for(; absy<0x400; absy<<=1,--exp) ;

+		detail::uint32 m = static_cast<detail::uint32>((absx&0x3FF)|0x400) * static_cast<detail::uint32>((absy&0x3FF)|0x400);

+		int i = m >> 21;

+		exp += (absx>>10) + (absy>>10) + i;

+		m <<= 3 - i;

+		if(absz)

+		{

+			int expz = 0;

+			for(; absz<0x400; absz<<=1,--expz) ;

+			expz += absz >> 10;

+			detail::uint32 mz = static_cast<detail::uint32>((absz&0x3FF)|0x400) << 13;

+			if(expz > exp || (expz == exp && mz > m))

+			{

+				std::swap(m, mz);

+				std::swap(exp, expz);

+				if(sub)

+					sign = z.data_ & 0x8000;

+			}

+			int d = exp - expz;

+			mz = (d<23) ? ((mz>>d)|((mz&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;

+			if(sub)

+			{

+				m = m - mz;

+				if(!m)

+					return half(detail::binary, static_cast<unsigned>(half::round_style==std::round_toward_neg_infinity)<<15);

+				for(; m<0x800000; m<<=1,--exp) ;

+			}

+			else

+			{

+				m += mz;

+				i = m >> 24;

+				m = (m>>i) | (m&i);

+				exp += i;

+			}

+		}

+		if(exp > 30)

+			return half(detail::binary, detail::overflow<half::round_style>(sign));

+		else if(exp < -10)

+			return half(detail::binary, detail::underflow<half::round_style>(sign));

+		return half(detail::binary, detail::fixed2half<half::round_style,23,false,false,false>(m, exp-1, sign));

+	#endif

+	}
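+	// Illustrative sketch: fma() rounds (x*y)+z only once, which can differ from x*y+z
+	// evaluated with two half-precision roundings (assuming half_float::half as above):
+	//
+	//   half_float::half x(0.1f), y(10.0f), z(-1.0f);
+	//   half_float::half fused    = fma(x, y, z);  // single rounding of the exact result
+	//   half_float::half separate = x * y + z;     // x*y is rounded before the addition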

+

+	/// Maximum of half expressions.

+	/// **See also:** Documentation for [std::fmax](https://en.cppreference.com/w/cpp/numeric/math/fmax).

+	/// \param x first operand

+	/// \param y second operand

+	/// \return maximum of operands, ignoring quiet NaNs

+	/// \exception FE_INVALID if \a x or \a y is signaling NaN

+	inline HALF_CONSTEXPR_NOERR half fmax(half x, half y)

+	{

+		return half(detail::binary, (!isnan(y) && (isnan(x) || (x.data_^(0x8000|(0x8000-(x.data_>>15)))) < 

+			(y.data_^(0x8000|(0x8000-(y.data_>>15)))))) ? detail::select(y.data_, x.data_) : detail::select(x.data_, y.data_));

+	}

+

+	/// Minimum of half expressions.

+	/// **See also:** Documentation for [std::fmin](https://en.cppreference.com/w/cpp/numeric/math/fmin).

+	/// \param x first operand

+	/// \param y second operand

+	/// \return minimum of operands, ignoring quiet NaNs

+	/// \exception FE_INVALID if \a x or \a y is signaling NaN

+	inline HALF_CONSTEXPR_NOERR half fmin(half x, half y)

+	{

+		return half(detail::binary, (!isnan(y) && (isnan(x) || (x.data_^(0x8000|(0x8000-(x.data_>>15)))) >

+			(y.data_^(0x8000|(0x8000-(y.data_>>15)))))) ? detail::select(y.data_, x.data_) : detail::select(x.data_, y.data_));

+	}

+

+	/// Positive difference.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::fdim](https://en.cppreference.com/w/cpp/numeric/math/fdim).

+	/// \param x first operand

+	/// \param y second operand

+	/// \return \a x - \a y, or 0 if the difference is negative

+	/// \exception FE_... according to operator-(half,half)

+	inline half fdim(half x, half y)

+	{

+		if(isnan(x) || isnan(y))

+			return half(detail::binary, detail::signal(x.data_, y.data_));

+		return (x.data_^(0x8000|(0x8000-(x.data_>>15)))) <= (y.data_^(0x8000|(0x8000-(y.data_>>15)))) ? half(detail::binary, 0) : (x-y);

+	}

+

+	/// Get NaN value.

+	/// **See also:** Documentation for [std::nan](https://en.cppreference.com/w/cpp/numeric/math/nan).

+	/// \param arg string code

+	/// \return quiet NaN

+	inline half nanh(const char *arg)

+	{

+		unsigned int value = 0x7FFF;

+		while(*arg)

+			value ^= static_cast<unsigned>(*arg++) & 0xFF;

+		return half(detail::binary, value);

+	}
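+	// Illustrative sketch: fmax()/fmin() ignore quiet NaNs, and nanh() builds a quiet NaN
+	// with a payload derived from the string code (assuming half_float::half as above):
+	//
+	//   half_float::half a(2.0f), n = nanh("");
+	//   half_float::half hi = fmax(a, n);   // 2.0: the quiet NaN operand is ignored
+	//   half_float::half lo = fmin(n, a);   // 2.0 as well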

+

+	/// \}

+	/// \anchor exponential

+	/// \name Exponential functions

+	/// \{

+

+	/// Exponential function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::exp](https://en.cppreference.com/w/cpp/numeric/math/exp).

+	/// \param arg function argument

+	/// \return e raised to \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half exp(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::exp(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF;

+		if(!abs)

+			return half(detail::binary, 0x3C00);

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? (0x7C00&((arg.data_>>15)-1U)) : detail::signal(arg.data_));

+		if(abs >= 0x4C80)

+			return half(detail::binary, (arg.data_&0x8000) ? detail::underflow<half::round_style>() : detail::overflow<half::round_style>());

+		detail::uint32 m = detail::multiply64(static_cast<detail::uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29);

+		int e = (abs>>10) + (abs<=0x3FF), exp;

+		if(e < 14)

+		{

+			exp = 0;

+			m >>= 14 - e;

+		}

+		else

+		{

+			exp = m >> (45-e);

+			m = (m<<(e-14)) & 0x7FFFFFFF;

+		}

+		return half(detail::binary, detail::exp2_post<half::round_style,true>(detail::exp2(m, 26), exp, (arg.data_&0x8000)!=0));

+	#endif

+	}

+

+	/// Binary exponential.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::exp2](https://en.cppreference.com/w/cpp/numeric/math/exp2).

+	/// \param arg function argument

+	/// \return 2 raised to \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half exp2(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::exp2(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF;

+		if(!abs)

+			return half(detail::binary, 0x3C00);

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? (0x7C00&((arg.data_>>15)-1U)) : detail::signal(arg.data_));

+		if(abs >= 0x4E40)

+			return half(detail::binary, (arg.data_&0x8000) ? detail::underflow<half::round_style>() : detail::overflow<half::round_style>());

+		int e = (abs>>10) + (abs<=0x3FF), exp = (abs&0x3FF) + ((abs>0x3FF)<<10);

+		detail::uint32 m = detail::exp2((static_cast<detail::uint32>(exp)<<(6+e))&0x7FFFFFFF, 28);

+		exp >>= 25 - e;

+		if(m == 0x80000000)

+		{

+			if(arg.data_&0x8000)

+				exp = -exp;

+			else if(exp > 15)

+				return half(detail::binary, detail::overflow<half::round_style>());

+			return half(detail::binary, detail::fixed2half<half::round_style,31,false,false,false>(m, exp+14));

+		}

+		return half(detail::binary, detail::exp2_post<half::round_style,true>(m, exp, (arg.data_&0x8000)!=0));

+	#endif

+	}

+

+	/// Exponential minus one.

+	/// This function may be 1 ULP off the correctly rounded exact result in <0.05% of inputs for `std::round_to_nearest` 

+	/// and in <1% of inputs for any other rounding mode.

+	///

+	/// **See also:** Documentation for [std::expm1](https://en.cppreference.com/w/cpp/numeric/math/expm1).

+	/// \param arg function argument

+	/// \return e raised to \a arg and subtracted by 1

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half expm1(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::expm1(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;

+		if(!abs)

+			return arg;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? (0x7C00+(sign>>1)) : detail::signal(arg.data_));

+		if(abs >= 0x4A00)

+			return half(detail::binary, (arg.data_&0x8000) ? detail::rounded<half::round_style,true>(0xBBFF, 1, 1) : detail::overflow<half::round_style>());

+		detail::uint32 m = detail::multiply64(static_cast<detail::uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29);

+		int e = (abs>>10) + (abs<=0x3FF), exp;

+		if(e < 14)

+		{

+			exp = 0;

+			m >>= 14 - e;

+		}

+		else

+		{

+			exp = m >> (45-e);

+			m = (m<<(e-14)) & 0x7FFFFFFF;

+		}

+		m = detail::exp2(m);

+		if(sign)

+		{

+			int s = 0;

+			if(m > 0x80000000)

+			{

+				++exp;

+				m = detail::divide64(0x80000000, m, s);

+			}

+			m = 0x80000000 - ((m>>exp)|((m&((static_cast<detail::uint32>(1)<<exp)-1))!=0)|s);

+			exp = 0;

+		}

+		else

+			m -= (exp<31) ? (0x80000000>>exp) : 1;

+		for(exp+=14; m<0x80000000 && exp; m<<=1,--exp) ;

+		if(exp > 29)

+			return half(detail::binary, detail::overflow<half::round_style>());

+		return half(detail::binary, detail::rounded<half::round_style,true>(sign+(exp<<10)+(m>>21), (m>>20)&1, (m&0xFFFFF)!=0));

+	#endif

+	}
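+	// Illustrative sketch of the exponential family (assuming half_float::half as above):
+	//
+	//   half_float::half t(0.001f);
+	//   half_float::half a = exp(t) - half_float::half(1.0f);  // cancellation near 0 costs accuracy
+	//   half_float::half b = expm1(t);                          // keeps the small result accurate
+	//   half_float::half c = exp2(half_float::half(3.0f));      // 8.0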

+

+	/// Natural logarithm.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::log](https://en.cppreference.com/w/cpp/numeric/math/log).

+	/// \param arg function argument

+	/// \return logarithm of \a arg to base e

+	/// \exception FE_INVALID for signaling NaN or negative argument

+	/// \exception FE_DIVBYZERO for 0

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half log(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::log(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = -15;

+		if(!abs)

+			return half(detail::binary, detail::pole(0x8000));

+		if(arg.data_ & 0x8000)

+			return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs >= 0x7C00)

+			return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));

+		for(; abs<0x400; abs<<=1,--exp) ;

+		exp += abs >> 10;

+		return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(

+			detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 27)+8, exp, 17));

+	#endif

+	}

+

+	/// Common logarithm.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::log10](https://en.cppreference.com/w/cpp/numeric/math/log10).

+	/// \param arg function argument

+	/// \return logarithm of \a arg to base 10

+	/// \exception FE_INVALID for signaling NaN or negative argument

+	/// \exception FE_DIVBYZERO for 0

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half log10(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::log10(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = -15;

+		if(!abs)

+			return half(detail::binary, detail::pole(0x8000));

+		if(arg.data_ & 0x8000)

+			return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs >= 0x7C00)

+			return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));

+		switch(abs)

+		{

+			case 0x4900: return half(detail::binary, 0x3C00);

+			case 0x5640: return half(detail::binary, 0x4000);

+			case 0x63D0: return half(detail::binary, 0x4200);

+			case 0x70E2: return half(detail::binary, 0x4400);

+		}

+		for(; abs<0x400; abs<<=1,--exp) ;

+		exp += abs >> 10;

+		return half(detail::binary, detail::log2_post<half::round_style,0xD49A784C>(

+			detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 27)+8, exp, 16));

+	#endif

+	}

+

+	/// Binary logarithm.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::log2](https://en.cppreference.com/w/cpp/numeric/math/log2).

+	/// \param arg function argument

+	/// \return logarithm of \a arg to base 2

+	/// \exception FE_INVALID for signaling NaN or negative argument

+	/// \exception FE_DIVBYZERO for 0

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half log2(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::log2(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = -15, s = 0;

+		if(!abs)

+			return half(detail::binary, detail::pole(0x8000));

+		if(arg.data_ & 0x8000)

+			return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs >= 0x7C00)

+			return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));

+		if(abs == 0x3C00)

+			return half(detail::binary, 0);

+		for(; abs<0x400; abs<<=1,--exp) ;

+		exp += (abs>>10);

+		if(!(abs&0x3FF))

+		{

+			unsigned int value = static_cast<unsigned>(exp<0) << 15, m = std::abs(exp) << 6;

+			for(exp=18; m<0x400; m<<=1,--exp) ;

+			return half(detail::binary, value+(exp<<10)+m);

+		}

+		detail::uint32 ilog = exp, sign = detail::sign_mask(ilog), m = 

+			(((ilog<<27)+(detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 28)>>4))^sign) - sign;

+		if(!m)

+			return half(detail::binary, 0);

+		for(exp=14; m<0x8000000 && exp; m<<=1,--exp) ;

+		for(; m>0xFFFFFFF; m>>=1,++exp)

+			s |= m & 1;

+		return half(detail::binary, detail::fixed2half<half::round_style,27,false,false,true>(m, exp, sign&0x8000, s));

+	#endif

+	}

+

+	/// Natural logarithm plus one.

+	/// This function may be 1 ULP off the correctly rounded exact result in <0.05% of inputs for `std::round_to_nearest` 

+	/// and in ~1% of inputs for any other rounding mode.

+	///

+	/// **See also:** Documentation for [std::log1p](https://en.cppreference.com/w/cpp/numeric/math/log1p).

+	/// \param arg function argument

+	/// \return logarithm of \a arg plus 1 to base e

+	/// \exception FE_INVALID for signaling NaN or argument <-1

+	/// \exception FE_DIVBYZERO for -1

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half log1p(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::log1p(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		if(arg.data_ >= 0xBC00)

+			return half(detail::binary, (arg.data_==0xBC00) ? detail::pole(0x8000) : (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));

+		int abs = arg.data_ & 0x7FFF, exp = -15;

+		if(!abs || abs >= 0x7C00)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		for(; abs<0x400; abs<<=1,--exp) ;

+		exp += abs >> 10;

+		detail::uint32 m = static_cast<detail::uint32>((abs&0x3FF)|0x400) << 20;

+		if(arg.data_ & 0x8000)

+		{

+			m = 0x40000000 - (m>>-exp);

+			for(exp=0; m<0x40000000; m<<=1,--exp) ;

+		}

+		else

+		{

+			if(exp < 0)

+			{

+				m = 0x40000000 + (m>>-exp);

+				exp = 0;

+			}

+			else

+			{

+				m += 0x40000000 >> exp;

+				int i = m >> 31;

+				m >>= i;

+				exp += i;

+			}

+		}

+		return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(detail::log2(m), exp, 17));

+	#endif

+	}
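+	// Illustrative sketch of the logarithm family (assuming half_float::half as above):
+	//
+	//   half_float::half x(100.0f);
+	//   half_float::half a = log(x);      // natural logarithm
+	//   half_float::half b = log10(x);    // 2.0 exactly (see the special cases in log10 above)
+	//   half_float::half c = log2(half_float::half(8.0f));   // 3.0
+	//   half_float::half d = log1p(half_float::half(0.0f));  // 0.0, accurate near zero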

+

+	/// \}

+	/// \anchor power

+	/// \name Power functions

+	/// \{

+

+	/// Square root.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::sqrt](https://en.cppreference.com/w/cpp/numeric/math/sqrt).

+	/// \param arg function argument

+	/// \return square root of \a arg

+	/// \exception FE_INVALID for signaling NaN and negative arguments

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half sqrt(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = 15;

+		if(!abs || arg.data_ >= 0x7C00)

+			return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (arg.data_>0x8000) ? detail::invalid() : arg.data_);

+		for(; abs<0x400; abs<<=1,--exp) ;

+		detail::uint32 r = static_cast<detail::uint32>((abs&0x3FF)|0x400) << 10, m = detail::sqrt<20>(r, exp+=abs>>10);

+		return half(detail::binary, detail::rounded<half::round_style,false>((exp<<10)+(m&0x3FF), r>m, r!=0));

+	#endif

+	}

+

+	/// Cubic root.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::cbrt](https://en.cppreference.com/w/cpp/numeric/math/cbrt).

+	/// \param arg function argument

+	/// \return cubic root of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half cbrt(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::cbrt(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = -15;

+		if(!abs || abs == 0x3C00 || abs >= 0x7C00)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		for(; abs<0x400; abs<<=1, --exp);

+		detail::uint32 ilog = exp + (abs>>10), sign = detail::sign_mask(ilog), f, m = 

+			(((ilog<<27)+(detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 24)>>4))^sign) - sign;

+		for(exp=2; m<0x80000000; m<<=1,--exp) ;

+		m = detail::multiply64(m, 0xAAAAAAAB);

+		int i = m >> 31, s;

+		exp += i;

+		m <<= 1 - i;

+		if(exp < 0)

+		{

+			f = m >> -exp;

+			exp = 0;

+		}

+		else

+		{

+			f = (m<<exp) & 0x7FFFFFFF;

+			exp = m >> (31-exp);

+		}

+		m = detail::exp2(f, (half::round_style==std::round_to_nearest) ? 29 : 26);

+		if(sign)

+		{

+			if(m > 0x80000000)

+			{

+				m = detail::divide64(0x80000000, m, s);

+				++exp;

+			}

+			exp = -exp;

+		}

+		return half(detail::binary, (half::round_style==std::round_to_nearest) ?

+			detail::fixed2half<half::round_style,31,false,false,false>(m, exp+14, arg.data_&0x8000) :

+			detail::fixed2half<half::round_style,23,false,false,false>((m+0x80)>>8, exp+14, arg.data_&0x8000));

+	#endif

+	}
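+	// Illustrative sketch of the root functions (assuming half_float::half as above):
+	//
+	//   half_float::half a = sqrt(half_float::half(2.0f));   // correctly rounded sqrt(2)
+	//   half_float::half b = cbrt(half_float::half(27.0f));  // 3.0
+	//   half_float::half c = sqrt(half_float::half(-1.0f));  // quiet NaN, raises FE_INVALID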

+

+	/// Hypotenuse function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::hypot](https://en.cppreference.com/w/cpp/numeric/math/hypot).

+	/// \param x first argument

+	/// \param y second argument

+	/// \return square root of sum of squares without internal over- or underflows

+	/// \exception FE_INVALID if \a x or \a y is signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding of the final square root

+	inline half hypot(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_);

+		#if HALF_ENABLE_CPP11_CMATH

+			return half(detail::binary, detail::float2half<half::round_style>(std::hypot(fx, fy)));

+		#else

+			return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(fx*fx+fy*fy)));

+		#endif

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, expx = 0, expy = 0;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx==0x7C00) ? detail::select(0x7C00, y.data_) :

+				(absy==0x7C00) ? detail::select(0x7C00, x.data_) : detail::signal(x.data_, y.data_));

+		if(!absx)

+			return half(detail::binary, absy ? detail::check_underflow(absy) : 0);

+		if(!absy)

+			return half(detail::binary, detail::check_underflow(absx));

+		if(absy > absx)

+			std::swap(absx, absy);

+		for(; absx<0x400; absx<<=1,--expx) ;

+		for(; absy<0x400; absy<<=1,--expy) ;

+		detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;

+		mx *= mx;

+		my *= my;

+		int ix = mx >> 21, iy = my >> 21;

+		expx = 2*(expx+(absx>>10)) - 15 + ix;

+		expy = 2*(expy+(absy>>10)) - 15 + iy;

+		mx <<= 10 - ix;

+		my <<= 10 - iy;

+		int d = expx - expy;

+		my = (d<30) ? ((my>>d)|((my&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;

+		return half(detail::binary, detail::hypot_post<half::round_style>(mx+my, expx));

+	#endif

+	}

+

+	/// Hypotenuse function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::hypot](https://en.cppreference.com/w/cpp/numeric/math/hypot).

+	/// \param x first argument

+	/// \param y second argument

+	/// \param z third argument

+	/// \return square root of sum of squares without internal over- or underflows

+	/// \exception FE_INVALID if \a x, \a y or \a z is signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding of the final square root

+	inline half hypot(half x, half y, half z)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_), fz = detail::half2float<detail::internal_t>(z.data_);

+		return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(fx*fx+fy*fy+fz*fz)));

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, absz = z.data_ & 0x7FFF, expx = 0, expy = 0, expz = 0;

+		if(!absx)

+			return hypot(y, z);

+		if(!absy)

+			return hypot(x, z);

+		if(!absz)

+			return hypot(x, y);

+		if(absx >= 0x7C00 || absy >= 0x7C00 || absz >= 0x7C00)

+			return half(detail::binary,	(absx==0x7C00) ? detail::select(0x7C00, detail::select(y.data_, z.data_)) :

+										(absy==0x7C00) ? detail::select(0x7C00, detail::select(x.data_, z.data_)) :

+										(absz==0x7C00) ? detail::select(0x7C00, detail::select(x.data_, y.data_)) :

+										detail::signal(x.data_, y.data_, z.data_));

+		if(absz > absy)

+			std::swap(absy, absz);

+		if(absy > absx)

+			std::swap(absx, absy);

+		if(absz > absy)

+			std::swap(absy, absz);

+		for(; absx<0x400; absx<<=1,--expx) ;

+		for(; absy<0x400; absy<<=1,--expy) ;

+		for(; absz<0x400; absz<<=1,--expz) ;

+		detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400, mz = (absz&0x3FF) | 0x400;

+		mx *= mx;

+		my *= my;

+		mz *= mz;

+		int ix = mx >> 21, iy = my >> 21, iz = mz >> 21;

+		expx = 2*(expx+(absx>>10)) - 15 + ix;

+		expy = 2*(expy+(absy>>10)) - 15 + iy;

+		expz = 2*(expz+(absz>>10)) - 15 + iz;

+		mx <<= 10 - ix;

+		my <<= 10 - iy;

+		mz <<= 10 - iz;

+		int d = expy - expz;

+		mz = (d<30) ? ((mz>>d)|((mz&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;

+		my += mz;

+		if(my & 0x80000000)

+		{

+			my = (my>>1) | (my&1);

+			if(++expy > expx)

+			{

+				std::swap(mx, my);

+				std::swap(expx, expy);

+			}

+		}

+		d = expx - expy;

+		my = (d<30) ? ((my>>d)|((my&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;

+		return half(detail::binary, detail::hypot_post<half::round_style>(mx+my, expx));

+	#endif

+	}

+

+	/// Power function.

+	/// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in ~0.00025% of inputs.

+	///

+	/// **See also:** Documentation for [std::pow](https://en.cppreference.com/w/cpp/numeric/math/pow).

+	/// \param x base

+	/// \param y exponent

+	/// \return \a x raised to \a y

+	/// \exception FE_INVALID if \a x or \a y is signaling NaN or if \a x is finite and negative and \a y is finite and not integral

+	/// \exception FE_DIVBYZERO if \a x is 0 and \a y is negative

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half pow(half x, half y)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::pow(detail::half2float<detail::internal_t>(x.data_), detail::half2float<detail::internal_t>(y.data_))));

+	#else

+		int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = -15;

+		if(!absy || x.data_ == 0x3C00)

+			return half(detail::binary, detail::select(0x3C00, (x.data_==0x3C00) ? y.data_ : x.data_));

+		bool is_int = absy >= 0x6400 || (absy>=0x3C00 && !(absy&((1<<(25-(absy>>10)))-1)));

+		unsigned int sign = x.data_ & (static_cast<unsigned>((absy<0x6800)&&is_int&&((absy>>(25-(absy>>10)))&1))<<15);

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+			return half(detail::binary,	(absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :

+										(absy==0x7C00) ? ((absx==0x3C00) ? 0x3C00 : (!absx && y.data_==0xFC00) ? detail::pole() :

+										(0x7C00&-((y.data_>>15)^(absx>0x3C00)))) : (sign|(0x7C00&((y.data_>>15)-1U))));

+		if(!absx)

+			return half(detail::binary, (y.data_&0x8000) ? detail::pole(sign) : sign);

+		if((x.data_&0x8000) && !is_int)

+			return half(detail::binary, detail::invalid());

+		if(x.data_ == 0xBC00)

+			return half(detail::binary, sign|0x3C00);

+		if(y.data_ == 0x3800)

+			return sqrt(x);

+		if(y.data_ == 0x3C00)

+			return half(detail::binary, detail::check_underflow(x.data_));

+		if(y.data_ == 0x4000)

+			return x * x;

+		for(; absx<0x400; absx<<=1,--exp) ;

+		detail::uint32 ilog = exp + (absx>>10), msign = detail::sign_mask(ilog), f, m = 

+			(((ilog<<27)+((detail::log2(static_cast<detail::uint32>((absx&0x3FF)|0x400)<<20)+8)>>4))^msign) - msign;

+		for(exp=-11; m<0x80000000; m<<=1,--exp) ;

+		for(; absy<0x400; absy<<=1,--exp) ;

+		m = detail::multiply64(m, static_cast<detail::uint32>((absy&0x3FF)|0x400)<<21);

+		int i = m >> 31;

+		exp += (absy>>10) + i;

+		m <<= 1 - i;

+		if(exp < 0)

+		{

+			f = m >> -exp;

+			exp = 0;

+		}

+		else

+		{

+			f = (m<<exp) & 0x7FFFFFFF;

+			exp = m >> (31-exp);

+		}

+		return half(detail::binary, detail::exp2_post<half::round_style,false>(detail::exp2(f), exp, ((msign&1)^(y.data_>>15))!=0, sign));

+	#endif

+	}
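+	// Illustrative sketch of hypot() and pow() (assuming half_float::half as above):
+	//
+	//   half_float::half x(3.0f), y(4.0f);
+	//   half_float::half h = hypot(x, y);                     // 5.0, no intermediate overflow
+	//   half_float::half p = pow(x, half_float::half(2.0f));  // handled via x*x, see above
+	//   half_float::half r = pow(x, half_float::half(0.5f));  // handled via sqrt(x), see above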

+

+	/// \}

+	/// \anchor trigonometric

+	/// \name Trigonometric functions

+	/// \{

+

+	/// Compute sine and cosine simultaneously.

+	///	This returns the same results as sin() and cos() but is faster than calling each function individually.

+	///

+	/// This function is exact to rounding for all rounding modes.

+	/// \param arg function argument

+	/// \param sin variable to take sine of \a arg

+	/// \param cos variable to take cosine of \a arg

+	/// \exception FE_INVALID for signaling NaN or infinity

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline void sincos(half arg, half *sin, half *cos)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		detail::internal_t f = detail::half2float<detail::internal_t>(arg.data_);

+		*sin = half(detail::binary, detail::float2half<half::round_style>(std::sin(f)));

+		*cos = half(detail::binary, detail::float2half<half::round_style>(std::cos(f)));

+	#else

+		int abs = arg.data_ & 0x7FFF, sign = arg.data_ >> 15, k;

+		if(abs >= 0x7C00)

+			*sin = *cos = half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		else if(!abs)

+		{

+			*sin = arg;

+			*cos = half(detail::binary, 0x3C00);

+		}

+		else if(abs < 0x2500)

+		{

+			*sin = half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));

+			*cos = half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));

+		}

+		else

+		{

+			if(half::round_style != std::round_to_nearest)

+			{

+				switch(abs)

+				{

+				case 0x48B7:

+					*sin = half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x1D07, 1, 1));

+					*cos = half(detail::binary, detail::rounded<half::round_style,true>(0xBBFF, 1, 1));

+					return;

+				case 0x598C:

+					*sin = half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));

+					*cos = half(detail::binary, detail::rounded<half::round_style,true>(0x80FC, 1, 1));

+					return;

+				case 0x6A64:

+					*sin = half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x3BFE, 1, 1));

+					*cos = half(detail::binary, detail::rounded<half::round_style,true>(0x27FF, 1, 1));

+					return;

+				case 0x6D8C:

+					*sin = half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x0FE6, 1, 1));

+					*cos = half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));

+					return;

+				}

+			}

+			std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);

+			switch(k & 3)

+			{

+				case 1: sc = std::make_pair(sc.second, -sc.first); break;

+				case 2: sc = std::make_pair(-sc.first, -sc.second); break;

+				case 3: sc = std::make_pair(-sc.second, sc.first); break;

+			}

+			*sin = half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((sc.first^-static_cast<detail::uint32>(sign))+sign));

+			*cos = half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>(sc.second));

+		}

+	#endif

+	}
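+	// Illustrative sketch: sincos() fills both results through output pointers in one call
+	// (assuming half_float::half as above):
+	//
+	//   half_float::half angle(0.5f), s, c;
+	//   sincos(angle, &s, &c);  // s == sin(angle), c == cos(angle), computed together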

+

+	/// Sine function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::sin](https://en.cppreference.com/w/cpp/numeric/math/sin).

+	/// \param arg function argument

+	/// \return sine value of \a arg

+	/// \exception FE_INVALID for signaling NaN or infinity

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half sin(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::sin(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, k;

+		if(!abs)

+			return arg;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs < 0x2900)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));

+		if(half::round_style != std::round_to_nearest)

+			switch(abs)

+			{

+				case 0x48B7: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x1D07, 1, 1));

+				case 0x6A64: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x3BFE, 1, 1));

+				case 0x6D8C: return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x0FE6, 1, 1));

+			}

+		std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);

+		detail::uint32 sign = -static_cast<detail::uint32>(((k>>1)&1)^(arg.data_>>15));

+		return half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((((k&1) ? sc.second : sc.first)^sign) - sign));

+	#endif

+	}

+

+	/// Cosine function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::cos](https://en.cppreference.com/w/cpp/numeric/math/cos).

+	/// \param arg function argument

+	/// \return cosine value of \a arg

+	/// \exception FE_INVALID for signaling NaN or infinity

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half cos(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::cos(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, k;

+		if(!abs)

+			return half(detail::binary, 0x3C00);

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs < 0x2500)

+			return half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));

+		if(half::round_style != std::round_to_nearest && abs == 0x598C)

+			return half(detail::binary, detail::rounded<half::round_style,true>(0x80FC, 1, 1));

+		std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);

+		detail::uint32 sign = -static_cast<detail::uint32>(((k>>1)^k)&1);

+		return half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((((k&1) ? sc.first : sc.second)^sign) - sign));

+	#endif

+	}

+

+	/// Tangent function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::tan](https://en.cppreference.com/w/cpp/numeric/math/tan).

+	/// \param arg function argument

+	/// \return tangent value of \a arg

+	/// \exception FE_INVALID for signaling NaN or infinity

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half tan(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::tan(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = 13, k;

+		if(!abs)

+			return arg;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs < 0x2700)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));

+		if(half::round_style != std::round_to_nearest)

+			switch(abs)

+			{

+				case 0x658C: return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x07E6, 1, 1));

+				case 0x7330: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x4B62, 1, 1));

+			}

+		std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 30);

+		if(k & 1)

+			sc = std::make_pair(-sc.second, sc.first);

+		detail::uint32 signy = detail::sign_mask(sc.first), signx = detail::sign_mask(sc.second);

+		detail::uint32 my = (sc.first^signy) - signy, mx = (sc.second^signx) - signx;

+		for(; my<0x80000000; my<<=1,--exp) ;

+		for(; mx<0x80000000; mx<<=1,++exp) ;

+		return half(detail::binary, detail::tangent_post<half::round_style>(my, mx, exp, (signy^signx^arg.data_)&0x8000));

+	#endif

+	}

+

+	/// Arc sine.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::asin](https://en.cppreference.com/w/cpp/numeric/math/asin).

+	/// \param arg function argument

+	/// \return arc sine value of \a arg

+	/// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half asin(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::asin(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;

+		if(!abs)

+			return arg;

+		if(abs >= 0x3C00)

+			return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (abs>0x3C00) ? detail::invalid() :

+										detail::rounded<half::round_style,true>(sign|0x3E48, 0, 1));

+		if(abs < 0x2900)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));

+		if(half::round_style != std::round_to_nearest && (abs == 0x2B44 || abs == 0x2DC3))

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_+1, 1, 1));

+		std::pair<detail::uint32,detail::uint32> sc = detail::atan2_args(abs);

+		detail::uint32 m = detail::atan2(sc.first, sc.second, (half::round_style==std::round_to_nearest) ? 27 : 26);

+		return half(detail::binary, detail::fixed2half<half::round_style,30,false,true,true>(m, 14, sign));

+	#endif

+	}

+

+	/// Arc cosine function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::acos](https://en.cppreference.com/w/cpp/numeric/math/acos).

+	/// \param arg function argument

+	/// \return arc cosine value of \a arg

+	/// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half acos(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::acos(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ >> 15;

+		if(!abs)

+			return half(detail::binary, detail::rounded<half::round_style,true>(0x3E48, 0, 1));

+		if(abs >= 0x3C00)

+			return half(detail::binary,	(abs>0x7C00) ? detail::signal(arg.data_) : (abs>0x3C00) ? detail::invalid() :

+										sign ? detail::rounded<half::round_style,true>(0x4248, 0, 1) : 0);

+		std::pair<detail::uint32,detail::uint32> cs = detail::atan2_args(abs);

+		detail::uint32 m = detail::atan2(cs.second, cs.first, 28);

+		return half(detail::binary, detail::fixed2half<half::round_style,31,false,true,true>(sign ? (0xC90FDAA2-m) : m, 15, 0, sign));

+	#endif

+	}

+

+	/// Arc tangent function.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::atan](https://en.cppreference.com/w/cpp/numeric/math/atan).

+	/// \param arg function argument

+	/// \return arc tangent value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half atan(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::atan(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;

+		if(!abs)

+			return arg;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? detail::rounded<half::round_style,true>(sign|0x3E48, 0, 1) : detail::signal(arg.data_));

+		if(abs <= 0x2700)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));

+		int exp = (abs>>10) + (abs<=0x3FF);

+		detail::uint32 my = (abs&0x3FF) | ((abs>0x3FF)<<10);

+		detail::uint32 m = (exp>15) ?	detail::atan2(my<<19, 0x20000000>>(exp-15), (half::round_style==std::round_to_nearest) ? 26 : 24) :

+										detail::atan2(my<<(exp+4), 0x20000000, (half::round_style==std::round_to_nearest) ? 30 : 28);

+		return half(detail::binary, detail::fixed2half<half::round_style,30,false,true,true>(m, 14, sign));

+	#endif

+	}

+

+	/// Arc tangent function.

+	/// This function may be 1 ULP off the correctly rounded exact result in ~0.005% of inputs for `std::round_to_nearest`, 

+	/// in ~0.1% of inputs for `std::round_toward_zero` and in ~0.02% of inputs for any other rounding mode.

+	///

+	/// **See also:** Documentation for [std::atan2](https://en.cppreference.com/w/cpp/numeric/math/atan2).

+	/// \param y numerator

+	/// \param x denominator

+	/// \return arc tangent value

+	/// \exception FE_INVALID if \a x or \a y is signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half atan2(half y, half x)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::atan2(detail::half2float<detail::internal_t>(y.data_), detail::half2float<detail::internal_t>(x.data_))));

+	#else

+		unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, signx = x.data_ >> 15, signy = y.data_ & 0x8000;

+		if(absx >= 0x7C00 || absy >= 0x7C00)

+		{

+			if(absx > 0x7C00 || absy > 0x7C00)

+				return half(detail::binary, detail::signal(x.data_, y.data_));

+			if(absy == 0x7C00)

+				return half(detail::binary, (absx<0x7C00) ?	detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1) :

+													signx ?	detail::rounded<half::round_style,true>(signy|0x40B6, 0, 1) :

+															detail::rounded<half::round_style,true>(signy|0x3A48, 0, 1));

+			return (x.data_==0x7C00) ? half(detail::binary, signy) : half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1));

+		}

+		if(!absy)

+			return signx ? half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1)) : y;

+		if(!absx)

+			return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1));

+		int d = (absy>>10) + (absy<=0x3FF) - (absx>>10) - (absx<=0x3FF);

+		if(d > (signx ? 18 : 12))

+			return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1));

+		if(signx && d < -11)

+			return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1));

+		if(!signx && d < ((half::round_style==std::round_toward_zero) ? -15 : -9))

+		{

+			for(; absy<0x400; absy<<=1,--d) ;

+			detail::uint32 mx = ((absx<<1)&0x7FF) | 0x800, my = ((absy<<1)&0x7FF) | 0x800;

+			int i = my < mx;

+			d -= i;

+			if(d < -25)

+				return half(detail::binary, detail::underflow<half::round_style>(signy));

+			my <<= 11 + i;

+			return half(detail::binary, detail::fixed2half<half::round_style,11,false,false,true>(my/mx, d+14, signy, my%mx!=0));

+		}

+		detail::uint32 m = detail::atan2(	((absy&0x3FF)|((absy>0x3FF)<<10))<<(19+((d<0) ? d : (d>0) ? 0 : -1)),

+											((absx&0x3FF)|((absx>0x3FF)<<10))<<(19-((d>0) ? d : (d<0) ? 0 : 1)));

+		return half(detail::binary, detail::fixed2half<half::round_style,31,false,true,true>(signx ? (0xC90FDAA2-m) : m, 15, signy, signx));

+	#endif

+	}
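+	// Illustrative sketch: atan2() uses the signs of both arguments to pick the quadrant
+	// (assuming half_float::half as above):
+	//
+	//   half_float::half y(1.0f), x(-1.0f);
+	//   half_float::half a = atan2(y, x);   // ~2.356 (3*pi/4), second quadrant
+	//   half_float::half b = atan(y / x);   // ~-0.785 (-pi/4), quadrant information lost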

+

+	/// \}

+	/// \anchor hyperbolic

+	/// \name Hyperbolic functions

+	/// \{

+

+	/// Hyperbolic sine.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::sinh](https://en.cppreference.com/w/cpp/numeric/math/sinh).

+	/// \param arg function argument

+	/// \return hyperbolic sine value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half sinh(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::sinh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp;

+		if(!abs || abs >= 0x7C00)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		if(abs <= 0x2900)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));

+		std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, (half::round_style==std::round_to_nearest) ? 29 : 27);

+		detail::uint32 m = mm.first - mm.second;

+		for(exp+=13; m<0x80000000 && exp; m<<=1,--exp) ;

+		unsigned int sign = arg.data_ & 0x8000;

+		if(exp > 29)

+			return half(detail::binary, detail::overflow<half::round_style>(sign));

+		return half(detail::binary, detail::fixed2half<half::round_style,31,false,false,true>(m, exp, sign));

+	#endif

+	}

+

+	/// Hyperbolic cosine.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::cosh](https://en.cppreference.com/w/cpp/numeric/math/cosh).

+	/// \param arg function argument

+	/// \return hyperbolic cosine value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half cosh(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::cosh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp;

+		if(!abs)

+			return half(detail::binary, 0x3C00);

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : 0x7C00);

+		std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, (half::round_style==std::round_to_nearest) ? 23 : 26);

+		detail::uint32 m = mm.first + mm.second, i = (~m&0xFFFFFFFF) >> 31;

+		m = (m>>i) | (m&i) | 0x80000000;

+		if((exp+=13+i) > 29)

+			return half(detail::binary, detail::overflow<half::round_style>());

+		return half(detail::binary, detail::fixed2half<half::round_style,31,false,false,true>(m, exp));

+	#endif

+	}

+

+	/// Hyperbolic tangent.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::tanh](https://en.cppreference.com/w/cpp/numeric/math/tanh).

+	/// \param arg function argument

+	/// \return hyperbolic tangent value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half tanh(half arg)

+	{

+	#ifdef HALF_ARITHMETIC_TYPE

+		return half(detail::binary, detail::float2half<half::round_style>(std::tanh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp;

+		if(!abs)

+			return arg;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (arg.data_-0x4000));

+		if(abs >= 0x4500)

+			return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));

+		if(abs < 0x2700)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));

+		if(half::round_style != std::round_to_nearest && abs == 0x2D3F)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-3, 0, 1));

+		std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, 27);

+		detail::uint32 my = mm.first - mm.second - (half::round_style!=std::round_to_nearest), mx = mm.first + mm.second, i = (~mx&0xFFFFFFFF) >> 31;

+		for(exp=13; my<0x80000000; my<<=1,--exp) ;

+		mx = (mx>>i) | 0x80000000;

+		return half(detail::binary, detail::tangent_post<half::round_style>(my, mx, exp-i, arg.data_&0x8000));

+	#endif

+	}

+

+	/// Hyperbolic area sine.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::asinh](https://en.cppreference.com/w/cpp/numeric/math/asinh).

+	/// \param arg function argument

+	/// \return area sine value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half asinh(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::asinh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF;

+		if(!abs || abs >= 0x7C00)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		if(abs <= 0x2900)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));

+		if(half::round_style != std::round_to_nearest)

+			switch(abs)

+			{

+				case 0x32D4: return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-13, 1, 1));

+				case 0x3B5B: return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-197, 1, 1));

+			}

+		return half(detail::binary, detail::area<half::round_style,true>(arg.data_));

+	#endif

+	}

+

+	/// Hyperbolic area cosine.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::acosh](https://en.cppreference.com/w/cpp/numeric/math/acosh).

+	/// \param arg function argument

+	/// \return area cosine value of \a arg

+	/// \exception FE_INVALID for signaling NaN or arguments <1

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half acosh(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::acosh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF;

+		if((arg.data_&0x8000) || abs < 0x3C00)

+			return half(detail::binary, (abs<=0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs == 0x3C00)

+			return half(detail::binary, 0);

+		if(arg.data_ >= 0x7C00)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		return half(detail::binary, detail::area<half::round_style,false>(arg.data_));

+	#endif

+	}

+

+	/// Hyperbolic area tangent.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::atanh](https://en.cppreference.com/w/cpp/numeric/math/atanh).

+	/// \param arg function argument

+	/// \return area tangent value of \a arg

+	/// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1

+	/// \exception FE_DIVBYZERO for +/-1

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half atanh(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::atanh(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF, exp = 0;

+		if(!abs)

+			return arg;

+		if(abs >= 0x3C00)

+			return half(detail::binary, (abs==0x3C00) ? detail::pole(arg.data_&0x8000) : (abs<=0x7C00) ? detail::invalid() : detail::signal(arg.data_));

+		if(abs < 0x2700)

+			return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));

+		detail::uint32 m = static_cast<detail::uint32>((abs&0x3FF)|((abs>0x3FF)<<10)) << ((abs>>10)+(abs<=0x3FF)+6), my = 0x80000000 + m, mx = 0x80000000 - m;

+		for(; mx<0x80000000; mx<<=1,++exp) ;

+		int i = my >= mx, s;

+		return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(detail::log2(

+			(detail::divide64(my>>i, mx, s)+1)>>1, 27)+0x10, exp+i-1, 16, arg.data_&0x8000));

+	#endif

+	}
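+	// Illustrative sketch of the hyperbolic functions and their inverses
+	// (assuming half_float::half as above):
+	//
+	//   half_float::half t(0.5f);
+	//   half_float::half a = tanh(t);    // ~0.4621
+	//   half_float::half b = atanh(a);   // ~0.5 again, up to half-precision rounding
+	//   half_float::half c = acosh(half_float::half(1.0f));  // 0.0
+	//   half_float::half d = atanh(half_float::half(1.0f));  // +infinity, raises FE_DIVBYZERO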

+

+	/// \}

+	/// \anchor special

+	/// \name Error and gamma functions

+	/// \{

+

+	/// Error function.

+	/// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.5% of inputs.

+	///

+	/// **See also:** Documentation for [std::erf](https://en.cppreference.com/w/cpp/numeric/math/erf).

+	/// \param arg function argument

+	/// \return error function value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half erf(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::erf(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF;

+		if(!abs || abs >= 0x7C00)

+			return (abs>=0x7C00) ? half(detail::binary, (abs==0x7C00) ? (arg.data_-0x4000) : detail::signal(arg.data_)) : arg;

+		if(abs >= 0x4200)

+			return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));

+		return half(detail::binary, detail::erf<half::round_style,false>(arg.data_));

+	#endif

+	}

+

+	/// Complementary error function.

+	/// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.5% of inputs.

+	///

+	/// **See also:** Documentation for [std::erfc](https://en.cppreference.com/w/cpp/numeric/math/erfc).

+	/// \param arg function argument

+	/// \return 1 minus error function value of \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half erfc(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::erfc(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;

+		if(abs >= 0x7C00)

+			return (abs>=0x7C00) ? half(detail::binary, (abs==0x7C00) ? (sign>>1) : detail::signal(arg.data_)) : arg;

+		if(!abs)

+			return half(detail::binary, 0x3C00);

+		if(abs >= 0x4400)

+			return half(detail::binary, detail::rounded<half::round_style,true>((sign>>1)-(sign>>15), sign>>15, 1));

+		return half(detail::binary, detail::erf<half::round_style,true>(arg.data_));

+	#endif

+	}

+

+	/// Natural logarithm of gamma function.

+	/// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in ~0.025% of inputs.

+	///

+	/// **See also:** Documentation for [std::lgamma](https://en.cppreference.com/w/cpp/numeric/math/lgamma).

+	/// \param arg function argument

+	/// \return natural logarithm of the gamma function for \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_DIVBYZERO for 0 or negative integer arguments

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half lgamma(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::lgamma(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		int abs = arg.data_ & 0x7FFF;

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? 0x7C00 : detail::signal(arg.data_));

+		if(!abs || arg.data_ >= 0xE400 || (arg.data_ >= 0xBC00 && !(abs&((1<<(25-(abs>>10)))-1))))

+			return half(detail::binary, detail::pole());

+		if(arg.data_ == 0x3C00 || arg.data_ == 0x4000)

+			return half(detail::binary, 0);

+		return half(detail::binary, detail::gamma<half::round_style,true>(arg.data_));

+	#endif

+	}

+

+	/// Gamma function.

+	/// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.25% of inputs.

+	///

+	/// **See also:** Documentation for [std::tgamma](https://en.cppreference.com/w/cpp/numeric/math/tgamma).

+	/// \param arg function argument

+	/// \return gamma function value of \a arg

+	/// \exception FE_INVALID for signaling NaN, negative infinity or negative integer arguments

+	/// \exception FE_DIVBYZERO for 0

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half tgamma(half arg)

+	{

+	#if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH

+		return half(detail::binary, detail::float2half<half::round_style>(std::tgamma(detail::half2float<detail::internal_t>(arg.data_))));

+	#else

+		unsigned int abs = arg.data_ & 0x7FFF;

+		if(!abs)

+			return half(detail::binary, detail::pole(arg.data_));

+		if(abs >= 0x7C00)

+			return (arg.data_==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));

+		if(arg.data_ >= 0xE400 || (arg.data_ >= 0xBC00 && !(abs&((1<<(25-(abs>>10)))-1))))

+			return half(detail::binary, detail::invalid());

+		if(arg.data_ >= 0xCA80)

+			return half(detail::binary, detail::underflow<half::round_style>((1-((abs>>(25-(abs>>10)))&1))<<15));

+		if(arg.data_ <= 0x100 || (arg.data_ >= 0x4900 && arg.data_ < 0x8000))

+			return half(detail::binary, detail::overflow<half::round_style>());

+		if(arg.data_ == 0x3C00)

+			return arg;

+		return half(detail::binary, detail::gamma<half::round_style,false>(arg.data_));

+	#endif

+	}
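A short sketch of the error and gamma functions in this group, illustrative only; the include path is assumed, and the printed values follow from the documented exact cases (erf(0) = 0, lgamma(1) = 0, a pole at tgamma(0)) and from Γ(5) = 24.

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    std::cout << static_cast<float>(half_float::erf(half(0.0f)))    << "\n";  // 0
    std::cout << static_cast<float>(half_float::lgamma(half(1.0f))) << "\n";  // 0 (documented exact case)
    std::cout << static_cast<float>(half_float::tgamma(half(5.0f))) << "\n";  // 24
    // tgamma(0) is a pole per the docs above: FE_DIVBYZERO, result is infinity.
    std::cout << static_cast<float>(half_float::tgamma(half(0.0f))) << "\n";  // inf
}
```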

+

+	/// \}

+	/// \anchor rounding

+	/// \name Rounding

+	/// \{

+

+	/// Nearest integer not less than half value.

+	/// **See also:** Documentation for [std::ceil](https://en.cppreference.com/w/cpp/numeric/math/ceil).

+	/// \param arg half to round

+	/// \return nearest integer not less than \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_INEXACT if value had to be rounded

+	inline half ceil(half arg) { return half(detail::binary, detail::integral<std::round_toward_infinity,true,true>(arg.data_)); }

+

+	/// Nearest integer not greater than half value.

+	/// **See also:** Documentation for [std::floor](https://en.cppreference.com/w/cpp/numeric/math/floor).

+	/// \param arg half to round

+	/// \return nearest integer not greater than \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_INEXACT if value had to be rounded

+	inline half floor(half arg) { return half(detail::binary, detail::integral<std::round_toward_neg_infinity,true,true>(arg.data_)); }

+

+	/// Nearest integer not greater in magnitude than half value.

+	/// **See also:** Documentation for [std::trunc](https://en.cppreference.com/w/cpp/numeric/math/trunc).

+	/// \param arg half to round

+	/// \return nearest integer not greater in magnitude than \a arg

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_INEXACT if value had to be rounded

+	inline half trunc(half arg) { return half(detail::binary, detail::integral<std::round_toward_zero,true,true>(arg.data_)); }

+

+	/// Nearest integer.

+	/// **See also:** Documentation for [std::round](https://en.cppreference.com/w/cpp/numeric/math/round).

+	/// \param arg half to round

+	/// \return nearest integer, rounded away from zero in half-way cases

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_INEXACT if value had to be rounded

+	inline half round(half arg) { return half(detail::binary, detail::integral<std::round_to_nearest,false,true>(arg.data_)); }

+

+	/// Nearest integer.

+	/// **See also:** Documentation for [std::lround](https://en.cppreference.com/w/cpp/numeric/math/round).

+	/// \param arg half to round

+	/// \return nearest integer, rounded away from zero in half-way cases

+	/// \exception FE_INVALID if value is not representable as `long`

+	inline long lround(half arg) { return detail::half2int<std::round_to_nearest,false,false,long>(arg.data_); }

+

+	/// Nearest integer using half's internal rounding mode.

+	/// **See also:** Documentation for [std::rint](https://en.cppreference.com/w/cpp/numeric/math/rint).

+	/// \param arg half expression to round

+	/// \return nearest integer using default rounding mode

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_INEXACT if value had to be rounded

+	inline half rint(half arg) { return half(detail::binary, detail::integral<half::round_style,true,true>(arg.data_)); }

+

+	/// Nearest integer using half's internal rounding mode.

+	/// **See also:** Documentation for [std::lrint](https://en.cppreference.com/w/cpp/numeric/math/rint).

+	/// \param arg half expression to round

+	/// \return nearest integer using default rounding mode

+	/// \exception FE_INVALID if value is not representable as `long`

+	/// \exception FE_INEXACT if value had to be rounded

+	inline long lrint(half arg) { return detail::half2int<half::round_style,true,true,long>(arg.data_); }

+

+	/// Nearest integer using half's internal rounding mode.

+	/// **See also:** Documentation for [std::nearbyint](https://en.cppreference.com/w/cpp/numeric/math/nearbyint).

+	/// \param arg half expression to round

+	/// \return nearest integer using default rounding mode

+	/// \exception FE_INVALID for signaling NaN

+	inline half nearbyint(half arg) { return half(detail::binary, detail::integral<half::round_style,true,false>(arg.data_)); }

+#if HALF_ENABLE_CPP11_LONG_LONG

+	/// Nearest integer.

+	/// **See also:** Documentation for [std::llround](https://en.cppreference.com/w/cpp/numeric/math/round).

+	/// \param arg half to round

+	/// \return nearest integer, rounded away from zero in half-way cases

+	/// \exception FE_INVALID if value is not representable as `long long`

+	inline long long llround(half arg) { return detail::half2int<std::round_to_nearest,false,false,long long>(arg.data_); }

+

+	/// Nearest integer using half's internal rounding mode.

+	/// **See also:** Documentation for [std::llrint](https://en.cppreference.com/w/cpp/numeric/math/rint).

+	/// \param arg half expression to round

+	/// \return nearest integer using default rounding mode

+	/// \exception FE_INVALID if value is not representable as `long long`

+	/// \exception FE_INEXACT if value had to be rounded

+	inline long long llrint(half arg) { return detail::half2int<half::round_style,true,true,long long>(arg.data_); }

+#endif
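The rounding family mirrors <cmath>: ceil/floor/trunc/round/lround use a fixed rounding direction, while rint/lrint/nearbyint follow half::round_style. A minimal sketch (illustrative only, include path assumed):

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    half x(2.5f);
    std::cout << static_cast<float>(half_float::ceil(x))            << "\n";  // 3
    std::cout << static_cast<float>(half_float::floor(x))           << "\n";  // 2
    std::cout << static_cast<float>(half_float::trunc(half(-2.5f))) << "\n";  // -2
    std::cout << static_cast<float>(half_float::round(x))           << "\n";  // 3 (ties away from zero)
    std::cout << half_float::lround(x)                              << "\n";  // 3, as a long
    // rint follows half::round_style, so the tie at 2.5 depends on configuration.
    std::cout << static_cast<float>(half_float::rint(x)) << "\n";
}
```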

+

+	/// \}

+	/// \anchor float

+	/// \name Floating point manipulation

+	/// \{

+

+	/// Decompress floating-point number.

+	/// **See also:** Documentation for [std::frexp](https://en.cppreference.com/w/cpp/numeric/math/frexp).

+	/// \param arg number to decompress

+	/// \param exp address to store exponent at

+	/// \return significand in range [0.5, 1)

+	/// \exception FE_INVALID for signaling NaN

+	inline half frexp(half arg, int *exp)

+	{

+		*exp = 0;

+		unsigned int abs = arg.data_ & 0x7FFF;

+		if(abs >= 0x7C00 || !abs)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		for(; abs<0x400; abs<<=1,--*exp) ;

+		*exp += (abs>>10) - 14;

+		return half(detail::binary, (arg.data_&0x8000)|0x3800|(abs&0x3FF));

+	}

+

+	/// Multiply by power of two.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::scalbln](https://en.cppreference.com/w/cpp/numeric/math/scalbn).

+	/// \param arg number to modify

+	/// \param exp power of two to multiply with

+	/// \return \a arg multiplied by 2 raised to \a exp

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half scalbln(half arg, long exp)

+	{

+		unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;

+		if(abs >= 0x7C00 || !abs)

+			return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;

+		for(; abs<0x400; abs<<=1,--exp) ;

+		exp += abs >> 10;

+		if(exp > 30)

+			return half(detail::binary, detail::overflow<half::round_style>(sign));

+		else if(exp < -10)

+			return half(detail::binary, detail::underflow<half::round_style>(sign));

+		else if(exp > 0)

+			return half(detail::binary, sign|(exp<<10)|(abs&0x3FF));

+		unsigned int m = (abs&0x3FF) | 0x400;

+		return half(detail::binary, detail::rounded<half::round_style,false>(sign|(m>>(1-exp)), (m>>-exp)&1, (m&((1<<-exp)-1))!=0));

+	}
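Because frexp and scalbln/scalbn/ldexp only manipulate the exponent, the decomposition and the rescaling are exact, as the docs above state. A small sketch, with the include path and printed values as assumptions:

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    int e = 0;
    half m = half_float::frexp(half(12.0f), &e);                 // 12 = 0.75 * 2^4
    std::cout << static_cast<float>(m) << " " << e << "\n";      // 0.75 4
    std::cout << static_cast<float>(half_float::ldexp(m, e)) << "\n";           // 12, exact round trip
    std::cout << static_cast<float>(half_float::scalbn(half(1.0f), -3)) << "\n"; // 0.125
}
```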

+

+	/// Multiply by power of two.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::scalbn](https://en.cppreference.com/w/cpp/numeric/math/scalbn).

+	/// \param arg number to modify

+	/// \param exp power of two to multiply with

+	/// \return \a arg multiplied by 2 raised to \a exp

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half scalbn(half arg, int exp) { return scalbln(arg, exp); }

+

+	/// Multiply by power of two.

+	/// This function is exact to rounding for all rounding modes.

+	///

+	/// **See also:** Documentation for [std::ldexp](https://en.cppreference.com/w/cpp/numeric/math/ldexp).

+	/// \param arg number to modify

+	/// \param exp power of two to multiply with

+	/// \return \a arg multiplied by 2 raised to \a exp

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	inline half ldexp(half arg, int exp) { return scalbln(arg, exp); }

+

+	/// Extract integer and fractional parts.

+	/// **See also:** Documentation for [std::modf](https://en.cppreference.com/w/cpp/numeric/math/modf).

+	/// \param arg number to decompress

+	/// \param iptr address to store integer part at

+	/// \return fractional part

+	/// \exception FE_INVALID for signaling NaN

+	inline half modf(half arg, half *iptr)

+	{

+		unsigned int abs = arg.data_ & 0x7FFF;

+		if(abs > 0x7C00)

+		{

+			arg = half(detail::binary, detail::signal(arg.data_));

+			return *iptr = arg, arg;

+		}

+		if(abs >= 0x6400)

+			return *iptr = arg, half(detail::binary, arg.data_&0x8000);

+		if(abs < 0x3C00)

+			return iptr->data_ = arg.data_ & 0x8000, arg;

+		unsigned int exp = abs >> 10, mask = (1<<(25-exp)) - 1, m = arg.data_ & mask;

+		iptr->data_ = arg.data_ & ~mask;

+		if(!m)

+			return half(detail::binary, arg.data_&0x8000);

+		for(; m<0x400; m<<=1,--exp) ;

+		return half(detail::binary, (arg.data_&0x8000)|(exp<<10)|(m&0x3FF));

+	}
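modf splits a half into integer and fractional parts that carry the same sign, as in <cmath>. Illustrative sketch (include path assumed):

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    half ipart;
    half frac = half_float::modf(half(-3.25f), &ipart);
    std::cout << static_cast<float>(ipart) << " "      // -3
              << static_cast<float>(frac)  << "\n";    // -0.25
}
```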

+

+	/// Extract exponent.

+	/// **See also:** Documentation for [std::ilogb](https://en.cppreference.com/w/cpp/numeric/math/ilogb).

+	/// \param arg number to query

+	/// \return floating-point exponent

+	/// \retval FP_ILOGB0 for zero

+	/// \retval FP_ILOGBNAN for NaN

+	/// \retval INT_MAX for infinity

+	/// \exception FE_INVALID for 0 or infinite values

+	inline int ilogb(half arg)

+	{

+		int abs = arg.data_ & 0x7FFF, exp;

+		if(!abs || abs >= 0x7C00)

+		{

+			detail::raise(FE_INVALID);

+			return !abs ? FP_ILOGB0 : (abs==0x7C00) ? INT_MAX : FP_ILOGBNAN;

+		}

+		for(exp=(abs>>10)-15; abs<0x200; abs<<=1,--exp) ;

+		return exp;

+	}

+

+	/// Extract exponent.

+	/// **See also:** Documentation for [std::logb](https://en.cppreference.com/w/cpp/numeric/math/logb).

+	/// \param arg number to query

+	/// \return floating-point exponent

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_DIVBYZERO for 0

+	inline half logb(half arg)

+	{

+		int abs = arg.data_ & 0x7FFF, exp;

+		if(!abs)

+			return half(detail::binary, detail::pole(0x8000));

+		if(abs >= 0x7C00)

+			return half(detail::binary, (abs==0x7C00) ? 0x7C00 : detail::signal(arg.data_));

+		for(exp=(abs>>10)-15; abs<0x200; abs<<=1,--exp) ;

+		unsigned int value = static_cast<unsigned>(exp<0) << 15;

+		if(exp)

+		{

+			unsigned int m = std::abs(exp) << 6;

+			for(exp=18; m<0x400; m<<=1,--exp) ;

+			value |= (exp<<10) + m;

+		}

+		return half(detail::binary, value);

+	}

+

+	/// Next representable value.

+	/// **See also:** Documentation for [std::nextafter](https://en.cppreference.com/w/cpp/numeric/math/nextafter).

+	/// \param from value to compute next representable value for

+	/// \param to direction towards which to compute next value

+	/// \return next representable value after \a from in direction towards \a to

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW for infinite result from finite argument

+	/// \exception FE_UNDERFLOW for subnormal result

+	inline half nextafter(half from, half to)

+	{

+		int fabs = from.data_ & 0x7FFF, tabs = to.data_ & 0x7FFF;

+		if(fabs > 0x7C00 || tabs > 0x7C00)

+			return half(detail::binary, detail::signal(from.data_, to.data_));

+		if(from.data_ == to.data_ || !(fabs|tabs))

+			return to;

+		if(!fabs)

+		{

+			detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT);

+			return half(detail::binary, (to.data_&0x8000)+1);

+		}

+		unsigned int out = from.data_ + (((from.data_>>15)^static_cast<unsigned>(

+			(from.data_^(0x8000|(0x8000-(from.data_>>15))))<(to.data_^(0x8000|(0x8000-(to.data_>>15))))))<<1) - 1;

+		detail::raise(FE_OVERFLOW, fabs<0x7C00 && (out&0x7C00)==0x7C00);

+		detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT && (out&0x7C00)<0x400);

+		return half(detail::binary, out);

+	}
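nextafter steps by exactly one representable value towards the second argument, which makes the half ULP visible directly. A sketch under the usual include-path assumption; the 2^-10 spacing at 1.0 follows from the 10-bit significand.

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    half one(1.0f);
    half up = half_float::nextafter(one, half(2.0f));    // one ULP above 1.0
    std::cout << static_cast<float>(up) - 1.0f << "\n";  // 0.0009765625 (= 2^-10)
    std::cout << static_cast<float>(half_float::nextafter(one, half(0.0f))) << "\n";  // just below 1.0
}
```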

+

+	/// Next representable value.

+	/// **See also:** Documentation for [std::nexttoward](https://en.cppreference.com/w/cpp/numeric/math/nexttoward).

+	/// \param from value to compute next representable value for

+	/// \param to direction towards which to compute next value

+	/// \return next representable value after \a from in direction towards \a to

+	/// \exception FE_INVALID for signaling NaN

+	/// \exception FE_OVERFLOW for infinite result from finite argument

+	/// \exception FE_UNDERFLOW for subnormal result

+	inline half nexttoward(half from, long double to)

+	{

+		int fabs = from.data_ & 0x7FFF;

+		if(fabs > 0x7C00)

+			return half(detail::binary, detail::signal(from.data_));

+		long double lfrom = static_cast<long double>(from);

+		if(detail::builtin_isnan(to) || lfrom == to)

+			return half(static_cast<float>(to));

+		if(!fabs)

+		{

+			detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT);

+			return half(detail::binary, (static_cast<unsigned>(detail::builtin_signbit(to))<<15)+1);

+		}

+		unsigned int out = from.data_ + (((from.data_>>15)^static_cast<unsigned>(lfrom<to))<<1) - 1;

+		detail::raise(FE_OVERFLOW, (out&0x7FFF)==0x7C00);

+		detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT && (out&0x7FFF)<0x400);

+		return half(detail::binary, out);

+	}

+

+	/// Take sign.

+	/// **See also:** Documentation for [std::copysign](https://en.cppreference.com/w/cpp/numeric/math/copysign).

+	/// \param x value to change sign for

+	/// \param y value to take sign from

+	/// \return value equal to \a x in magnitude and to \a y in sign

+	inline HALF_CONSTEXPR half copysign(half x, half y) { return half(detail::binary, x.data_^((x.data_^y.data_)&0x8000)); }

+

+	/// \}

+	/// \anchor classification

+	/// \name Floating point classification

+	/// \{

+

+	/// Classify floating-point value.

+	/// **See also:** Documentation for [std::fpclassify](https://en.cppreference.com/w/cpp/numeric/math/fpclassify).

+	/// \param arg number to classify

+	/// \retval FP_ZERO for positive and negative zero

+	/// \retval FP_SUBNORMAL for subnormal numbers

+	/// \retval FP_INFINITE for positive and negative infinity

+	/// \retval FP_NAN for NaNs

+	/// \retval FP_NORMAL for all other (normal) values

+	inline HALF_CONSTEXPR int fpclassify(half arg)

+	{

+		return	!(arg.data_&0x7FFF) ? FP_ZERO :

+				((arg.data_&0x7FFF)<0x400) ? FP_SUBNORMAL :

+				((arg.data_&0x7FFF)<0x7C00) ? FP_NORMAL :

+				((arg.data_&0x7FFF)==0x7C00) ? FP_INFINITE :

+				FP_NAN;

+	}

+

+	/// Check if finite number.

+	/// **See also:** Documentation for [std::isfinite](https://en.cppreference.com/w/cpp/numeric/math/isfinite).

+	/// \param arg number to check

+	/// \retval true if neither infinity nor NaN

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isfinite(half arg) { return (arg.data_&0x7C00) != 0x7C00; }

+

+	/// Check for infinity.

+	/// **See also:** Documentation for [std::isinf](https://en.cppreference.com/w/cpp/numeric/math/isinf).

+	/// \param arg number to check

+	/// \retval true for positive or negative infinity

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isinf(half arg) { return (arg.data_&0x7FFF) == 0x7C00; }

+

+	/// Check for NaN.

+	/// **See also:** Documentation for [std::isnan](https://en.cppreference.com/w/cpp/numeric/math/isnan).

+	/// \param arg number to check

+	/// \retval true for NaNs

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isnan(half arg) { return (arg.data_&0x7FFF) > 0x7C00; }

+

+	/// Check if normal number.

+	/// **See also:** Documentation for [std::isnormal](https://en.cppreference.com/w/cpp/numeric/math/isnormal).

+	/// \param arg number to check

+	/// \retval true if normal number

+	/// \retval false if either subnormal, zero, infinity or NaN

+	inline HALF_CONSTEXPR bool isnormal(half arg) { return ((arg.data_&0x7C00)!=0) & ((arg.data_&0x7C00)!=0x7C00); }

+

+	/// Check sign.

+	/// **See also:** Documentation for [std::signbit](https://en.cppreference.com/w/cpp/numeric/math/signbit).

+	/// \param arg number to check

+	/// \retval true for negative number

+	/// \retval false for positive number

+	inline HALF_CONSTEXPR bool signbit(half arg) { return (arg.data_&0x8000) != 0; }
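The classification helpers behave like their <cmath> counterparts but read the half bit pattern directly. A sketch; the include path and the choice of 1e-6 as a subnormal half (the smallest normal half is 2^-14, about 6.1e-5) are assumptions:

```cpp
#include <iostream>
#include <cmath>     // FP_* classification macros
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    half n(1.5f), z(0.0f), tiny(1e-6f);   // 1e-6 falls below the smallest normal half
    std::cout << (half_float::fpclassify(n)    == FP_NORMAL)    << "\n";  // 1
    std::cout << (half_float::fpclassify(z)    == FP_ZERO)      << "\n";  // 1
    std::cout << (half_float::fpclassify(tiny) == FP_SUBNORMAL) << "\n";  // 1
    std::cout << half_float::isfinite(n) << " "
              << half_float::isnan(n)    << " "
              << half_float::signbit(half(-2.0f)) << "\n";               // 1 0 1
}
```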

+

+	/// \}

+	/// \anchor compfunc

+	/// \name Comparison

+	/// \{

+

+	/// Quiet comparison for greater than.

+	/// **See also:** Documentation for [std::isgreater](https://en.cppreference.com/w/cpp/numeric/math/isgreater).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x greater than \a y

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isgreater(half x, half y)

+	{

+		return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) > ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);

+	}

+

+	/// Quiet comparison for greater equal.

+	/// **See also:** Documentation for [std::isgreaterequal](https://en.cppreference.com/w/cpp/numeric/math/isgreaterequal).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x greater equal \a y

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isgreaterequal(half x, half y)

+	{

+		return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) >= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);

+	}

+

+	/// Quiet comparison for less than.

+	/// **See also:** Documentation for [std::isless](https://en.cppreference.com/w/cpp/numeric/math/isless).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x less than \a y

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isless(half x, half y)

+	{

+		return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) < ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);

+	}

+

+	/// Quiet comparison for less equal.

+	/// **See also:** Documentation for [std::islessequal](https://en.cppreference.com/w/cpp/numeric/math/islessequal).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if \a x less equal \a y

+	/// \retval false else

+	inline HALF_CONSTEXPR bool islessequal(half x, half y)

+	{

+		return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) <= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);

+	}

+

+	/// Quiet comparison for less or greater.

+	/// **See also:** Documentation for [std::islessgreater](https://en.cppreference.com/w/cpp/numeric/math/islessgreater).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if either less or greater

+	/// \retval false else

+	inline HALF_CONSTEXPR bool islessgreater(half x, half y)

+	{

+		return x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF) && !isnan(x) && !isnan(y);

+	}

+

+	/// Quiet check if unordered.

+	/// **See also:** Documentation for [std::isunordered](https://en.cppreference.com/w/cpp/numeric/math/isunordered).

+	/// \param x first operand

+	/// \param y second operand

+	/// \retval true if unordered (one or two NaN operands)

+	/// \retval false else

+	inline HALF_CONSTEXPR bool isunordered(half x, half y) { return isnan(x) || isnan(y); }
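The point of these comparisons is that they stay quiet on NaN operands: they return false (or true for isunordered) without raising FE_INVALID. Illustrative sketch with the include path assumed; the NaN is produced via the documented invalid case of acosh.

```cpp
#include <iostream>
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    half a(1.0f), b(2.0f);
    half nan_val = half_float::acosh(half(0.5f));            // invalid argument -> NaN
    std::cout << half_float::isless(a, b)            << "\n";  // 1
    std::cout << half_float::isgreater(a, nan_val)   << "\n";  // 0, the comparison itself raises no FE_INVALID
    std::cout << half_float::isunordered(a, nan_val) << "\n";  // 1
}
```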

+

+	/// \}

+	/// \anchor casting

+	/// \name Casting

+	/// \{

+

+	/// Cast to or from half-precision floating-point number.

+	/// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted 

+	/// directly using the default rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do.

+	///

+	/// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types 

+	/// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler 

+	/// error and casting between [half](\ref half_float::half)s returns the argument unmodified.

+	/// \tparam T destination type (half or built-in arithmetic type)

+	/// \tparam U source type (half or built-in arithmetic type)

+	/// \param arg value to cast

+	/// \return \a arg converted to destination type

+	/// \exception FE_INVALID if \a T is integer type and result is not representable as \a T

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	template<typename T,typename U> T half_cast(U arg) { return detail::half_caster<T,U>::cast(arg); }

+

+	/// Cast to or from half-precision floating-point number.

+	/// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted 

+	/// directly using the specified rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do.

+	///

+	/// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types 

+	/// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler 

+	/// error and casting between [half](\ref half_float::half)s returns the argument unmodified.

+	/// \tparam T destination type (half or built-in arithmetic type)

+	/// \tparam R rounding mode to use.

+	/// \tparam U source type (half or built-in arithmetic type)

+	/// \param arg value to cast

+	/// \return \a arg converted to destination type

+	/// \exception FE_INVALID if \a T is integer type and result is not representable as \a T

+	/// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding

+	template<typename T,std::float_round_style R,typename U> T half_cast(U arg) { return detail::half_caster<T,U,R>::cast(arg); }

+	/// \}
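half_cast converts in one step, without the float round trip a static_cast would take, and the three-parameter overload selects the rounding mode per call. A sketch under the usual include-path assumption; the printed values are approximate.

```cpp
#include <iostream>
#include <limits>    // std::float_round_style constants
#include "half.hpp"  // include path assumed

int main() {
    using half_float::half;
    using half_float::half_cast;
    half h = half_cast<half>(0.1);                               // double -> half directly
    int  i = half_cast<int, std::round_toward_zero>(half(2.9f)); // explicit rounding mode
    std::cout << static_cast<float>(h) << " " << i << "\n";      // ~0.1 2
}
```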

+

+	/// \}

+	/// \anchor errors

+	/// \name Error handling

+	/// \{

+

+	/// Clear exception flags.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	///

+	/// **See also:** Documentation for [std::feclearexcept](https://en.cppreference.com/w/cpp/numeric/fenv/feclearexcept).

+	/// \param excepts OR of exceptions to clear

+	/// \retval 0 all selected flags cleared successfully

+	inline int feclearexcept(int excepts) { detail::errflags() &= ~excepts; return 0; }

+

+	/// Test exception flags.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	///

+	/// **See also:** Documentation for [std::fetestexcept](https://en.cppreference.com/w/cpp/numeric/fenv/fetestexcept).

+	/// \param excepts OR of exceptions to test

+	/// \return OR of selected exceptions if raised

+	inline int fetestexcept(int excepts) { return detail::errflags() & excepts; }

+

+	/// Raise exception flags.

+	/// This raises the specified floating point exceptions and also invokes any additional automatic exception handling as 

+	/// configured with the [HALF_ERRHANDLING_...](\ref HALF_ERRHANDLING_ERRNO) preprocessor symbols.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	///

+	/// **See also:** Documentation for [std::feraiseexcept](https://en.cppreference.com/w/cpp/numeric/fenv/feraiseexcept).

+	/// \param excepts OR of exceptions to raise

+	/// \retval 0 all selected exceptions raised successfully

+	inline int feraiseexcept(int excepts) { detail::errflags() |= excepts; detail::raise(excepts); return 0; }

+

+	/// Save exception flags.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	///

+	/// **See also:** Documentation for [std::fegetexceptflag](https://en.cppreference.com/w/cpp/numeric/fenv/feexceptflag).

+	/// \param flagp address to store flag state at

+	/// \param excepts OR of flags to save

+	/// \retval 0 for success

+	inline int fegetexceptflag(int *flagp, int excepts) { *flagp = detail::errflags() & excepts; return 0; }

+

+	/// Restore exception flags.

+	/// This only copies the specified exception state (including unset flags) without incurring any additional exception handling.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	///

+	/// **See also:** Documentation for [std::fesetexceptflag](https://en.cppreference.com/w/cpp/numeric/fenv/feexceptflag).

+	/// \param flagp address to take flag state from

+	/// \param excepts OR of flags to restore

+	/// \retval 0 for success

+	inline int fesetexceptflag(const int *flagp, int excepts) { detail::errflags() = (detail::errflags()|(*flagp&excepts)) & (*flagp|~excepts); return 0; }

+

+	/// Throw C++ exceptions based on set exception flags.

+	/// This function manually throws a corresponding C++ exception if one of the specified flags is set, 

+	/// no matter if automatic throwing (via [HALF_ERRHANDLING_THROW_...](\ref HALF_ERRHANDLING_THROW_INVALID)) is enabled or not.

+	/// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled, 

+	/// but in that case manual flag management is the only way to raise flags.

+	/// \param excepts OR of exceptions to test

+	/// \param msg error message to use for exception description

+	/// \throw std::domain_error if `FE_INVALID` or `FE_DIVBYZERO` is selected and set

+	/// \throw std::overflow_error if `FE_OVERFLOW` is selected and set

+	/// \throw std::underflow_error if `FE_UNDERFLOW` is selected and set

+	/// \throw std::range_error if `FE_INEXACT` is selected and set

+	inline void fethrowexcept(int excepts, const char *msg = "")

+	{

+		excepts &= detail::errflags();

+		if(excepts & (FE_INVALID|FE_DIVBYZERO))

+			throw std::domain_error(msg);

+		if(excepts & FE_OVERFLOW)

+			throw std::overflow_error(msg);

+		if(excepts & FE_UNDERFLOW)

+			throw std::underflow_error(msg);

+		if(excepts & FE_INEXACT)

+			throw std::range_error(msg);

+	}
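These functions manage the library's own flag word, so they work even when automatic flag handling is disabled: flags can always be raised and inspected by hand and, if desired, turned into C++ exceptions. A sketch under the default error-handling configuration, with the include path assumed and calls qualified to avoid clashing with <cfenv>:

```cpp
#include <iostream>
#include <stdexcept>
#include "half.hpp"  // include path assumed

int main() {
    half_float::feclearexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INEXACT);
    half_float::feraiseexcept(FE_OVERFLOW);                             // raise a flag manually
    std::cout << (half_float::fetestexcept(FE_OVERFLOW) != 0) << "\n";  // 1
    try {
        half_float::fethrowexcept(FE_OVERFLOW, "half overflow");        // promote the set flag
    } catch (const std::overflow_error &e) {
        std::cout << e.what() << "\n";                                  // "half overflow"
    }
}
```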

+	/// \}

+}

+

+

+#undef HALF_UNUSED_NOERR

+#undef HALF_CONSTEXPR

+#undef HALF_CONSTEXPR_CONST

+#undef HALF_CONSTEXPR_NOERR

+#undef HALF_NOEXCEPT

+#undef HALF_NOTHROW

+#undef HALF_THREAD_LOCAL

+#undef HALF_TWOS_COMPLEMENT_INT

+#ifdef HALF_POP_WARNINGS

+	#pragma warning(pop)

+	#undef HALF_POP_WARNINGS

+#endif

+

+#endif

diff --git a/include/singa/core/common.h b/include/singa/core/common.h
index a408650..d5edcb2 100644
--- a/include/singa/core/common.h
+++ b/include/singa/core/common.h
@@ -48,10 +48,10 @@
 namespace singa {
 
 namespace lang {
-/// To implemente functions using cpp libraries
+/// To implement functions using cpp libraries
 typedef struct _Cpp {
 } Cpp;
-/// To implemente functions using cuda libraries
+/// To implement functions using cuda libraries
 typedef struct _Cuda {
 } Cuda;
 /// To implement function using opencl libraries
@@ -100,8 +100,8 @@
   std::mt19937 random_generator;
 #ifdef USE_CUDA
   cublasHandle_t cublas_handle;
-  cudaStream_t stream; 
-  curandGenerator_t curand_generator; 
+  cudaStream_t stream;
+  curandGenerator_t curand_generator;
 
 #ifdef USE_CUDNN
   cudnnHandle_t cudnn_handle;
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index aea988d..8d3f721 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -22,6 +22,7 @@
 #include <tuple>
 #include <vector>
 
+#include "half.hpp"
 #include "singa/core/common.h"
 #include "singa/core/device.h"
 #include "singa/proto/core.pb.h"
@@ -123,10 +124,12 @@
     return false;
   }
 
+  bool is_contiguous() const { return !broadcasted() && !transpose(); }
+
   const vector<int> &stride() const { return stride_; }
 
   /// Return true if the content of the tensor is initialized
-  bool initailized() const {
+  bool initialized() const {
     return block_ != nullptr && block_->initialized();
   }
 
@@ -267,7 +270,10 @@
   Tensor &ResetLike(const Tensor &t);
 
   /// Reset the data type, it would reallocate block if type changes.
-  Tensor AsType(const DataType type);
+  Tensor AsType(const DataType type) const;
+
+  /// Change the data type of this tensor in place.
+  Tensor &ToType(const DataType type);
 
   /// Reset the device.
   /// If the target device is a diff device, then do deep data copy.
diff --git a/include/singa/io/communicator.h b/include/singa/io/communicator.h
index 3f738ea..8344873 100644
--- a/include/singa/io/communicator.h
+++ b/include/singa/io/communicator.h
@@ -106,8 +106,8 @@
                             float sparsThreshold, bool topK, Context *ctx);
   void _sparsification(Tensor &t, Tensor *accumulation, float sparsThreshold,
                        bool topK, Context *ctx);
-  void valSparsAllReduce(size_t num, float *accumulation, Context *ctx);
-  void topKSparsAllReduce(size_t num, float *accumulation, Context *ctx);
+  void valSparsAllReduce(size_t num, void *accumulation, Context *ctx);
+  void topKSparsAllReduce(size_t num, void *accumulation, Context *ctx);
 
   // last group of synchronized memory blocks
   std::shared_ptr<Device> device_ = nullptr;
@@ -123,13 +123,16 @@
 
   // normal synch
   size_t sendBuffOffset = 0;
-  float *fusedSendBuff;
-  float *fusedRecvBuff;
+  void *fusedSendBuff;
+  void *fusedRecvBuff;
+  void *offsetPointer;
+  size_t dataSize;
+  ncclDataType_t ncclType;
 
   // half synch
   bool halfInitialized;
-  __half *fusedSendBuffHalf;
-  __half *fusedRecvBuffHalf;
+  void *fusedSendBuffHalf;
+  void *fusedRecvBuffHalf;
 
   // sparsification
   cusparseHandle_t cusparse_handle;
@@ -142,9 +145,9 @@
   int *nnzGPU;
   int *nnzAllGPU;
   float threshold;
-  float *sparsSendBuff;
-  float *sparsRecvBuff;
-  float *backupBuff;
+  void *sparsSendBuff;
+  void *sparsRecvBuff;
+  void *backupBuff;
   int *fusedIndex;
 };
 }  // namespace singa
diff --git a/include/singa/utils/logging.h b/include/singa/utils/logging.h
index 9b9e643..92ab733 100644
--- a/include/singa/utils/logging.h
+++ b/include/singa/utils/logging.h
@@ -24,6 +24,7 @@
 #ifndef SINGA_UTILS_LOGGING_H_
 #define SINGA_UTILS_LOGGING_H_
 
+#include "singa/singa_config.h"
 #include <stdlib.h>
 
 #include <sstream>
diff --git a/python/singa/__init__.py b/python/singa/__init__.py
index 039d356..11db05a 100644
--- a/python/singa/__init__.py
+++ b/python/singa/__init__.py
@@ -17,4 +17,9 @@
 
 from . import singa_wrap
 
+import faulthandler
+faulthandler.enable()
+
+singa_wrap.InitLogging("")
+
 __version__ = singa_wrap.SINGA_VERSION
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 76a645e..518d53a 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -701,10 +701,12 @@
             a tuple for (db, dx), db is data for dL / db, dx is data
             for dL / dx.
         """
+        dtype = dy.data_type()
+        _dy = dy.AsType(tensor.float32)
         if self.axis == 0:
-            return dy, singa.Sum(dy, 0)
+            return dy, singa.Sum(_dy, 0).AsType(dtype)
         elif self.axis == 1:
-            return dy, singa.Sum(dy, 0)
+            return dy, singa.Sum(_dy, 0).AsType(dtype)
 
 
 def add_bias(x, b, axis=0):
@@ -1171,8 +1173,8 @@
         """
         posx = singa.AddFloat(x, 0.0001)
         loss = singa.SumAll(singa.__mul__(self.t, singa.Log(posx)))
-        negt = singa.AddFloat(singa.MultFloat(self.t,-1.0), 1.0)
-        negx = singa.AddFloat(singa.MultFloat(x,-1.0), 1.0001)
+        negt = singa.AddFloat(singa.MultFloat(self.t, -1.0), 1.0)
+        negx = singa.AddFloat(singa.MultFloat(x, -1.0), 1.0001)
         negLoss = singa.SumAll(singa.__mul__(negt, singa.Log(negx)))
         loss += negLoss
         loss /= -x.shape()[0]
@@ -1506,7 +1508,8 @@
         assert np.logical_and(
             -_x.shape[self.axis] < self.indices,
             self.indices <= _x.shape[self.axis]).all(
-            ), "The values of the indexes should be between %d and %d" % (-_x.shape[self.axis], _x.shape[self.axis] - 1)
+            ), "The values of the indexes should be between %d and %d" % (
+                -_x.shape[self.axis], _x.shape[self.axis] - 1)
 
         self.axis = self.axis % x_rank
         u_shape = self.updates.shape
@@ -1553,7 +1556,6 @@
     return ScatterElements(indices, updates, axis)(x)[0]
 
 
-
 class Concat(Operator):
     """
     Concatenate a list of tensors into a single tensor. All input tensors must
@@ -1623,6 +1625,8 @@
         a Tensor for the result
     """
     return Concat(axis)(*xs)[0]
+
+
 """
 def make_slice(arr, axis, i):  # type: ignore
         slc = [slice(None)] * arr.ndim
@@ -1630,6 +1634,7 @@
         return slc
 """
 
+
 class _Conv2d(Operator):
     """
     Init a conv 2d operator
@@ -4921,7 +4926,6 @@
             dW = singa.GpuRNNBackwardW(self.inputs['x'], self.inputs['hx'],
                                        self.inputs['y'], self.handle)
 
-
         return dx, dhx, dcx, dW
 
 
@@ -5550,7 +5554,8 @@
             self.condition = tensor.from_numpy(self.condition)
             self.condition.to_device(a.device())
             self.condition = self.condition.data
-        self.neg_condition = singa.AddFloat(singa.MultFloat(self.condition, -1.), 1.)
+        self.neg_condition = singa.AddFloat(
+            singa.MultFloat(self.condition, -1.), 1.)
         _a, _b = a, b
         dtype0 = _a.data_type()
         dtype1 = _b.data_type()
@@ -5558,11 +5563,11 @@
             _a = a.AsType(singa.kFloat32)
             _b = b.AsType(singa.kFloat32)
             res = singa.__add__(singa.__mul__(self.condition, _a),
-                             singa.__mul__(self.neg_condition, _b))
+                                singa.__mul__(self.neg_condition, _b))
             res = res.AsType(singa.kInt)
         else:
             res = singa.__add__(singa.__mul__(self.condition, _a),
-                             singa.__mul__(self.neg_condition, _b))
+                                singa.__mul__(self.neg_condition, _b))
         return res
 
     def backward(self, dy):
diff --git a/python/singa/layer.py b/python/singa/layer.py
index e5abea7..0954b86 100644
--- a/python/singa/layer.py
+++ b/python/singa/layer.py
@@ -22,6 +22,7 @@
 
 from singa import utils
 from .tensor import Tensor
+from . import tensor
 from . import singa_wrap as singa
 
 
@@ -167,6 +168,22 @@
                     sublayer.set_states(states)
         self.set_params(states)
 
+    def dtype_check(self, *inputs):
+        """ check if all input have same data type.
+
+        Args:
+            *inputs: input args consisting of only PyTensors
+        """
+        flag = inputs[0].device.graph_enabled()
+        inputs[0].device.EnableGraph(False)
+
+        x_dtype = inputs[0].dtype
+        for inp in inputs:
+            if inp.dtype != x_dtype:
+                inp.to_type(x_dtype)
+
+        inputs[0].device.EnableGraph(flag)
+
     def device_check(self, *inputs):
         """ Check if the devices of the input tensor are the same.
 
@@ -299,12 +316,18 @@
         w_shape = (self.in_features, self.out_features)
         b_shape = (self.out_features,)
 
-        self.W = Tensor(shape=w_shape, requires_grad=True, stores_grad=True)
+        self.W = Tensor(shape=w_shape,
+                        dtype=x.dtype,
+                        requires_grad=True,
+                        stores_grad=True)
         std = math.sqrt(2.0 / (self.in_features + self.out_features))
         self.W.gaussian(0.0, std)
 
         if self.bias:
-            self.b = Tensor(shape=b_shape, requires_grad=True, stores_grad=True)
+            self.b = Tensor(shape=b_shape,
+                            dtype=x.dtype,
+                            requires_grad=True,
+                            stores_grad=True)
             self.b.set_value(0.0)
         else:
             self.b = None
@@ -312,8 +335,10 @@
     def forward(self, x):
         if self.b:
             self.device_check(x, self.W, self.b)
+            self.dtype_check(x, self.W, self.b)
         else:
             self.device_check(x, self.W)
+            self.dtype_check(x, self.W)
 
         assert x.shape[1] == self.W.shape[0], (
             "Linear layer expects input features size %d received %d" %
@@ -656,20 +681,35 @@
                     )
         else:
             if not hasattr(self, "handle"):
-                self.handle = singa.CudnnConvHandle(
-                    _x.data,
-                    self.kernel_size,
-                    self.stride,
-                    self.padding,
-                    self.in_channels,
-                    self.nb_kernels,
-                    self.bias,
-                    self.group,
-                )
+                if _x.dtype == tensor.float16:
+                    self.handle = singa.CudnnConvHandle(
+                        _x.data,
+                        self.kernel_size,
+                        self.stride,
+                        self.padding,
+                        self.in_channels,
+                        self.nb_kernels,
+                        self.bias,
+                        self.group,
+                        1024*1024*1024,
+                        "tensor_ops"
+                    )
+                else:
+                    self.handle = singa.CudnnConvHandle(
+                        _x.data,
+                        self.kernel_size,
+                        self.stride,
+                        self.padding,
+                        self.in_channels,
+                        self.nb_kernels,
+                        self.bias,
+                        self.group,
+                    )
 
     def forward(self, x):
         # sanitize the device of params/states, TODO: better to decorate forward()
         self.device_check(x, *[s for k, s in self.get_states().items()])
+        self.dtype_check(x, *[s for k, s in self.get_states().items()])
 
         assert (self.group >= 1 and self.in_channels % self.group
                 == 0), "please set reasonable group."
@@ -816,6 +856,8 @@
 
         self.device_check(x, self.scale, self.bias, self.running_mean,
                           self.running_var)
+        self.dtype_check(x, self.scale, self.bias, self.running_mean,
+                         self.running_var)
 
         y = autograd.batchnorm_2d(
             self.handle,
diff --git a/python/singa/opt.py b/python/singa/opt.py
index 8eda563..015eea8 100755
--- a/python/singa/opt.py
+++ b/python/singa/opt.py
@@ -75,7 +75,7 @@
         config (Dict): specify the default values of configurable variables.
     """
 
-    def __init__(self, lr):
+    def __init__(self, lr, dtype=tensor.float32):
         # init lr(could be a constant scalar or a learning rate scheduler)
         if type(lr) == float or type(lr) == int:
             self.lr = Constant(lr)
@@ -85,6 +85,7 @@
             raise TypeError("Wrong learning rate type")
 
         # init step counter
+        self.dtype = dtype
         # TODO change type to int32
         self.step_counter = Tensor((1,), dtype=tensor.float32)
         self.step_counter.set_value(0)
@@ -217,8 +218,9 @@
                  momentum=0,
                  dampening=0,
                  weight_decay=0,
-                 nesterov=False):
-        super(SGD, self).__init__(lr)
+                 nesterov=False,
+                 dtype=tensor.float32):
+        super(SGD, self).__init__(lr, dtype)
 
         # init momentum
         if type(momentum) == float or type(momentum) == int:
@@ -230,7 +232,7 @@
             momentum = momentum.init_value
         else:
             raise TypeError("Wrong momentum type")
-        self.mom_value = self.momentum(self.step_counter)
+        self.mom_value = self.momentum(self.step_counter).as_type(self.dtype)
 
         # init dampening
         if type(dampening) == float or type(dampening) == int:
@@ -240,7 +242,7 @@
             dampening = dampening.init_value
         else:
             raise TypeError("Wrong dampening type")
-        self.dam_value = self.dampening(self.step_counter)
+        self.dam_value = self.dampening(self.step_counter).as_type(self.dtype)
 
         # init weight_decay
         if type(weight_decay) == float or type(weight_decay) == int:
@@ -252,7 +254,8 @@
             self.weight_decay = weight_decay
         else:
             raise TypeError("Wrong weight_decay type")
-        self.decay_value = self.weight_decay(self.step_counter)
+        self.decay_value = self.weight_decay(self.step_counter).as_type(
+            self.dtype)
 
         # init other params
         self.nesterov = nesterov
@@ -278,6 +281,9 @@
         self.device_check(param_value, self.step_counter, self.lr_value,
                           self.mom_value, self.dam_value, self.decay_value)
 
+        # the parameter's dtype must match the optimizer's dtype
+        assert param_value.dtype == self.dtype
+
         # TODO add branch operator
         # if self.decay_value != 0:
         if self.weight_decay.init_value != 0:
@@ -306,9 +312,9 @@
     def step(self):
         # increment step counter, lr and moment
         super().step()
-        mom_value = self.momentum(self.step_counter)
-        dam_value = self.dampening(self.step_counter)
-        decay_value = self.weight_decay(self.step_counter)
+        mom_value = self.momentum(self.step_counter).as_type(self.dtype)
+        dam_value = self.dampening(self.step_counter).as_type(self.dtype)
+        decay_value = self.weight_decay(self.step_counter).as_type(self.dtype)
         self.mom_value.copy_from(mom_value)
         self.dam_value.copy_from(dam_value)
         self.decay_value.copy_from(decay_value)
@@ -888,6 +894,9 @@
         acc = 0
         glist = []
         for p, g in autograd.backward(loss):
+            assert p.dtype == tensor.float32, (
+                'This function only supports 32-bit input tensors, '
+                'which are converted to 16 bits before transmission')
             if clipping:
                 g = autograd.clip(g, -clip_Value, clip_Value)
             if g.size() > threshold:
diff --git a/python/singa/tensor.py b/python/singa/tensor.py
index 4f62a31..e9e9ae7 100755
--- a/python/singa/tensor.py
+++ b/python/singa/tensor.py
@@ -65,6 +65,7 @@
 from .device import get_default_device
 
 int32 = 2  #core.proto.kInt32
+float16 = 1  #core.proto.kFloat16
 float32 = 0  #core.proto.kFloat32
 CTensor = singa.Tensor
 
@@ -268,6 +269,8 @@
         '''
         if dtype == singa.kInt:
             pass
+        elif dtype == singa.kFloat16:
+            pass
         elif dtype == singa.kFloat32:
             pass
         elif dtype == 'int':
@@ -280,6 +283,32 @@
         t.data = self.data.AsType(dtype)
         return t
 
+    def to_type(self, dtype):
+        '''Change the data type in place.
+
+        Args:
+            dtype: accepts 'int', 'float', singa.kInt, singa.kFloat16 or singa.kFloat32
+
+        Returns:
+            this tensor, with its data converted in place to the new type
+        '''
+        assert self.data.initialized()
+        if dtype == singa.kInt:
+            pass
+        elif dtype == singa.kFloat32:
+            pass
+        elif dtype == singa.kFloat16:
+            pass
+        elif dtype == 'int':
+            dtype = singa.kInt
+        elif dtype == 'float':
+            dtype = singa.kFloat32
+        else:
+            raise TypeError("invalid data type %s" % dtype)
+        self.data.ToType(dtype)
+        self.dtype = dtype
+        return self
+
     def to_device(self, device):
         '''Move the tensor data onto a given device.
 
@@ -341,10 +370,12 @@
         dt = np_array.dtype
         if dt == np.float32:
             self.data.CopyFloatDataFromHostPtr(np_array)
+        elif dt == np.float16:
+            self.data.CopyHalfFloatDataFromHostPtr(np_array)
         elif dt == np.int or dt == np.int32:
             self.data.CopyIntDataFromHostPtr(np_array)
         else:
-            print('Not implemented yet for ', dt)
+            raise NotImplementedError('Not implemented yet for ', dt)
 
     def copy_data(self, t):
         '''Copy data from other Tensor instance.
@@ -744,8 +775,15 @@
         one /= self
         return one
 
+    dtype_name = {
+        float16: "float16",
+        float32: "float32",
+        int32: "int32",
+    }
+
     def __repr__(self):
-        return np.array2string(to_numpy(self))
+        return "%s, %s" % (np.array2string(
+            to_numpy(self)), self.dtype_name[self.dtype])
 
 
 ''' alias Tensor to PlaceHolder
@@ -863,6 +901,8 @@
 
     if np_array.dtype == np.float32:
         dtype = float32
+    elif np_array.dtype == np.float16:
+        dtype = float16
     else:
         assert np_array.dtype == np.int32, \
             'Only float and int tensors are supported'
@@ -900,6 +940,8 @@
     th = to_host(t)
     if th.dtype == float32:
         np_array = th.data.GetFloatValue(int(th.size()))
+    elif th.dtype == float16:
+        np_array = th.data.GetHalfFloatValue(int(th.size()))
     elif th.dtype == int32:
         np_array = th.data.GetIntValue(int(th.size()))
     else:
@@ -1754,10 +1796,12 @@
     dt = np_array.dtype
     if dt == np.float32:
         data.CopyFloatDataFromHostPtr(np_array)
+    elif dt == np.float16:
+        data.CopyHalfFloatDataFromHostPtr(np_array)
     elif dt == np.int or dt == np.int32:
         data.CopyIntDataFromHostPtr(np_array)
     else:
-        print('Not implemented yet for ', dt)
+        raise NotImplementedError('Not implemented yet for ', dt)
 
 
 def concatenate(tensors, axis):
diff --git a/src/api/core_tensor.i b/src/api/core_tensor.i
index f7e3160..154240a 100755
--- a/src/api/core_tensor.i
+++ b/src/api/core_tensor.i
@@ -33,6 +33,9 @@
 #include "singa/core/device.h"
 #include "singa/proto/core.pb.h"
 // #include "singa/proto/model.pb.h"
+#include "half.hpp"
+#include "singa/utils/logging.h"
+
 using singa::DataType;
 %}
 %shared_ptr(singa::Device)
@@ -57,6 +60,12 @@
 %apply (int *ARGOUT_ARRAY1, int DIM1) {
        (int *value, const size_t num)
 }
+%apply (half_float::half *IN_ARRAY1, int DIM1) {
+       (const half_float::half *src, const size_t num)
+}
+%apply (half_float::half *ARGOUT_ARRAY1, int DIM1) {
+       (half_float::half *value, const size_t num)
+}
 #endif // USE_PYTHON
 
 #if USE_JAVA
@@ -96,6 +105,9 @@
 
     std::shared_ptr<singa::Device> device() const;
 
+    template <typename SType> void get_value(SType* value, const size_t num) const;
+    %template(GetHalfFloatValue) get_value<half_float::half>;
+
     template <typename SType> void GetValue(SType* value, const size_t num) const;
     %template(GetFloatValue) GetValue<float>;
     %template(GetIntValue) GetValue<int>;
@@ -109,11 +121,13 @@
     bool transpose() const;
     size_t nDim() const;
 
+    bool initialized() const;
     size_t Size() const;
     size_t MemSize() const;
 
     void ResetLike(const Tensor &t);
     Tensor AsType(DataType type);
+    Tensor ToType(DataType type);
     void ToDevice(std::shared_ptr<singa::Device> dev);
     void ToHost();
     float L2() const;
@@ -124,6 +138,7 @@
                                                        const size_t offset = 0) const;
     %template(CopyFloatDataFromHostPtr) CopyDataFromHostPtr<float>;
     %template(CopyIntDataFromHostPtr) CopyDataFromHostPtr<int>;
+    %template(CopyHalfFloatDataFromHostPtr) CopyDataFromHostPtr<half_float::half>;
 
     void CopyData(const Tensor &other);
     void RepeatData(std::vector<size_t> repeats, int axis, int total_repeats, const Tensor &src);
@@ -371,4 +386,6 @@
 
   Tensor CrossEntropyFwd(const Tensor& p, const Tensor& t);
   Tensor SoftmaxCrossEntropyBwd(const Tensor& p, const Tensor& t);
+
+  void InitLogging(const char* argv);
 }
diff --git a/src/api/numpy.i b/src/api/numpy.i
index e58090e..de4203e 100644
--- a/src/api/numpy.i
+++ b/src/api/numpy.i
@@ -40,6 +40,7 @@
 #define NO_IMPORT_ARRAY
 #endif
 #include "stdio.h"
+#include "half.hpp"
 #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
 #include <numpy/arrayobject.h>
 %}
@@ -3090,6 +3091,7 @@
 %numpy_typemaps(unsigned long long, NPY_ULONGLONG, int)
 %numpy_typemaps(float             , NPY_FLOAT    , int)
 %numpy_typemaps(double            , NPY_DOUBLE   , int)
+%numpy_typemaps(half_float::half  , NPY_FLOAT16  , int)
 
 /* ***************************************************************
  * The follow macro expansion does not work, because C++ bool is 4
diff --git a/src/core/tensor/math_kernel.cu b/src/core/tensor/math_kernel.cu
index 43be56d..f25f7ab 100644
--- a/src/core/tensor/math_kernel.cu
+++ b/src/core/tensor/math_kernel.cu
@@ -1,29 +1,30 @@
 /************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ *************************************************************/
 
 #include "singa/singa_config.h"
 #ifdef USE_CUDA
-#include <cmath>
 #include <algorithm>
 #include <cfloat>
+#include <cmath>
+
 #include "./math_kernel.h"
 
 #define CU2DBLOCK_X 32
@@ -88,6 +89,33 @@
   }
 }
 
+__global__ void KernelTraverseUnaryTransform(const size_t n, size_t nDim,
+                                             const __half *in, const int *shape,
+                                             const int *stride, __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    int shape_accu = n;
+    size_t offset = 0;
+    int remains = i;
+
+    for (int k = 0; k < nDim; k++) {
+      shape_accu = shape_accu / shape[k];
+      int idx = remains / shape_accu;
+      remains = remains % shape_accu;
+      offset = offset + idx * stride[k];
+    }
+    out[i] = in[offset];
+  }
+}
+
+__global__ void KernelAdd(const size_t n, const __half *in1, const __half *in2,
+                          __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __hadd(in1[i], in2[i]);
+  }
+}
+
 __global__ void KernelAdd(const size_t n, const float *in1, const float *in2,
                           float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
@@ -104,6 +132,14 @@
   }
 }
 
+__global__ void KernelSub(const size_t n, const __half *in1, const __half *in2,
+                          __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __hsub(in1[i], in2[i]);
+  }
+}
+
 __global__ void KernelSub(const size_t n, const float *in1, const float *in2,
                           float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
@@ -149,16 +185,15 @@
 __global__ void KernelRoundE(const size_t n, const float *in, float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
-    float doub = in[i]*2;
+    float doub = in[i] * 2;
     if (ceilf(doub) == doub) {
-      out[i] = roundf(in[i]/2)*2;
+      out[i] = roundf(in[i] / 2) * 2;
     } else {
       out[i] = roundf(in[i]);
     }
   }
 }
 
-
 __global__ void KernelLog(const size_t n, const float *in, float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
@@ -204,18 +239,41 @@
   }
 }
 
-__global__ void KernelReLUBackward(const size_t n, const float *in1, const float *in2,
-                         float *out) {
+__global__ void KernelRelu(const size_t n, const __half *in, __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    if (__hgt(in[i], 0.0f)) {
+      out[i] = in[i];
+    } else {
+      out[i] = 0.0f;
+    }
+  }
+}
+
+__global__ void KernelReLUBackward(const size_t n, const float *in1,
+                                   const float *in2, float *out) {
   for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
     out[i] = in2[i] > 0 ? in1[i] : 0.0f;
   }
 }
 
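+// fp16 ReLU backward: passes the upstream gradient in1 through where the
+// corresponding forward value in2 is positive, and writes zero elsewhere.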
+__global__ void KernelReLUBackward(const size_t n, const __half *in1,
+                                   const __half *in2, __half *out) {
+  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    if (__hgt(in2[i], 0)) {
+      out[i] = in1[i];
+    } else {
+      out[i] = 0;
+    }
+  }
+}
+
 __global__ void KernelAbs(const size_t n, const float *in, float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
-    out[i] =  max(in[i], -in[i]);
+    out[i] = max(in[i], -in[i]);
   }
 }
 
@@ -239,7 +297,7 @@
     out[i] = logf(1 + expf(in[i]));
   }
 }
-  
+
 __global__ void KernelSoftsign(const size_t n, const float *in, float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
@@ -268,6 +326,14 @@
   }
 }
 
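+// fp16 pow is computed in float: operands are widened with __half2float,
+// combined with __powf, and the result narrowed back with __float2half.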
+__global__ void KernelPow(const size_t n, const __half *in1, const __half *in2,
+                          __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __float2half(__powf(__half2float(in1[i]), __half2float(in2[i])));
+  }
+}
+
 __global__ void KernelPow(const size_t n, const float *in, const float x,
                           float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
@@ -284,6 +350,14 @@
   }
 }
 
+__global__ void KernelMult(const size_t n, const __half *in1, const __half *in2,
+                           __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __hmul(in1[i], in2[i]);
+  }
+}
+
 __global__ void KernelMult(const size_t n, const float *in, const float x,
                            float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
@@ -292,6 +366,22 @@
   }
 }
 
+__global__ void KernelMult(const size_t n, const __half *in, const __half x,
+                           __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __hmul(in[i], x);
+  }
+}
+
+__global__ void KernelDiv(const size_t n, const __half *in1, const __half *in2,
+                          __half *out) {
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
+       i += blockDim.x * gridDim.x) {
+    out[i] = __hdiv(in1[i], in2[i]);
+  }
+}
+
 __global__ void KernelDiv(const size_t n, const float *in1, const float *in2,
                           float *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
@@ -330,7 +420,7 @@
 }
 
 __global__ void KernelBGE(const size_t num, const float *in1, const float *in2,
-                         float *out) {
+                          float *out) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
        idx += blockDim.x * gridDim.x) {
     out[idx] = in1[idx] >= in2[idx] ? 1.0f : 0.0f;
@@ -346,7 +436,7 @@
 }
 
 __global__ void KernelBEQ(const size_t num, const float *in1, const float *in2,
-                         float *out) {
+                          float *out) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
        idx += blockDim.x * gridDim.x) {
     out[idx] = in1[idx] == in2[idx] ? 1.0f : 0.0f;
@@ -361,7 +451,7 @@
   }
 }
 __global__ void KernelBGT(const size_t num, const float *in1, const float *in2,
-                         float *out) {
+                          float *out) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
        idx += blockDim.x * gridDim.x) {
     out[idx] = in1[idx] > in2[idx] ? 1.0f : 0.0f;
@@ -375,7 +465,7 @@
   }
 }
 __global__ void KernelBLE(const size_t num, const float *in1, const float *in2,
-                         float *out) {
+                          float *out) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
        idx += blockDim.x * gridDim.x) {
     out[idx] = in1[idx] <= in2[idx] ? 1.0f : 0.0f;
@@ -389,14 +479,14 @@
   }
 }
 __global__ void KernelBLT(const size_t num, const float *in1, const float *in2,
-                         float *out) {
+                          float *out) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
        idx += blockDim.x * gridDim.x) {
     out[idx] = in1[idx] < in2[idx] ? 1.0f : 0.0f;
   }
 }
-__global__ void KernelRowMax(const size_t nrow, const size_t ncol, const float *inPtr,
-    float *outPtr) {
+__global__ void KernelRowMax(const size_t nrow, const size_t ncol,
+                             const float *inPtr, float *outPtr) {
   for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nrow;
        idx += blockDim.x * gridDim.x) {
     int offset = idx * ncol;
@@ -407,7 +497,47 @@
     outPtr[idx] = maxval;
   }
 }
-__global__ void KernelComputeCrossEntropy(const bool int_target, const size_t batchsize,
+
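+// fp16 cross-entropy: __HALF_EPS (~1e-7, a subnormal in half precision) keeps
+// hlog() away from log(0); hlog, like the other fp16 intrinsics, needs sm_53+.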
+__global__ void KernelComputeCrossEntropy(const bool int_target,
+                                          const size_t batchsize,
+                                          const size_t dim, const __half *p,
+                                          const int *t, __half *loss) {
+  size_t sample = blockIdx.x * blockDim.x + threadIdx.x;
+  size_t num_threads = blockDim.x * gridDim.x;
+  __half __HALF_EPS = 0.0000001;
+  if (int_target) {
+    for (; sample < batchsize; sample += num_threads) {
+      __half prob_of_truth = p[sample * dim + t[sample]];
+      if (prob_of_truth > __HALF_EPS) {
+        loss[sample] = -hlog(prob_of_truth);
+      } else {
+        loss[sample] = -hlog(__HALF_EPS);
+      }
+    }
+  } else {
+    for (; sample < batchsize; sample += num_threads) {
+      __half sum = 0;
+      for (size_t j = 0; j < dim; j++) {
+        sum = __hadd(sum, __int2half_rd(t[sample * dim + j]));
+      }
+      loss[sample] = 0;
+      for (size_t j = 0, offset = sample * dim; j < dim; j++, offset++) {
+        if (__hgt(p[offset], __HALF_EPS)) {
+          loss[sample] = __hsub(
+              loss[sample],
+              __hmul(__hdiv(__int2half_rd(t[offset]), sum), hlog(p[offset])));
+        } else {
+          loss[sample] = __hsub(
+              loss[sample],
+              __hmul(__hdiv(__int2half_rd(t[offset]), sum), hlog(__HALF_EPS)));
+        }
+      }
+    }
+  }
+}
+
+__global__ void KernelComputeCrossEntropy(const bool int_target,
+                                          const size_t batchsize,
                                           const size_t dim, const float *p,
                                           const int *t, float *loss) {
   size_t sample = blockIdx.x * blockDim.x + threadIdx.x;
@@ -431,7 +561,8 @@
   }
 }
 
-__global__ void KernelSoftmaxCrossEntropyBwd(const bool int_target, const size_t batchsize,
+__global__ void KernelSoftmaxCrossEntropyBwd(const bool int_target,
+                                             const size_t batchsize,
                                              const size_t dim, const float *p,
                                              const int *t, float *grad) {
   size_t sample = blockIdx.x * blockDim.x + threadIdx.x;
@@ -454,6 +585,31 @@
   }
 }
 
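+// fp16 softmax-cross-entropy backward: for integer targets, subtract 1 from the
+// predicted probability of the labelled class; for soft targets, subtract the
+// normalized target weight t/sum from every entry (mirrors the float overload).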
+__global__ void KernelSoftmaxCrossEntropyBwd(const bool int_target,
+                                             const size_t batchsize,
+                                             const size_t dim, const __half *p,
+                                             const int *t, __half *grad) {
+  size_t sample = blockIdx.x * blockDim.x + threadIdx.x;
+  size_t num_threads = blockDim.x * gridDim.x;
+  if (int_target) {
+    for (; sample < batchsize; sample += num_threads) {
+      size_t pos = sample * dim + t[sample];
+      grad[pos] = __hsub(p[pos], 1);
+    }
+  } else {
+    for (; sample < batchsize; sample += num_threads) {
+      __half sum = 0;
+      for (size_t j = 0; j < dim; j++) {
+        sum = __hadd(sum, __int2half_rd(t[sample * dim + j]));
+      }
+      for (size_t j = 0, offset = sample * dim; j < dim; j++, offset++) {
+        grad[offset] =
+            __hsub(grad[offset], __hdiv(__int2half_rd(t[offset]), sum));
+      }
+    }
+  }
+}
+
 __global__ void KernelFloat2Half(const size_t n, const float *in, __half *out) {
   for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
@@ -468,15 +624,16 @@
   }
 }
 
-//kernal used by the threshold based sparsification
-__global__ void KernelSparsAbs(const size_t n, const float threshold, const float *in, float *out) {
+// kernel used by the threshold-based sparsification
+__global__ void KernelSparsAbs(const size_t n, const float threshold,
+                               const float *in, float *out) {
   for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
     out[i] = fabs(in[i]) >= threshold ? in[i] : 0.0f;
   }
 }
 
-//kernal used by the threshold based sparsification
+// kernel used by the threshold-based sparsification
 __global__ void KernelSparsIndex(const size_t n, const float *in, int *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
@@ -484,7 +641,7 @@
   }
 }
 
-//kernal used by the topK based sparsification
+// kernel used by the top-K based sparsification
 __global__ void KernelGenerateIndex(const size_t n, int *out) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
        i += blockDim.x * gridDim.x) {
@@ -492,165 +649,174 @@
   }
 }
 
-//cuda unary elementwise ops kernel template 
-#define GenUnaryCudaKernel(fn,kernelfn,cudafn)                                \
-  __global__ void kernelfn(const size_t n, const float *in, float *out) {     \
-    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;                \
-         i += blockDim.x * gridDim.x) {                                       \
-      out[i] = cudafn(in[i]);                                                 \
-    }                                                                         \
-  }                                                                           \
-  void fn(const size_t n, const float *in, float *out, cudaStream_t s) {      \
-    kernelfn <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);             \
+// cuda unary elementwise ops kernel template
+#define GenUnaryCudaKernel(fn, kernelfn, cudafn)                          \
+  __global__ void kernelfn(const size_t n, const float *in, float *out) { \
+    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;            \
+         i += blockDim.x * gridDim.x) {                                   \
+      out[i] = cudafn(in[i]);                                             \
+    }                                                                     \
+  }                                                                       \
+  void fn(const size_t n, const float *in, float *out, cudaStream_t s) {  \
+    kernelfn<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);     \
   }
 
-GenUnaryCudaKernel(cos,KernelCos,cosf);
-GenUnaryCudaKernel(cosh,KernelCosh,coshf);
-GenUnaryCudaKernel(acos,KernelAcos,acosf);
-GenUnaryCudaKernel(acosh,KernelAcosh,acoshf);
-GenUnaryCudaKernel(sin,KernelSin,sinf);
-GenUnaryCudaKernel(sinh,KernelSinh,sinhf);
-GenUnaryCudaKernel(asin,KernelAsin,asinf);
-GenUnaryCudaKernel(asinh,KernelAsinh,asinhf);
-GenUnaryCudaKernel(tan,KernelTan,tanf);
-GenUnaryCudaKernel(tanh,KernelTanh,tanhf);
-GenUnaryCudaKernel(atan,KernelAtan,atanf);
-GenUnaryCudaKernel(atanh,KernelAtanh,atanhf);
-
+GenUnaryCudaKernel(cos, KernelCos, cosf);
+GenUnaryCudaKernel(cosh, KernelCosh, coshf);
+GenUnaryCudaKernel(acos, KernelAcos, acosf);
+GenUnaryCudaKernel(acosh, KernelAcosh, acoshf);
+GenUnaryCudaKernel(sin, KernelSin, sinf);
+GenUnaryCudaKernel(sinh, KernelSinh, sinhf);
+GenUnaryCudaKernel(asin, KernelAsin, asinf);
+GenUnaryCudaKernel(asinh, KernelAsinh, asinhf);
+GenUnaryCudaKernel(tan, KernelTan, tanf);
+GenUnaryCudaKernel(tanh, KernelTanh, tanhf);
+GenUnaryCudaKernel(atan, KernelAtan, atanf);
+GenUnaryCudaKernel(atanh, KernelAtanh, atanhf);
 
 // ********************************
 // Functions call kernels
 // ********************************
 
 void float2half(const size_t n, const float *in, __half *out, cudaStream_t s) {
-  KernelFloat2Half <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelFloat2Half<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void half2float(const size_t n, const __half *in, float *out, cudaStream_t s) {
-  KernelHalf2Float <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelHalf2Float<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
-void sparsabs(const size_t n, const float threshold, const float *in, float *out, cudaStream_t s) {
-  KernelSparsAbs <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, threshold, in, out);
+void sparsabs(const size_t n, const float threshold, const float *in,
+              float *out, cudaStream_t s) {
+  KernelSparsAbs<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, threshold, in,
+                                                             out);
 }
 
 void sparsindex(const size_t n, const float *in, int *out, cudaStream_t s) {
-  KernelSparsIndex <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSparsIndex<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void generateindex(const size_t n, int *out, cudaStream_t s) {
-  KernelGenerateIndex <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, out);
+  KernelGenerateIndex<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, out);
 }
 
-//used by the threshold based sparsification
+// used by the threshold-based sparsification
 void removezeroval(const size_t n, float *in, cudaStream_t s) {
   thrust::remove(thrust::cuda::par.on(s), in, in + n, float(0));
 }
 
-//used by the threshold based sparsification
+// used by the threshold-based sparsification
 void removezeroidx(const size_t n, int *in, cudaStream_t s, int *address) {
-  thrust::remove(thrust::cuda::par.on(s), in, in + n, int(0));  
+  thrust::remove(thrust::cuda::par.on(s), in, in + n, int(0));
   int a = thrust::count(thrust::cuda::par.on(s), in, in + n, int(0));
   *address = n - a;
 }
 
-struct absgreater : public thrust::binary_function<float,float,bool>
-{
+struct absgreater : public thrust::binary_function<float, float, bool> {
   thrust::maximum<int> max;
-  __host__ __device__ bool operator()(const float &lhs, const float &rhs) const {
-     return max(lhs, -lhs) > max(rhs, -rhs);
+  __host__ __device__ bool operator()(const float &lhs,
+                                      const float &rhs) const {
+    return max(lhs, -lhs) > max(rhs, -rhs);
   }
 };
 
-//used by the topK based sparsification
+// used by the top-K based sparsification
 void sortbykey(const size_t n, float *key, int *value, cudaStream_t s) {
-  thrust::sort_by_key(thrust::cuda::par.on(s), key, key + n, value, absgreater());
+  thrust::sort_by_key(thrust::cuda::par.on(s), key, key + n, value,
+                      absgreater());
 }
 
 void set(const size_t n, const float v, float *out, cudaStream_t s) {
-  KernelSet <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, v, out);
+  KernelSet<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, v, out);
 }
 
 void abs(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelAbs <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelAbs<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
-void cast_float_2_int(const size_t n, const float *src, int *dst, cudaStream_t s) {
-  KernelCastFloat2Int <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, src, dst);
+void cast_float_2_int(const size_t n, const float *src, int *dst,
+                      cudaStream_t s) {
+  KernelCastFloat2Int<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, src, dst);
 }
 
-void cast_int_2_float(const size_t n, const int *src, float *dst, cudaStream_t s) {
-  KernelCastInt2Float <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, src, dst);
+void cast_int_2_float(const size_t n, const int *src, float *dst,
+                      cudaStream_t s) {
+  KernelCastInt2Float<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, src, dst);
 }
 
 void sign(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSign <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSign<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void exp(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelExp <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelExp<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void erf(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelErf <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelErf<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void ceil2(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelCeil2 <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelCeil2<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void floor(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelFloor <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelFloor<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void round(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelRound <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelRound<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void rounde(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelRoundE <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelRoundE<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void log(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelLog <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelLog<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void sqrt(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSqrt <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSqrt<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void square(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSquare <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSquare<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void relu(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelRelu <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelRelu<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
+}
+
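+// __half overload of relu, dispatching to the fp16 KernelRelu defined above.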
+void relu(const size_t n, const __half *in, __half *out, cudaStream_t s) {
+  KernelRelu<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 void sigmoid(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSigmoid <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSigmoid<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void softplus(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSoftplus <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, out);
+  KernelSoftplus<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, out);
 }
 
 void softsign(const size_t n, const float *in, float *out, cudaStream_t s) {
-  KernelSoftsign <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF>>> (n, in, out);
+  KernelSoftsign<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF>>>(n, in, out);
 }
 
 void clamp(const size_t n, const float low, const float high, const float *in,
            float *out, cudaStream_t s) {
-  KernelClamp <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, low, high, in, out);
+  KernelClamp<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, low, high, in,
+                                                          out);
 }
 
 void pow(const size_t n, const float *in, const float x, float *out,
          cudaStream_t s) {
-  KernelPow <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, x, out);
+  KernelPow<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, x, out);
 }
 
 void add(const size_t n, const float *in, const float x, float *out,
          cudaStream_t s) {
-  KernelAdd <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, x, out);
+  KernelAdd<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, x, out);
 }
 
 void traverse_unary_transform(const size_t n, size_t nDim, const float *in,
@@ -660,89 +826,128 @@
       n, nDim, in, shape, stride, out);
 }
 
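+// __half overload of traverse_unary_transform, launching the fp16 traversal kernel.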
+void traverse_unary_transform(const size_t n, size_t nDim, const __half *in,
+                              const int *shape, const int *stride, __half *out,
+                              cudaStream_t s) {
+  KernelTraverseUnaryTransform<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(
+      n, nDim, in, shape, stride, out);
+}
+
 void mult(const size_t n, const float *in, const float x, float *out,
           cudaStream_t s) {
-  KernelMult <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in, x, out);
+  KernelMult<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, x, out);
+}
+
+void mult(const size_t n, const __half *in, const __half x, __half *out,
+          cudaStream_t s) {
+  KernelMult<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in, x, out);
 }
 
 void div(const size_t n, const float x, const float *in, float *out,
-          cudaStream_t s) {
-  KernelDiv <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, x, in, out);
+         cudaStream_t s) {
+  KernelDiv<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, x, in, out);
 }
 
 void threshold(const size_t n, const float x, const float *in, float *out,
                cudaStream_t s) {
-  KernelThreshold <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, x, in, out);
+  KernelThreshold<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, x, in, out);
 }
 
-void relubackward(const size_t num, const float *in1, const float *in2, float *out,
-        cudaStream_t s) {
-  KernelReLUBackward <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+void relubackward(const size_t num, const float *in1, const float *in2,
+                  float *out, cudaStream_t s) {
+  KernelReLUBackward<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1,
+                                                                   in2, out);
+}
+
+void relubackward(const size_t num, const __half *in1, const __half *in2,
+                  __half *out, cudaStream_t s) {
+  KernelReLUBackward<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1,
+                                                                   in2, out);
 }
 
 void gt(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s) {
-  KernelGT <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in, x, out);
+  KernelGT<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in, x, out);
 }
 void gt(const size_t num, const float *in1, const float *in2, float *out,
         cudaStream_t s) {
-  KernelBGT <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+  KernelBGT<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1, in2, out);
 }
 void ge(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s) {
-  KernelGE <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in, x, out);
+  KernelGE<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in, x, out);
 }
 void ge(const size_t num, const float *in1, const float *in2, float *out,
         cudaStream_t s) {
-  KernelBGE <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+  KernelBGE<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1, in2, out);
 }
 void eq(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s) {
-  KernelEQ <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in, x, out);
+  KernelEQ<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in, x, out);
 }
 void eq(const size_t num, const float *in1, const float *in2, float *out,
         cudaStream_t s) {
-  KernelBEQ <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+  KernelBEQ<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1, in2, out);
 }
 void lt(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s) {
-  KernelLT <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in, x, out);
+  KernelLT<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in, x, out);
 }
 void lt(const size_t num, const float *in1, const float *in2, float *out,
         cudaStream_t s) {
-  KernelBLT <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+  KernelBLT<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1, in2, out);
 }
 void le(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s) {
-  KernelLE <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in, x, out);
+  KernelLE<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in, x, out);
 }
 void le(const size_t num, const float *in1, const float *in2, float *out,
         cudaStream_t s) {
-  KernelBLE <<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (num, in1, in2, out);
+  KernelBLE<<<ceil(num / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(num, in1, in2, out);
 }
 void pow(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s) {
-  KernelPow <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in1, in2, out);
+  KernelPow<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
+}
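+// __half overloads of the binary element-wise wrappers (pow, add, sub, mult,
+// div) follow; each launches the corresponding fp16 kernel on stream s.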
+void pow(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s) {
+  KernelPow<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
 }
 
 void add(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s) {
-  KernelAdd <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in1, in2, out);
+  KernelAdd<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
+}
+void add(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s) {
+  KernelAdd<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
 }
 
 void sub(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s) {
-  KernelSub <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in1, in2, out);
+  KernelSub<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
+}
+void sub(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s) {
+  KernelSub<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
 }
 
 void mult(const size_t n, const float *in1, const float *in2, float *out,
           cudaStream_t s) {
-  KernelMult <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in1, in2, out);
+  KernelMult<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
+}
+void mult(const size_t n, const __half *in1, const __half *in2, __half *out,
+          cudaStream_t s) {
+  KernelMult<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
 }
 
 void div(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s) {
-  KernelDiv <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (n, in1, in2, out);
+  KernelDiv<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
+}
+void div(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s) {
+  KernelDiv<<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>>(n, in1, in2, out);
 }
 
 /*
@@ -754,21 +959,40 @@
 }
 */
 
-void ComputeCrossEntropy(const bool int_target, size_t batchsize, const size_t dim, const float *p,
-                         const int *t, float *loss, cudaStream_t stream) {
-  KernelComputeCrossEntropy <<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0, stream>>>
-      (int_target, batchsize, dim, p, t, loss);
+void ComputeCrossEntropy(const bool int_target, size_t batchsize,
+                         const size_t dim, const float *p, const int *t,
+                         float *loss, cudaStream_t stream) {
+  KernelComputeCrossEntropy<<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0,
+                              stream>>>(int_target, batchsize, dim, p, t, loss);
 }
 
-void SoftmaxCrossEntropyBwd(const bool int_target, size_t batchsize, const size_t dim, const float *p,
-                            const int *t, float *grad, cudaStream_t stream) {
-  KernelSoftmaxCrossEntropyBwd <<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0, stream>>>
-      (int_target, batchsize, dim, p, t, grad);
+void ComputeCrossEntropy(const bool int_target, size_t batchsize,
+                         const size_t dim, const __half *p, const int *t,
+                         __half *loss, cudaStream_t stream) {
+  KernelComputeCrossEntropy<<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0,
+                              stream>>>(int_target, batchsize, dim, p, t, loss);
+}
+
+void SoftmaxCrossEntropyBwd(const bool int_target, size_t batchsize,
+                            const size_t dim, const float *p, const int *t,
+                            float *grad, cudaStream_t stream) {
+  KernelSoftmaxCrossEntropyBwd<<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0,
+                                 stream>>>(int_target, batchsize, dim, p, t,
+                                           grad);
+}
+
+void SoftmaxCrossEntropyBwd(const bool int_target, size_t batchsize,
+                            const size_t dim, const __half *p, const int *t,
+                            __half *grad, cudaStream_t stream) {
+  KernelSoftmaxCrossEntropyBwd<<<ceil(batchsize / CU1DBLOCKF), CU1DBLOCKF, 0,
+                                 stream>>>(int_target, batchsize, dim, p, t,
+                                           grad);
 }
 
 void RowMax(const size_t nrow, const size_t ncol, const float *inPtr,
-    float *outPtr, cudaStream_t stream) {
-  KernelRowMax <<<ceil(nrow / CU1DBLOCKF), CU1DBLOCKF, 0, stream>>>(nrow, ncol, inPtr, outPtr);
+            float *outPtr, cudaStream_t stream) {
+  KernelRowMax<<<ceil(nrow / CU1DBLOCKF), CU1DBLOCKF, 0, stream>>>(
+      nrow, ncol, inPtr, outPtr);
 }
 
 /*
@@ -791,7 +1015,8 @@
 }
 
 void softplus_grad(int n, const float *in, float *out, cudaStream_t s) {
-  kernel_softplus_grad <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (in, out, n);
+  kernel_softplus_grad <<<ceil(n / CU1DBLOCKF), CU1DBLOCKF, 0, s>>> (in, out,
+                                                                     n);
 }
 
 
@@ -922,7 +1147,6 @@
 }
 */
 
-
 }  // namespace cuda
 }  // namespace singa
 
diff --git a/src/core/tensor/math_kernel.h b/src/core/tensor/math_kernel.h
index 69e5047..668df4d 100644
--- a/src/core/tensor/math_kernel.h
+++ b/src/core/tensor/math_kernel.h
@@ -69,6 +69,7 @@
 void atan(const size_t n, const float *in, float *out, cudaStream_t s);
 void atanh(const size_t n, const float *in, float *out, cudaStream_t s);
 void relu(const size_t n, const float *in, float *out, cudaStream_t s);
+void relu(const size_t n, const __half *in, __half *out, cudaStream_t s);
 void sigmoid(const size_t n, const float *in, float *out, cudaStream_t s);
 void softplus(const size_t n, const float *in, float *out, cudaStream_t s);
 void softsign(const size_t n, const float *in, float *out, cudaStream_t s);
@@ -83,10 +84,15 @@
 
 void mult(const size_t n, const float *in, const float x, float *out,
           cudaStream_t s);
+void mult(const size_t n, const __half *in, const __half x, __half *out,
+          cudaStream_t s);
 
 void traverse_unary_transform(const size_t n, size_t nDim, const float *in,
                               const int *shape, const int *stride, float *out,
                               cudaStream_t s);
+void traverse_unary_transform(const size_t n, size_t nDim, const __half *in,
+                              const int *shape, const int *stride, __half *out,
+                              cudaStream_t s);
 
 void div(const size_t n, const float x, const float *in, float *out,
          cudaStream_t s);
@@ -97,6 +103,9 @@
 void relubackward(const size_t num, const float *in1, const float *in2,
                   float *out, cudaStream_t s);
 
+void relubackward(const size_t num, const __half *in1, const __half *in2,
+                  __half *out, cudaStream_t s);
+
 void gt(const size_t num, const float *in, const float x, float *out,
         cudaStream_t s);
 void gt(const size_t num, const float *in1, const float *in2, float *out,
@@ -125,27 +134,43 @@
 // 2 inputs
 void pow(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s);
+void pow(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s);
 
 void add(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s);
+void add(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s);
 
 void sub(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s);
+void sub(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s);
 
 void mult(const size_t n, const float *in1, const float *in2, float *out,
           cudaStream_t s);
+void mult(const size_t n, const __half *in1, const __half *in2, __half *out,
+          cudaStream_t s);
 
 void div(const size_t n, const float *in1, const float *in2, float *out,
          cudaStream_t s);
+void div(const size_t n, const __half *in1, const __half *in2, __half *out,
+         cudaStream_t s);
 
 // void sum(const size_t n, const float *in, float *out, cudaStream_t s);
 
 void ComputeCrossEntropy(bool int_target, const size_t batchsize,
                          const size_t dim, const float *p, const int *t,
                          float *loss, cudaStream_t stream);
+void ComputeCrossEntropy(bool int_target, const size_t batchsize,
+                         const size_t dim, const __half *p, const int *t,
+                         __half *loss, cudaStream_t stream);
 void SoftmaxCrossEntropyBwd(bool int_target, const size_t batchsize,
                             const size_t dim, const float *p, const int *t,
                             float *grad, cudaStream_t stream);
+void SoftmaxCrossEntropyBwd(bool int_target, const size_t batchsize,
+                            const size_t dim, const __half *p, const int *t,
+                            __half *grad, cudaStream_t stream);
 
 void RowMax(const size_t nrow, const size_t ncol, const float *inPtr,
             float *outPtr, cudaStream_t stream);
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index 99d9e2a..08e5d41 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 #include "singa/core/tensor.h"
-// #include "singa/utils/stacktrace.h"
 #include <algorithm>
 #include <utility>
 
@@ -29,6 +28,11 @@
 
 namespace singa {
 
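+// Explicit instantiations of the TypeCast conversions used below
+// (half <-> float and int <-> float).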
+template half_float::half TypeCast(const float &x);
+template float TypeCast(const half_float::half &x);
+template int TypeCast(const float &x);
+template float TypeCast(const int &x);
+
 Tensor::~Tensor() {
   if (block_ != nullptr && block_->DecRefCount() == 0) {
     device_->FreeBlock(block_);
@@ -68,7 +72,6 @@
       block_(in.block()),
       shape_(in.shape_),
       stride_(in.stride_) {
-  // printf("i am here in &in\n");
   if (block_ != nullptr) block_->IncRefCount();
 }
 
@@ -77,7 +80,6 @@
       device_(in.device_),
       shape_(std::move(in.shape_)),
       stride_(std::move(in.stride_)) {
-  // printf("i am here in &&in\n");
   block_ = in.block_;
   in.block_ = nullptr;
 }
@@ -119,6 +121,38 @@
     int _SwitchHash =                                                          \
         ((ldtype) << _SwitchShift * 2) + ((rdtype) << _SwitchShift) + (ltype); \
     switch (_SwitchHash) {                                                     \
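+      /* Mixed fp16/fp32 cases for both the Cpp and Cuda backends. */          \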
+      case (((kFloat16) << _SwitchShift * 2) + (kFloat32 << _SwitchShift) +    \
+            kCpp): {                                                           \
+        typedef half_float::half LDType;                                       \
+        typedef float RDType;                                                  \
+        typedef lang::Cpp Lang;                                                \
+        { __VA_ARGS__ }                                                        \
+        break;                                                                 \
+      }                                                                        \
+      case (((kFloat32) << _SwitchShift * 2) + (kFloat16 << _SwitchShift) +    \
+            kCpp): {                                                           \
+        typedef float LDType;                                                  \
+        typedef half_float::half RDType;                                       \
+        typedef lang::Cpp Lang;                                                \
+        { __VA_ARGS__ }                                                        \
+        break;                                                                 \
+      }                                                                        \
+      case (((kFloat16) << _SwitchShift * 2) + (kFloat32 << _SwitchShift) +    \
+            kCuda): {                                                          \
+        typedef half_float::half LDType;                                       \
+        typedef float RDType;                                                  \
+        typedef lang::Cuda Lang;                                               \
+        { __VA_ARGS__ }                                                        \
+        break;                                                                 \
+      }                                                                        \
+      case (((kFloat32) << _SwitchShift * 2) + (kFloat16 << _SwitchShift) +    \
+            kCuda): {                                                          \
+        typedef float LDType;                                                  \
+        typedef half_float::half RDType;                                       \
+        typedef lang::Cuda Lang;                                               \
+        { __VA_ARGS__ }                                                        \
+        break;                                                                 \
+      }                                                                        \
       case (((kFloat32) << _SwitchShift * 2) + (kInt << _SwitchShift) +        \
             kCuda): {                                                          \
         typedef float LDType;                                                  \
@@ -160,12 +194,9 @@
   } while (0)
 
 // return new tensor
-Tensor Tensor::AsType(const DataType type) {
-  CHECK(block() && block()->initialized() == true)
-      << "the data of the tensor needs be initialized before casting to "
-         "another type";
+Tensor Tensor::AsType(const DataType type) const {
   if (data_type_ != type) {
-    Tensor &thisRef = *this;
+    const Tensor &thisRef = *this;
     Tensor ret(shape_, device_, type);
     TYPE_TYPE_LANG_SWITCH(
         data_type_, LDType, type, RDType, device_->lang(), Lang, {
@@ -182,6 +213,18 @@
   }
 }
 
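+// In-place counterpart of AsType(): swaps in the converted block and updates
+// data_type_; a no-op when the tensor already has the requested type.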
+Tensor &Tensor::ToType(const DataType type) {
+  CHECK(block() && block()->initialized() == true)
+      << "the data of the tensor needs be initialized before casting to "
+         "another type";
+  if (data_type_ != type) {
+    auto ret = this->AsType(type);
+    std::swap(ret.block_, block_);
+    data_type_ = type;
+  }
+  return *this;
+}
+
 Tensor &Tensor::ToDevice(std::shared_ptr<Device> dst) {
   // TODO(wangwei) the comparison is restricted. May compare against device ID?
   if (device_ != dst) {
@@ -227,6 +270,9 @@
 template void Tensor::CopyDataFromHostPtr(const unsigned char *src,
                                           const size_t num,
                                           const size_t offset) const;
+template void Tensor::CopyDataFromHostPtr(const half_float::half *src,
+                                          const size_t num,
+                                          const size_t offset) const;
 template void Tensor::CopyDataFromHostPtr(const float *src, const size_t num,
                                           const size_t offset) const;
 template void Tensor::CopyDataFromHostPtr(const int *src, const size_t num,
@@ -235,6 +281,8 @@
 void Tensor::CopyData(const Tensor &src) {
   CHECK_EQ(Size(), src.Size());
   CHECK(block_ != nullptr);
+  CHECK_EQ(src.data_type(), data_type_)
+      << "Cannot copy data between tensors of different data types";
   // Do copy only if the src's block is already initialized.
   if (src.block_ != nullptr) {
     singa::CopyDataToFrom(this, src, Size(), 0, 0);
@@ -675,6 +723,11 @@
 #define TYPE_SWITCH(type, DType, ...)                               \
   do {                                                              \
     switch (type) {                                                 \
+      case kFloat16: {                                              \
+        typedef half_float::half DType;                             \
+        { __VA_ARGS__ }                                             \
+        break;                                                      \
+      }                                                             \
       case kFloat32: {                                              \
         typedef float DType;                                        \
         { __VA_ARGS__ }                                             \
@@ -709,6 +762,18 @@
     const int _SwitchShift = 3;                                \
     int _SwitchHash = ((dtype) << _SwitchShift) + (ltype);     \
     switch (_SwitchHash) {                                     \
+      case ((kFloat16 << _SwitchShift) + kCpp): {              \
+        typedef half_float::half DType;                        \
+        typedef lang::Cpp Lang;                                \
+        { __VA_ARGS__ }                                        \
+        break;                                                 \
+      }                                                        \
+      case ((kFloat16 << _SwitchShift) + kCuda): {             \
+        typedef half_float::half DType;                        \
+        typedef lang::Cuda Lang;                               \
+        { __VA_ARGS__ }                                        \
+        break;                                                 \
+      }                                                        \
       case ((kFloat32 << _SwitchShift) + kCuda): {             \
         typedef float DType;                                   \
         typedef lang::Cuda Lang;                               \
@@ -782,21 +847,21 @@
 
 template <typename SType>
 void Tensor::SetValue(const SType x) {
-  CHECK_EQ(sizeof(SType), SizeOf(data_type_));
   // auto size = Size();
   auto ptr = block_;
 
   TYPE_LANG_SWITCH(data_type_, DType, device_->lang(), Lang, {
-    // TODO(wangwei) cast x to DType
+    DType tmp = TypeCast<SType, DType>(x);
     Tensor &thisRef = *this;
     device_->Exec(
-        [thisRef, x](Context *ctx) mutable {
-          Set<DType, Lang>(x, &thisRef, ctx);
+        [thisRef, tmp](Context *ctx) mutable {
+          Set<DType, Lang>(tmp, &thisRef, ctx);
         },
         {}, {ptr}, "SetValue");
   });
 }
 template void Tensor::SetValue<float>(const float x);
+template void Tensor::SetValue<half_float::half>(const half_float::half x);
 template void Tensor::SetValue<int>(const int x);
 
 template <typename SType>
@@ -809,6 +874,8 @@
   for (size_t i = 0; i < num; i++) value[i] = ptr[i];
 }
 template void Tensor::get_value<float>(float *value, const size_t num) const;
+template void Tensor::get_value<half_float::half>(half_float::half *value,
+                                                  const size_t num) const;
 template void Tensor::get_value<int>(int *value, const size_t num) const;
 
 // DEPRECATED
@@ -958,7 +1025,9 @@
 #define EltwiseBinaryTensorFn(fn, lhs, rhs, ret)                           \
   do {                                                                     \
     TYPE_LANG_SWITCH(lhs.data_type(), DType, lhs.device()->lang(), Lang, { \
-      CHECK_EQ(sizeof(DType), SizeOf(rhs.data_type()));                    \
+      CHECK_EQ(sizeof(DType), SizeOf(rhs.data_type()))                     \
+          << "lhs dtype size" << sizeof(DType) << " rhs dtype size"        \
+          << SizeOf(rhs.data_type());                                      \
       Tensor &retRef = *ret;                                               \
       ret->device()->Exec(                                                 \
           [lhs, rhs, retRef](Context *ctx) mutable {                       \
@@ -968,58 +1037,32 @@
     });                                                                    \
   } while (0)
 
-#define GenBinaryTensorFn(op, fn)                                           \
-  Tensor op(const Tensor &lhs, const Tensor &rhs) {                         \
-    if (lhs.shape() != rhs.shape()) {                                       \
-      if (lhs.data_type() == kFloat32 && rhs.data_type() == kFloat32) {     \
-        auto lhs_ = Broadcast(lhs, rhs.shape());                            \
-        auto rhs_ = Broadcast(rhs, lhs.shape());                            \
-        Tensor ret(lhs_.shape(), lhs.device(), lhs.data_type());            \
-        fn(lhs_, rhs_, &ret);                                               \
-        return ret;                                                         \
-      } else {                                                              \
-        /* lhs tensor and rhs tensor are not both in float, cast to float */\
-        Tensor tmp_lhs = lhs.Clone().AsType(kFloat32);                      \
-        Tensor tmp_rhs = rhs.Clone().AsType(kFloat32);                      \
-        tmp_lhs = Broadcast(tmp_lhs, tmp_rhs.shape());                      \
-        tmp_rhs = Broadcast(tmp_rhs, tmp_lhs.shape());                      \
-        Tensor ret(tmp_lhs.shape(), tmp_lhs.device(), tmp_lhs.data_type()); \
-        fn(tmp_lhs, tmp_rhs, &ret);                                         \
-        /* if lhs and rhs are both int, cast back to int */                 \
-        if (lhs.data_type() == kInt && rhs.data_type() == kInt)             \
-          return ret.Clone().AsType(kInt);                                  \
-        return ret;                                                         \
-      }                                                                     \
-    } else {                                                                \
-      if (lhs.data_type() == kFloat32 && rhs.data_type() == kFloat32) {     \
-        Tensor ret(lhs.shape(), lhs.device(), lhs.data_type());             \
-        fn(lhs, rhs, &ret);                                                 \
-        return ret;                                                         \
-      } else {                                                              \
-        /* lhs tensor and rhs tensor are not both in float, cast to float */\
-        Tensor tmp_lhs = lhs.Clone().AsType(kFloat32);                      \
-        Tensor tmp_rhs = rhs.Clone().AsType(kFloat32);                      \
-        Tensor ret(tmp_lhs.shape(), tmp_lhs.device(), tmp_lhs.data_type()); \
-        fn(tmp_lhs, tmp_rhs, &ret);                                         \
-        /* if lhs and rhs are both int, cast back to int */                 \
-        if (lhs.data_type() == kInt && rhs.data_type() == kInt)             \
-          return ret.Clone().AsType(kInt);                                  \
-        return ret;                                                         \
-      }                                                                     \
-    }                                                                       \
-  }                                                                         \
-  void fn(const Tensor &lhs, const Tensor &rhs, Tensor *ret) {              \
-    CHECK_EQ(lhs.device(), ret->device());                                  \
-    CHECK_EQ(rhs.device(), ret->device());                                  \
-    if (lhs.shape() != rhs.shape()) {                                       \
-      auto lhs_ = Broadcast(lhs, rhs.shape());                              \
-      auto rhs_ = Broadcast(rhs, lhs.shape());                              \
-      CHECK(lhs_.shape() == ret->shape());                                  \
-      EltwiseBinaryTensorFn(fn, lhs_, rhs_, ret);                           \
-    } else {                                                                \
-      CHECK(lhs.shape() == ret->shape());                                   \
-      EltwiseBinaryTensorFn(fn, lhs, rhs, ret);                             \
-    }                                                                       \
+#define GenBinaryTensorFn(op, fn)                              \
+  Tensor op(const Tensor &lhs, const Tensor &rhs) {            \
+    if (lhs.shape() != rhs.shape()) {                          \
+      auto lhs_ = Broadcast(lhs, rhs.shape());                 \
+      auto rhs_ = Broadcast(rhs, lhs.shape());                 \
+      Tensor ret(lhs_.shape(), lhs.device(), lhs.data_type()); \
+      fn(lhs_, rhs_, &ret);                                    \
+      return ret;                                              \
+    } else {                                                   \
+      Tensor ret(lhs.shape(), lhs.device(), lhs.data_type());  \
+      fn(lhs, rhs, &ret);                                      \
+      return ret;                                              \
+    }                                                          \
+  }                                                            \
+  void fn(const Tensor &lhs, const Tensor &rhs, Tensor *ret) { \
+    CHECK_EQ(lhs.device(), ret->device());                     \
+    CHECK_EQ(rhs.device(), ret->device());                     \
+    if (lhs.shape() != rhs.shape()) {                          \
+      auto lhs_ = Broadcast(lhs, rhs.shape());                 \
+      auto rhs_ = Broadcast(rhs, lhs.shape());                 \
+      CHECK(lhs_.shape() == ret->shape());                     \
+      EltwiseBinaryTensorFn(fn, lhs_, rhs_, ret);              \
+    } else {                                                   \
+      CHECK(lhs.shape() == ret->shape());                      \
+      EltwiseBinaryTensorFn(fn, lhs, rhs, ret);                \
+    }                                                          \
   }  // namespace singa
 
 // broadcasting operations:
@@ -1039,39 +1082,23 @@
 #define EltwiseTensorScalarFn(fn, t, x, ret)                            \
   do {                                                                  \
     TYPE_LANG_SWITCH(t.data_type(), DType, t.device()->lang(), Lang, {  \
+      DType tmp_x = TypeCast<SType, DType>(x);                         \
       Tensor &retRef = *ret;                                            \
       ret->device()->Exec(                                              \
-          [t, x, retRef](Context *ctx) mutable {                        \
-            fn<DType, Lang>(t, x, &retRef, ctx);                        \
+          [t, tmp_x, retRef](Context *ctx) mutable {                   \
+            fn<DType, Lang>(t, tmp_x, &retRef, ctx);                   \
           },                                                            \
           {t.block()}, {ret->block()}, #fn);                            \
     });                                                                 \
   } while (0)
 
 #define GenTensorScalarFn(op, fn)                                          \
-  template <typename SType>                                                \
-  Tensor op(const Tensor &in, const SType x) {                             \
-    if (in.data_type() == kFloat32 && std::is_same<SType, float>::value){  \
-      Tensor ret(in.shape(), in.device(), in.data_type());                 \
-      fn(in, x, &ret);                                                     \
-      return ret;                                                          \
-    } else if (in.data_type() == kFloat32) {                               \
-      Tensor ret(in.shape(), in.device(), in.data_type());                 \
-      float tmp_x = x;                                                     \
-      fn(in, tmp_x, &ret);                                                 \
-      return ret;                                                          \
-    } else {                                                               \
-      /* tensor and scalar are not both in float, cast to float */         \
-      Tensor tmp_in = in.Clone().AsType(kFloat32);                         \
-      float tmp_x = x;                                                     \
-      Tensor ret(tmp_in.shape(), tmp_in.device(), tmp_in.data_type());     \
-      fn(tmp_in, tmp_x, &ret);                                             \
-      /* if tensor and scalar are both int, cast back to int */            \
-      if (in.data_type() == kInt && std::is_same<SType, int>::value)       \
-        return ret.Clone().AsType(kInt);                                   \
-      return ret;                                                          \
-    }                                                                      \
-  }                                                                        \
+  template <typename SType>                                   \
+  Tensor op(const Tensor &in, const SType x) {                \
+    Tensor ret(in.shape(), in.device(), in.data_type());      \
+    fn(in, x, &ret);                                          \
+    return ret;                                               \
+  }                                                            \
   template <typename SType>                                                \
   void fn(const Tensor &in, const SType x, Tensor *ret) {                  \
     EltwiseTensorScalarFn(fn, in, x, ret);                                 \
@@ -1103,11 +1130,11 @@
   CheckDataTypeAndLang(in, *out);
   CHECK(in.shape() == out->shape());
   TYPE_LANG_SWITCH(in.data_type(), DType, in.device()->lang(), Lang, {
-    // TODO(wangwei) type cast SType to DType;
+    DType tmp_alpha = TypeCast<SType, DType>(alpha);
     Tensor &outRef = *out;
     in.device()->Exec(
-        [alpha, in, outRef](Context *ctx) mutable {
-          Div<DType, Lang>(alpha, in, &outRef, ctx);
+        [tmp_alpha, in, outRef](Context *ctx) mutable {
+          Div<DType, Lang>(tmp_alpha, in, &outRef, ctx);
         },
         {in.block()}, {out->block()}, "Div");
   });
@@ -1532,23 +1559,15 @@
 template void Axpy<float>(const float alpha, const Tensor &in, Tensor *out);
 
 void Axpy(const Tensor &alpha, const Tensor &in, Tensor *out) {
-  TYPE_SWITCH(alpha.data_type(), SType, {
     TYPE_LANG_SWITCH(in.data_type(), DType, in.device()->lang(), Lang, {
       Tensor fake(*out);
       Tensor &outRef = *out;
       out->device()->Exec(
           [alpha, in, outRef, fake](Context *ctx) mutable {
-            Tensor alphaHost = alpha.Clone(defaultDevice);
-            // synchronize the stream to wait for the data transfer to complete
-            alpha.device()->Sync();
-            const SType value =
-                static_cast<const SType *>(alphaHost.block()->data())[0];
-            auto a = TypeCast<SType, DType>(value);
-            Axpy<DType, Lang>(a, in, &outRef, ctx);
+            Axpy<DType, Lang>(alpha, in, &outRef, ctx);
           },
           {alpha.block(), in.block(), out->block()}, {out->block()}, "Axpy");
     });
-  });
 }
 
 Tensor Mult(const Tensor &A, const Tensor &B) {
@@ -1669,9 +1688,8 @@
     p.device()->Exec(
         [batchsize, dim, t, p, lossRef](Context *ctx) mutable {
           bool int_target = t.Size() == batchsize;
-          ComputeCrossEntropy<DType, Lang>(int_target, batchsize, dim,
-                                           p.block(), t.block(),
-                                           lossRef.block(), ctx);
+          ComputeCrossEntropy<DType, Lang>(int_target, batchsize, dim, p, t,
+                                           &lossRef, ctx);
         },
         {p.block(), t.block()}, {loss->block()}, "ComputeCrossEntropy");
   });
@@ -1687,11 +1705,10 @@
     Tensor &pRef = *p;
     Tensor pFake(*p);  // just add a ref count
     p->device()->Exec(
-        [batchsize, dim, t, pRef, pFake](Context *ctx) mutable {
+        [batchsize, dim, t, pRef, pFake, p](Context *ctx) mutable {
           bool int_target = t.Size() == batchsize;
-          SoftmaxCrossEntropyBwd<DType, Lang>(int_target, batchsize, dim,
-                                              pRef.block(), t.block(),
-                                              pRef.block(), ctx);
+          SoftmaxCrossEntropyBwd<DType, Lang>(int_target, batchsize, dim, pRef,
+                                              t, &pRef, ctx);
         },
         {p->block(), t.block()}, {p->block()}, "SoftmaxCrossEntropyBackward");
   });
diff --git a/src/core/tensor/tensor_math.h b/src/core/tensor/tensor_math.h
index 3236e7c..2e6a08a 100644
--- a/src/core/tensor/tensor_math.h
+++ b/src/core/tensor/tensor_math.h
@@ -355,13 +355,13 @@
 // The random generator should be extracted from ctx.
 // If DType is not float, then convert the mean and std to DType
 template <typename DType, typename Lang>
-void Gaussian(const float mean, const float std, Tensor *out, Context *ctx) {
+void Gaussian(const DType mean, const DType std, Tensor *out, Context *ctx) {
   LOG(FATAL) << "Gaussian Not Implemented";
 }
 // The random generator should be extracted from ctx.
 // If DType is not float, then convert the low and high to DType
 template <typename DType, typename Lang>
-void Uniform(const float low, const float high, Tensor *out, Context *ctx) {
+void Uniform(const DType low, const DType high, Tensor *out, Context *ctx) {
   LOG(FATAL) << "Uniform Not Implemented";
 }
 
@@ -392,6 +392,12 @@
   LOG(FATAL) << "Axpy Not Implemented";
 }
 
+/// out = alpha * in + out
+template <typename DType, typename Lang>
+void Axpy(const Tensor &alpha, const Tensor &in, Tensor *out, Context *ctx) {
+  LOG(FATAL) << "Axpy Not Implemented";
+}
+
 /// out = ||in||_2^2, i.e, L2 norm.
 template <typename DType, typename Lang>
 void Nrm2(const Tensor &in, float *out, Context *ctx) {
@@ -458,15 +464,15 @@
 // yisen todo
 template <typename DType, typename Lang>
 void ComputeCrossEntropy(bool int_target, const size_t batchsize,
-                         const size_t dim, const Block *p, const Block *t,
-                         Block *loss, Context *ctx) {
+                         const size_t dim, const Tensor &p, const Tensor &t,
+                         Tensor *loss, Context *ctx) {
   LOG(FATAL) << "Not Implemented";
 }
 
 template <typename DType, typename Lang>
 void SoftmaxCrossEntropyBwd(bool int_target, const size_t batchsize,
-                            const size_t dim, const Block *p, const Block *t,
-                            Block *grad, Context *ctx) {
+                            const size_t dim, const Tensor &p, const Tensor &t,
+                            Tensor *grad, Context *ctx) {
   LOG(FATAL) << "Not Implemented";
 }
 
diff --git a/src/core/tensor/tensor_math_cpp.h b/src/core/tensor/tensor_math_cpp.h
index 5be46c6..2c06f63 100644
--- a/src/core/tensor/tensor_math_cpp.h
+++ b/src/core/tensor/tensor_math_cpp.h
@@ -246,6 +246,26 @@
 }
 
 template <>
+void CastCopy<float, half_float::half, lang::Cpp>(const Tensor *src,
+                                                  Tensor *dst, Context *ctx) {
+  half_float::half *dst_array =
+      static_cast<half_float::half *>(dst->block()->mutable_data());
+  const float *src_array = static_cast<const float *>(src->block()->data());
+  for (int i = 0; i < dst->Size(); ++i)
+    dst_array[i] = static_cast<half_float::half>(src_array[i]);
+}
+
+template <>
+void CastCopy<half_float::half, float, lang::Cpp>(const Tensor *src,
+                                                  Tensor *dst, Context *ctx) {
+  float *dst_array = static_cast<float *>(dst->block()->mutable_data());
+  const half_float::half *src_array =
+      static_cast<const half_float::half *>(src->block()->data());
+  for (int i = 0; i < dst->Size(); ++i)
+    dst_array[i] = static_cast<float>(src_array[i]);
+}
+
+template <>
 void CastCopy<float, int, lang::Cpp>(const Tensor *src, Tensor *dst,
                                      Context *ctx) {
   int *dst_array = static_cast<int *>(dst->block()->mutable_data());
@@ -279,9 +299,9 @@
 template <>
 void RoundE<float, lang::Cpp>(const Tensor &in, Tensor *out, Context *ctx) {
   traverse_unary<float>(in, out, [](float x) {
-    float doub = x*2;
+    float doub = x * 2;
     if (ceilf(doub) == doub) {
-      return std::round(x/2)*2;
+      return std::round(x / 2) * 2;
     } else {
       return std::round(x);
     }
@@ -339,8 +359,9 @@
 #else
 // native Softmax without DNNL
 template <>
-void SoftMax<float, lang::Cpp>(const Tensor &in, Tensor *out, Context* ctx) {
-  CHECK_LE(in.nDim(), 2u) << "Axis is required for SoftMax on multi dimemsional tensor";
+void SoftMax<float, lang::Cpp>(const Tensor &in, Tensor *out, Context *ctx) {
+  CHECK_LE(in.nDim(), 2u)
+      << "Axis is required for SoftMax on multi-dimensional tensor";
   out->CopyData(in);
   size_t nrow = 1, ncol = in.Size(), size = ncol;
   if (in.nDim() == 2u) {
@@ -450,7 +471,7 @@
 
 template <>
 void GE<int, lang::Cpp>(const Tensor &in1, const Tensor &in2, Tensor *out,
-                          Context *ctx) {
+                        Context *ctx) {
   auto ge_lambda_binary = [](int a, int b) { return (a >= b) ? 1.f : 0.f; };
   traverse_binary<int>(in1, in2, out, ge_lambda_binary);
 }
@@ -471,7 +492,7 @@
 
 template <>
 void GT<int, lang::Cpp>(const Tensor &in1, const Tensor &in2, Tensor *out,
-                          Context *ctx) {
+                        Context *ctx) {
   auto gt_lambda_binary = [](int a, int b) { return (a > b) ? 1.f : 0.f; };
   traverse_binary<int>(in1, in2, out, gt_lambda_binary);
 }
@@ -492,7 +513,7 @@
 
 template <>
 void LE<int, lang::Cpp>(const Tensor &in1, const Tensor &in2, Tensor *out,
-                          Context *ctx) {
+                        Context *ctx) {
   auto le_lambda_binary = [](int a, int b) { return (a <= b) ? 1.f : 0.f; };
   traverse_binary<int>(in1, in2, out, le_lambda_binary);
 }
@@ -522,7 +543,7 @@
 
 template <>
 void LT<int, lang::Cpp>(const Tensor &in1, const Tensor &in2, Tensor *out,
-                          Context *ctx) {
+                        Context *ctx) {
   auto lt_lambda_binary = [](int a, int b) { return (a < b) ? 1.f : 0.f; };
   traverse_binary<int>(in1, in2, out, lt_lambda_binary);
 }
@@ -543,7 +564,7 @@
 
 template <>
 void EQ<int, lang::Cpp>(const Tensor &in1, const Tensor &in2, Tensor *out,
-                          Context *ctx) {
+                        Context *ctx) {
   auto eq_lambda_binary = [](int a, int b) { return (a == b) ? 1.f : 0.f; };
   traverse_binary<int>(in1, in2, out, eq_lambda_binary);
 }
@@ -580,6 +601,14 @@
 }
 
 template <>
+void Set<half_float::half, lang::Cpp>(const half_float::half x, Tensor *out,
+                                      Context *ctx) {
+  half_float::half *outPtr =
+      static_cast<half_float::half *>(out->block()->mutable_data());
+  for (size_t i = 0; i < out->Size(); i++) outPtr[i] = x;
+}
+
+template <>
 void Sigmoid<float, lang::Cpp>(const Tensor &in, Tensor *out, Context *ctx) {
   auto sigmoid_lambda = [](float a) { return 1.f / (1.f + exp(-a)); };
   traverse_unary<float>(in, out, sigmoid_lambda);
@@ -665,6 +694,13 @@
 }
 
 template <>
+void Transform<half_float::half, lang::Cpp>(const Tensor &in, Tensor *out,
+                                            Context *ctx) {
+  auto identity = [](half_float::half a) { return a; };
+  traverse_unary<half_float::half>(in, out, identity);
+}
+
+template <>
 void Bernoulli<float, lang::Cpp>(const float p, Tensor *out, Context *ctx) {
   std::bernoulli_distribution distribution(p);
   float *outPtr = static_cast<float *>(out->block()->mutable_data());
@@ -684,6 +720,16 @@
 }
 
 template <>
+void Gaussian<half_float::half, lang::Cpp>(const half_float::half mean,
+                                           const half_float::half std,
+                                           Tensor *out, Context *ctx) {
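+  // sample in fp32 via the float implementation, then cast the result down to
+  // fp16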
+  Tensor tmp(out->shape(), out->device(), kFloat32);
+  Gaussian<float, lang::Cpp>(static_cast<float>(mean), static_cast<float>(std),
+                             &tmp, ctx);
+  CastCopy<float, half_float::half, lang::Cpp>(&tmp, out, ctx);
+}
+
+template <>
 void Uniform<float, lang::Cpp>(const float low, const float high, Tensor *out,
                                Context *ctx) {
   std::uniform_real_distribution<float> distribution(low, high);
@@ -1021,12 +1067,12 @@
 template <>
 void ComputeCrossEntropy<float, lang::Cpp>(bool int_target,
                                            const size_t batchsize,
-                                           const size_t dim, const Block *p,
-                                           const Block *t, Block *loss,
+                                           const size_t dim, const Tensor &p,
+                                           const Tensor &t, Tensor *loss,
                                            Context *ctx) {
-  const float *pPtr = static_cast<const float *>(p->data());
-  const int *tPtr = static_cast<const int *>(t->data());
-  float *lossPtr = static_cast<float *>(loss->mutable_data());
+  const float *pPtr = static_cast<const float *>(p.block()->data());
+  const int *tPtr = static_cast<const int *>(t.block()->data());
+  float *lossPtr = static_cast<float *>(loss->block()->mutable_data());
   if (int_target) {
     for (size_t i = 0; i < batchsize; i++) {
       int truth_idx = tPtr[i];
@@ -1053,13 +1099,14 @@
 template <>
 void SoftmaxCrossEntropyBwd<float, lang::Cpp>(bool int_target,
                                               const size_t batchsize,
-                                              const size_t dim, const Block *p,
-                                              const Block *t, Block *grad,
+                                              const size_t dim, const Tensor &p,
+                                              const Tensor &t, Tensor *grad,
                                               Context *ctx) {
-  CHECK_EQ(p, grad) << "Use the same pointer to optimize performance";
+  CHECK_EQ(p.block(), grad->block())
+      << "Use the same pointer to optimize performance";
   // const float* pPtr = static_cast<const float*>(p->data());
-  const int *tPtr = static_cast<const int *>(t->data());
-  float *gradPtr = static_cast<float *>(grad->mutable_data());
+  const int *tPtr = static_cast<const int *>(t.block()->data());
+  float *gradPtr = static_cast<float *>(grad->block()->mutable_data());
 
   if (int_target) {
     for (size_t i = 0; i < batchsize; i++) {
diff --git a/src/core/tensor/tensor_math_cuda.h b/src/core/tensor/tensor_math_cuda.h
index b3ff100..bc46bb2 100644
--- a/src/core/tensor/tensor_math_cuda.h
+++ b/src/core/tensor/tensor_math_cuda.h
@@ -24,6 +24,7 @@
 #include <cuda_runtime.h>
 #include <cudnn.h>
 
+#include "../../model/layer/cudnn_utils.h"
 #include "./math_kernel.h"
 #include "./tensor_math.h"
 #include "singa/core/common.h"
@@ -137,9 +138,9 @@
   // LOG(INFO) << vec2str(shape);
   // LOG(INFO) << vec2str(stride);
   // LOG(INFO) << "";
-  check_cudnn(cudnnSetTensorNdDescriptor(x_desc, CUDNN_DATA_FLOAT,
-                                         generate_dim_cuda(y), shape.data(),
-                                         stride.data()));
+  check_cudnn(cudnnSetTensorNdDescriptor(
+      x_desc, GetCudnnDataType(x.data_type()), generate_dim_cuda(y),
+      shape.data(), stride.data()));
 
   return x_desc;
 }
@@ -189,6 +190,24 @@
 }
 
 template <>
+void CastCopy<float, half_float::half, lang::Cuda>(const Tensor* src,
+                                                   Tensor* dst, Context* ctx) {
+  /* cpp half is for labeling only, cuda requires __half */
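+  /* the conversion kernel runs on ctx->stream, so the cast is asynchronous
+     like the other CUDA math ops */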
+  const float* srcPtr = static_cast<const float*>(src->block()->data());
+  __half* dstPtr = static_cast<__half*>(dst->block()->mutable_data());
+  cuda::float2half(dst->Size(), srcPtr, dstPtr, ctx->stream);
+}
+
+template <>
+void CastCopy<half_float::half, float, lang::Cuda>(const Tensor* src,
+                                                   Tensor* dst, Context* ctx) {
+  /* cpp half is for labeling only, cuda requires __half */
+  const __half* srcPtr = static_cast<const __half*>(src->block()->data());
+  float* dstPtr = static_cast<float*>(dst->block()->mutable_data());
+  cuda::half2float(dst->Size(), srcPtr, dstPtr, ctx->stream);
+}
+
+template <>
 void Set<float, lang::Cuda>(const float x, Tensor* out, Context* ctx) {
   float* outPtr = static_cast<float*>(out->block()->mutable_data());
 
@@ -197,6 +216,13 @@
 }
 
 template <>
+void Set<half_float::half, lang::Cuda>(const half_float::half x, Tensor* out,
+                                       Context* ctx) {
+  vector<half_float::half> data_src(out->Size(), x);
+  out->CopyDataFromHostPtr(data_src.data(), out->Size(), 0);
+}
+
+template <>
 void Add<float, lang::Cuda>(const Tensor& in, const float x, Tensor* out,
                             Context* ctx) {
   Set<float, lang::Cuda>(x, out, ctx);
@@ -231,84 +257,117 @@
 }
 template void TraverseUnaryTransformImpl<float>(const Tensor& in1,
                                                 Tensor* in1Bc, Context* ctx);
+template void TraverseUnaryTransformImpl<__half>(const Tensor& in1,
+                                                 Tensor* in1Bc, Context* ctx);
+
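+// shared body for the Transform specializations below: broadcasted inputs are
+// expanded element by element, otherwise cudnnTransformTensor performs the
+// (possibly strided) copy using descriptors generated from the tensors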
+template <typename T>
+void TransformImpl(const Tensor& in, Tensor* out, Context* ctx) {
+  if (in.broadcasted()) {
+    TraverseUnaryTransformImpl<T>(in, out, ctx);
+  } else {
+    const void* inPtr = in.block()->data();
+    void* outPtr = out->block()->mutable_data();
+
+    float alpha = 1.0;
+    float beta = 0.0;
+
+    check_cudnn(cudnnTransformTensor(
+        ctx->cudnn_handle, (void*)(&alpha), generate_tensor_nd_desc(in), inPtr,
+        (void*)(&beta), generate_tensor_nd_desc(*out), outPtr));
+  }
+}
+template void TransformImpl<__half>(const Tensor& in, Tensor* out,
+                                    Context* ctx);
+template void TransformImpl<float>(const Tensor& in, Tensor* out, Context* ctx);
+
+template <>
+void Transform<half_float::half, lang::Cuda>(const Tensor& in, Tensor* out,
+                                             Context* ctx) {
+  TransformImpl<__half>(in, out, ctx);
+}
+
+template <>
+void Transform<__half, lang::Cuda>(const Tensor& in, Tensor* out,
+                                   Context* ctx) {
+  TransformImpl<__half>(in, out, ctx);
+}
 
 template <>
 void Transform<float, lang::Cuda>(const Tensor& in, Tensor* out, Context* ctx) {
-  if (in.broadcasted()) {
-    TraverseUnaryTransformImpl<float>(in, out, ctx);
-    return;
-  }
-
-  const float* inPtr = static_cast<const float*>(in.block()->data());
-  float* outPtr = static_cast<float*>(out->block()->mutable_data());
-
-  float alpha = 1.0;
-  float beta = 0.0;
-
-  check_cudnn(cudnnTransformTensor(
-      ctx->cudnn_handle, (void*)(&alpha), generate_tensor_nd_desc(in), inPtr,
-      (void*)(&beta), generate_tensor_nd_desc(*out), outPtr));
+  TransformImpl<float>(in, out, ctx);
 }
 
 /// add sub div mul pow on two tensors
-#define GenBinaryMathFn(fn, kernel)                                       \
-  template <>                                                             \
-  void fn<float, lang::Cuda>(const Tensor& in1, const Tensor& in2,        \
-                             Tensor* out, Context* ctx) {                 \
-    const float* inPtr1 = static_cast<const float*>(in1.block()->data()); \
-    const float* inPtr2 = static_cast<const float*>(in2.block()->data()); \
-    float* outPtr = static_cast<float*>(out->block()->mutable_data());    \
-    const size_t num = out->Size();                                       \
-                                                                          \
-    if (!in1.broadcasted() && !in2.broadcasted()) {                       \
-      if (!in1.transpose() && !in2.transpose() &&                         \
-          (in1.stride() == in2.stride())) {                               \
-        kernel(num, inPtr1, inPtr2, outPtr, ctx->stream);                 \
-      } else {                                                            \
-        if (in1.transpose() && in2.transpose()) {                         \
-          Tensor t(in1.shape(), in1.device(), in1.data_type());           \
-          Transform<float, lang::Cuda>(in1, &t, ctx);                     \
-          Transform<float, lang::Cuda>(in2, out, ctx);                    \
-                                                                          \
-          float* tPtr = static_cast<float*>(t.block()->mutable_data());   \
-          kernel(num, tPtr, outPtr, outPtr, ctx->stream);                 \
-        } else if (in1.transpose()) {                                     \
-          Transform<float, lang::Cuda>(in1, out, ctx);                    \
-          kernel(num, outPtr, inPtr2, outPtr, ctx->stream);               \
-        } else if (in2.transpose()) {                                     \
-          Transform<float, lang::Cuda>(in2, out, ctx);                    \
-          kernel(num, inPtr1, outPtr, outPtr, ctx->stream);               \
-        }                                                                 \
-      }                                                                   \
-    } else {                                                              \
-      Tensor in1bc;                                                       \
-      Tensor in2bc;                                                       \
-      if (in1.broadcasted()) {                                            \
-        in1bc = Tensor(in1.shape(), in1.device(), in1.data_type());       \
-        Transform<float, lang::Cuda>(in1, &in1bc, ctx);                   \
-        inPtr1 = static_cast<const float*>(in1bc.block()->data());        \
-      }                                                                   \
-                                                                          \
-      if (in2.broadcasted()) {                                            \
-        in2bc = Tensor(in2.shape(), in2.device(), in2.data_type());       \
-        Transform<float, lang::Cuda>(in2, &in2bc, ctx);                   \
-        inPtr2 = static_cast<const float*>(in2bc.block()->data());        \
-      }                                                                   \
-                                                                          \
-      kernel(num, inPtr1, inPtr2, outPtr, ctx->stream);                   \
-    }                                                                     \
+#define GenBinaryMathFn(fn, fn_impl, kernel)                                  \
+  template <typename T>                                                       \
+  void fn_impl(const Tensor& in1, const Tensor& in2, Tensor* out,             \
+               Context* ctx) {                                                \
+    const T* inPtr1 = static_cast<const T*>(in1.block()->data());             \
+    const T* inPtr2 = static_cast<const T*>(in2.block()->data());             \
+    T* outPtr = static_cast<T*>(out->block()->mutable_data());                \
+    const size_t num = out->Size();                                           \
+                                                                              \
+    if (!in1.broadcasted() && !in2.broadcasted()) {                           \
+      if (!in1.transpose() && !in2.transpose() &&                             \
+          (in1.stride() == in2.stride())) {                                   \
+        kernel(num, inPtr1, inPtr2, outPtr, ctx->stream);                     \
+      } else {                                                                \
+        if (in1.transpose() && in2.transpose()) {                             \
+          Tensor t(in1.shape(), in1.device(), in1.data_type());               \
+          Transform<T, lang::Cuda>(in1, &t, ctx);                             \
+          Transform<T, lang::Cuda>(in2, out, ctx);                            \
+                                                                              \
+          T* tPtr = static_cast<T*>(t.block()->mutable_data());               \
+          kernel(num, tPtr, outPtr, outPtr, ctx->stream);                     \
+        } else if (in1.transpose()) {                                         \
+          Transform<T, lang::Cuda>(in1, out, ctx);                            \
+          kernel(num, outPtr, inPtr2, outPtr, ctx->stream);                   \
+        } else if (in2.transpose()) {                                         \
+          Transform<T, lang::Cuda>(in2, out, ctx);                            \
+          kernel(num, inPtr1, outPtr, outPtr, ctx->stream);                   \
+        }                                                                     \
+      }                                                                       \
+    } else {                                                                  \
+      Tensor in1bc, in2bc;                                                    \
+      if (in1.broadcasted()) {                                                \
+        in1bc = Tensor(in1.shape(), in1.device(), in1.data_type());           \
+        Transform<T, lang::Cuda>(in1, &in1bc, ctx);                           \
+        inPtr1 = static_cast<const T*>(in1bc.block()->data());                \
+      }                                                                       \
+      if (in2.broadcasted()) {                                                \
+        in2bc = Tensor(in2.shape(), in2.device(), in2.data_type());           \
+        Transform<T, lang::Cuda>(in2, &in2bc, ctx);                           \
+        inPtr2 = static_cast<const T*>(in2bc.block()->data());                \
+      }                                                                       \
+      kernel(num, inPtr1, inPtr2, outPtr, ctx->stream);                       \
+    }                                                                         \
+  }                                                                           \
+  template void fn_impl<__half>(const Tensor& in1, const Tensor& in2,         \
+                                Tensor* out, Context* ctx);                   \
+  template void fn_impl<float>(const Tensor& in1, const Tensor& in2,          \
+                               Tensor* out, Context* ctx);                    \
+                                                                              \
+  template <>                                                                 \
+  void fn<float, lang::Cuda>(const Tensor& in1, const Tensor& in2,            \
+                             Tensor* out, Context* ctx) {                     \
+    fn_impl<float>(in1, in2, out, ctx);                                       \
+  }                                                                           \
+  template <>                                                                 \
+  void fn<half_float::half, lang::Cuda>(const Tensor& in1, const Tensor& in2, \
+                                        Tensor* out, Context* ctx) {          \
+    fn_impl<__half>(in1, in2, out, ctx);                                      \
   }
 
 /// out = in1 * in2
-GenBinaryMathFn(EltwiseMult, cuda::mult);
+GenBinaryMathFn(EltwiseMult, EltwiseMultImpl, cuda::mult);
 /// out = in1 + in2
-GenBinaryMathFn(Add, cuda::add);
+GenBinaryMathFn(Add, AddImpl, cuda::add);
 /// out = in1 - in2
-GenBinaryMathFn(Sub, cuda::sub);
+GenBinaryMathFn(Sub, SubImpl, cuda::sub);
 /// out = in1 / in2
-GenBinaryMathFn(Div, cuda::div);
+GenBinaryMathFn(Div, DivImpl, cuda::div);
 /// out = in1 ^ in2
-GenBinaryMathFn(Pow, cuda::pow);
+GenBinaryMathFn(Pow, PowImpl, cuda::pow);
 
 /// Element-wise operation, clamp every element into [low, high]
 /// if x>high, then x=high; if x<low, then x=low.
@@ -352,6 +411,16 @@
   cuda::mult(num, inPtr, x, outPtr, ctx->stream);
 }
 
+template <>
+void EltwiseMult<half_float::half, lang::Cuda>(const Tensor& in,
+                                               const half_float::half x,
+                                               Tensor* out, Context* ctx) {
+  const __half* inPtr = static_cast<const __half*>(in.block()->data());
+  __half* outPtr = static_cast<__half*>(out->block()->mutable_data());
+  const size_t num = in.Size();
+  cuda::mult(num, inPtr, static_cast<__half>(x), outPtr, ctx->stream);
+}
+
 /// Base is e. out[i]=e^in[i]
 template <>
 void Exp<float, lang::Cuda>(const Tensor& in, Tensor* out, Context* ctx) {
@@ -592,6 +661,16 @@
   const size_t num = in1.Size();
   cuda::relubackward(num, in1Ptr, in2Ptr, outPtr, ctx->stream);
 }
+template <>
+void ReLUBackward<half_float::half, lang::Cuda>(const Tensor& in1,
+                                                const Tensor& in2, Tensor* out,
+                                                Context* ctx) {
+  const __half* in1Ptr = static_cast<const __half*>(in1.block()->data());
+  const __half* in2Ptr = static_cast<const __half*>(in2.block()->data());
+  __half* outPtr = static_cast<__half*>(out->block()->mutable_data());
+  const size_t num = in1.Size();
+  cuda::relubackward(num, in1Ptr, in2Ptr, outPtr, ctx->stream);
+}
 
 /// Element-wise operation, out[i]=max(0, in[i])
 // template <>
@@ -639,6 +718,20 @@
     cuda::relu(num, outPtr, outPtr, ctx->stream);
   }
 }
+template <>
+void ReLU<half_float::half, lang::Cuda>(const Tensor& in, Tensor* out,
+                                        Context* ctx) {
+  const __half* inPtr = static_cast<const __half*>(in.block()->data());
+  __half* outPtr = static_cast<__half*>(out->block()->mutable_data());
+  const size_t num = in.Size();
+
+  if (in.stride() == out->stride()) {
+    cuda::relu(num, inPtr, outPtr, ctx->stream);
+  } else {  // otherwise transform in into out first, then apply relu in place
+    Transform<half_float::half, lang::Cuda>(in, out, ctx);
+    cuda::relu(num, outPtr, outPtr, ctx->stream);
+  }
+}
 
 // /// Element-wise operation, out[i]=sigmoid([in[i])
 // template <>
@@ -863,6 +956,15 @@
   cuda::mult(num, outPtr, high - low, outPtr, ctx->stream);
   cuda::add(num, outPtr, low, outPtr, ctx->stream);
 }
+template <>
+void Uniform<half_float::half, lang::Cuda>(const half_float::half low,
+                                           const half_float::half high,
+                                           Tensor* out, Context* ctx) {
+  Tensor tmp(out->shape(), out->device(), kFloat32);
+  Uniform<float, lang::Cuda>(static_cast<float>(low), static_cast<float>(high),
+                             &tmp, ctx);
+  CastCopy<float, half_float::half, lang::Cuda>(&tmp, out, ctx);
+}
 
 // The random generator should be extracted from ctx.
 // If DType is not float, then convert the mean and delta to DType
@@ -883,6 +985,15 @@
     CURAND_CHECK(curandGenerateNormal(rgen, outPtr, num, mean, std));
   }
 }
+template <>
+void Gaussian<half_float::half, lang::Cuda>(const half_float::half mean,
+                                            const half_float::half std,
+                                            Tensor* out, Context* ctx) {
+  Tensor tmp(out->shape(), out->device(), kFloat32);
+  Gaussian<float, lang::Cuda>(static_cast<float>(mean), static_cast<float>(std),
+                              &tmp, ctx);
+  CastCopy<float, half_float::half, lang::Cuda>(&tmp, out, ctx);
+}
 
 // =========================Blas operations==================================
 // ref to http://docs.nvidia.com/cuda/cublas
@@ -927,6 +1038,38 @@
   CUBLAS_CHECK(cublasSaxpy(handle, num, &alpha, inPtr, 1, outPtr, 1));
 }
 
+/// out = alpha * in + out
+template <>
+void Axpy<float, lang::Cuda>(const Tensor& alpha, const Tensor& in,
+                             Tensor* out, Context* ctx) {
+  auto handle = ctx->cublas_handle;
+  const size_t num = in.Size();
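+  // alpha resides in device memory, so switch cuBLAS to device pointer mode
+  // for this call and restore host pointer mode afterwards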
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
+  CUBLAS_CHECK(cublasAxpyEx(handle, num, alpha.block()->data(), CUDA_R_32F,
+                            in.block()->data(), CUDA_R_32F, 1,
+                            out->block()->mutable_data(), CUDA_R_32F, 1,
+                            CUDA_R_32F));
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
+}
+
+template <>
+void Axpy<half_float::half, lang::Cuda>(const Tensor& alpha, const Tensor& in,
+                                        Tensor* out, Context* ctx) {
+  auto handle = ctx->cublas_handle;
+  const size_t num = in.Size();
+
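+  // cast the fp16 alpha tensor to fp32 to match the fp32 compute type used in
+  // the cublasAxpyEx call below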
+  auto _alpha = alpha.AsType(kFloat32);
+
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
+  CUBLAS_CHECK(cublasAxpyEx(handle, num, _alpha.block()->data(), CUDA_R_32F,
+                            in.block()->data(), CUDA_R_16F, 1,
+                            out->block()->mutable_data(), CUDA_R_16F, 1,
+                            CUDA_R_32F));
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
+}
+
+template <>
+void Axpy<half_float::half, lang::Cuda>(const half_float::half alpha,
+                                        const Tensor& in, Tensor* out,
+                                        Context* ctx) {
+  auto handle = ctx->cublas_handle;
+  const size_t num = in.Size();
+  const float _alpha = static_cast<float>(alpha);
+  CUBLAS_CHECK(cublasAxpyEx(handle, num, &_alpha, CUDA_R_32F,
+                            in.block()->data(), CUDA_R_16F, 1,
+                            out->block()->mutable_data(), CUDA_R_16F, 1,
+                            CUDA_R_32F));
+}
+
 /// out = \sum_i in1[i] * in2[i]
 template <>
 void Dot<float, lang::Cuda>(const Tensor& in1, const Tensor& in2, float* out,
@@ -1002,6 +1145,62 @@
                              1, &beta, outPtr, 1));
 }
 
+template <>
+void GEMV<half_float::half, lang::Cuda>(const half_float::half alpha,
+                                        const Tensor& A, const Tensor& v,
+                                        const half_float::half beta,
+                                        Tensor* out, Context* ctx) {
+  // Fp16 not supported
+  // https://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-gemv
+  auto _A = A.AsType(kFloat32);
+  auto _v = v.AsType(kFloat32);
+  Tensor _out = Tensor(out->shape(), out->device(), kFloat32);
+  GEMV<float, lang::Cuda>(static_cast<float>(alpha), _A, _v,
+                          static_cast<float>(beta), &_out, ctx);
+  CastCopy<float, half_float::half, lang::Cuda>(&_out, out, ctx);
+}
+
+template <>
+void GEMM<half_float::half, lang::Cuda>(const half_float::half alpha,
+                                        const Tensor& A, const Tensor& B,
+                                        const half_float::half beta, Tensor* C,
+                                        Context* ctx) {
+  auto transA = A.transpose();
+  auto transa = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
+  auto transB = B.transpose();
+  auto transb = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
+  const size_t nrowA = A.shape()[0];
+  const size_t ncolA = A.shape()[1];
+  const size_t ncolB = B.shape()[1];
+  int lda = transA ? nrowA : ncolA;
+  int ldb = transB ? ncolA : ncolB;
+  int ldc = ncolB;
+  const __half* APtr = static_cast<const __half*>(A.block()->data());
+  const __half* BPtr = static_cast<const __half*>(B.block()->data());
+  __half* CPtr = static_cast<__half*>(C->block()->mutable_data());
+  const __half* alphaPtr =
+      static_cast<const __half*>(static_cast<const void*>(&alpha));
+  const __half* betaPtr =
+      static_cast<const __half*>(static_cast<const void*>(&beta));
+  auto handle = ctx->cublas_handle;  // TODO(wangwei) set cudastream
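+  // SINGA tensors are row-major while cuBLAS expects column-major data, so
+  // compute C^T = B^T * A^T by passing B before A (as in the fp32 GEMM)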
+  CUBLAS_CHECK(cublasHgemm(handle, transb, transa, ncolB, nrowA, ncolA,
+                           alphaPtr, BPtr, ldb, APtr, lda, betaPtr, CPtr, ldc));
+}
+
+template <>
+void Dot<half_float::half, lang::Cuda>(const Tensor& in1, const Tensor& in2,
+                                       Tensor* out, Context* ctx) {
+  const __half* inPtr1 = static_cast<const __half*>(in1.block()->data());
+  const __half* inPtr2 = static_cast<const __half*>(in2.block()->data());
+  __half* outPtr = static_cast<__half*>(out->block()->mutable_data());
+  auto handle = ctx->cublas_handle;
+  const size_t num = in1.Size();
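+  // the result is written directly to device memory (outPtr), hence device
+  // pointer mode; the reduction itself is carried out in fp32 (CUDA_R_32F)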
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
+  CUBLAS_CHECK(cublasDotEx(handle, num, inPtr1, CUDA_R_16F, 1, inPtr2,
+                           CUDA_R_16F, 1, outPtr, CUDA_R_16F, CUDA_R_32F));
+  CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
+}
+
 // http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm
 template <>
 void GEMM<float, lang::Cuda>(const float alpha, const Tensor& A,
@@ -1104,6 +1303,39 @@
                                   inPtr, (void*)(&beta),
                                   generate_tensor_nd_desc(tmp), outPtr));
 }
+template <>
+void SoftMax<half_float::half, lang::Cuda>(const Tensor& in, Tensor* out,
+                                           Context* ctx) {
+  cudnnSoftmaxAlgorithm_t algorithm = CUDNN_SOFTMAX_ACCURATE;
+  cudnnSoftmaxMode_t mode = CUDNN_SOFTMAX_MODE_INSTANCE;
+
+  /*
+   * tensor tmp is used to generate the cudnn descriptor:
+   *   cudnn softmax requires a shape of {N, C, 1, 1},
+   *   while the helper `generate_shape_cuda` generates a shape of {1, 1, N, C}.
+   *   Thus this part serves a similar purpose to `generate_shape_cuda`, but in
+   *   a reverse manner.
+   */
+  CHECK_LE(in.shape().size(), 5)
+      << "Dimensions (shape) beyond 5 are currently not supported";
+  auto tmp = in;
+  while (tmp.shape().size() < 4) {
+    auto s = tmp.shape();
+    s.push_back(1);
+    tmp.Reshape(s);
+  }
+
+  const __half* inPtr = static_cast<const __half*>(in.block()->data());
+  __half* outPtr = static_cast<__half*>(out->block()->mutable_data());
+
+  float alpha = 1.0f;
+  float beta = 0.0f;
+
+  check_cudnn(cudnnSoftmaxForward(
+      ctx->cudnn_handle, algorithm, mode, static_cast<void*>(&alpha),
+      generate_tensor_nd_desc(tmp), inPtr, static_cast<void*>(&beta),
+      generate_tensor_nd_desc(tmp), outPtr));
+}
 
 template <>
 void SoftMaxBackward<float, lang::Cuda>(const Tensor& in, Tensor* out,
@@ -1143,25 +1375,51 @@
 template <>
 void ComputeCrossEntropy<float, lang::Cuda>(bool int_target,
                                             const size_t batchsize,
-                                            const size_t dim, const Block* p,
-                                            const Block* t, Block* loss,
+                                            const size_t dim, const Tensor& p,
+                                            const Tensor& t, Tensor* loss,
                                             Context* ctx) {
-  const float* pPtr = static_cast<const float*>(p->data());
-  const int* tPtr = static_cast<const int*>(t->data());
-  float* lossPtr = static_cast<float*>(loss->mutable_data());
+  const float* pPtr = static_cast<const float*>(p.block()->data());
+  const int* tPtr = static_cast<const int*>(t.block()->data());
+  float* lossPtr = static_cast<float*>(loss->block()->mutable_data());
   cuda::ComputeCrossEntropy(int_target, batchsize, dim, pPtr, tPtr, lossPtr,
                             ctx->stream);
 }
+
+template <>
+void ComputeCrossEntropy<half_float::half, lang::Cuda>(
+    bool int_target, const size_t batchsize, const size_t dim, const Tensor& p,
+    const Tensor& t, Tensor* loss, Context* ctx) {
+  const __half* pPtr = static_cast<const __half*>(p.block()->data());
+  const int* tPtr = static_cast<const int*>(t.block()->data());
+  __half* lossPtr = static_cast<__half*>(loss->block()->mutable_data());
+  cuda::ComputeCrossEntropy(int_target, batchsize, dim, pPtr, tPtr, lossPtr,
+                            ctx->stream);
+}
+
 template <>
 void SoftmaxCrossEntropyBwd<float, lang::Cuda>(bool int_target,
                                                const size_t batchsize,
-                                               const size_t dim, const Block* p,
-                                               const Block* t, Block* grad,
-                                               Context* ctx) {
-  CHECK_EQ(p, grad) << "Use the same pointer to optimize performance";
-  const float* pPtr = static_cast<const float*>(p->data());
-  const int* tPtr = static_cast<const int*>(t->data());
-  float* gradPtr = static_cast<float*>(grad->mutable_data());
+                                               const size_t dim,
+                                               const Tensor& p, const Tensor& t,
+                                               Tensor* grad, Context* ctx) {
+  CHECK_EQ(p.block(), grad->block())
+      << "Use the same pointer to optimize performance";
+  const float* pPtr = static_cast<const float*>(p.block()->data());
+  const int* tPtr = static_cast<const int*>(t.block()->data());
+  float* gradPtr = static_cast<float*>(grad->block()->mutable_data());
+  cuda::SoftmaxCrossEntropyBwd(int_target, batchsize, dim, pPtr, tPtr, gradPtr,
+                               ctx->stream);
+}
+
+template <>
+void SoftmaxCrossEntropyBwd<half_float::half, lang::Cuda>(
+    bool int_target, const size_t batchsize, const size_t dim, const Tensor& p,
+    const Tensor& t, Tensor* grad, Context* ctx) {
+  CHECK_EQ(p.block(), grad->block())
+      << "Use the same pointer to optimize performance";
+  const __half* pPtr = static_cast<const __half*>(p.block()->data());
+  const int* tPtr = static_cast<const int*>(t.block()->data());
+  __half* gradPtr = static_cast<__half*>(grad->block()->mutable_data());
   cuda::SoftmaxCrossEntropyBwd(int_target, batchsize, dim, pPtr, tPtr, gradPtr,
                                ctx->stream);
 }
diff --git a/src/io/communicator.cc b/src/io/communicator.cc
index a64c79d..97ac18d 100644
--- a/src/io/communicator.cc
+++ b/src/io/communicator.cc
@@ -140,8 +140,8 @@
 
 void Communicator::allReduce(int size, void *sendbuff, void *recvbuff,
                              ncclDataType_t ncclType, Context *ctx) {
-  NCCLCHECK(ncclAllReduce((const void *)sendbuff, (void *)recvbuff, size,
-                          ncclType, ncclSum, comm, ctx->s));
+  NCCLCHECK(ncclAllReduce((const void *)sendbuff, recvbuff, size, ncclType,
+                          ncclSum, comm, ctx->s));
 }
 
 void Communicator::generateBlocks(Tensor &t) {
@@ -214,6 +214,14 @@
 
   generateBlocks(t);
 
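+  // choose the NCCL element type and element size from the tensors' precision
+  // so the buffer offsets and copy sizes below use the right width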
+  if (t[0].data_type() == kFloat16) {
+    ncclType = ncclHalf;
+    dataSize = sizeof(__half);
+  } else {
+    ncclType = ncclFloat;
+    dataSize = sizeof(float);
+  }
+
   if (!send) {
     // buffer the tensors
     device_->Exec(
@@ -228,11 +236,17 @@
         [this, t](Context *ctx) mutable {
           // memory copy to fusedBuff
           for (size_t i = 0; i < t.size(); i++) {
-            CUDA_CHECK(
-                cudaMemcpyAsync((void *)(fusedSendBuff + sendBuffOffset),
-                                (const void *)t[i].block()->mutable_data(),
-                                t[i].Size() * sizeof(float),
-                                cudaMemcpyDeviceToDevice, ctx->c1));
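+            // fusedSendBuff is a void *, so advance it in units of the actual
+            // element type before the memcpy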
+            if (t[0].data_type() == kFloat16) {
+              offsetPointer = (void *)(static_cast<__half *>(fusedSendBuff) +
+                                       sendBuffOffset);
+            } else {
+              offsetPointer = (void *)(static_cast<float *>(fusedSendBuff) +
+                                       sendBuffOffset);
+            }
+            CUDA_CHECK(cudaMemcpyAsync(
+                (void *)offsetPointer,
+                (const void *)t[i].block()->mutable_data(),
+                t[i].Size() * dataSize, cudaMemcpyDeviceToDevice, ctx->c1));
             sendBuffOffset += t[i].Size();
           }
         },
@@ -247,13 +261,15 @@
           CUDA_CHECK(cudaStreamWaitEvent(ctx->s, event, 0));
         },
         prev_blocks_, prev_blocks_, "Waiting");
+
     device_->Exec(
         [this](Context *ctx) mutable {
-          allReduce((int)sendBuffOffset, (void *)fusedSendBuff,
-                    (void *)fusedRecvBuff, ncclFloat, ctx);
+          allReduce((int)sendBuffOffset, fusedSendBuff, fusedRecvBuff, ncclType,
+                    ctx);
           sendBuffOffset = 0;
         },
         prev_blocks_, blocks_, "Dist_s_fusedSynch_allreduce");
+
     device_->Exec(
         [this](Context *ctx) mutable {
           // wait for the allreduce to complete
@@ -261,14 +277,22 @@
           CUDA_CHECK(cudaStreamWaitEvent(ctx->c1, event, 0));
         },
         blocks_, blocks_, "Waiting");
+
     device_->Exec(
         [this, t](Context *ctx) mutable {
           // copy data back to tensors after allreduce
           size_t offset = 0;
           for (size_t i = 0; i < t.size(); i++) {
+            if (t[0].data_type() == kFloat16) {
+              offsetPointer =
+                  (void *)(static_cast<__half *>(fusedRecvBuff) + offset);
+            } else {
+              offsetPointer =
+                  (void *)(static_cast<float *>(fusedRecvBuff) + offset);
+            }
             CUDA_CHECK(cudaMemcpyAsync((void *)t[i].block()->mutable_data(),
-                                       (const void *)(fusedRecvBuff + offset),
-                                       t[i].Size() * sizeof(float),
+                                       (const void *)offsetPointer,
+                                       t[i].Size() * dataSize,
                                        cudaMemcpyDeviceToDevice, ctx->c1));
             offset += t[i].Size();
           }
@@ -281,6 +305,11 @@
   // generateBlocks(t);
   device_ = t.device();
 
+  if (t.data_type() == kFloat16)
+    ncclType = ncclHalf;
+  else
+    ncclType = ncclFloat;
+
   device_->Exec(
       [this, t](Context *ctx) mutable {
         // record the event of the default cuda stream and follow it
@@ -292,12 +321,17 @@
   device_->Exec(
       [this, t](Context *ctx) mutable {
         void *addr = t.block()->mutable_data();
-        allReduce(t.Size(), addr, addr, ncclFloat, ctx);
+        allReduce(t.Size(), addr, addr, ncclType, ctx);
       },
       {t.block()}, {t.block()}, "Dist_s_synch_allreduce");
-}
+
+}
 
 void Communicator::fusedSynchHalf(vector<Tensor> &t, bool send) {
+  CHECK_EQ(t[0].data_type(), kFloat32)
+      << "This function is only available for 32-bit input tensors, "
+         "which are converted to 16 bits before transmission";
+
   CHECK_GT(t.size(), 0);
 
   generateBlocks(t);
@@ -318,11 +352,11 @@
           size_t offset = 0;
           // memory copy to fusedBuff
           for (size_t i = 0; i < t.size(); i++) {
-            CUDA_CHECK(
-                cudaMemcpyAsync((void *)(fusedSendBuff + sendBuffOffset),
-                                (const void *)t[i].block()->mutable_data(),
-                                t[i].Size() * sizeof(float),
-                                cudaMemcpyDeviceToDevice, ctx->c1));
+            CUDA_CHECK(cudaMemcpyAsync(
+                (void *)(static_cast<float *>(fusedSendBuff) + sendBuffOffset),
+                (const void *)t[i].block()->mutable_data(),
+                t[i].Size() * sizeof(float), cudaMemcpyDeviceToDevice,
+                ctx->c1));
             sendBuffOffset += t[i].Size();
             offset += t[i].Size();
           }
@@ -332,8 +366,8 @@
     // send the tensors in the buffer
     device_->Exec(
         [this](Context *ctx) mutable {
-          cuda::float2half(sendBuffOffset, fusedSendBuff, fusedSendBuffHalf,
-                           ctx->c1);
+          cuda::float2half(sendBuffOffset, static_cast<float *>(fusedSendBuff),
+                           static_cast<__half *>(fusedSendBuffHalf), ctx->c1);
         },
         prev_blocks_, blocks_, "Dist_c1_fusedSynchHalf_float2half");
     device_->Exec(
@@ -345,31 +379,33 @@
         blocks_, blocks_, "Waiting");
     device_->Exec(
         [this](Context *ctx) mutable {
-          allReduce((int)sendBuffOffset, (void *)fusedSendBuffHalf,
-                    (void *)fusedRecvBuffHalf, ncclHalf, ctx);
+          allReduce((int)sendBuffOffset, fusedSendBuffHalf, fusedRecvBuffHalf,
+                    ncclHalf, ctx);
         },
         blocks_, blocks_, "Dist_s_fusedSynchHalf_allreduce");
     device_->Exec(
         [this](Context *ctx) mutable {
           // wait for the allreduce to complete
           CUDA_CHECK(cudaEventRecord(event, ctx->s));
-          CUDA_CHECK(cudaStreamWaitEvent(ctx->c2, event, 0));          
+          CUDA_CHECK(cudaStreamWaitEvent(ctx->c2, event, 0));
         },
         blocks_, blocks_, "Waiting");
     device_->Exec(
         [this, t](Context *ctx) mutable {
-          cuda::half2float(sendBuffOffset, fusedRecvBuffHalf, fusedRecvBuff,
-                           ctx->c2);
+          cuda::half2float(sendBuffOffset,
+                           static_cast<__half *>(fusedRecvBuffHalf),
+                           static_cast<float *>(fusedRecvBuff), ctx->c2);
 
           sendBuffOffset = 0;
 
           // copy data back to tensors after allreduce
           size_t offset = 0;
           for (size_t i = 0; i < t.size(); i++) {
-            CUDA_CHECK(cudaMemcpyAsync((void *)t[i].block()->mutable_data(),
-                                       (const void *)(fusedRecvBuff + offset),
-                                       t[i].Size() * sizeof(float),
-                                       cudaMemcpyDeviceToDevice, ctx->c2));
+            CUDA_CHECK(cudaMemcpyAsync(
+                (void *)t[i].block()->mutable_data(),
+                (const void *)(static_cast<float *>(fusedRecvBuff) + offset),
+                t[i].Size() * sizeof(float), cudaMemcpyDeviceToDevice,
+                ctx->c2));
             offset += t[i].Size();
           }
         },
@@ -378,6 +414,11 @@
 }
 
 void Communicator::synchHalf(Tensor &t) {
+  // the tensor precision is 32 bit; convert to 16 bit before transmission
+  CHECK_EQ(t.data_type(), kFloat32)
+      << "This function is only available for 32-bit input tensors, "
+         "which are converted to 16 bits before transmission";
+
   generateBlocks(t);
 
   if (halfInitialized == false) halfInit();
@@ -392,7 +433,8 @@
   device_->Exec(
       [this, t](Context *ctx) mutable {
         float *addr = static_cast<float *>(t.block()->mutable_data());
-        cuda::float2half(t.Size(), addr, fusedSendBuffHalf, ctx->c1);
+        cuda::float2half(t.Size(), addr,
+                         static_cast<__half *>(fusedSendBuffHalf), ctx->c1);
       },
       blocks_, blocks_, "Dist_c1_synchHalf_float2half");
   device_->Exec(
@@ -404,8 +446,8 @@
       blocks_, blocks_, "Waiting");
   device_->Exec(
       [this, t](Context *ctx) mutable {
-        allReduce(t.Size(), (void *)fusedSendBuffHalf,
-                  (void *)fusedRecvBuffHalf, ncclHalf, ctx);
+        allReduce(t.Size(), fusedSendBuffHalf, fusedRecvBuffHalf, ncclHalf,
+                  ctx);
       },
       blocks_, blocks_, "Dist_s_synchHalf_allreduce");
   device_->Exec(
@@ -418,10 +460,10 @@
   device_->Exec(
       [this, t](Context *ctx) mutable {
         float *addr = static_cast<float *>(t.block()->mutable_data());
-        cuda::half2float(t.Size(), fusedRecvBuffHalf, addr, ctx->c2);
+        cuda::half2float(t.Size(), static_cast<__half *>(fusedRecvBuffHalf),
+                         addr, ctx->c2);
       },
       blocks_, blocks_, "Dist_c2_synchHalf_half2float");
-
 }
 
 void Communicator::sparsification(Tensor &t, Tensor &accumulation,
@@ -449,6 +491,14 @@
 void Communicator::_sparsification(Tensor &t, Tensor *accumulation,
                                    float sparsThreshold, bool topK,
                                    Context *ctx) {
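+  // record the element type and size of the input; the thrust-based
+  // sparsification kernels below still support only fp32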
+  if (t.data_type() == kFloat16) {
+    ncclType = ncclHalf;
+    dataSize = sizeof(__half);
+  } else {
+    ncclType = ncclFloat;
+    dataSize = sizeof(float);
+  }
+
   // threshold for sparsification
   threshold = sparsThreshold;
 
@@ -458,13 +508,13 @@
 
   // memory copy to fusedBuff
   CUDA_CHECK(cudaMemcpyAsync(
-      (void *)fusedSendBuff, (const void *)t.block()->mutable_data(),
+      fusedSendBuff, (const void *)t.block()->mutable_data(),
       t.Size() * sizeof(float), cudaMemcpyDeviceToDevice, ctx->c1));
 
-  float *accumPtr;
+  void *accumPtr;
 
   if (accumulation != NULL)
-    accumPtr = (float *)accumulation->block()->mutable_data();
+    accumPtr = accumulation->block()->mutable_data();
   else
     accumPtr = NULL;
 
@@ -474,9 +524,9 @@
     topKSparsAllReduce(t.Size(), accumPtr, ctx);
 
   // copy data back to tensor after allreduce
-  CUDA_CHECK(cudaMemcpyAsync(
-      (void *)t.block()->mutable_data(), (const void *)fusedRecvBuff,
-      t.Size() * sizeof(float), cudaMemcpyDeviceToDevice, ctx->c2));
+  CUDA_CHECK(cudaMemcpyAsync((void *)t.block()->mutable_data(),
+                             (const void *)fusedRecvBuff, t.Size() * dataSize,
+                             cudaMemcpyDeviceToDevice, ctx->c2));
 }
 
 void Communicator::fusedSparsification(vector<Tensor> &t, Tensor &accumulation,
@@ -509,6 +559,14 @@
 void Communicator::_fusedSparsification(vector<Tensor> &t, Tensor *accumulation,
                                         float sparsThreshold, bool topK,
                                         Context *ctx) {
+  if (t[0].data_type() == kFloat16) {
+    ncclType = ncclHalf;
+    dataSize = sizeof(__half);
+  } else {
+    ncclType = ncclFloat;
+    dataSize = sizeof(float);
+  }
+
   // threshold for sparsification
   threshold = sparsThreshold;
 
@@ -520,17 +578,21 @@
 
   // memory copy to fusedBuff
   for (size_t i = 0; i < t.size(); i++) {
-    CUDA_CHECK(cudaMemcpyAsync((void *)(fusedSendBuff + offset),
-                               (const void *)t[i].block()->mutable_data(),
-                               t[i].Size() * sizeof(float),
-                               cudaMemcpyDeviceToDevice, ctx->c1));
+    if (t[0].data_type() == kFloat16) {
+      offsetPointer = (void *)(static_cast<__half *>(fusedSendBuff) + offset);
+    } else {
+      offsetPointer = (void *)(static_cast<float *>(fusedSendBuff) + offset);
+    }
+    CUDA_CHECK(cudaMemcpyAsync(
+        offsetPointer, (const void *)t[i].block()->mutable_data(),
+        t[i].Size() * dataSize, cudaMemcpyDeviceToDevice, ctx->c1));
     offset += t[i].Size();
   }
 
-  float *accumPtr;
+  void *accumPtr;
 
   if (accumulation != NULL)
-    accumPtr = (float *)accumulation->block()->mutable_data();
+    accumPtr = accumulation->block()->mutable_data();
   else
     accumPtr = NULL;
 
@@ -542,36 +604,49 @@
   // copy data back to tensors after allreduce
   offset = 0;
   for (size_t i = 0; i < t.size(); i++) {
-    CUDA_CHECK(cudaMemcpyAsync((void *)t[i].block()->mutable_data(),
-                               (const void *)(fusedRecvBuff + offset),
-                               t[i].Size() * sizeof(float),
-                               cudaMemcpyDeviceToDevice, ctx->c2));
+    if (t[0].data_type() == kFloat16) {
+      offsetPointer = (void *)(static_cast<__half *>(fusedRecvBuff) + offset);
+    } else {
+      offsetPointer = (void *)(static_cast<float *>(fusedRecvBuff) + offset);
+    }
+    CUDA_CHECK(cudaMemcpyAsync(
+        (void *)t[i].block()->mutable_data(), (const void *)(offsetPointer),
+        t[i].Size() * dataSize, cudaMemcpyDeviceToDevice, ctx->c2));
     offset += t[i].Size();
   }
 }
 
-void Communicator::valSparsAllReduce(size_t num, float *accumulation,
+void Communicator::valSparsAllReduce(size_t num, void *accumulation,
                                      Context *ctx) {
+  CHECK_EQ(dataSize, sizeof(float))
+      << "This function depends on thrust and supports only fp32 currently";
+
   if (sparsInitialized == false) sparsInit();
 
   if (accumulation != NULL) {
     // add the previous accumulation
-    cuda::add(num, fusedSendBuff, accumulation, fusedSendBuff, ctx->c1);
+    cuda::add(num, static_cast<float *>(fusedSendBuff),
+              static_cast<float *>(accumulation),
+              static_cast<float *>(fusedSendBuff), ctx->c1);
     // backup the fusedSendBuff
-    CUDA_CHECK(cudaMemcpyAsync((void *)backupBuff, (const void *)fusedSendBuff,
+    CUDA_CHECK(cudaMemcpyAsync(backupBuff, (const void *)fusedSendBuff,
                                sizeof(float) * num, cudaMemcpyDeviceToDevice,
                                ctx->c1));
   }
 
   // sparsification based on threshold
-  cuda::sparsabs(num, threshold, fusedSendBuff, fusedSendBuff, ctx->c1);
+  cuda::sparsabs(num, threshold, static_cast<float *>(fusedSendBuff),
+                 static_cast<float *>(fusedSendBuff), ctx->c1);
 
   // output the gradient accumulation
   if (accumulation != NULL)
-    cuda::sub(num, backupBuff, fusedSendBuff, accumulation, ctx->c1);
+    cuda::sub(num, static_cast<float *>(backupBuff),
+              static_cast<float *>(fusedSendBuff),
+              static_cast<float *>(accumulation), ctx->c1);
 
   // produce the index of the sparse array
-  cuda::sparsindex(num, fusedSendBuff, fusedIndex, ctx->c1);
+  cuda::sparsindex(num, static_cast<float *>(fusedSendBuff), fusedIndex,
+                   ctx->c1);
 
   // remove zero of index to become sparse array and get the num of non-zero nnz
   cuda::removezeroidx(num, fusedIndex, ctx->c1, nnz);
@@ -594,21 +669,22 @@
     if (nnzAll[i] > nnzMax) nnzMax = nnzAll[i];
 
   // remove zero of values to become sparse array
-  cuda::removezeroval(num, fusedSendBuff, ctx->c1);
+  cuda::removezeroval(num, static_cast<float *>(fusedSendBuff), ctx->c1);
 
-  CUDA_CHECK(cudaMemcpyAsync((void *)(sparsSendBuff), (const void *)fusedIndex,
+  CUDA_CHECK(cudaMemcpyAsync(sparsSendBuff, (const void *)fusedIndex,
                              sizeof(int) * (*nnz), cudaMemcpyDeviceToDevice,
                              ctx->c1));
-  CUDA_CHECK(cudaMemcpyAsync(
-      (void *)(sparsSendBuff + (*nnz)), (const void *)fusedSendBuff,
-      sizeof(float) * (*nnz), cudaMemcpyDeviceToDevice, ctx->c1));
+  CUDA_CHECK(
+      cudaMemcpyAsync((void *)(static_cast<float *>(sparsSendBuff) + (*nnz)),
+                      (const void *)fusedSendBuff, sizeof(float) * (*nnz),
+                      cudaMemcpyDeviceToDevice, ctx->c1));
 
   // wait for the memcpy to complete
   CUDA_CHECK(cudaEventRecord(event, ctx->c1));
   CUDA_CHECK(cudaStreamWaitEvent(ctx->s, event, 0));
 
   // all-gather all the sparse gradients
-  NCCLCHECK(ncclAllGather((const void *)sparsSendBuff, (void *)sparsRecvBuff,
+  NCCLCHECK(ncclAllGather((const void *)sparsSendBuff, sparsRecvBuff,
                           2 * nnzMax, ncclFloat, comm, ctx->s));
 
   // wait for the all-gather to complete
@@ -627,36 +703,44 @@
 
   for (int i = 0; i < world_size; i++) {
     CUDA_CHECK(cudaMemcpyAsync(
-        (void *)xInd, (const void *)(sparsRecvBuff + offset),
+        (void *)xInd,
+        (const void *)(static_cast<float *>(sparsRecvBuff) + offset),
         sizeof(int) * nnzAll[i], cudaMemcpyDeviceToDevice, ctx->c2));
     offset += nnzAll[i];
     CUDA_CHECK(cudaMemcpyAsync(
-        (void *)xVal, (const void *)(sparsRecvBuff + offset),
+        (void *)xVal,
+        (const void *)(static_cast<float *>(sparsRecvBuff) + offset),
         sizeof(float) * nnzAll[i], cudaMemcpyDeviceToDevice, ctx->c2));
     offset += (2 * nnzMax - nnzAll[i]);
     CUSPARSE_CHECK(cusparseSaxpyi(cusparse_handle, nnzAll[i], &alpha, xVal,
-                                  xInd, fusedRecvBuff,
+                                  xInd, static_cast<float *>(fusedRecvBuff),
                                   CUSPARSE_INDEX_BASE_ONE));
   }
 }
 
-void Communicator::topKSparsAllReduce(size_t num, float *accumulation,
+void Communicator::topKSparsAllReduce(size_t num, void *accumulation,
                                       Context *ctx) {
+  CHECK_EQ(dataSize, sizeof(float))
+      << "This function depends on thrust and supports only fp32 currently";
+
   if (sparsInitialized == false) sparsInit();
 
   // use gradient accumulation
   if (accumulation != NULL) {
     // add the previous accumulation
-    cuda::add(num, fusedSendBuff, accumulation, fusedSendBuff, ctx->c1);
+    cuda::add(num, static_cast<float *>(fusedSendBuff),
+              static_cast<float *>(accumulation),
+              static_cast<float *>(fusedSendBuff), ctx->c1);
     // backup the fusedSendBuff
-    CUDA_CHECK(cudaMemcpyAsync((void *)backupBuff, (const void *)fusedSendBuff,
+    CUDA_CHECK(cudaMemcpyAsync(backupBuff, (const void *)fusedSendBuff,
                                sizeof(float) * num, cudaMemcpyDeviceToDevice,
                                ctx->c1));
   }
 
   // generate an index and sort the fusedSendBuff from large to small values
   cuda::generateindex(num, fusedIndex, ctx->c1);
-  cuda::sortbykey(num, fusedSendBuff, fusedIndex, ctx->c1);
+  cuda::sortbykey(num, static_cast<float *>(fusedSendBuff), fusedIndex,
+                  ctx->c1);
 
   // determine the number of top-K elements to communicate
   int nnzMax = (int)ceil(threshold * num);
@@ -666,19 +750,23 @@
   if (accumulation != NULL) {
     CUDA_CHECK(cudaMemsetAsync(accumulation, 0, num * sizeof(float), ctx->c1));
     CUSPARSE_CHECK(cusparseSetStream(cusparse_handle, ctx->c1));
-    CUSPARSE_CHECK(cusparseSaxpyi(cusparse_handle, nnzMax, &alpha,
-                                  fusedSendBuff, fusedIndex, accumulation,
-                                  CUSPARSE_INDEX_BASE_ONE));
-    cuda::sub(num, backupBuff, accumulation, accumulation, ctx->c1);
+    CUSPARSE_CHECK(cusparseSaxpyi(
+        cusparse_handle, nnzMax, &alpha, static_cast<float *>(fusedSendBuff),
+        fusedIndex, static_cast<float *>(accumulation),
+        CUSPARSE_INDEX_BASE_ONE));
+    cuda::sub(num, static_cast<float *>(backupBuff),
+              static_cast<float *>(accumulation),
+              static_cast<float *>(accumulation), ctx->c1);
   }
 
   // the top-K values and indices will be sent
-  CUDA_CHECK(cudaMemcpyAsync((void *)(sparsSendBuff), (const void *)fusedIndex,
+  CUDA_CHECK(cudaMemcpyAsync(sparsSendBuff, (const void *)fusedIndex,
                              sizeof(int) * nnzMax, cudaMemcpyDeviceToDevice,
                              ctx->c1));
-  CUDA_CHECK(cudaMemcpyAsync(
-      (void *)(sparsSendBuff + nnzMax), (const void *)fusedSendBuff,
-      sizeof(float) * nnzMax, cudaMemcpyDeviceToDevice, ctx->c1));
+  CUDA_CHECK(
+      cudaMemcpyAsync((void *)(static_cast<float *>(sparsSendBuff) + nnzMax),
+                      (const void *)fusedSendBuff, sizeof(float) * nnzMax,
+                      cudaMemcpyDeviceToDevice, ctx->c1));
 
   // wait for the memcpy to complete
   CUDA_CHECK(cudaEventRecord(event, ctx->c1));
@@ -703,15 +791,18 @@
   // all-reduce process
   for (int i = 0; i < world_size; i++) {
     CUDA_CHECK(cudaMemcpyAsync(
-        (void *)xInd, (const void *)(sparsRecvBuff + offset),
+        (void *)xInd,
+        (const void *)(static_cast<float *>(sparsRecvBuff) + offset),
         sizeof(int) * nnzMax, cudaMemcpyDeviceToDevice, ctx->c2));
     offset += nnzMax;
     CUDA_CHECK(cudaMemcpyAsync(
-        (void *)xVal, (const void *)(sparsRecvBuff + offset),
+        (void *)xVal,
+        (const void *)(static_cast<float *>(sparsRecvBuff) + offset),
         sizeof(float) * nnzMax, cudaMemcpyDeviceToDevice, ctx->c2));
     offset += nnzMax;
     CUSPARSE_CHECK(cusparseSaxpyi(cusparse_handle, nnzMax, &alpha, xVal, xInd,
-                                  fusedRecvBuff, CUSPARSE_INDEX_BASE_ONE));
+                                  static_cast<float *>(fusedRecvBuff),
+                                  CUSPARSE_INDEX_BASE_ONE));
   }
 }
 }  // namespace singa
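
For readers tracing the pointer arithmetic in the two sparse all-reduce paths above: each rank packs its non-zero (or top-K) gradient entries as indices followed by values, with the buffer padded out to 2 * nnzMax slots so that every rank contributes a fixed-size block to ncclAllGather. The sketch below is an illustrative host-side C++ analogue only; the real code works on the device buffers sparsSendBuff/sparsRecvBuff, and the helper name here is not part of the SINGA API.

    #include <cassert>
    #include <cstring>
    #include <vector>

    // Illustrative sketch of the [indices | values] packing performed by
    // valSparsAllReduce / topKSparsAllReduce. Indices are int but are
    // bit-copied into float slots, exactly as the device memcpy does.
    static_assert(sizeof(int) == sizeof(float), "packing assumes 4-byte int/float");

    std::vector<float> PackSparse(const std::vector<int> &idx,
                                  const std::vector<float> &val, size_t nnzMax) {
      assert(idx.size() == val.size() && idx.size() <= nnzMax);
      std::vector<float> send(2 * nnzMax, 0.0f);
      // indices first ...
      std::memcpy(send.data(), idx.data(), sizeof(int) * idx.size());
      // ... values immediately after them; the remainder stays as padding
      std::memcpy(send.data() + idx.size(), val.data(), sizeof(float) * val.size());
      return send;
    }

On the receive side, rank i's block starts at offset i * 2 * nnzMax floats; the unpack loops above read nnz_i indices, then nnz_i values, skip the padding (offset += 2 * nnzMax - nnz_i), and scatter the entries into fusedRecvBuff via cusparseSaxpyi.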
diff --git a/src/model/operation/convolution.cc b/src/model/operation/convolution.cc
index 052e521..96313d2 100644
--- a/src/model/operation/convolution.cc
+++ b/src/model/operation/convolution.cc
@@ -493,7 +493,14 @@
   CUDNN_CHECK(cudnnSetFilter4dDescriptor(
       filter_desc, GetCudnnDataType(dtype), CUDNN_TENSOR_NCHW, num_filters,
       channels / groups, kernel_h, kernel_w));
-  if (prefer == "fastest" || prefer == "limited_workspace" ||
+
+  if (prefer == "tensor_ops") {
+    // std::cout<<"using tensor op\n";
+    CUDNN_CHECK(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
+    fp_alg = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
+    bp_filter_alg = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
+    bp_data_alg = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
+  } else if (prefer == "fastest" || prefer == "limited_workspace" ||
       prefer == "no_workspace") {
     cudnnConvolutionFwdPreference_t fwd_pref;
     cudnnConvolutionBwdFilterPreference_t bwd_filt_pref;
@@ -554,11 +561,11 @@
       ctx->cudnn_handle, x_desc, y_desc, conv_desc, filter_desc, bp_filter_alg,
       &bp_filter_byte));
   workspace_count = std::max(std::max(fp_byte, bp_data_byte), bp_filter_byte) /
-                        sizeof(float) +
+                        SizeOf(dtype) +
                     1;
-  if (workspace_count * sizeof(float) > workspace_byte_limit)
+  if (workspace_count * SizeOf(dtype) > workspace_byte_limit)
     LOG(WARNING) << "The required memory for workspace ("
-                 << workspace_count * sizeof(float)
+                 << workspace_count * SizeOf(dtype)
                  << ") is larger than the expected Bytes ("
                  << workspace_byte_limit << ")";
   workspace = Tensor(Shape{workspace_count}, dev, dtype);
@@ -602,7 +609,7 @@
                                 inblock->data(), cch.filter_desc,
                                 wblock->data(), cch.conv_desc, cch.fp_alg,
                                 cch.workspace.block()->mutable_data(),
-                                cch.workspace_count * sizeof(float), &beta,
+                                cch.workspace_count * SizeOf(x.data_type()), &beta,
                                 cch.y_desc, outblock->mutable_data());
       },
       {x.block(), W.block()}, {output.block(), cch.workspace.block()},
@@ -639,7 +646,7 @@
             ctx->cudnn_handle, &alpha, cch.filter_desc, wblock->data(),
             cch.y_desc, dyblock->data(), cch.conv_desc, cch.bp_data_alg,
             cch.workspace.block()->mutable_data(),
-            cch.workspace_count * sizeof(float), &beta, cch.x_desc,
+            cch.workspace_count * SizeOf(dx.data_type()), &beta, cch.x_desc,
             dxblock->mutable_data());
       },
       {dy.block(), W.block()}, {dx.block(), cch.workspace.block()},
@@ -664,7 +671,7 @@
             ctx->cudnn_handle, &alpha, cch.x_desc, inblock->data(), cch.y_desc,
             dyblock->data(), cch.conv_desc, cch.bp_filter_alg,
             cch.workspace.block()->mutable_data(),
-            cch.workspace_count * sizeof(float), &beta, cch.filter_desc,
+            cch.workspace_count * SizeOf(x.data_type()), &beta, cch.filter_desc,
             dwblock->mutable_data());
       },
       {dy.block(), x.block()}, {dW.block(), cch.workspace.block()},
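
The "tensor_ops" branch above enables CUDNN_TENSOR_OP_MATH and pins the forward/backward algorithms, and the workspace is now sized with SizeOf(dtype) instead of sizeof(float), so fp16 convolutions allocate the correct number of bytes. A minimal usage sketch follows; it mirrors the CudnnConvHandle call in the new test_operation_benchmark.cc further down, and the wrapper function name and sizes here are illustrative, not part of the SINGA API.

    #include <memory>

    #include "../src/model/operation/convolution.h"
    #include "singa/core/tensor.h"

    using namespace singa;

    // Sketch: run a 1x1 convolution in fp16 with the new "tensor_ops" preference.
    void ConvWithTensorOps(std::shared_ptr<CudaGPU> cuda) {
      size_t in_chan = 256, out_chan = 64;
      Tensor x(Shape{64, in_chan, 28, 28}, cuda, kFloat16);
      Tensor w(Shape{out_chan, in_chan, 1, 1}, cuda, kFloat16);
      Tensor b(Shape{out_chan}, cuda, kFloat16);
      Gaussian(0.0f, 1.0f, &x);
      Gaussian(0.0f, 1.0f, &w);
      Gaussian(0.0f, 1.0f, &b);

      // Same arguments as the CudnnConvHandle call in test_operation_benchmark.cc,
      // with the preference set to "tensor_ops" to select CUDNN_TENSOR_OP_MATH.
      auto handle = CudnnConvHandle(x, {1, 1}, {2, 2}, {0, 0}, in_chan, out_chan,
                                    false, 1, 1024 * 1024 * 1024, "tensor_ops");
      auto y = GpuConvForward(x, w, b, handle);
    }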
diff --git a/test/gtest/gtest_main.cc b/test/gtest/gtest_main.cc
index f302822..22477f3 100644
--- a/test/gtest/gtest_main.cc
+++ b/test/gtest/gtest_main.cc
@@ -30,9 +30,11 @@
 #include <stdio.h>
 
 #include "gtest/gtest.h"
+#include "singa/utils/logging.h"
 
 GTEST_API_ int main(int argc, char **argv) {
   printf("Running main() from gtest_main.cc\n");
+  singa::InitLogging("");
   testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/test/singa/test_logging.cc b/test/singa/test_logging.cc
index 16efa8f..71b6d96 100644
--- a/test/singa/test_logging.cc
+++ b/test/singa/test_logging.cc
@@ -23,7 +23,6 @@
 #include "singa/utils/logging.h"
 
 TEST(Logging, InfoLogging) {
-  singa::InitLogging("");
   int a = 3;
   CHECK_EQ(a, 3);
   LOG(INFO) << "test info logging";
diff --git a/test/singa/test_operation_benchmark.cc b/test/singa/test_operation_benchmark.cc
new file mode 100644
index 0000000..a8e6160
--- /dev/null
+++ b/test/singa/test_operation_benchmark.cc
@@ -0,0 +1,147 @@
+
+/************************************************************
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ *************************************************************/
+#include <chrono>
+#include <iostream>
+
+#include "../src/core/tensor/tensor_math_cuda.h"
+#include "../src/model/operation/convolution.h"
+#include "gtest/gtest.h"
+#include "singa/core/tensor.h"
+#include "singa/singa_config.h"
+
+using namespace singa;
+using namespace std;
+using namespace std::chrono;
+
+#ifdef USE_CUDNN
+TEST(OperationBenchmark, CrossEntropyFwd) {
+  auto cuda = std::make_shared<singa::CudaGPU>();
+  auto ctx = cuda->context(0);
+  int bs = 64;
+  int dim = 10;
+  vector<DataType> dtypes = {kFloat16, kFloat32};
+
+  Tensor t(Shape{bs}, cuda);
+  t.SetValue(0.0f);
+  t = t.AsType(kInt);
+
+  for (auto dtype : dtypes) {
+    Tensor p(Shape{bs, dim}, cuda, dtype);
+    Uniform(0.0f, 1.0f, &p);
+
+    high_resolution_clock::time_point t1 = high_resolution_clock::now();
+
+    for (int i = 0; i < 1000; ++i) {
+      auto l = CrossEntropyFwd(p, t);
+      cudaStreamSynchronize(cuda->context(0)->stream);
+    }
+
+    high_resolution_clock::time_point t2 = high_resolution_clock::now();
+    duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
+    cout << " dtype " << dtype;
+    cout << " - " << time_span.count() << " sec";
+    cout << endl;
+  }
+}
+
+TEST(OperationBenchmark, Mult) {
+  auto cuda = std::make_shared<singa::CudaGPU>();
+  vector<DataType> dtypes = {kFloat32, kFloat16};
+  vector<unsigned long> second_dims = {16 * 100 - 5, 16 * 100, 16 * 100 + 5};
+
+  for (auto second_dim : second_dims) {
+    cout << endl;
+    for (auto dtype : dtypes) {
+      Tensor x(Shape{64, second_dim}, cuda, dtype);
+      Tensor w(Shape{second_dim, 2048}, cuda, dtype);
+      Gaussian(0.0f, 1.0f, &x);
+      Gaussian(0.0f, 1.0f, &w);
+
+      high_resolution_clock::time_point t1 = high_resolution_clock::now();
+
+      for (int i = 0; i < 1000; ++i) {
+        auto y = Mult(x, w);
+        cudaStreamSynchronize(cuda->context(0)->stream);
+      }
+
+      high_resolution_clock::time_point t2 = high_resolution_clock::now();
+      duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
+      cout << " second dim " << second_dim;
+      cout << " dtype " << dtype;
+      cout << " - " << time_span.count() << " sec";
+      cout << endl;
+    }
+  }
+}
+
+TEST(OperationBenchmark, Conv) {
+  auto cuda = std::make_shared<singa::CudaGPU>();
+  vector<DataType> dtypes = {kFloat16, kFloat32};
+  vector<vector<size_t>> kernels{{1, 1}};
+  vector<string> prefers{"tensor_ops", "fastest"};
+  vector<unsigned long> in_chans{1024, 256, 64};
+  int img_hw = 28;
+  size_t out_chan = 64;
+  auto has_bias = false;
+  int batch = 64;
+
+  vector<size_t> stride{2, 2};
+  vector<size_t> padding{0, 0};
+  for (auto kernel : kernels) {
+    for (auto in_chan : in_chans) {
+      for (auto prefer : prefers) {
+        cout << endl;
+        for (auto dtype : dtypes) {
+
+          Tensor x(Shape{batch, in_chan, img_hw, img_hw}, cuda, dtype);
+          Gaussian(0.0f, 1.0f, &x);
+          Tensor w(Shape{out_chan, in_chan, kernel[0], kernel[1]}, cuda, dtype);
+          Gaussian(0.0f, 1.0f, &w);
+          Tensor b(Shape{out_chan}, cuda, dtype);
+          Gaussian(0.0f, 1.0f, &b);
+
+          auto h =
+              CudnnConvHandle(x, kernel, stride, padding, in_chan, out_chan,
+                              has_bias, 1, 1024 * 1024 * 1024, prefer);
+
+          high_resolution_clock::time_point t1 = high_resolution_clock::now();
+
+          for (int i = 0; i < 1000; ++i) {
+            auto out = GpuConvForward(x, w, b, h);
+            cudaDeviceSynchronize();
+          }
+
+          high_resolution_clock::time_point t2 = high_resolution_clock::now();
+          duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
+          cout << " inchan " << in_chan;
+          cout << " outchan " << out_chan;
+          cout << " ker sz " << kernel[0];
+          cout << " prefer " << prefer;
+          cout << " dtype " << dtype;
+          cout << " - " << time_span.count() << " sec";
+          cout << endl;
+        }
+      }
+    }
+  }
+}
+#endif  // USE_CUDNN
\ No newline at end of file
diff --git a/tool/conda/README.md b/tool/conda/README.md
new file mode 100644
index 0000000..c3bf637
--- /dev/null
+++ b/tool/conda/README.md
@@ -0,0 +1,72 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Package SINGA using conda-build
+
+This note is written for the repo maintainer.
+
+To create and upload the conda packages, follow the steps below:
+
+## 1. Build the docker image
+
+To set up the build environment, use the Dockerfile from https://github.com/apache/singa/blob/dev/tool/conda/docker/cuda10/Dockerfile
+
+For example:
+
+    docker build SINGA_DIR/singa/tool/conda/docker/cpu -t nusdbsystem/singa:conda-cpu
+
+## 2. Build the container from the image and access the container
+
+For example:
+
+    docker run -it -p 2222:22 --runtime nvidia --name singa_conda_cpu --rm nusdbsystem/singa:conda-cpu bash
+
+## 3. Build conda packages
+
+There are three versions of the conda package: (i) CPU, (ii) GPU, and (iii) DIST (distributed training).
+
+To build a specific version, follow these steps:
+
+(i) cd to the folder tool/conda/singa and follow the instructions in https://github.com/apache/singa/blob/dev/tool/conda/singa/README.md
+
+For example, for the CPU version:
+
+    cd tool/conda/singa
+    conda config --add channels conda-forge
+    conda config --add channels nusdbsystem
+    conda-build .  --python 3.6
+    anaconda -t $ANACONDA_UPLOAD_TOKEN upload -u nusdbsystem -l main /root/miniconda/conda-bld/linux-64/singa-3.1.0-cpu_py36.tar.bz2
+
+The above generates the base SINGA package. The next step wraps it to produce the SINGA-CPU / SINGA-GPU / SINGA-DIST conda packages.
+
+(ii) For each version, cd to the corresponding folder:
+
+For the CPU version, cd to tool/conda/cpu
+For the GPU version, cd to tool/conda/gpu
+For the DIST version, cd to tool/conda/dist
+
+(iii) Build the SINGA-CPU / SINGA-GPU / SINGA-DIST anaconda package and upload it.
+
+For example, for the SINGA-CPU version:
+
+    cd tool/conda/cpu
+    conda-build .  --python 3.6
+    anaconda -t $ANACONDA_UPLOAD_TOKEN upload -u nusdbsystem -l main /root/miniconda/conda-bld/linux-64/singa-cpu-3.1.0-py36.tar.bz2
+
+Since the SINGA-CPU, SINGA-GPU, and SINGA-DIST packages are wrappers around the base SINGA package, all of the steps from (i) to (iii) are necessary.
diff --git a/tool/conda/cpu/README.md b/tool/conda/cpu/README.md
index 566ac09..d201a3a 100644
--- a/tool/conda/cpu/README.md
+++ b/tool/conda/cpu/README.md
@@ -42,7 +42,7 @@
 To build this package and upload it
 
     conda config --add channels nusdbsystem
-    conda build .
+    conda-build .  --python 3.6
     anaconda -t $ANACONDA_UPLOAD_TOKEN upload -u nusdbsystem -l main <path to the singa-cpu package>
 
 where $ANACONDA_UPLOAD_TOKEN is the upload token associated with the nusdbsystem account on Anaconda Cloud.
diff --git a/tool/conda/dist/README.md b/tool/conda/dist/README.md
index 03d7050..b5ba374 100644
--- a/tool/conda/dist/README.md
+++ b/tool/conda/dist/README.md
@@ -45,7 +45,7 @@
 To build this package and upload it
 
     conda config --add channels nusdbsystem
-    conda build .
+    conda-build .  --python 3.6
     anaconda -t $ANACONDA_UPLOAD_TOKEN upload -u nusdbsystem -l main <path to the singa-dist package>
 
 where $ANACONDA_UPLOAD_TOKEN is the upload token associated with the nusdbsystem account on Anaconda Cloud.
diff --git a/tool/conda/docker/cpu/Dockerfile b/tool/conda/docker/cpu/Dockerfile
new file mode 100644
index 0000000..f8e6b30
--- /dev/null
+++ b/tool/conda/docker/cpu/Dockerfile
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# 18.04 has errors in ssh
+FROM ubuntu:18.04
+
+# install dependencies
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        git \
+        build-essential \
+        cmake \
+        wget \
+        openssh-server \
+        ca-certificates \
+    && apt-get clean \
+    && apt-get autoremove \
+    && apt-get autoclean \
+    && rm -rf /var/lib/apt/lists/* \
+    #
+    # install conda, conda-build and anaconda-client
+    #
+    && wget --no-check-certificate https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh \
+    && bash miniconda.sh -b -p /root/miniconda \
+    && /root/miniconda/bin/conda config --set always_yes yes --set changeps1 no \
+    && /root/miniconda/bin/conda update -q conda \
+    && /root/miniconda/bin/conda install -y \
+        conda-build \
+        anaconda-client \
+    && /root/miniconda/bin/conda clean -tipsy
+
+# config ssh service
+RUN mkdir /var/run/sshd \
+    && echo 'root:singa' | chpasswd \
+    && sed -ri 's/^#?PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config \
+    && sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config \
+    && mkdir /root/.ssh
+
+# Add conda to PATH. Doing this here so other RUN steps can be grouped above
+ENV PATH /root/miniconda/bin:${PATH}
+
+EXPOSE 22
+
+CMD ["/usr/sbin/sshd", "-D"]
diff --git a/tool/conda/gpu/README.md b/tool/conda/gpu/README.md
index 91be515..e15cd1a 100644
--- a/tool/conda/gpu/README.md
+++ b/tool/conda/gpu/README.md
@@ -44,7 +44,7 @@
 To build this package and upload it
 
     conda config --add channels nusdbsystem
-    conda build .
+    conda-build .  --python 3.6
     anaconda -t $ANACONDA_UPLOAD_TOKEN upload -u nusdbsystem -l main <path to the singa-gpu package>
 
 where $ANACONDA_UPLOAD_TOKEN is the upload token associated with the nusdbsystem account on Anaconda Cloud.