Merge pull request #860 from NLGithubWP/add-cifar10-multiprocess-autograd

Add a multi-process autograd training example for CIFAR-10
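The script launches one training process per GPU: the number of GPUs is given as the only command-line argument, a shared NCCL id is generated in the parent process, and each worker runs `train_cifar10` from the companion `resnet_cifar10.py` with its own rank. Run it as, e.g., `python cifar10_multiprocess.py 2` for two GPUs.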
diff --git a/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py b/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py
new file mode 100755
index 0000000..b5e51ad
--- /dev/null
+++ b/examples/cifar_distributed_cnn/autograd/cifar10_multiprocess.py
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# train_cifar10 and the singa module are pulled in from the
+# companion resnet_cifar10.py example by the wildcard import
+from resnet_cifar10 import *
+import multiprocessing
+import sys
+
+if __name__ == '__main__':
+
+    # Generate a NCCL ID to be used for collective communication
+    nccl_id = singa.NcclIdHolder()
+
+    # number of GPUs to use, given as the only command-line argument
+    if len(sys.argv) != 2:
+        print('Usage: python cifar10_multiprocess.py <num_gpus>')
+        sys.exit(1)
+    world_size = int(sys.argv[1])
+
+    # enable the experimental asynchronous training with partial parameter updates
+    partial_update = True
+
+    # spawn one training process per GPU; each worker gets its rank,
+    # the world size, and the shared NCCL id
+    processes = []
+    for local_rank in range(0, world_size):
+        processes.append(
+            multiprocessing.Process(target=train_cifar10,
+                                    args=(True, local_rank, world_size, nccl_id,
+                                          partial_update)))
+
+    for p in processes:
+        p.start()
+
+    # wait for all training processes to finish
+    for p in processes:
+        p.join()
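For context, here is a minimal sketch of the worker side, i.e. what a `train_cifar10(DIST, local_rank, world_size, nccl_id, partial_update)` function can do with the arguments passed above. It assumes SINGA's `singa.device` and `singa.opt` modules; it illustrates the pattern, and is not the actual implementation in `resnet_cifar10.py`:

```python
# Illustrative worker-side sketch (assumed signature, not the shipped code):
# each spawned process binds to its own GPU and joins the NCCL group
# through the shared id generated by the parent process.
from singa import device
from singa import opt


def train_cifar10(DIST, local_rank, world_size, nccl_id, partial_update):
    # bind this process to the GPU matching its rank
    dev = device.create_cuda_gpu_on(local_rank)

    # wrap plain SGD in SINGA's distributed optimizer; the shared nccl_id
    # lets the world_size workers form a single NCCL communicator
    sgd = opt.SGD(lr=0.005, momentum=0.9, weight_decay=1e-5)
    if DIST:
        sgd = opt.DistOpt(sgd,
                          nccl_id=nccl_id,
                          local_rank=local_rank,
                          world_size=world_size)

    # a full example would build the model on dev and run the training
    # loop here, using the partial-parameter-update path of the
    # distributed optimizer when partial_update is set
    ...
```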