| """ |
| Auto-tuning a convolutional network for x86 CPU (NNVM) |
| ====================================================== |
| **Author**: `Yao Wang <https://github.com/kevinthesun>`_ |
| |
| This is a tutorial about how to tune convolution neural network |
| for x86 cpu. |
| """ |
import os
import numpy as np

import nnvm.testing
import nnvm.compiler
import tvm
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_runtime as runtime

#################################################################
# Define network
# --------------
# First we need to define the network in the NNVM symbol API.
# We can load some pre-defined networks from :code:`nnvm.testing`.
# We can also load models from MXNet, ONNX and TensorFlow (see the NNVM
# tutorials :ref:`tutorial-nnvm` for more details).
#
# In this tutorial, we choose resnet-18 as the tuning example.

def get_network(name, batch_size):
    """Get the symbol definition and random weight of a network"""
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)

    if "resnet" in name:
        n_layer = int(name.split('-')[1])
        net, params = nnvm.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size)
    elif "vgg" in name:
        n_layer = int(name.split('-')[1])
        net, params = nnvm.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size)
    elif name == 'mobilenet':
        net, params = nnvm.testing.mobilenet.get_workload(batch_size=batch_size)
    elif name == 'squeezenet_v1.1':
        net, params = nnvm.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1')
    elif name == 'inception_v3':
        input_shape = (batch_size, 3, 299, 299)
        net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size)
    elif name == 'custom':
        # an example for custom network
        from nnvm.testing import utils
        net = nnvm.sym.Variable('data')
        net = nnvm.sym.conv2d(net, channels=4, kernel_size=(3, 3), padding=(1, 1))
        net = nnvm.sym.flatten(net)
        net = nnvm.sym.dense(net, units=1000)
        net, params = utils.create_workload(net, batch_size, (3, 224, 224))
    elif name == 'mxnet':
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model
        block = get_model('resnet18_v1', pretrained=True)
        net, params = nnvm.frontend.from_mxnet(block)
        net = nnvm.sym.softmax(net)
    else:
        raise ValueError("Unsupported network: " + name)

    return net, params, input_shape, output_shape
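
# Besides the pre-defined workloads above, a model can also be imported from
# an ONNX file through the NNVM frontend. A minimal sketch (the file name
# "model.onnx" is only a placeholder):
#
#   import onnx
#   onnx_model = onnx.load("model.onnx")
#   net, params = nnvm.frontend.from_onnx(onnx_model)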

# Replace "llvm" with the correct target for your CPU.
# For example, for an AWS EC2 c5 instance with Intel Xeon
# Platinum 8000 series, the target should be "llvm -mcpu=skylake-avx512".
# For an AWS EC2 c4 instance with Intel Xeon E5-2666 v3, it should be
# "llvm -mcpu=core-avx2".
target = "llvm"

batch_size = 1
dtype = "float32"
model_name = "resnet-18"
log_file = "%s.log" % model_name

# Set the number of threads used for tuning based on the number of
# physical CPU cores on your machine.
num_threads = 1
os.environ["TVM_NUM_THREADS"] = str(num_threads)
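
# To derive the thread count from the machine instead of hard-coding it, a
# minimal sketch is shown below (note that multiprocessing.cpu_count() reports
# logical cores, which may over-count physical cores on hyper-threaded CPUs):
#
#   import multiprocessing
#   num_threads = multiprocessing.cpu_count()
#   os.environ["TVM_NUM_THREADS"] = str(num_threads)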


#################################################################
# Configure tensor tuning settings and create tasks
# --------------------------------------------------
# To get better kernel execution performance on x86 CPU,
# we need to change the data layout of convolution kernels from
# "NCHW" to "NCHWc". To handle this, we define a conv2d_NCHWc
# operator in TOPI and tune it instead of the plain conv2d operator.
#
# We will use local mode for tuning configuration. RPC tracker
# mode can be set up similarly to the approach in the
# :ref:`tune_nnvm_arm` tutorial.
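#
# To make the layout change concrete, the small NumPy snippet below (plain
# NumPy, not TVM code; the inner block size of 16 is just an illustrative
# choice) shows how an NCHW tensor is viewed as NCHWc: the channel axis is
# split into outer blocks and a small contiguous inner block that maps well
# to the CPU's vector registers.

x_nchw = np.random.uniform(size=(1, 64, 56, 56)).astype("float32")
c_block = 16  # hypothetical inner channel block size
n, c, h, w = x_nchw.shape
# split C into C//c_block outer blocks and move the inner block innermost
x_nchwc = x_nchw.reshape(n, c // c_block, c_block, h, w).transpose(0, 1, 3, 4, 2)
print(x_nchwc.shape)  # (1, 4, 56, 56, 16)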

tuning_option = {
    'log_filename': log_file,
    'tuner': 'random',
    'early_stopping': None,

    'measure_option': autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        # each measurement averages `number` runs of the kernel; min_repeat_ms
        # keeps every repeat running for at least one second so that timings
        # of fast kernels stay stable
        runner=autotvm.LocalRunner(number=10, repeat=1,
                                   min_repeat_ms=1000),
    ),
}

# You can skip the implementation of this function for this tutorial.
def tune_kernels(tasks,
                 measure_option,
                 tuner='gridsearch',
                 early_stopping=None,
                 log_filename='tuning.log'):

    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i+1, len(tasks))

        # converting conv2d tasks to conv2d_NCHWc tasks
        op_name = tsk.workload[0]
        if op_name == 'conv2d':
            func_create = 'topi_x86_conv2d_NCHWc'
        elif op_name == 'depthwise_conv2d_nchw':
            func_create = 'topi_x86_depthwise_conv2d_NCHWc_from_nchw'
        else:
            raise ValueError("Tuning {} is not supported on x86".format(op_name))

        task = autotvm.task.create(func_create, args=tsk.args,
                                   target=target, template_key='direct')
        task.workload = tsk.workload

        # create tuner
        if tuner == 'xgb' or tuner == 'xgb-rank':
            tuner_obj = XGBTuner(task, loss_type='rank')
        elif tuner == 'ga':
            tuner_obj = GATuner(task, pop_size=50)
        elif tuner == 'random':
            tuner_obj = RandomTuner(task)
        elif tuner == 'gridsearch':
            tuner_obj = GridSearchTuner(task)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # do tuning
        n_trial = len(task.config_space)
        tuner_obj.tune(n_trial=n_trial,
                       early_stopping=early_stopping,
                       measure_option=measure_option,
                       callbacks=[
                           autotvm.callback.progress_bar(n_trial, prefix=prefix),
                           autotvm.callback.log_to_file(log_filename)])
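
# Note: exhausting the whole config space (n_trial = len(task.config_space)
# above) can take hours per task. For a quick sanity run you can cap the
# number of trials with a hypothetical limit, e.g.:
#
#   n_trial = min(len(task.config_space), 200)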


########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.

def tune_and_evaluate(tuning_opt):
    # extract workloads from nnvm graph
    print("Extract tasks...")
    net, params, data_shape, out_shape = get_network(model_name, batch_size)
    tasks = autotvm.task.extract_from_graph(net, target=target,
                                            shape={'data': data_shape}, dtype=dtype,
                                            symbols=(nnvm.sym.conv2d,))

    # run tuning tasks
    print("Tuning...")
    tune_kernels(tasks, **tuning_opt)

    # compile kernels with history best records
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(
                net, target=target, shape={'data': data_shape}, params=params, dtype=dtype)

        # upload parameters to device
        ctx = tvm.cpu()
        data_tvm = tvm.nd.array((np.random.uniform(size=data_shape)).astype(dtype))
        module = runtime.create(graph, lib, ctx)
        module.set_input('data', data_tvm)
        module.set_input(**params)

        # evaluate
        print("Evaluate inference time cost...")
        ftimer = module.module.time_evaluator("run", ctx, number=100, repeat=3)
        prof_res = np.array(ftimer().results) * 1000  # convert to milliseconds
        print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
              (np.mean(prof_res), np.std(prof_res)))

# We do not run the tuning on our web server since it takes too long.
# Uncomment the following line to run it by yourself.

# tune_and_evaluate(tuning_option)
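
# After tuning finishes, the log file contains every configuration that was
# measured. If you only want to keep the best record per workload, a minimal
# sketch (the output file name is just an example) is to uncomment:

# autotvm.record.pick_best(log_file, "%s_best.log" % model_name)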

######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract features from them,
# so a high-performance CPU is recommended.
# One sample output is listed below.
#
# .. code-block:: bash
#
#    Extract tasks...
#    Tuning...
#    [Task  1/12]  Current/Best:  598.05/2497.63 GFLOPS | Progress: (252/252) | 1357.95 s Done.
#    [Task  2/12]  Current/Best:  522.63/2279.24 GFLOPS | Progress: (784/784) | 3989.60 s Done.
#    [Task  3/12]  Current/Best:  447.33/1927.69 GFLOPS | Progress: (784/784) | 3869.14 s Done.
#    [Task  4/12]  Current/Best:  481.11/1912.34 GFLOPS | Progress: (672/672) | 3274.25 s Done.
#    [Task  5/12]  Current/Best:  414.09/1598.45 GFLOPS | Progress: (672/672) | 2720.78 s Done.
#    [Task  6/12]  Current/Best:  508.96/2273.20 GFLOPS | Progress: (768/768) | 3718.75 s Done.
#    [Task  7/12]  Current/Best:  469.14/1955.79 GFLOPS | Progress: (576/576) | 2665.67 s Done.
#    [Task  8/12]  Current/Best:  230.91/1658.97 GFLOPS | Progress: (576/576) | 2435.01 s Done.
#    [Task  9/12]  Current/Best:  487.75/2295.19 GFLOPS | Progress: (648/648) | 3009.95 s Done.
#    [Task 10/12]  Current/Best:  182.33/1734.45 GFLOPS | Progress: (360/360) | 1755.06 s Done.
#    [Task 11/12]  Current/Best:  372.18/1745.15 GFLOPS | Progress: (360/360) | 1684.50 s Done.
#    [Task 12/12]  Current/Best:  215.34/2271.11 GFLOPS | Progress: (400/400) | 2128.74 s Done.
#    Compile...
#    Evaluate inference time cost...
#    Mean inference time (std dev): 3.16 ms (0.03 ms)