blob: f202521e8199f6aa477c0a65765dfc5ff8c1f3f2 [file] [log] [blame]
{% set test_method_names = [] %}
// We have to split this code out from where it is used because the JVM caps
// method length at 64k and we easily exceed that with all this autogenerated
// code. Putting each test step in its own method keeps any individual method
// from getting too big.
// Stage "unittest: GPU": GPU unit tests, split into 3 shards on GPU nodes.
// m.sharded_test_step generates one Groovy method per shard and appends
// (stage name, method name) pairs to test_method_names, which test() below
// consumes to build its parallel() map.
{% call(shard_index, num_shards) m.sharded_test_step(
name="unittest: GPU",
num_shards=3,
node="GPU",
ws="tvm/ut-python-gpu",
platform="gpu",
docker_image="ci_gpu",
test_method_names=test_method_names,
) %}
{% if shard_index == 1 %}
// Shard 1 deliberately runs cpp_unittest twice: once against the 'gpu2'
// artifacts (presumably a second GPU build variant — confirm against the
// build stage) and once against the regular 'gpu' build.
{{ m.download_artifacts(tag='gpu2', filenames=tvm_multilib) }}
cpp_unittest(ci_gpu)
{{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }}
ci_setup(ci_gpu)
cpp_unittest(ci_gpu)
{% else %}
{{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }}
ci_setup(ci_gpu)
{% endif %}
// Java unit tests run on exactly one shard: shard 2, or shard 1 when the
// step is effectively unsharded (num_shards < 2).
{% if shard_index == 2 or num_shards < 2 %}
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh",
label: 'Run Java unit tests',
)
{% endif %}
// Every shard runs the Python GPU unit and integration test scripts.
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh",
label: 'Run Python GPU unit tests',
)
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh",
label: 'Run Python GPU integration tests',
)
{% endcall %}
// Stage "integration: CPU": Python integration tests on small CPU nodes,
// split into 6 shards via m.sharded_test_step (which records the generated
// method in test_method_names for test() to invoke).
{% call(shard_index, num_shards) m.sharded_test_step(
name="integration: CPU",
num_shards=6,
platform="cpu",
docker_image="ci_cpu",
node="CPU-SMALL",
ws="tvm/integration-python-cpu",
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag="cpu", filenames=tvm_multilib_tsim) }}
ci_setup(ci_cpu)
sh (
script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh",
label: 'Run CPU integration tests',
)
{% endcall %}
// Stage "python: i386": 32-bit (i386) unit and integration tests, split
// into 5 shards on small CPU nodes.
{% call(shard_index, num_shards) m.sharded_test_step(
name="python: i386",
node="CPU-SMALL",
num_shards=5,
ws="tvm/integration-python-i386",
platform="i386",
docker_image="ci_i386",
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag='i386', filenames=tvm_multilib) }}
ci_setup(ci_i386)
// C++ unit tests run only on shard 1 (not spread across shards).
{% if shard_index == 1 %}
cpp_unittest(ci_i386)
{% endif %}
python_unittest(ci_i386)
sh (
script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh",
label: 'Run i386 integration tests',
)
// fsim tests run on exactly one shard: shard 2, or shard 1 when unsharded.
{% if shard_index == 2 or num_shards < 2 %}
fsim_test(ci_i386)
{% endif %}
{% endcall %}
// Stage "test: Hexagon": Hexagon target tests, split into 7 shards on small
// CPU nodes.
{% call(shard_index, num_shards) m.sharded_test_step(
name="test: Hexagon",
node="CPU-SMALL",
ws="tvm/test-hexagon",
platform="hexagon",
docker_image="ci_hexagon",
test_method_names=test_method_names,
num_shards=7,
) %}
// Hexagon also needs the hexagon_api artifact folders, not just the lib.
{{ m.download_artifacts(tag='hexagon', filenames=tvm_lib, folders=hexagon_api) }}
add_hexagon_permissions()
ci_setup(ci_hexagon)
// C++ unit tests run only on shard 1 (not spread across shards).
{% if shard_index == 1 %}
cpp_unittest(ci_hexagon)
{% endif %}
sh (
script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
label: 'Run Hexagon tests',
)
{% endcall %}
// Stage "integration: aarch64": Python unit + CPU integration tests on ARM
// nodes, split into 4 shards via m.sharded_test_step.
{% call(shard_index, num_shards) m.sharded_test_step(
name="integration: aarch64",
platform="arm",
docker_image="ci_arm",
node="ARM-SMALL",
ws="tvm/ut-python-arm",
num_shards=4,
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag="arm", filenames=tvm_multilib) }}
ci_setup(ci_arm)
python_unittest(ci_arm)
sh (
script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh",
label: 'Run CPU integration tests',
)
{% endcall %}
// Stage "topi: GPU": TOPI operator tests on GPU nodes, split into 4 shards
// via m.sharded_test_step.
{% call(shard_index, num_shards) m.sharded_test_step(
name="topi: GPU",
platform="gpu",
docker_image="ci_gpu",
node="GPU",
ws="tvm/topi-python-gpu",
num_shards=4,
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag="gpu", filenames=tvm_multilib) }}
ci_setup(ci_gpu)
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh",
label: 'Run TOPI tests',
)
{% endcall %}
// Stage "frontend: GPU": Python frontend tests on GPU nodes, split into
// 6 shards via m.sharded_test_step.
{% call(shard_index, num_shards) m.sharded_test_step(
name="frontend: GPU",
platform="gpu",
docker_image="ci_gpu",
node="GPU",
ws="tvm/frontend-python-gpu",
num_shards=6,
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag="gpu", filenames=tvm_multilib) }}
ci_setup(ci_gpu)
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
label: 'Run Python frontend tests',
)
{% endcall %}
// Stage "topi: aarch64": Arm Compute Library and TOPI tests on ARM nodes,
// split into 2 shards.
// NOTE(review): this reuses workspace "tvm/ut-python-arm", same as the
// "integration: aarch64" stage — confirm that is intentional.
{% call(shard_index, num_shards) m.sharded_test_step(
name="topi: aarch64",
node="ARM-SMALL",
ws="tvm/ut-python-arm",
platform="arm",
docker_image="ci_arm",
num_shards=2,
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }}
ci_setup(ci_arm)
// C++ unit tests run only on shard 1 (not spread across shards).
{% if shard_index == 1 %}
cpp_unittest(ci_arm)
{% endif %}
sh (
script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh",
label: 'Run test_arm_compute_lib test',
)
sh (
script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh",
label: 'Run TOPI tests',
)
{% endcall %}
// Stage "frontend: aarch64": Python frontend tests on ARM nodes, split into
// 2 shards via m.sharded_test_step.
{% call(shard_index, num_shards) m.sharded_test_step(
name="frontend: aarch64",
platform="arm",
docker_image="ci_arm",
node="ARM-SMALL",
ws="tvm/frontend-python-arm",
num_shards=2,
test_method_names=test_method_names,
) %}
{{ m.download_artifacts(tag="arm", filenames=tvm_multilib) }}
ci_setup(ci_arm)
sh (
script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh",
label: 'Run Python frontend tests',
)
{% endcall %}
// test(): defines the top-level 'Test' stage. Its parallel() map contains
// one entry per sharded test method recorded in test_method_names (generated
// by the m.sharded_test_step calls above), plus the inline single-node
// stages below.
def test() {
stage('Test') {
// NOTE(review): environment { } is Declarative Pipeline syntax; inside a
// Scripted-style function like this it may not actually export
// SKIP_SLOW_TESTS — confirm, or consider wrapping parallel() in withEnv().
environment {
SKIP_SLOW_TESTS = "${skip_slow_tests}"
}
parallel(
// One 'stage name' -> closure entry per generated sharded test method.
{% for stage_name, method_name in test_method_names %}
'{{ stage_name }}': {
{{ method_name }}()
},
{% endfor %}
// Stage "unittest: CPU": unsharded C++/Python/fsim unit tests on CPU.
{% call m.test_step(
name="unittest: CPU",
node="CPU-SMALL",
ws="tvm/ut-python-cpu",
platform="cpu",
docker_image="ci_cpu",
) %}
{{ m.download_artifacts(tag='cpu', filenames=tvm_multilib_tsim) }}
ci_setup(ci_cpu)
cpp_unittest(ci_cpu)
python_unittest(ci_cpu)
fsim_test(ci_cpu)
// sh (
// script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh",
// label: 'Run VTA tests in TSIM',
// )
{% endcall %}
// Stage "test: QEMU": microTVM tests and demos under QEMU.
{% call m.test_step(
name="test: QEMU",
node="CPU-SMALL",
ws="tvm/test-qemu",
platform="qemu",
docker_image="ci_qemu",
) %}
{{ m.download_artifacts(tag='qemu', filenames=tvm_lib, folders=microtvm_template_projects) }}
add_microtvm_permissions()
ci_setup(ci_qemu)
cpp_unittest(ci_qemu)
sh (
script: "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh",
label: 'Run microTVM tests',
)
sh (
script: "${docker_run} ${ci_qemu} ./tests/scripts/task_demo_microtvm.sh",
label: 'Run microTVM demos',
)
{% endcall %}
// Stage "frontend: CPU": Python frontend tests on CPU.
{% call m.test_step(
name="frontend: CPU",
node="CPU-SMALL",
ws="tvm/frontend-python-cpu",
platform="cpu",
docker_image="ci_cpu",
) %}
{{ m.download_artifacts(tag='cpu', filenames=tvm_multilib) }}
ci_setup(ci_cpu)
sh (
script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh",
label: 'Run Python frontend tests',
)
{% endcall %}
// Stage "docs: GPU": written inline rather than via test_step because it
// also uploads the built docs as an artifact and to S3.
'docs: GPU': {
if (!skip_ci) {
node('GPU') {
ws({{ m.per_exec_ws('tvm/docs-python-gpu') }}) {
docker_init(ci_gpu)
init_git()
{{ m.download_artifacts(tag='gpu', filenames=tvm_multilib, folders=microtvm_template_projects) }}
add_microtvm_permissions()
// Docs builds are slow; allow up to 3 hours before timing out.
timeout(time: 180, unit: 'MINUTES') {
ci_setup(ci_gpu)
sh (
script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh",
label: 'Build docs',
)
}
{{ m.upload_artifacts(tag='docs', filenames=["docs.tgz"]) }}
sh(
script: "aws s3 cp --no-progress _docs s3://${s3_prefix}/docs --recursive",
label: 'Upload docs to S3',
)
}
}
}
},
)
}
}