| #!groovy |
| // -*- mode: groovy -*- |
| |
| // Licensed to the Apache Software Foundation (ASF) under one |
| // or more contributor license agreements. See the NOTICE file |
| // distributed with this work for additional information |
| // regarding copyright ownership. The ASF licenses this file |
| // to you under the Apache License, Version 2.0 (the |
| // "License"); you may not use this file except in compliance |
| // with the License. You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, |
| // software distributed under the License is distributed on an |
| // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| // KIND, either express or implied. See the License for the |
| // specific language governing permissions and limitations |
| // under the License. |
| |
| // Jenkins pipeline |
| // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ |
| |
| // Docker env used for testing |
| // Different image may have different version tag |
// because some of them are more stable than others.
| // |
| // Docker images are maintained by PMC, cached in dockerhub |
| // and remains relatively stable over the time. |
// Flow for upgrading the docker env (requires committer access):
| // |
| // - Send PR to upgrade build script in the repo |
| // - Build the new docker image |
| // - Tag the docker image with a new version and push to a binary cache. |
| // - Update the version in the Jenkinsfile, send a PR |
| // - Fix any issues wrt to the new image version in the PR |
| // - Merge the PR and now we are in new version |
// - Tag the new version as the latest
| // - Periodically cleanup the old versions on local workers |
| // |
| |
| // ============================= IMPORTANT NOTE ============================= |
| // This file is generated by 'jenkins/generate.py'. Do not edit this file directly! |
| // Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with |
| // 'python3 jenkins/generate.py' |
| // Note: This timestamp is here to ensure that updates to the Jenkinsfile are |
| // always rebased on main before merging: |
| // Generated at 2022-10-05T19:59:41.371226 |
| |
| import org.jenkinsci.plugins.pipeline.modeldefinition.Utils |
// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
// Default Docker image for each CI job. Tags appear to follow
// <build date>-<build time>-<git sha> — confirm against the image build job.
// These defaults may be overridden per-build via the *_param Jenkins
// parameters or the DETERMINE_DOCKER_IMAGES flow in prepare().
ci_lint = 'tlcpack/ci-lint:20220908-060034-62bdc91b1'
ci_gpu = 'tlcpack/ci-gpu:20220908-060034-62bdc91b1'
ci_cpu = 'tlcpack/ci-cpu:20220908-060034-62bdc91b1'
ci_minimal = 'tlcpack/ci-minimal:20220908-060034-62bdc91b1'
ci_wasm = 'tlcpack/ci-wasm:20220908-060034-62bdc91b1'
ci_i386 = 'tlcpack/ci-i386:20220908-060034-62bdc91b1'
ci_cortexm = 'tlcpack/ci-cortexm:20220909-090211-cb08a1251'
ci_arm = 'tlcpack/ci-arm:20220908-060034-62bdc91b1'
ci_hexagon = 'tlcpack/ci-hexagon:20220908-060034-62bdc91b1'
ci_riscv = 'tlcpack/ci-riscv:20220908-060034-62bdc91b1'
// <--- End of regex-scanned config.
| |
// Parameters to allow overriding (in Jenkins UI), the images
// to be used by a given build. When provided, they take precedence
// over default values above.
// An empty default means "use the hard-coded image name"; see the
// `params.ci_*_param ?: ci_*` fallbacks in prepare().
properties([
  parameters([
    string(name: 'ci_arm_param', defaultValue: ''),
    string(name: 'ci_cortexm_param', defaultValue: ''),
    string(name: 'ci_cpu_param', defaultValue: ''),
    string(name: 'ci_gpu_param', defaultValue: ''),
    string(name: 'ci_hexagon_param', defaultValue: ''),
    string(name: 'ci_i386_param', defaultValue: ''),
    string(name: 'ci_lint_param', defaultValue: ''),
    string(name: 'ci_minimal_param', defaultValue: ''),
    string(name: 'ci_riscv_param', defaultValue: ''),
    string(name: 'ci_wasm_param', defaultValue: ''),
  ])
])
| |
// Placeholders for newly built Docker image names (if rebuild_docker_images
// is used). Set by build_docker_images(); null means "no freshly built image".
built_ci_arm = null;
built_ci_cortexm = null;
built_ci_cpu = null;
built_ci_gpu = null;
built_ci_hexagon = null;
built_ci_i386 = null;
built_ci_lint = null;
built_ci_minimal = null;
built_ci_riscv = null;
built_ci_wasm = null;

// Global variable assigned during Sanity Check that holds the sha1 which should be
// merged into the PR in all branches. Resolved once in init_git() so every
// stage merges the same upstream commit.
upstream_revision = null

// command to start a docker container; the --env flags forward these host
// environment variables into the container.
docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME'
// command used by build_image() to build a CI Docker image
docker_build = 'docker/build.sh'
// timeout in minutes, applied to each build/test step wrapped in timeout()
max_time = 180
// Whether this build should rebuild the Docker images; decided in prepare().
rebuild_docker_images = false

// Filenames for stashing between build and test steps
// (S3 key prefix unique to this branch + build number).
s3_prefix = "tvm-jenkins-artifacts-prod/tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}"
| |
| |
| // General note: Jenkins has limits on the size of a method (or top level code) |
| // that are pretty strict, so most usage of groovy methods in these templates |
| // are purely to satisfy the JVM |
// Build a workspace path that is unique per Jenkins executor slot, so
// concurrent builds on the same node do not share a working directory.
def per_exec_ws(folder) {
  return "workspace/exec_${env.EXECUTOR_NUMBER}/${folder}"
}
| |
| // initialize source codes |
// Check out the source tree and prepare it for a CI stage.
//
// Side effects, in order:
//  - checks out the commit under test (`checkout scm`)
//  - prints diagnostics about the executor node
//  - fetches the upstream branch and, on first call only, records its head
//    sha in the global `upstream_revision` so every stage merges the same sha
//  - merges that upstream revision into the checkout
//  - initializes git submodules (with retries)
//  - re-checks-out trusted scripts for PR builds (see checkout_trusted_files)
def init_git() {
  checkout scm

  // Add more info about job node
  sh (
    script: './tests/scripts/task_show_node_info.sh',
    label: 'Show executor node info',
  )

  // Determine merge commit to use for all stages
  sh (
    script: 'git fetch origin v0.10.0',
    label: 'Fetch upstream',
  )
  // Resolve FETCH_HEAD only once per build; later calls reuse the cached sha
  // even if the upstream branch has moved in the meantime.
  if (upstream_revision == null) {
    upstream_revision = sh(
      script: 'git log -1 FETCH_HEAD --format=\'%H\'',
      label: 'Determine upstream revision',
      returnStdout: true,
    ).trim()
  }
  sh (
    script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}",
    label: 'Merge to origin/v0.10.0'
  )

  sh(
    script: """
      set -eux
      . ci/scripts/retry.sh
      retry 3 timeout 5m git submodule update --init -f --jobs 0
    """,
    label: 'Update git submodules',
  )
  checkout_trusted_files()
}
| |
// Prepare a node to run `image`: remove every other cached Docker image on
// the host (to reclaim disk), then pull `image` (from ECR when the name
// indicates an ECR registry, otherwise from Docker Hub with retries).
def docker_init(image) {
  // Clear out all Docker images that aren't going to be used.
  // The `|| test $? = N` guards tolerate grep finding no matches (exit 1)
  // and xargs partial failures (exit 123) without failing the step.
  sh(
    script: """
      set -eux
      docker image ls --all
      IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}')

      echo -e "Found images:\\n\$IMAGES"
      echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } 

      docker image ls --all
    """,
    label: 'Clean old Docker images',
  )

  if (image.contains("amazonaws.com")) {
    // If this string is in the image name it's from ECR and needs to be pulled
    // with the right credentials
    ecr_pull(image)
  } else {
    sh(
      script: """
        set -eux
        . ci/scripts/retry.sh
        retry 5 docker pull ${image}
      """,
      label: 'Pull docker image',
    )
  }
}
| |
// Ask ci/scripts/should_run_slow_tests.py whether slow tests can be skipped
// for this PR. Returns true to skip slow tests, false to run them.
// `result` is intentionally unscoped (script-global binding) so it survives
// the withCredentials closure.
def should_skip_slow_tests(pr_number) {
  withCredentials([string(
    credentialsId: 'tvm-bot-jenkins-reader',
    variable: 'GITHUB_TOKEN',
  )]) {
    // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests
    result = sh (
      returnStatus: true,
      script: "./ci/scripts/should_run_slow_tests.py --pr '${pr_number}'",
      label: 'Check if CI should run slow tests',
    )
  }
  return result == 0
}
| |
// Abort any still-running older build of the same job via the Milestone API,
// except on the release branch where every build is allowed to finish.
def cancel_previous_build() {
  // Release-branch builds are never cancelled.
  if (env.BRANCH_NAME == 'v0.10.0') {
    return
  }
  int build_num = env.BUILD_NUMBER as int
  // Passing the previous build's milestone causes Jenkins to abort it;
  // then register this build's own milestone.
  if (build_num > 1) {
    milestone(build_num - 1)
  }
  milestone(build_num)
}
| |
// For PR builds from non-contributors, replace ci/scripts/ with the trusted
// versions from the upstream revision, so scripts that run on the bare host
// (and may access secrets) cannot be tampered with by the PR.
def checkout_trusted_files() {
  // trust everything from branch builds
  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
    return;
  }

  // trust people listed in CONTRIBUTORS.md
  grep_code = sh(
    returnStatus: true,
    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
    label: 'Check if change is from a contributor',
  )

  // grep exits 1 when the author was NOT found in CONTRIBUTORS.md.
  if (grep_code == 1) {
    // Any scripts that run on the bare host and not inside a Docker container
    // (especially those that access secrets) should be checked out here so
    // only trusted versions are used in CI
    sh(
      script: "git checkout ${upstream_revision} ci/scripts/.",
      label: 'Check out trusted files',
    )
  }
}
| |
// Decide whether CI can be skipped entirely for this PR.
// Returns true when either the changed-file globs say nothing relevant
// changed, or the per-PR skip script says so. Branch builds never skip.
def should_skip_ci(pr_number) {
  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
    // never skip CI on build sourced from a branch
    return false
  }
  // Exit code 0 from the glob check means all changed files are ignorable.
  glob_skip_ci_code = sh (
    returnStatus: true,
    script: "./ci/scripts/git_skip_ci_globs.py",
    label: 'Check if CI should be skipped due to changed files',
  )
  if (glob_skip_ci_code == 0) {
    return true
  }
  withCredentials([string(
    credentialsId: 'tvm-bot-jenkins-reader',
    variable: 'GITHUB_TOKEN',
  )]) {
    // Exit code of 1 means run full CI (or the script had an error, so run
    // full CI just in case). Exit code of 0 means skip CI.
    git_skip_ci_code = sh (
      returnStatus: true,
      script: "./ci/scripts/git_skip_ci.py --pr '${pr_number}'",
      label: 'Check if CI should be skipped',
    )
  }
  return git_skip_ci_code == 0
}
| |
// Validate the PR title and body via ci/scripts/check_pr.py.
// No-op (returns false) for branch builds, which have no PR to check.
// The script's non-zero exit fails this step (sh without returnStatus).
def check_pr(pr_number) {
  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
    // nothing to validate on builds sourced from a branch
    return false
  }
  withCredentials([string(
    credentialsId: 'tvm-bot-jenkins-reader',
    variable: 'GITHUB_TOKEN',
  )]) {
    sh (
      script: "python3 ci/scripts/check_pr.py --pr ${pr_number}",
      label: 'Check PR title and body',
    )
  }

}
| |
// 'Prepare' stage: resolve which Docker images to use and compute the global
// skip flags that gate every later stage.
//
// Mutates globals: ci_* image names, is_docs_only_build, skip_ci,
// skip_slow_tests, rebuild_docker_images.
def prepare() {
  stage('Prepare') {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
        init_git()

        check_pr(env.CHANGE_ID)

        // Optionally let a script decide between tlcpack and tlcpackstaging
        // images; it writes its answers to .docker-image-names/.
        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
          sh(
            script: "./ci/scripts/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
          )
          // Pull image names from the results of should_rebuild_docker.py
          ci_arm = sh(
            script: "cat .docker-image-names/ci_arm",
            label: "Find docker image name for ci_arm",
            returnStdout: true,
          ).trim()
          ci_cortexm = sh(
            script: "cat .docker-image-names/ci_cortexm",
            label: "Find docker image name for ci_cortexm",
            returnStdout: true,
          ).trim()
          ci_cpu = sh(
            script: "cat .docker-image-names/ci_cpu",
            label: "Find docker image name for ci_cpu",
            returnStdout: true,
          ).trim()
          ci_gpu = sh(
            script: "cat .docker-image-names/ci_gpu",
            label: "Find docker image name for ci_gpu",
            returnStdout: true,
          ).trim()
          ci_hexagon = sh(
            script: "cat .docker-image-names/ci_hexagon",
            label: "Find docker image name for ci_hexagon",
            returnStdout: true,
          ).trim()
          ci_i386 = sh(
            script: "cat .docker-image-names/ci_i386",
            label: "Find docker image name for ci_i386",
            returnStdout: true,
          ).trim()
          ci_lint = sh(
            script: "cat .docker-image-names/ci_lint",
            label: "Find docker image name for ci_lint",
            returnStdout: true,
          ).trim()
          ci_minimal = sh(
            script: "cat .docker-image-names/ci_minimal",
            label: "Find docker image name for ci_minimal",
            returnStdout: true,
          ).trim()
          ci_riscv = sh(
            script: "cat .docker-image-names/ci_riscv",
            label: "Find docker image name for ci_riscv",
            returnStdout: true,
          ).trim()
          ci_wasm = sh(
            script: "cat .docker-image-names/ci_wasm",
            label: "Find docker image name for ci_wasm",
            returnStdout: true,
          ).trim()
        }

        // Jenkins UI parameters take precedence over everything above.
        ci_arm = params.ci_arm_param ?: ci_arm
        ci_cortexm = params.ci_cortexm_param ?: ci_cortexm
        ci_cpu = params.ci_cpu_param ?: ci_cpu
        ci_gpu = params.ci_gpu_param ?: ci_gpu
        ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
        ci_i386 = params.ci_i386_param ?: ci_i386
        ci_lint = params.ci_lint_param ?: ci_lint
        ci_minimal = params.ci_minimal_param ?: ci_minimal
        ci_riscv = params.ci_riscv_param ?: ci_riscv
        ci_wasm = params.ci_wasm_param ?: ci_wasm

        sh (script: """
          echo "Docker images being used in this build:"
          echo " ci_arm = ${ci_arm}"
          echo " ci_cortexm = ${ci_cortexm}"
          echo " ci_cpu = ${ci_cpu}"
          echo " ci_gpu = ${ci_gpu}"
          echo " ci_hexagon = ${ci_hexagon}"
          echo " ci_i386 = ${ci_i386}"
          echo " ci_lint = ${ci_lint}"
          echo " ci_minimal = ${ci_minimal}"
          echo " ci_riscv = ${ci_riscv}"
          echo " ci_wasm = ${ci_wasm}"
        """, label: 'Docker image names')

        // Note: exit status (1 = not docs-only), compared against 1 below.
        is_docs_only_build = sh (
          returnStatus: true,
          script: './ci/scripts/git_change_docs.sh',
          label: 'Check for docs only changes',
        )
        skip_ci = should_skip_ci(env.CHANGE_ID)
        skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
        rebuild_docker_images = sh (
          returnStatus: true,
          script: './ci/scripts/git_change_docker.sh',
          label: 'Check for any docker changes',
        )

        if (skip_ci) {
          // Don't rebuild when skipping CI
          rebuild_docker_images = false
        }
      }
    }
  }
}
// Tag the locally built image `full_name` for this account's us-west-2 ECR
// registry and push it (with retries). Always logs out of ECR afterwards,
// even on failure. Returns the fully qualified ECR image name.
def ecr_push(full_name) {
  aws_account_id = sh(
    returnStdout: true,
    script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
    label: 'Get AWS ID'
  ).trim()

  def ecr_name = "${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com/${full_name}"
  try {
    withEnv([
      "AWS_ACCOUNT_ID=${aws_account_id}",
      'AWS_DEFAULT_REGION=us-west-2',
      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
      sh(
        script: '''
          set -eux
          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
        ''',
        label: 'Log in to ECR'
      )
      sh(
        script: """
          set -x
          . ci/scripts/retry.sh
          docker tag ${full_name} \$AWS_ECR_REPO/${full_name}
          retry 5 docker push \$AWS_ECR_REPO/${full_name}
        """,
        label: 'Upload image to ECR'
      )
    }
  } finally {
    // Drop the login credentials regardless of push success.
    withEnv([
      "AWS_ACCOUNT_ID=${aws_account_id}",
      'AWS_DEFAULT_REGION=us-west-2',
      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
      sh(
        script: 'docker logout $AWS_ECR_REPO',
        label: 'Clean up login credentials'
      )
    }
  }
  return ecr_name
}
| |
// Pull the already-qualified ECR image `full_name` (with retries), logging in
// to this account's us-west-2 ECR registry first and always logging out
// afterwards, even on failure.
def ecr_pull(full_name) {
  aws_account_id = sh(
    returnStdout: true,
    script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
    label: 'Get AWS ID'
  ).trim()

  try {
    withEnv([
      "AWS_ACCOUNT_ID=${aws_account_id}",
      'AWS_DEFAULT_REGION=us-west-2',
      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
      sh(
        script: '''
          set -eux
          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
        ''',
        label: 'Log in to ECR'
      )
      sh(
        script: """
          set -eux
          . ci/scripts/retry.sh
          retry 5 docker pull ${full_name}
        """,
        label: 'Pull image from ECR'
      )
    }
  } finally {
    // Drop the login credentials regardless of pull success.
    withEnv([
      "AWS_ACCOUNT_ID=${aws_account_id}",
      'AWS_DEFAULT_REGION=us-west-2',
      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
      sh(
        script: 'docker logout $AWS_ECR_REPO',
        label: 'Clean up login credentials'
      )
    }
  }
}
| |
// Build the Docker image `image_name` via docker/build.sh, tag it with
// branch, short commit hash and build number, then push it to ECR.
// Returns the fully qualified ECR image name from ecr_push().
def build_image(image_name) {
  // Short sha of the checked-out commit. `hash` is a script-global binding
  // (no `def`), kept that way deliberately.
  hash = sh(
    script: 'git log -1 --format=\'%h\'',
    returnStdout: true
  ).trim()
  def tagged_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
  sh(
    label: 'Build docker image',
    script: "${docker_build} ${image_name} --spec ${tagged_name}"
  )
  return ecr_push(tagged_name)
}
| |
| |
// 'Docker Image Build' stage: rebuild every CI image in parallel and record
// the resulting ECR names in the built_ci_* globals. Only ci_arm builds on an
// ARM node; everything else builds on CPU nodes.
def build_docker_images() {
  stage('Docker Image Build') {
    parallel(
      'ci_arm': {
        node('ARM') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_arm = build_image('ci_arm')
            built_ci_arm = build_image('ci_arm');
          }
        }
      },
      'ci_cortexm': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_cortexm = build_image('ci_cortexm')
            built_ci_cortexm = build_image('ci_cortexm');
          }
        }
      },
      'ci_cpu': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_cpu = build_image('ci_cpu')
            built_ci_cpu = build_image('ci_cpu');
          }
        }
      },
      'ci_gpu': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_gpu = build_image('ci_gpu')
            built_ci_gpu = build_image('ci_gpu');
          }
        }
      },
      'ci_hexagon': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_hexagon = build_image('ci_hexagon')
            built_ci_hexagon = build_image('ci_hexagon');
          }
        }
      },
      'ci_i386': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_i386 = build_image('ci_i386')
            built_ci_i386 = build_image('ci_i386');
          }
        }
      },
      'ci_lint': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_lint = build_image('ci_lint')
            built_ci_lint = build_image('ci_lint');
          }
        }
      },
      'ci_minimal': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_minimal = build_image('ci_minimal')
            built_ci_minimal = build_image('ci_minimal');
          }
        }
      },
      'ci_riscv': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_riscv = build_image('ci_riscv')
            built_ci_riscv = build_image('ci_riscv');
          }
        }
      },
      'ci_wasm': {
        node('CPU') {
          timeout(time: max_time, unit: 'MINUTES') {
            init_git()
            // We're purposefully not setting the built image here since they
            // are not yet being uploaded to tlcpack
            // ci_wasm = build_image('ci_wasm')
            built_ci_wasm = build_image('ci_wasm');
          }
        }
      },
    )
  }
}
// 'Lint' stage: run tests/scripts/task_lint.sh split into two shards that
// execute in parallel. Sharding is communicated to the script via the
// TVM_NUM_SHARDS / TVM_SHARD_INDEX environment variables.
def lint() {
  stage('Lint') {
    parallel(
      'Lint 1 of 2': {
        node('CPU-SMALL') {
          ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") {
            init_git()
            docker_init(ci_lint)
            timeout(time: max_time, unit: 'MINUTES') {
              withEnv([
                'TVM_NUM_SHARDS=2',
                'TEST_STEP_NAME=Lint',
                'TVM_SHARD_INDEX=0',
                "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
                sh (
                  script: "${docker_run} ${ci_lint}  ./tests/scripts/task_lint.sh",
                  label: 'Run lint',
                )
              })
            }
          }
        }
      },
      'Lint 2 of 2': {
        node('CPU-SMALL') {
          ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") {
            init_git()
            docker_init(ci_lint)
            timeout(time: max_time, unit: 'MINUTES') {
              withEnv([
                'TVM_NUM_SHARDS=2',
                'TEST_STEP_NAME=Lint',
                'TVM_SHARD_INDEX=1',
                "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
                sh (
                  script: "${docker_run} ${ci_lint}  ./tests/scripts/task_lint.sh",
                  label: 'Run lint',
                )
              })
            }
          }
        }
      },
    )
  }
}
// Run the per-container CI environment setup script inside `image`.
def ci_setup(image) {
  sh(
    label: 'Set up CI environment',
    script: "${docker_run} ${image} ./tests/scripts/task_ci_setup.sh",
  )
}
| |
// Run the Python unit-test suite inside `image`.
def python_unittest(image) {
  sh(
    label: 'Run Python unit tests',
    script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh",
  )
}
| |
// Intentionally a no-op: the VTA FSIM tests are disabled but the call sites
// and signature are kept so they can be re-enabled by uncommenting below.
def fsim_test(image) {
  // sh (
  //   script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh",
  //   label: 'Run VTA tests in FSIM',
  // )
}
| |
// Run the cmake build inside `image` via task_build.py (sccache-backed).
// NOTE(review): `path` and `make_flag` are currently unused here — the build
// script decides those itself — but the parameters are kept so existing
// make()/call sites keep working.
def cmake_build(image, path, make_flag) {
  sh (
    script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod",
    label: 'Run cmake build',
  )
}
| |
// Build and run the C++ unit tests inside `image`.
def cpp_unittest(image) {
  sh(
    label: 'Build and run C++ tests',
    script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh",
  )
}
| |
// Restore the execute bit on microTVM qemu-hack helper files (S3 downloads
// do not preserve file permissions).
def add_microtvm_permissions() {
  def chmod_cmd = 'find build/microtvm_template_projects -type f | grep qemu-hack | xargs chmod +x'
  sh(
    label: 'Add execute permissions for microTVM files',
    script: chmod_cmd,
  )
}
| |
// Restore the execute bit on Hexagon API output files (S3 downloads do not
// preserve file permissions).
def add_hexagon_permissions() {
  def chmod_cmd = 'find build/hexagon_api_output -type f | xargs chmod +x'
  sh(
    label: 'Add execute permissions for hexagon files',
    script: chmod_cmd,
  )
}
| |
| // Run make. First try to do an incremental make from a previous workspace in hope to |
| // accelerate the compilation. If something is wrong, clean the workspace and then |
| // build from scratch. |
| def make(docker_type, path, make_flag) { |
| timeout(time: max_time, unit: 'MINUTES') { |
| try { |
| cmake_build(docker_type, path, make_flag) |
| } catch (hudson.AbortException ae) { |
| // script exited due to user abort, directly throw instead of retry |
| if (ae.getMessage().contains('script returned exit code 143')) { |
| throw ae |
| } |
| echo 'Incremental compilation failed. Fall back to build from scratch' |
| sh ( |
| script: "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}", |
| label: 'Clear old cmake workspace', |
| ) |
| cmake_build(docker_type, path, make_flag) |
| } |
| } |
| } |
| |
| |
// 'Build' stage: compile TVM for every platform in parallel and upload the
// resulting artifacts (libtvm*.so, config.cmake, platform extras) to S3 under
// ${s3_prefix}/<platform>/ for the downstream test stages to download.
// Each non-GPU branch is skipped for docs-only builds (is_docs_only_build is
// the exit status of git_change_docs.sh; 1 means "not docs-only").
// NOTE(review): the GPU branch has no markStageSkippedForConditional else
// arm, unlike the others — presumably intentional since GPU artifacts are
// needed for the docs build; confirm against Jenkinsfile.j2.
def build() {
  stage('Build') {
    // NOTE(review): `environment { ... }` is a declarative-pipeline
    // directive; its effect inside this scripted stage is unclear — confirm.
    environment {
      SKIP_SLOW_TESTS = "${skip_slow_tests}"
    }
    parallel(
      'BUILD: GPU': {
        if (!skip_ci) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-gpu") {
              init_git()
              docker_init(ci_gpu)
              // CUDA cross-build on a CPU node, hence --no-gpu.
              sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build"
              make("${ci_gpu} --no-gpu", 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/gpu/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/gpu/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/gpu/build/config.cmake
                  retry 3 aws s3 cp --no-progress build/microtvm_template_projects s3://${s3_prefix}/gpu/build/microtvm_template_projects --recursive
                """,
                label: 'Upload artifacts to S3',
              )


              // compiler test (second GPU configuration, uploaded as gpu2)
              sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build2"
              make("${ci_gpu} --no-gpu", 'build2', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/gpu2/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/gpu2/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/gpu2/build/config.cmake
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        }
      },
      'BUILD: CPU': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu") {
              init_git()
              docker_init(ci_cpu)
              sh (
                script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build",
                label: 'Create CPU cmake config',
              )
              make(ci_cpu, 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/cpu/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/cpu/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/cpu/build/config.cmake
                """,
                label: 'Upload artifacts to S3',
              )

              // Rust tests run here directly since they need the fresh build tree.
              timeout(time: max_time, unit: 'MINUTES') {
                ci_setup(ci_cpu)
                // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
                // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch
                sh (script: "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh", label: 'Rust build and test')
              }
            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: CPU')
        }
      },
      'BUILD: CPU MINIMAL': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu-minimal") {
              init_git()
              docker_init(ci_minimal)
              sh (
                script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build",
                label: 'Create CPU minimal cmake config',
              )
              make(ci_minimal, 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/cpu-minimal/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/cpu-minimal/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/cpu-minimal/build/config.cmake
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: CPU MINIMAL')
        }
      },
      'BUILD: WASM': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-wasm") {
              init_git()
              docker_init(ci_wasm)
              sh (
                script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build",
                label: 'Create WASM cmake config',
              )
              make(ci_wasm, 'build', '-j2')
              // WASM tests run inline; no artifacts are uploaded to S3.
              cpp_unittest(ci_wasm)
              timeout(time: max_time, unit: 'MINUTES') {
                ci_setup(ci_wasm)
                sh (
                  script: "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh",
                  label: 'Run WASM lint and tests',
                )
              }
            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: WASM')
        }
      },
      'BUILD: i386': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") {
              init_git()
              docker_init(ci_i386)
              sh (
                script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build",
                label: 'Create i386 cmake config',
              )
              make(ci_i386, 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/i386/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/i386/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/i386/build/config.cmake
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: i386')
        }
      },
      'BUILD: arm': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('ARM-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-arm") {
              init_git()
              docker_init(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build",
                label: 'Create ARM cmake config',
              )
              make(ci_arm, 'build', '-j4')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/arm/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/arm/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/arm/build/config.cmake
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: arm')
        }
      },
      'BUILD: Cortex-M': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cortexm") {
              init_git()
              docker_init(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_config_build_cortexm.sh build",
                label: 'Create Cortex-M cmake config',
              )
              make(ci_cortexm, 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/cortexm/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/cortexm/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/cortexm/build/config.cmake
                  retry 3 aws s3 cp --no-progress build/microtvm_template_projects s3://${s3_prefix}/cortexm/build/microtvm_template_projects --recursive
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: Cortex-M')
        }
      },
      'BUILD: Hexagon': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") {
              init_git()
              docker_init(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build",
                label: 'Create Hexagon cmake config',
              )
              make(ci_hexagon, 'build', '-j2')
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh",
                label: 'Build Hexagon API',
              )
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/hexagon/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/hexagon/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/hexagon/build/config.cmake
                  retry 3 aws s3 cp --no-progress build/hexagon_api_output s3://${s3_prefix}/hexagon/build/hexagon_api_output --recursive
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: Hexagon')
        }
      },
      'BUILD: RISC-V': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-riscv") {
              init_git()
              docker_init(ci_riscv)
              sh (
                script: "${docker_run} ${ci_riscv} ./tests/scripts/task_config_build_riscv.sh build",
                label: 'Create RISC-V cmake config',
              )
              make(ci_riscv, 'build', '-j2')
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/riscv/build/libtvm.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/riscv/build/libtvm_runtime.so
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/riscv/build/config.cmake
                  retry 3 aws s3 cp --no-progress build/microtvm_template_projects s3://${s3_prefix}/riscv/build/microtvm_template_projects --recursive
                """,
                label: 'Upload artifacts to S3',
              )

            }
          }
        } else {
          Utils.markStageSkippedForConditional('BUILD: RISC-V')
        }
      },
    )
  }
}
| |
// We have to do this wacky split of the code from where it's used since the
// JVM limits a method's bytecode size to 64k and we easily exceed that with
// all this autogenerated code. This makes it so each test step is in its own
// method so that no individual method is too big.
| |
// Shard 1 of 3 of the 'unittest: GPU' test stage (TVM_SHARD_INDEX=0).
// NOTE: this file is generated by jenkins/generate.py from
// jenkins/Jenkinsfile.j2 -- make changes in the template and regenerate.
// Flow: checkout, pull build artifacts from S3, run tests in the ci_gpu
// docker image, and always upload JUnit XML results back to S3.
def shard_run_unittest_GPU_1_of_3() {
  // Skip when CI is disabled or the change is docs-only; the stage is then
  // explicitly marked skipped so it still appears in the pipeline view.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      // Workspace is keyed by executor number so concurrent executors on the
      // same node do not share state.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // TVM_NUM_SHARDS / TVM_SHARD_INDEX select this shard's slice of
            // the test suite (read by the test scripts).
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=unittest: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // First pull the secondary 'gpu2' build and run the C++ unit
              // tests against it; the md5sum lines log artifact checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu2/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu2/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu2/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              cpp_unittest(ci_gpu)

              // Then overwrite with the primary 'gpu' build and run the rest
              // of this shard's tests (including cpp_unittest again) on it.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              cpp_unittest(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh",
                label: 'Run Python GPU unit tests',
              )
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh",
                label: 'Run Python GPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('unittest: GPU 1 of 3')
  }
}
| |
// Shard 2 of 3 of the 'unittest: GPU' test stage (TVM_SHARD_INDEX=1).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file. This shard also runs the Java unit tests.
def shard_run_unittest_GPU_2_of_3() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=unittest: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'gpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh",
                label: 'Run Java unit tests',
              )
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh",
                label: 'Run Python GPU unit tests',
              )
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh",
                label: 'Run Python GPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('unittest: GPU 2 of 3')
  }
}
| |
// Shard 3 of 3 of the 'unittest: GPU' test stage (TVM_SHARD_INDEX=2).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_unittest_GPU_3_of_3() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=unittest: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'gpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh",
                label: 'Run Python GPU unit tests',
              )
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh",
                label: 'Run Python GPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('unittest: GPU 3 of 3')
  }
}
| |
| |
// Shard 1 of 4 of the 'integration: CPU' test stage (TVM_SHARD_INDEX=0).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_integration_CPU_1_of_4() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") {
        try {
          init_git()
          docker_init(ci_cpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=cpu',
              'TEST_STEP_NAME=integration: CPU',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'cpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_cpu)
              sh (
                script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_CPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('integration: CPU 1 of 4')
  }
}
| |
// Shard 2 of 4 of the 'integration: CPU' test stage (TVM_SHARD_INDEX=1).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_integration_CPU_2_of_4() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") {
        try {
          init_git()
          docker_init(ci_cpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=cpu',
              'TEST_STEP_NAME=integration: CPU',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'cpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_cpu)
              sh (
                script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_CPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('integration: CPU 2 of 4')
  }
}
| |
// Shard 3 of 4 of the 'integration: CPU' test stage (TVM_SHARD_INDEX=2).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_integration_CPU_3_of_4() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") {
        try {
          init_git()
          docker_init(ci_cpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=cpu',
              'TEST_STEP_NAME=integration: CPU',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'cpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_cpu)
              sh (
                script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_CPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('integration: CPU 3 of 4')
  }
}
| |
// Shard 4 of 4 of the 'integration: CPU' test stage (TVM_SHARD_INDEX=3).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_integration_CPU_4_of_4() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") {
        try {
          init_git()
          docker_init(ci_cpu)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=cpu',
              'TEST_STEP_NAME=integration: CPU',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=3',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'cpu' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_cpu)
              sh (
                script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_CPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('integration: CPU 4 of 4')
  }
}
| |
| |
// Shard 1 of 3 of the 'python: i386' test stage (TVM_SHARD_INDEX=0).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file. This shard additionally runs the C++ unit tests.
def shard_run_python_i386_1_of_3() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
        try {
          init_git()
          docker_init(ci_i386)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=i386',
              'TEST_STEP_NAME=python: i386',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'i386' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_i386)
              cpp_unittest(ci_i386)
              python_unittest(ci_i386)
              sh (
                script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh",
                label: 'Run i386 integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/python_i386 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('python: i386 1 of 3')
  }
}
| |
// Shard 2 of 3 of the 'python: i386' test stage (TVM_SHARD_INDEX=1).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file. This shard additionally runs fsim_test.
def shard_run_python_i386_2_of_3() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
        try {
          init_git()
          docker_init(ci_i386)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=i386',
              'TEST_STEP_NAME=python: i386',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'i386' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_i386)
              python_unittest(ci_i386)
              sh (
                script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh",
                label: 'Run i386 integration tests',
              )
              fsim_test(ci_i386)
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/python_i386 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('python: i386 2 of 3')
  }
}
| |
// Shard 3 of 3 of the 'python: i386' test stage (TVM_SHARD_INDEX=2).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_python_i386_3_of_3() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
        try {
          init_git()
          docker_init(ci_i386)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=i386',
              'TEST_STEP_NAME=python: i386',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'i386' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_i386)
              python_unittest(ci_i386)
              sh (
                script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh",
                label: 'Run i386 integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/python_i386 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('python: i386 3 of 3')
  }
}
| |
| |
// Shard 1 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=0).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file. This shard additionally runs the C++ unit tests.
def shard_run_test_Hexagon_1_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              cpp_unittest(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 1 of 8')
  }
}
| |
// Shard 2 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=1).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_2_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 2 of 8')
  }
}
| |
// Shard 3 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=2).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_3_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 3 of 8')
  }
}
| |
// Shard 4 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=3).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_4_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=3',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 4 of 8')
  }
}
| |
// Shard 5 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=4).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_5_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=4',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 5 of 8')
  }
}
| |
// Shard 6 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=5).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_6_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=5',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 6 of 8')
  }
}
| |
// Shard 7 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=6).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_7_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=6',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 7 of 8')
  }
}
| |
// Shard 8 of 8 of the 'test: Hexagon' test stage (TVM_SHARD_INDEX=7).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file.
def shard_run_test_Hexagon_8_of_8() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
        try {
          init_git()
          docker_init(ci_hexagon)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=hexagon',
              'TEST_STEP_NAME=test: Hexagon',
              'TVM_NUM_SHARDS=8',
              'TVM_SHARD_INDEX=7',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'hexagon' build artifacts (including the Hexagon API
              // output tree); md5sum logs the checksums of the shared objects.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_hexagon_permissions()
              ci_setup(ci_hexagon)
              sh (
                script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh",
                label: 'Run Hexagon tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Hexagon --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: Hexagon 8 of 8')
  }
}
| |
| |
// Shard 1 of 4 of the 'integration: aarch64' test stage (TVM_SHARD_INDEX=0).
// Generated by jenkins/generate.py from jenkins/Jenkinsfile.j2 -- edit the
// template, not this file. Runs on an ARM node against the 'arm' build.
def shard_run_integration_aarch64_1_of_4() {
  // Skip for docs-only changes or when CI is disabled (stage marked skipped).
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      // Per-executor workspace to isolate concurrent executors on the node.
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            // Shard selection env vars are read by the test scripts.
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=integration: aarch64',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              // Pull the 'arm' build artifacts; md5sum logs their checksums.
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              python_unittest(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always publish JUnit XML results, even when the tests failed.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('integration: aarch64 1 of 4')
  }
}
| |
// Shard 2 of 4 for the 'integration: aarch64' test stage (generated code).
// Same flow as shard 1: fetch arm artifacts from S3, run Python unit and
// CPU integration tests in ci_arm; selection via TVM_SHARD_INDEX=1.
def shard_run_integration_aarch64_2_of_4() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=integration: aarch64',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              python_unittest(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('integration: aarch64 2 of 4')
  }
}
| |
// Shard 3 of 4 for the 'integration: aarch64' test stage (generated code).
// Same flow as shard 1: fetch arm artifacts from S3, run Python unit and
// CPU integration tests in ci_arm; selection via TVM_SHARD_INDEX=2.
def shard_run_integration_aarch64_3_of_4() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=integration: aarch64',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              python_unittest(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('integration: aarch64 3 of 4')
  }
}
| |
// Shard 4 of 4 for the 'integration: aarch64' test stage (generated code).
// Same flow as shard 1: fetch arm artifacts from S3, run Python unit and
// CPU integration tests in ci_arm; selection via TVM_SHARD_INDEX=3.
def shard_run_integration_aarch64_4_of_4() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=integration: aarch64',
              'TVM_NUM_SHARDS=4',
              'TVM_SHARD_INDEX=3',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              python_unittest(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh",
                label: 'Run CPU integration tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/integration_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('integration: aarch64 4 of 4')
  }
}
| |
| |
// Shard 1 of 3 for the 'topi: GPU' test stage (generated code).
// Downloads the prebuilt gpu artifacts from S3, then runs the TOPI test
// suite inside the ci_gpu docker image; work is split across shards via
// the TVM_NUM_SHARDS / TVM_SHARD_INDEX env vars.
def shard_run_topi_GPU_1_of_3() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=topi: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh",
                label: 'Run TOPI tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/topi_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('topi: GPU 1 of 3')
  }
}
| |
// Shard 2 of 3 for the 'topi: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run TOPI tests in
// ci_gpu; selection via TVM_SHARD_INDEX=1.
def shard_run_topi_GPU_2_of_3() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=topi: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh",
                label: 'Run TOPI tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/topi_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('topi: GPU 2 of 3')
  }
}
| |
// Shard 3 of 3 for the 'topi: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run TOPI tests in
// ci_gpu; selection via TVM_SHARD_INDEX=2.
def shard_run_topi_GPU_3_of_3() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=topi: GPU',
              'TVM_NUM_SHARDS=3',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh",
                label: 'Run TOPI tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/topi_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('topi: GPU 3 of 3')
  }
}
| |
| |
// Shard 1 of 6 for the 'frontend: GPU' test stage (generated code).
// Downloads the prebuilt gpu artifacts from S3, then runs the Python
// frontend test suite inside the ci_gpu docker image; work is split
// across shards via the TVM_NUM_SHARDS / TVM_SHARD_INDEX env vars.
def shard_run_frontend_GPU_1_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 1 of 6')
  }
}
| |
// Shard 2 of 6 for the 'frontend: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run Python frontend
// tests in ci_gpu; selection via TVM_SHARD_INDEX=1.
def shard_run_frontend_GPU_2_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 2 of 6')
  }
}
| |
// Shard 3 of 6 for the 'frontend: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run Python frontend
// tests in ci_gpu; selection via TVM_SHARD_INDEX=2.
def shard_run_frontend_GPU_3_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 3 of 6')
  }
}
| |
// Shard 4 of 6 for the 'frontend: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run Python frontend
// tests in ci_gpu; selection via TVM_SHARD_INDEX=3.
def shard_run_frontend_GPU_4_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=3',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 4 of 6')
  }
}
| |
// Shard 5 of 6 for the 'frontend: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run Python frontend
// tests in ci_gpu; selection via TVM_SHARD_INDEX=4.
def shard_run_frontend_GPU_5_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=4',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 5 of 6')
  }
}
| |
// Shard 6 of 6 for the 'frontend: GPU' test stage (generated code).
// Same flow as shard 1: fetch gpu artifacts from S3, run Python frontend
// tests in ci_gpu; selection via TVM_SHARD_INDEX=5.
def shard_run_frontend_GPU_6_of_6() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('GPU') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
        try {
          init_git()
          docker_init(ci_gpu)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=gpu',
              'TEST_STEP_NAME=frontend: GPU',
              'TVM_NUM_SHARDS=6',
              'TVM_SHARD_INDEX=5',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_gpu)
              sh (
                script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_GPU --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: GPU 6 of 6')
  }
}
| |
| |
// Shard 1 of 2 for the 'topi: aarch64' test stage (generated code).
// Downloads the prebuilt arm artifacts from S3, then runs the C++ unit
// tests (first shard only), the Arm Compute Library tests, and the TOPI
// tests inside the ci_arm docker image; sharded via TVM_SHARD_INDEX.
def shard_run_topi_aarch64_1_of_2() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=topi: aarch64',
              'TVM_NUM_SHARDS=2',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              // C++ unit tests are not sharded, so only this first shard runs them.
              cpp_unittest(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh",
                label: 'Run test_arm_compute_lib test',
              )
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh",
                label: 'Run TOPI tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/topi_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('topi: aarch64 1 of 2')
  }
}
| |
// Shard 2 of 2 for the 'topi: aarch64' test stage (generated code).
// Same flow as shard 1 minus the C++ unit tests (those run only on the
// first shard): fetch arm artifacts, then run the Arm Compute Library
// and TOPI tests in ci_arm; selection via TVM_SHARD_INDEX=1.
def shard_run_topi_aarch64_2_of_2() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=topi: aarch64',
              'TVM_NUM_SHARDS=2',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh",
                label: 'Run test_arm_compute_lib test',
              )
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh",
                label: 'Run TOPI tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/topi_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('topi: aarch64 2 of 2')
  }
}
| |
| |
// Shard 1 of 2 for the 'frontend: aarch64' test stage (generated code).
// Downloads the prebuilt arm artifacts from S3, then runs the CPU Python
// frontend test suite inside the ci_arm docker image; work is split
// across shards via the TVM_NUM_SHARDS / TVM_SHARD_INDEX env vars.
def shard_run_frontend_aarch64_1_of_2() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=frontend: aarch64',
              'TVM_NUM_SHARDS=2',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: aarch64 1 of 2')
  }
}
| |
// Shard 2 of 2 for the 'frontend: aarch64' test stage (generated code).
// Same flow as shard 1: fetch arm artifacts from S3, run CPU Python
// frontend tests in ci_arm; selection via TVM_SHARD_INDEX=1.
def shard_run_frontend_aarch64_2_of_2() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('ARM-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") {
        try {
          init_git()
          docker_init(ci_arm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=arm',
              'TEST_STEP_NAME=frontend: aarch64',
              'TVM_NUM_SHARDS=2',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                """,
                label: 'Download artifacts from S3',
              )

              ci_setup(ci_arm)
              sh (
                script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh",
                label: 'Run Python frontend tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_aarch64 --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('frontend: aarch64 2 of 2')
  }
}
| |
| |
// Shard 1 of 12 for the 'test: Cortex-M' stage (generated code).
// Downloads the prebuilt cortexm artifacts (including the microTVM project
// templates) from S3, then runs the C++ unit tests and microTVM demos
// (first shard only) plus the microTVM Python tests in ci_cortexm;
// sharded via TVM_NUM_SHARDS / TVM_SHARD_INDEX.
def shard_run_test_Cortex_M_1_of_12() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
        try {
          init_git()
          docker_init(ci_cortexm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=cortexm',
              'TEST_STEP_NAME=test: Cortex-M',
              'TVM_NUM_SHARDS=12',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              // Restore executable bits lost by the S3 round-trip.
              add_microtvm_permissions()
              ci_setup(ci_cortexm)
              // C++ unit tests and demos are not sharded; only this first shard runs them.
              cpp_unittest(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_demo_microtvm.sh",
                label: 'Run microTVM demos',
              )
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('test: Cortex-M 1 of 12')
  }
}
| |
// Shard 2 of 12 for the 'test: Cortex-M' stage (generated code).
// Same flow as shard 1 minus the unsharded C++ unit tests and demos:
// fetch cortexm artifacts + microTVM templates from S3, then run the
// microTVM Python tests in ci_cortexm; selection via TVM_SHARD_INDEX=1.
def shard_run_test_Cortex_M_2_of_12() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
        try {
          init_git()
          docker_init(ci_cortexm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=cortexm',
              'TEST_STEP_NAME=test: Cortex-M',
              'TVM_NUM_SHARDS=12',
              'TVM_SHARD_INDEX=1',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              // Restore executable bits lost by the S3 round-trip.
              add_microtvm_permissions()
              ci_setup(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('test: Cortex-M 2 of 12')
  }
}
| |
// Shard 3 of 12 for the 'test: Cortex-M' stage (generated code).
// Same flow as shard 2: fetch cortexm artifacts + microTVM templates from
// S3, then run the microTVM Python tests in ci_cortexm; TVM_SHARD_INDEX=2.
def shard_run_test_Cortex_M_3_of_12() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
        try {
          init_git()
          docker_init(ci_cortexm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=cortexm',
              'TEST_STEP_NAME=test: Cortex-M',
              'TVM_NUM_SHARDS=12',
              'TVM_SHARD_INDEX=2',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              // Restore executable bits lost by the S3 round-trip.
              add_microtvm_permissions()
              ci_setup(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('test: Cortex-M 3 of 12')
  }
}
| |
// Shard 4 of 12 for the 'test: Cortex-M' stage (generated code).
// Same flow as shard 2: fetch cortexm artifacts + microTVM templates from
// S3, then run the microTVM Python tests in ci_cortexm; TVM_SHARD_INDEX=3.
def shard_run_test_Cortex_M_4_of_12() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
        try {
          init_git()
          docker_init(ci_cortexm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=cortexm',
              'TEST_STEP_NAME=test: Cortex-M',
              'TVM_NUM_SHARDS=12',
              'TVM_SHARD_INDEX=3',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              // Restore executable bits lost by the S3 round-trip.
              add_microtvm_permissions()
              ci_setup(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('test: Cortex-M 4 of 12')
  }
}
| |
// Shard 5 of 12 for the 'test: Cortex-M' stage (generated code).
// Same flow as shard 2: fetch cortexm artifacts + microTVM templates from
// S3, then run the microTVM Python tests in ci_cortexm; TVM_SHARD_INDEX=4.
def shard_run_test_Cortex_M_5_of_12() {
  // Skip entirely when CI is disabled or the change is docs-only.
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
        try {
          init_git()
          docker_init(ci_cortexm)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=cortexm',
              'TEST_STEP_NAME=test: Cortex-M',
              'TVM_NUM_SHARDS=12',
              'TVM_SHARD_INDEX=4',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              // Restore executable bits lost by the S3 round-trip.
              add_microtvm_permissions()
              ci_setup(ci_cortexm)
              sh (
                script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always upload and publish JUnit results, even on test failure.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    // Show the stage as skipped in the Jenkins UI instead of running it.
    Utils.markStageSkippedForConditional('test: Cortex-M 5 of 12')
  }
}
| |
// Cortex-M microTVM test shard 6/12 (TVM_SHARD_INDEX=5).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_6_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 6 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=5',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 7/12 (TVM_SHARD_INDEX=6).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_7_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 7 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=6',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 8/12 (TVM_SHARD_INDEX=7).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_8_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 8 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=7',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 9/12 (TVM_SHARD_INDEX=8).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_9_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 9 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=8',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 10/12 (TVM_SHARD_INDEX=9).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_10_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 10 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=9',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 11/12 (TVM_SHARD_INDEX=10).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_11_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 11 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=10',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
// Cortex-M microTVM test shard 12/12 (TVM_SHARD_INDEX=11).
// Pulls the cortexm build artifacts from S3, runs the microTVM test script
// inside the ci_cortexm container, and always uploads JUnit XML results
// back to S3 (even on failure) so they appear in Jenkins.
def shard_run_test_Cortex_M_12_of_12() {
  // Guard clause: skip the whole shard for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('test: Cortex-M 12 of 12')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") {
      try {
        init_git()
        docker_init(ci_cortexm)
        timeout(time: max_time, unit: 'MINUTES') {
          // The test scripts read the shard assignment from these env vars.
          def shard_env = [
            'PLATFORM=cortexm',
            'TEST_STEP_NAME=test: Cortex-M',
            'TVM_NUM_SHARDS=12',
            'TVM_SHARD_INDEX=11',
            "SKIP_SLOW_TESTS=${skip_slow_tests}",
          ]
          withEnv(shard_env, {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/config.cmake build/config.cmake
                md5sum build/config.cmake
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cortexm/build/microtvm_template_projects build/microtvm_template_projects --recursive
              """,
              label: 'Download artifacts from S3',
            )
            add_microtvm_permissions()
            ci_setup(ci_cortexm)
            sh(
              script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh",
              label: 'Run microTVM tests',
            )
          })
        }
      } finally {
        // Always publish test results, even when the shard failed above.
        sh(
          script: """
            set -eux
            aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_Cortex_M --recursive
          """,
          label: 'Upload JUnits to S3',
        )
        junit 'build/pytest-results/*.xml'
      }
    }
  }
}
| |
| |
// RISC-V microTVM test shard 1/1.
// Downloads the riscv build artifacts from S3, runs the C++ unit tests and
// the RISC-V microTVM test script inside the ci_riscv container, then always
// uploads JUnit XML results back to S3 so they appear in Jenkins.
def shard_run_test_RISC_V_1_of_1() {
  if (!skip_ci && is_docs_only_build != 1) {
    node('CPU-SMALL') {
      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-riscv") {
        try {
          init_git()
          docker_init(ci_riscv)
          timeout(time: max_time, unit: 'MINUTES') {
            withEnv([
              'PLATFORM=riscv',
              'TEST_STEP_NAME=test: RISC-V',
              'TVM_NUM_SHARDS=1',
              'TVM_SHARD_INDEX=0',
              "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/riscv/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/riscv/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/riscv/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/riscv/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_microtvm_permissions()
              ci_setup(ci_riscv)
              // BUG FIX: this previously ran cpp_unittest(ci_cortexm) — a
              // copy/paste slip from the Cortex-M shards. Everything else in
              // this function (artifacts, setup, test script) uses the RISC-V
              // image, so the C++ unit tests should run in ci_riscv too.
              cpp_unittest(ci_riscv)
              sh (
                script: "${docker_run} ${ci_riscv} ./tests/scripts/task_riscv_microtvm.sh",
                label: 'Run microTVM tests',
              )
            })
          }
        } finally {
          // Always publish test results, even when the tests failed above.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/test_RISC_V --recursive
            """,
            label: 'Upload JUnits to S3',
          )

          junit 'build/pytest-results/*.xml'
        }
      }
    }
  } else {
    Utils.markStageSkippedForConditional('test: RISC-V 1 of 1')
  }
}
| |
| |
// Run the C++ and Python unit tests against the minimal CPU build
// (cpu-minimal artifacts), always publishing JUnit results afterwards.
def run_unittest_minimal() {
  // Guard clause: nothing to do for docs-only builds or when CI is skipped.
  if (skip_ci || is_docs_only_build == 1) {
    Utils.markStageSkippedForConditional('unittest: CPU MINIMAL')
    return
  }
  node('CPU-SMALL') {
    ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu-minimal") {
      timeout(time: max_time, unit: 'MINUTES') {
        try {
          init_git()
          docker_init(ci_minimal)
          withEnv(['PLATFORM=minimal'], {
            sh(
              script: """
                set -eux
                . ci/scripts/retry.sh
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/libtvm.so build/libtvm.so
                md5sum build/libtvm.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/libtvm_runtime.so build/libtvm_runtime.so
                md5sum build/libtvm_runtime.so
                retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/config.cmake build/config.cmake
                md5sum build/config.cmake
              """,
              label: 'Download artifacts from S3',
            )
            cpp_unittest(ci_minimal)
            python_unittest(ci_minimal)
          })
        } finally {
          // Always publish test results, even when the tests failed above.
          sh(
            script: """
              set -eux
              aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_CPU_MINIMAL --recursive
            """,
            label: 'Upload JUnits to S3',
          )
          junit 'build/pytest-results/*.xml'
        }
      }
    }
  }
}
| |
// Run the entire 'Test' stage: every test shard defined above runs as one
// branch of a single `parallel` step, plus three inline stages
// (unittest: CPU, frontend: CPU, docs: GPU). Each branch downloads its build
// artifacts from S3 and is independently skippable for docs-only builds.
def test() {
  stage('Test') {
    // NOTE(review): `environment { ... }` is declarative-pipeline syntax; in
    // this scripted pipeline it is evaluated as a call to a step/method named
    // `environment` with a closure. Confirm this actually exports
    // SKIP_SLOW_TESTS here — the shards also pass it explicitly via withEnv.
    environment {
      SKIP_SLOW_TESTS = "${skip_slow_tests}"
    }
    parallel(
      'unittest: GPU 1 of 3': {
        shard_run_unittest_GPU_1_of_3()
      },
      'unittest: GPU 2 of 3': {
        shard_run_unittest_GPU_2_of_3()
      },
      'unittest: GPU 3 of 3': {
        shard_run_unittest_GPU_3_of_3()
      },
      'integration: CPU 1 of 4': {
        shard_run_integration_CPU_1_of_4()
      },
      'integration: CPU 2 of 4': {
        shard_run_integration_CPU_2_of_4()
      },
      'integration: CPU 3 of 4': {
        shard_run_integration_CPU_3_of_4()
      },
      'integration: CPU 4 of 4': {
        shard_run_integration_CPU_4_of_4()
      },
      'python: i386 1 of 3': {
        shard_run_python_i386_1_of_3()
      },
      'python: i386 2 of 3': {
        shard_run_python_i386_2_of_3()
      },
      'python: i386 3 of 3': {
        shard_run_python_i386_3_of_3()
      },
      'test: Hexagon 1 of 8': {
        shard_run_test_Hexagon_1_of_8()
      },
      'test: Hexagon 2 of 8': {
        shard_run_test_Hexagon_2_of_8()
      },
      'test: Hexagon 3 of 8': {
        shard_run_test_Hexagon_3_of_8()
      },
      'test: Hexagon 4 of 8': {
        shard_run_test_Hexagon_4_of_8()
      },
      'test: Hexagon 5 of 8': {
        shard_run_test_Hexagon_5_of_8()
      },
      'test: Hexagon 6 of 8': {
        shard_run_test_Hexagon_6_of_8()
      },
      'test: Hexagon 7 of 8': {
        shard_run_test_Hexagon_7_of_8()
      },
      'test: Hexagon 8 of 8': {
        shard_run_test_Hexagon_8_of_8()
      },
      'integration: aarch64 1 of 4': {
        shard_run_integration_aarch64_1_of_4()
      },
      'integration: aarch64 2 of 4': {
        shard_run_integration_aarch64_2_of_4()
      },
      'integration: aarch64 3 of 4': {
        shard_run_integration_aarch64_3_of_4()
      },
      'integration: aarch64 4 of 4': {
        shard_run_integration_aarch64_4_of_4()
      },
      'topi: GPU 1 of 3': {
        shard_run_topi_GPU_1_of_3()
      },
      'topi: GPU 2 of 3': {
        shard_run_topi_GPU_2_of_3()
      },
      'topi: GPU 3 of 3': {
        shard_run_topi_GPU_3_of_3()
      },
      'frontend: GPU 1 of 6': {
        shard_run_frontend_GPU_1_of_6()
      },
      'frontend: GPU 2 of 6': {
        shard_run_frontend_GPU_2_of_6()
      },
      'frontend: GPU 3 of 6': {
        shard_run_frontend_GPU_3_of_6()
      },
      'frontend: GPU 4 of 6': {
        shard_run_frontend_GPU_4_of_6()
      },
      'frontend: GPU 5 of 6': {
        shard_run_frontend_GPU_5_of_6()
      },
      'frontend: GPU 6 of 6': {
        shard_run_frontend_GPU_6_of_6()
      },
      'topi: aarch64 1 of 2': {
        shard_run_topi_aarch64_1_of_2()
      },
      'topi: aarch64 2 of 2': {
        shard_run_topi_aarch64_2_of_2()
      },
      'frontend: aarch64 1 of 2': {
        shard_run_frontend_aarch64_1_of_2()
      },
      'frontend: aarch64 2 of 2': {
        shard_run_frontend_aarch64_2_of_2()
      },
      'test: Cortex-M 1 of 12': {
        shard_run_test_Cortex_M_1_of_12()
      },
      'test: Cortex-M 2 of 12': {
        shard_run_test_Cortex_M_2_of_12()
      },
      'test: Cortex-M 3 of 12': {
        shard_run_test_Cortex_M_3_of_12()
      },
      'test: Cortex-M 4 of 12': {
        shard_run_test_Cortex_M_4_of_12()
      },
      'test: Cortex-M 5 of 12': {
        shard_run_test_Cortex_M_5_of_12()
      },
      'test: Cortex-M 6 of 12': {
        shard_run_test_Cortex_M_6_of_12()
      },
      'test: Cortex-M 7 of 12': {
        shard_run_test_Cortex_M_7_of_12()
      },
      'test: Cortex-M 8 of 12': {
        shard_run_test_Cortex_M_8_of_12()
      },
      'test: Cortex-M 9 of 12': {
        shard_run_test_Cortex_M_9_of_12()
      },
      'test: Cortex-M 10 of 12': {
        shard_run_test_Cortex_M_10_of_12()
      },
      'test: Cortex-M 11 of 12': {
        shard_run_test_Cortex_M_11_of_12()
      },
      'test: Cortex-M 12 of 12': {
        shard_run_test_Cortex_M_12_of_12()
      },
      'test: RISC-V 1 of 1': {
        shard_run_test_RISC_V_1_of_1()
      },
      'unittest: CPU MINIMAL': {
        run_unittest_minimal()
      },
      // Non-sharded stage: C++/Python/fsim unit tests against the full CPU build.
      'unittest: CPU': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") {
              timeout(time: max_time, unit: 'MINUTES') {
                try {
                  init_git()
                  docker_init(ci_cpu)
                  withEnv(['PLATFORM=cpu',
                    'TEST_STEP_NAME=unittest: CPU',
                    "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
                    sh(
                      script: """
                        set -eux
                        . ci/scripts/retry.sh
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                        md5sum build/libtvm.so
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                        md5sum build/libtvm_runtime.so
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                        md5sum build/config.cmake
                      """,
                      label: 'Download artifacts from S3',
                    )

                    ci_setup(ci_cpu)
                    cpp_unittest(ci_cpu)
                    python_unittest(ci_cpu)
                    fsim_test(ci_cpu)
                    // sh (
                    //   script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh",
                    //   label: 'Run VTA tests in TSIM',
                    // )
                  })
                } finally {
                  // Always publish test results, even when the tests failed above.
                  sh(
                    script: """
                      set -eux
                      aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_CPU --recursive
                    """,
                    label: 'Upload JUnits to S3',
                  )

                  junit 'build/pytest-results/*.xml'
                }
              }
            }
          }
        } else {
          Utils.markStageSkippedForConditional('unittest: CPU')
        }
      },
      // Non-sharded stage: Python frontend tests against the full CPU build.
      'frontend: CPU': {
        if (!skip_ci && is_docs_only_build != 1) {
          node('CPU-SMALL') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-cpu") {
              timeout(time: max_time, unit: 'MINUTES') {
                try {
                  init_git()
                  docker_init(ci_cpu)
                  withEnv(['PLATFORM=cpu',
                    'TEST_STEP_NAME=frontend: CPU',
                    "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
                    sh(
                      script: """
                        set -eux
                        . ci/scripts/retry.sh
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so
                        md5sum build/libtvm.so
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so
                        md5sum build/libtvm_runtime.so
                        retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake
                        md5sum build/config.cmake
                      """,
                      label: 'Download artifacts from S3',
                    )

                    ci_setup(ci_cpu)
                    sh (
                      script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh",
                      label: 'Run Python frontend tests',
                    )
                  })
                } finally {
                  // Always publish test results, even when the tests failed above.
                  sh(
                    script: """
                      set -eux
                      aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/frontend_CPU --recursive
                    """,
                    label: 'Upload JUnits to S3',
                  )

                  junit 'build/pytest-results/*.xml'
                }
              }
            }
          }
        } else {
          Utils.markStageSkippedForConditional('frontend: CPU')
        }
      },
      // Docs build runs even for docs-only changes (only gated on skip_ci),
      // uploading both docs.tgz and the unpacked _docs tree to S3.
      'docs: GPU': {
        if (!skip_ci) {
          node('GPU') {
            ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") {
              init_git()
              docker_init(ci_gpu)
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so
                  md5sum build/libtvm.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so
                  md5sum build/libtvm_runtime.so
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake
                  md5sum build/config.cmake
                  retry 3 aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/microtvm_template_projects build/microtvm_template_projects --recursive
                """,
                label: 'Download artifacts from S3',
              )

              add_microtvm_permissions()
              // Docs builds are slow; use a longer fixed timeout than max_time.
              timeout(time: 180, unit: 'MINUTES') {
                ci_setup(ci_gpu)
                sh (
                  script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh",
                  label: 'Build docs',
                )
              }
              sh(
                script: """
                  set -eux
                  . ci/scripts/retry.sh
                  md5sum docs.tgz
                  retry 3 aws s3 cp --no-progress docs.tgz s3://${s3_prefix}/docs/docs.tgz
                """,
                label: 'Upload artifacts to S3',
              )

              sh(
                script: "aws s3 cp --no-progress _docs s3://${s3_prefix}/docs --recursive",
                label: 'Upload docs to S3',
              )
            }
          }
        }
      },
    )
  }
}
| /* |
| stage('Build packages') { |
| parallel 'conda CPU': { |
| node('CPU') { |
| sh "${docker_run} tlcpack/conda-cpu ./conda/build_cpu.sh |
| } |
| }, |
| 'conda cuda': { |
| node('CPU') { |
| sh "${docker_run} tlcpack/conda-cuda90 ./conda/build_cuda.sh |
| sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh |
| } |
| } |
| // Here we could upload the packages to anaconda for releases |
| // and/or the main branch |
| } |
| */ |
| |
| |
// Retag `ecr_image` (an image built in this run and pushed to ECR) as
// `hub_image` and push it to Docker Hub. No-ops with a log line when the
// image was not rebuilt (null) or does not look like an ECR image.
def update_docker(ecr_image, hub_image) {
  if (ecr_image == null) {
    // BUG FIX: previously the bare message was passed to sh(), so the shell
    // tried to execute `image` as a command and the step failed. Echo it.
    sh("echo image was not rebuilt, skipping")
    return
  }
  if (!ecr_image.contains("amazonaws.com")) {
    sh("echo \"Skipping '${ecr_image}' -> '${hub_image}' since it doesn\'t look like an ECR image\"")
    return
  }
  docker_init(ecr_image)
  sh(
    script: """
      set -eux
      . ci/scripts/retry.sh
      docker tag \
        ${ecr_image} \
        ${hub_image}
      retry 5 docker push ${hub_image}
    """,
    label: "Update ${hub_image} on Docker Hub",
  )
}
| |
// Publish the freshly built docs tarball (docs.tgz, already downloaded into
// the workspace) to the apache/tvm-site repository on $DOCS_DEPLOY_BRANCH.
def deploy_docs() {
  // Note: This code must stay in the Jenkinsfile to ensure that it runs
  // from a trusted context only
  //
  // Clone tvm-site, delete the current top-level docs (keeping archived
  // docs/vN trees via the grep -vP filter), unpack docs.tgz into docs/,
  // and commit as the tvm-bot user. The deployed commit hash is read from
  // docs/commit_hash inside the tarball.
  sh(
    script: '''
      set -eux
      rm -rf tvm-site
      git clone -b $DOCS_DEPLOY_BRANCH --depth=1 https://github.com/apache/tvm-site
      cd tvm-site
      git status
      git checkout -B $DOCS_DEPLOY_BRANCH

      git ls-tree HEAD docs/ --name-only | grep -vP '^docs/v\\d' | xargs rm -rf
      mkdir -p docs
      tar xf ../docs.tgz -C docs
      COMMIT=$(cat docs/commit_hash)
      git add .
      git config user.name tvm-bot
      git config user.email 95660001+tvm-bot@users.noreply.github.com
      git commit -m"deploying docs (apache/tvm@$COMMIT)"
      git status
    ''',
    label: 'Unpack docs and update tvm-site'
  )

  // Push using the bot token from Jenkins credentials. `|| true` makes the
  // push best-effort so a race with a concurrent deploy does not fail the
  // whole pipeline.
  withCredentials([string(
    credentialsId: 'docs-push-token',
    variable: 'GITHUB_TOKEN',
  )]) {
    sh(
      script: '''
        cd tvm-site
        git remote add deploy https://$GITHUB_TOKEN:x-oauth-basic@github.com/apache/tvm-site.git
        git push deploy $DOCS_DEPLOY_BRANCH || true
      ''',
      label: 'Upload docs to apache/tvm-site'
    )
  }
}
| |
| |
// 'Deploy' stage: only runs on the main branch. Three parallel branches:
//  - Deploy Docs: fetch docs.tgz from S3 and publish it to apache/tvm-site.
//  - Upload built Docker images: push freshly built ECR images to the
//    tlcpackstaging Docker Hub org under a date+revision tag.
//  - Tag tlcpackstaging to tlcpack: promote any ci_* image currently pinned
//    to a tlcpackstaging tag into the tlcpack org under the same tag.
def deploy() {
  stage('Deploy') {
    if (env.BRANCH_NAME == 'main') {
      parallel(
        'Deploy Docs': {
          if (env.DOCS_DEPLOY_ENABLED == 'yes') {
            node('CPU') {
              ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docs") {
                timeout(time: max_time, unit: 'MINUTES') {
                  init_git()
                  sh(
                    script: """
                      set -eux
                      . ci/scripts/retry.sh
                      retry 3 aws s3 cp --no-progress s3://${s3_prefix}/docs/docs.tgz docs.tgz
                      md5sum docs.tgz
                    """,
                    label: 'Download artifacts from S3',
                  )

                  deploy_docs()
                }
              }
            }
          } else {
            Utils.markStageSkippedForConditional('Deploy Docs')
          }
        },
        'Upload built Docker images': {
          // Only meaningful when images were rebuilt in this run and the
          // upstream revision they were built from is known.
          if (env.DEPLOY_DOCKER_IMAGES == 'yes' && rebuild_docker_images && upstream_revision != null) {
            node('CPU') {
              ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docker") {
                timeout(time: max_time, unit: 'MINUTES') {
                  try {
                    withCredentials([string(
                      credentialsId: 'dockerhub-tlcpackstaging-key',
                      variable: 'DOCKERHUB_KEY',
                    )]) {
                      sh(
                        script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}',
                        label: 'Log in to Docker Hub',
                      )
                    }
                    // Tag format: YYYYmmdd-HHMMSS-<8-char upstream sha>.
                    def date_Ymd_HMS = sh(
                      script: 'python3 -c \'import datetime; print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'',
                      label: 'Determine date',
                      returnStdout: true,
                    ).trim()
                    def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}"
                    // update_docker() skips images that were not rebuilt (null).
                    update_docker(built_ci_arm, "tlcpackstaging/ci_arm:${tag}")
                    update_docker(built_ci_cortexm, "tlcpackstaging/ci_cortexm:${tag}")
                    update_docker(built_ci_cpu, "tlcpackstaging/ci_cpu:${tag}")
                    update_docker(built_ci_gpu, "tlcpackstaging/ci_gpu:${tag}")
                    update_docker(built_ci_hexagon, "tlcpackstaging/ci_hexagon:${tag}")
                    update_docker(built_ci_i386, "tlcpackstaging/ci_i386:${tag}")
                    update_docker(built_ci_lint, "tlcpackstaging/ci_lint:${tag}")
                    update_docker(built_ci_minimal, "tlcpackstaging/ci_minimal:${tag}")
                    update_docker(built_ci_riscv, "tlcpackstaging/ci_riscv:${tag}")
                    update_docker(built_ci_wasm, "tlcpackstaging/ci_wasm:${tag}")
                  } finally {
                    sh(
                      script: 'docker logout',
                      label: 'Clean up login credentials'
                    )
                  }
                }
              }
            }
          } else {
            Utils.markStageSkippedForConditional('Upload built Docker images')
          }
        },
        'Tag tlcpackstaging to tlcpack': {
          // NOTE(review): this branch is gated on DOCS_DEPLOY_ENABLED, which
          // looks like a docs flag rather than a docker one — confirm this
          // gating is intentional (it comes from the generator template).
          if (env.DOCS_DEPLOY_ENABLED == 'yes') {
            node('CPU') {
              ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/tag-images") {
                timeout(time: max_time, unit: 'MINUTES') {
                  withCredentials([string(
                    credentialsId: 'dockerhub-tlcpack-key',
                    variable: 'TLCPACK_TOKEN',
                  )]) {
                    try {
                      sh(
                        script: 'echo $TLCPACK_TOKEN | docker login --username octomldriazati --password-stdin',
                        label: 'Log in to Docker Hub'
                      )
                      // For each ci_* image currently pinned to a
                      // tlcpackstaging tag: pull it, retag under the tlcpack
                      // org (underscores become hyphens), and push.
                      if (ci_arm.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_arm.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_arm:${tag}
                            docker tag tlcpackstaging/ci_arm:${tag} tlcpack/ci-arm:${tag}
                            retry 5 docker push tlcpack/ci-arm:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_arm image to tlcpack',
                        )
                      }
                      if (ci_cortexm.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_cortexm.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_cortexm:${tag}
                            docker tag tlcpackstaging/ci_cortexm:${tag} tlcpack/ci-cortexm:${tag}
                            retry 5 docker push tlcpack/ci-cortexm:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_cortexm image to tlcpack',
                        )
                      }
                      if (ci_cpu.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_cpu.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_cpu:${tag}
                            docker tag tlcpackstaging/ci_cpu:${tag} tlcpack/ci-cpu:${tag}
                            retry 5 docker push tlcpack/ci-cpu:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_cpu image to tlcpack',
                        )
                      }
                      if (ci_gpu.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_gpu.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_gpu:${tag}
                            docker tag tlcpackstaging/ci_gpu:${tag} tlcpack/ci-gpu:${tag}
                            retry 5 docker push tlcpack/ci-gpu:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_gpu image to tlcpack',
                        )
                      }
                      if (ci_hexagon.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_hexagon.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_hexagon:${tag}
                            docker tag tlcpackstaging/ci_hexagon:${tag} tlcpack/ci-hexagon:${tag}
                            retry 5 docker push tlcpack/ci-hexagon:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_hexagon image to tlcpack',
                        )
                      }
                      if (ci_i386.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_i386.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_i386:${tag}
                            docker tag tlcpackstaging/ci_i386:${tag} tlcpack/ci-i386:${tag}
                            retry 5 docker push tlcpack/ci-i386:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_i386 image to tlcpack',
                        )
                      }
                      if (ci_lint.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_lint.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_lint:${tag}
                            docker tag tlcpackstaging/ci_lint:${tag} tlcpack/ci-lint:${tag}
                            retry 5 docker push tlcpack/ci-lint:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_lint image to tlcpack',
                        )
                      }
                      if (ci_minimal.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_minimal.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_minimal:${tag}
                            docker tag tlcpackstaging/ci_minimal:${tag} tlcpack/ci-minimal:${tag}
                            retry 5 docker push tlcpack/ci-minimal:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_minimal image to tlcpack',
                        )
                      }
                      if (ci_riscv.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_riscv.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_riscv:${tag}
                            docker tag tlcpackstaging/ci_riscv:${tag} tlcpack/ci-riscv:${tag}
                            retry 5 docker push tlcpack/ci-riscv:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_riscv image to tlcpack',
                        )
                      }
                      if (ci_wasm.contains("tlcpackstaging")) {
                        // Push image to tlcpack
                        def tag = ci_wasm.split(":")[1]
                        sh(
                          script: """
                            set -eux
                            . ci/scripts/retry.sh
                            docker pull tlcpackstaging/ci_wasm:${tag}
                            docker tag tlcpackstaging/ci_wasm:${tag} tlcpack/ci-wasm:${tag}
                            retry 5 docker push tlcpack/ci-wasm:${tag}
                          """,
                          label: 'Tag tlcpackstaging/ci_wasm image to tlcpack',
                        )
                      }
                    } finally {
                      sh(
                        script: 'docker logout',
                        label: 'Clean up login credentials'
                      )
                    }
                  }
                }
              }
            }
          } else {
            Utils.markStageSkippedForConditional('Tag tlcpackstaging to tlcpack')
          }
        },
      )
    }
  }
}
| |
| |
// Top-level pipeline flow. Each call below is a stage-defining function
// declared earlier in this (generated) file.
cancel_previous_build()

prepare()

// Only rebuild CI docker images when the Docker build scripts changed.
if (rebuild_docker_images) {
  build_docker_images()
}

lint()

build()

test()

deploy()