Merge pull request #1569 from godaddy/allowed-address-pairs

Support updating allowed_address_pairs on OpenStack ports
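
A minimal usage sketch (assuming an already-constructed
``OpenStack_2_NodeDriver`` instance named ``driver``; the ``ex_get_port`` /
``ex_update_port`` calls and the pair format follow the Neutron API, with the
new ``allowed_address_pairs`` value passed through by the updated driver):

    # Fetch the port, then allow an extra IP (and optionally a MAC)
    # to send traffic through it.
    port = driver.ex_get_port('PORT_ID')
    driver.ex_update_port(port, allowed_address_pairs=[
        {'ip_address': '10.0.0.42'},
    ])
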
diff --git a/.github/actions/skip-duplicate-actions b/.github/actions/skip-duplicate-actions
new file mode 160000
index 0000000..fda4aba
--- /dev/null
+++ b/.github/actions/skip-duplicate-actions
@@ -0,0 +1 @@
+Subproject commit fda4aba6a2cfab52e09f05c7311ce91fd1e5bae3
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
new file mode 100644
index 0000000..9918715
--- /dev/null
+++ b/.github/workflows/integration-tests.yml
@@ -0,0 +1,77 @@
+# NOTE: Integration tests are part of a separate workflow so we can re-run just
+# this workflow on failure
+name: Integration Tests
+
+on:
+  push:
+    branches:
+      - trunk
+  pull_request:
+    branches:
+      - trunk
+  schedule:
+    - cron: '0 1 * * *'
+
+jobs:
+  # Special job which skips duplicate jobs
+  pre_job:
+    name: Skip Duplicate Jobs Pre Job
+    runs-on: ubuntu-latest
+    # Map a step output to a job output
+    outputs:
+      should_skip: ${{ steps.skip_check.outputs.should_skip }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@master
+        with:
+          persist-credentials: false
+          submodules: recursive
+
+      - id: skip_check
+        # NOTE: We store the action as a submodule since ASF doesn't allow
+        # directly referencing external actions
+        uses: ./.github/actions/skip-duplicate-actions # v3.4.0
+        with:
+          github_token: ${{ github.token }}
+
+  integration_tests:
+    name: Run Integration Tests
+    runs-on: ubuntu-latest
+
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
+
+    strategy:
+      matrix:
+        python_version: [3.7]
+
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+
+      - name: Use Python ${{ matrix.python_version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python_version }}
+
+      - name: Install OS / deb dependencies
+        run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
+          sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq gcc libvirt-dev
+
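+      # Key the cache on the hash of the requirements files; restore-keys lets a
+      # cold key fall back to the most recent cache sharing the same prefix.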
+      - name: Cache Python Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements-tests.txt', 'integration/storage/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
+      - name: Install Python Dependencies
+        run: |
+          pip install "tox==3.20.1"
+
+      - name: Run tox target
+        run: |
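+          # script(1) allocates a pseudo-TTY so tox keeps line-buffered, colored
+          # output in CI; -e makes script return the wrapped command's exit code.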
+          script -e -c "tox -e integration-storage"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index f9d2750..6b5e08b 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -12,25 +12,32 @@
 
 jobs:
   # Special job which skips duplicate jobs
-  # NOTE: It looks like Apache disabled using external actions
-  # pre_job:
-  #   name: Skip Duplicate Jobs Pre Job
-  #   runs-on: ubuntu-latest
-  #   # Map a step output to a job output
-  #   outputs:
-  #     should_skip: ${{ steps.skip_check.outputs.should_skip }}
-  #   steps:
-  #     - id: skip_check
-  #       uses: fkirc/skip-duplicate-actions@f05289cf5f432138afd3408d79ca931eb0df74c3 # v3.0.0
-  #       with:
-  #         github_token: ${{ github.token }}
+  pre_job:
+    name: Skip Duplicate Jobs Pre Job
+    runs-on: ubuntu-latest
+    # Map a step output to a job output
+    outputs:
+      should_skip: ${{ steps.skip_check.outputs.should_skip }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@master
+        with:
+          persist-credentials: false
+          submodules: recursive
+
+      - id: skip_check
+        # NOTE: We store the action as a submodule since ASF doesn't allow
+        # directly referencing external actions
+        uses: ./.github/actions/skip-duplicate-actions # v3.4.0
+        with:
+          github_token: ${{ github.token }}
 
   unit_tests:
     name: Run Unit Tests
     runs-on: ${{ matrix.os }}
 
-    # needs: pre_job
-    # if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
 
     strategy:
       fail-fast: false
@@ -57,22 +64,31 @@
 
       - name: Install OS / deb dependencies
         run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
           sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq gcc libvirt-dev
 
+      - name: Cache Python Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements-tests.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
       - name: Install Python Dependencies
         run: |
           pip install "tox==3.20.1"
 
       - name: Run tox target
         run: |
-          tox -e py${{ matrix.python_version }}
+          script -e -c "tox -e py${{ matrix.python_version }}"
 
   code_coverage:
     name: Generate Code Coverage
     runs-on: ubuntu-latest
 
-    # needs: pre_job
-    # if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
 
     strategy:
       matrix:
@@ -90,22 +106,31 @@
 
       - name: Install OS / deb dependencies
         run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
           sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq graphviz gcc libvirt-dev
 
+      - name: Cache Python Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements-tests.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
       - name: Install Python Dependencies
         run: |
           pip install "tox==3.20.1"
 
       - name: Run Checks
         run: |
-          tox -e coverage-ci
+          script -e -c "tox -e coverage-ci"
 
   lint_checks:
     name: Run Various Lint and Other Checks
     runs-on: ubuntu-latest
 
-    # needs: pre_job
-    # if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
 
     strategy:
       matrix:
@@ -123,22 +148,31 @@
 
       - name: Install OS / deb dependencies
         run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
           sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq graphviz gcc libvirt-dev
 
+      - name: Cache Python Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements-tests.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
       - name: Install Python Dependencies
         run: |
           pip install "tox==3.20.1"
 
       - name: Run Checks
         run: |
-          tox -e checks,import-timings,lint,pylint
+          script -e -c "tox -e checks,import-timings,lint,pylint"
 
   docs:
     name: Build and upload Documentation
     runs-on: ubuntu-latest
 
-    # needs: pre_job
-    # if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip == 'false' || github.ref == 'refs/heads/trunk' }}
 
     strategy:
       matrix:
@@ -161,15 +195,24 @@
 
       - name: Install OS / deb dependencies
         run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
           sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq graphviz gcc libvirt-dev
 
+      - name: Cache Python Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements-tests.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
       - name: Install Python Dependencies
         run: |
           pip install "tox==3.20.1"
 
       - name: Build Docs
         run: |
-          tox -e docs-travis
+          script -e -c "tox -e docs-travis"
 
       - name: Trigger ReadTheDocs build
         env:
diff --git a/.github/workflows/publish_pricing_to_s3.yml b/.github/workflows/publish_pricing_to_s3.yml
index b1d2626..1f079a0 100644
--- a/.github/workflows/publish_pricing_to_s3.yml
+++ b/.github/workflows/publish_pricing_to_s3.yml
@@ -40,4 +40,4 @@
           AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
           AWS_ACCESS_KEY_SECRET: ${{ secrets.AWS_ACCESS_KEY_SECRET }}
         run: |
-          tox -escrape-and-publish-provider-prices
+          script -e -c "tox -escrape-and-publish-provider-prices"
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..41e2e31
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule ".github/actions/skip-duplicate-actions"]
+	path = .github/actions/skip-duplicate-actions
+	url = https://github.com/fkirc/skip-duplicate-actions.git
diff --git a/CHANGES.rst b/CHANGES.rst
index a99a8bd..7c012fe 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,68 @@
 Changelog
 =========
 
+Changes in Apache Libcloud 3.3.2
+--------------------------------
+
+Storage
+~~~~~~~
+
+- [Azure Blobs] Respect Content-Encoding, Content-Language and Cache-Control
+  headers when uploading blobs via stream.
+
+  Reported by Veith Röthlingshöfer - @RunOrVeith.
+  (GITHUB-1550)
+
+- [Azure Blobs] Enable the Azure storage driver to be used with
+  Azure Government, Azure China, and Azure Private Link by setting
+  the driver host argument to the endpoint suffix for the environment.
+
+  Reported by Melissa Kersh - @mkcello96.
+  (GITHUB-1551)
+
+Compute
+~~~~~~~
+
+- [Equinix Metal] Various improvements to the driver.
+
+  (GITHUB-1548)
+  [Dimitris Galanis - @dimgal1]
+
+- [OpenStack] Fix error when getting the non-existent description of Ports.
+
+  (GITHUB-1543)
+  [Miguel Caballer - @micafer]
+
+- [Outscale] Various updates to the driver.
+  (GITHUB-1549)
+  [Tio Gobin - @tgn-outscale]
+
+- [Ovh] Fix driver so it doesn't throw if a node is in resizing state.
+  (GITHUB-1555)
+  [Rob Juffermans - @robjuffermans]
+
+- [OpenStack] Support volume v3 API endpoint in OpenStack driver.
+
+  (GITHUB-1561)
+  [Miguel Caballer - @micafer]
+
+- [GCE] Get accelerators field in the GCE machineType.
+
+  (GITHUB-1565)
+  [Miguel Caballer - @micafer]
+
+DNS
+~~~
+
+- [CloudFlare] Enable authentication via API Tokens.
+  [Clemens Wolff - @c-w]
+
+- [DigitalOcean] Fix the ``create_record()`` and ``update_record()`` methods to
+  pass ``None`` instead of the string value ``null`` for the priority, port and
+  weight parameters if they are not provided as method arguments.
+  (GITHUB-1570)
+  [Gasper Vozel - @karantan]
+
 Changes in Apache Libcloud 3.3.1
 --------------------------------
 
diff --git a/README.rst b/README.rst
index 978dac1..1fb4ed1 100644
--- a/README.rst
+++ b/README.rst
@@ -15,6 +15,9 @@
 .. image:: https://github.com/apache/libcloud/workflows/CI/badge.svg?branch=trunk
         :target: https://github.com/apache/libcloud/actions?query=workflow%3ACI
 
+.. image:: https://github.com/apache/libcloud/actions/workflows/integration-tests.yml/badge.svg?branch=trunk
+        :target: https://github.com/apache/libcloud/actions/workflows/integration-tests.yml
+
 .. image:: https://github.com/apache/libcloud/workflows/Publish%20pricing.json%20to%20S3%20bucket/badge.svg?branch=trunk
         :target: https://github.com/apache/libcloud/actions?query=workflow%3A%22Publish+pricing.json+to+S3+bucket%22
 
diff --git a/docs/compute/drivers/outscale.rst b/docs/compute/drivers/outscale.rst
index 16db968..9494db9 100644
--- a/docs/compute/drivers/outscale.rst
+++ b/docs/compute/drivers/outscale.rst
@@ -342,3 +342,18 @@
 * ``ex_list_node_types`` - Returns a ``list`` of ``dict``
 * ``ex_list_nodes_states`` - Returns a ``list`` of ``dict``
 * ``ex_update_node`` - Returns a ``list`` of ``dict``
+
+Certificate Authority
+---------------------
+* ``ex_create_certificate_authority`` - Returns a ``dict``
+* ``ex_delete_certificate_authority`` - Returns a ``bool``
+* ``ex_read_certificate_authorities`` - Returns a ``list`` of ``dict``
+
+API Access Rules
+----------------
+* ``ex_create_api_access_rule`` - Returns a ``dict``
+* ``ex_delete_api_access_rule`` - Returns a ``bool``
+* ``ex_read_api_access_rules`` - Returns a ``list`` of ``dict``
+* ``ex_update_api_access_rule`` - Returns a ``dict``
+
+
diff --git a/docs/dns/drivers/cloudflare.rst b/docs/dns/drivers/cloudflare.rst
index b55951e..fb814d7 100644
--- a/docs/dns/drivers/cloudflare.rst
+++ b/docs/dns/drivers/cloudflare.rst
@@ -13,12 +13,18 @@
 ------------------------
 
 To instantiate the driver you need to pass email address associated with your
-account and API key available on the `account page`_ to the driver constructor
+account and a Global API key available on the `account page`_ to the driver constructor
 as shown below.
 
 .. literalinclude:: /examples/dns/cloudflare/instantiate_driver.py
    :language: python
 
+Alternatively, authentication can also be done via an API Token as shown below.
+It is recommended that the token has at least the Zone.DNS permission.
+
+.. literalinclude:: /examples/dns/cloudflare/instantiate_driver_token.py
+   :language: python
+
 API Docs
 --------
 
diff --git a/docs/examples/dns/cloudflare/instantiate_driver_token.py b/docs/examples/dns/cloudflare/instantiate_driver_token.py
new file mode 100644
index 0000000..ebda102
--- /dev/null
+++ b/docs/examples/dns/cloudflare/instantiate_driver_token.py
@@ -0,0 +1,5 @@
+from libcloud.dns.types import Provider
+from libcloud.dns.providers import get_driver
+
+cls = get_driver(Provider.CLOUDFLARE)
+driver = cls('<api token>')
diff --git a/docs/examples/storage/azure/instantiate_gov.py b/docs/examples/storage/azure/instantiate_gov.py
new file mode 100644
index 0000000..46a6c01
--- /dev/null
+++ b/docs/examples/storage/azure/instantiate_gov.py
@@ -0,0 +1,8 @@
+from libcloud.storage.types import Provider
+from libcloud.storage.providers import get_driver
+
+cls = get_driver(Provider.AZURE_BLOBS)
+
+driver = cls(key='your storage account name',
+             secret='your access key',
+             host='blob.core.usgovcloudapi.net')
diff --git a/docs/storage/drivers/azure_blobs.rst b/docs/storage/drivers/azure_blobs.rst
index 824889c..956b3f1 100644
--- a/docs/storage/drivers/azure_blobs.rst
+++ b/docs/storage/drivers/azure_blobs.rst
@@ -33,6 +33,19 @@
 .. literalinclude:: /examples/storage/azure/instantiate.py
    :language: python
 
+Connecting to Azure Government
+------------------------------
+
+To target an `Azure Government`_ storage account, you can instantiate the driver
+by setting a custom storage host argument as shown below.
+
+.. literalinclude:: /examples/storage/azure/instantiate_gov.py
+   :language: python
+
+Setting a custom host argument can also be leveraged to customize the blob
+endpoint and connect to a storage account in `Azure China`_ or
+`Azure Private Link`_.
+
 Connecting to self-hosted Azure Storage implementations
 -------------------------------------------------------
 
@@ -51,3 +64,6 @@
 .. _`BlockBlobStorage accounts`: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#blockblobstorage-accounts
 .. _`Azurite storage emulator`: https://github.com/Azure/Azurite
 .. _`Azure Blob Storage on IoT Edge`: https://docs.microsoft.com/en-us/azure/iot-edge/how-to-store-data-blob
+.. _`Azure Government`: https://docs.microsoft.com/en-us/azure/azure-government/documentation-government-developer-guide
+.. _`Azure China`: https://docs.microsoft.com/en-us/azure/china/resources-developer-guide
+.. _`Azure Private Link`: https://docs.microsoft.com/en-us/azure/private-link/private-link-overview
diff --git a/integration/README.rst b/integration/README.rst
deleted file mode 100644
index ed61b5a..0000000
--- a/integration/README.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-Integration Test Module
-=======================
-
-This test suite is for running a live API endpoint and testing the apache-libcloud functionality as a full integration test
-
-Running the API service
------------------------
-
-.. code-block:: bash
-
-  pip install -r integration/requirements.txt
-  python -m integration.api
-
-Running the tests
------------------
-
-.. code-block:: bash
-
-   python -m integration
-   
diff --git a/integration/compute/README.rst b/integration/compute/README.rst
new file mode 100644
index 0000000..7c010f1
--- /dev/null
+++ b/integration/compute/README.rst
@@ -0,0 +1,20 @@
+Compute Integration Test Module
+===============================
+
+This test suite runs a live API endpoint and tests the apache-libcloud compute functionality as a full integration test.
+
+Running the API service
+-----------------------
+
+.. code-block:: bash
+
+  pip install -r integration/compute/requirements.txt
+  python -m integration.compute.api
+
+Running the tests
+-----------------
+
+.. code-block:: bash
+
+   python -m integration.compute
+   
diff --git a/integration/api/__init__.py b/integration/compute/__init__.py
similarity index 100%
copy from integration/api/__init__.py
copy to integration/compute/__init__.py
diff --git a/integration/__main__.py b/integration/compute/__main__.py
similarity index 94%
rename from integration/__main__.py
rename to integration/compute/__main__.py
index 1d04b00..21155bb 100644
--- a/integration/__main__.py
+++ b/integration/compute/__main__.py
@@ -16,9 +16,9 @@
 import unittest
 import sys
 
-from integration.driver.test import TestNodeDriver
+from integration.compute.driver.test import TestNodeDriver
 
-from integration.api.data import NODES, REPORT_DATA
+from integration.compute.api.data import NODES, REPORT_DATA
 
 
 class IntegrationTest(unittest.TestCase):
diff --git a/integration/api/__init__.py b/integration/compute/api/__init__.py
similarity index 100%
rename from integration/api/__init__.py
rename to integration/compute/api/__init__.py
diff --git a/integration/api/__main__.py b/integration/compute/api/__main__.py
similarity index 100%
rename from integration/api/__main__.py
rename to integration/compute/api/__main__.py
diff --git a/integration/api/data.py b/integration/compute/api/data.py
similarity index 100%
rename from integration/api/data.py
rename to integration/compute/api/data.py
diff --git a/integration/api/routes.py b/integration/compute/api/routes.py
similarity index 90%
rename from integration/api/routes.py
rename to integration/compute/api/routes.py
index 05fd0ec..b986ea3 100644
--- a/integration/api/routes.py
+++ b/integration/compute/api/routes.py
@@ -17,8 +17,8 @@
 
 from bottle import route
 
-from integration.api.data import NODES, REPORT_DATA
-from integration.api.util import secure
+from integration.compute.api.data import NODES, REPORT_DATA
+from integration.compute.api.util import secure
 
 
 @route('/compute/nodes', method='GET')
diff --git a/integration/api/util.py b/integration/compute/api/util.py
similarity index 95%
rename from integration/api/util.py
rename to integration/compute/api/util.py
index 679ff98..20c2e0e 100644
--- a/integration/api/util.py
+++ b/integration/compute/api/util.py
@@ -16,7 +16,7 @@
 from bottle import request
 from functools import wraps
 
-from integration.config import EXPECTED_AUTH
+from integration.compute.config import EXPECTED_AUTH
 
 
 def secure(f):
diff --git a/integration/config.py b/integration/compute/config.py
similarity index 100%
rename from integration/config.py
rename to integration/compute/config.py
diff --git a/integration/driver/__init__.py b/integration/compute/driver/__init__.py
similarity index 100%
rename from integration/driver/__init__.py
rename to integration/compute/driver/__init__.py
diff --git a/integration/driver/test.py b/integration/compute/driver/test.py
similarity index 100%
rename from integration/driver/test.py
rename to integration/compute/driver/test.py
diff --git a/integration/requirements.txt b/integration/compute/requirements.txt
similarity index 100%
rename from integration/requirements.txt
rename to integration/compute/requirements.txt
diff --git a/integration/storage/README.rst b/integration/storage/README.rst
new file mode 100644
index 0000000..91d6ca8
--- /dev/null
+++ b/integration/storage/README.rst
@@ -0,0 +1,18 @@
+Storage Integration Test Module
+===============================
+
+This test suite is for validating the apache-libcloud storage functionality against live storage backends.
+
+Setting up the test suite
+-------------------------
+
+.. code-block:: bash
+
+  pip install -r integration/storage/requirements.txt
+
+Running the tests
+-----------------
+
+.. code-block:: bash
+
+  python -m integration.storage
diff --git a/integration/api/__init__.py b/integration/storage/__init__.py
similarity index 100%
copy from integration/api/__init__.py
copy to integration/storage/__init__.py
diff --git a/integration/api/routes.py b/integration/storage/__main__.py
similarity index 70%
copy from integration/api/routes.py
copy to integration/storage/__main__.py
index 05fd0ec..06684cd 100644
--- a/integration/api/routes.py
+++ b/integration/storage/__main__.py
@@ -13,21 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
-
-from bottle import route
-
-from integration.api.data import NODES, REPORT_DATA
-from integration.api.util import secure
+import os
+import sys
+import unittest
 
 
-@route('/compute/nodes', method='GET')
-@secure
-def list_nodes():
-    return json.dumps(NODES)
-
-
-@route('/compute/report_data', method='GET')
-@secure
-def ex_report_data():
-    return REPORT_DATA
+if __name__ == '__main__':
+    loader = unittest.TestLoader()
+    tests = loader.discover(os.path.dirname(__file__))
+    runner = unittest.runner.TextTestRunner(verbosity=3)
+    result = runner.run(tests)
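+    # Exit non-zero when any test errored; the exit code is the error count.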
+    sys.exit(len(result.errors))
diff --git a/integration/storage/base.py b/integration/storage/base.py
new file mode 100644
index 0000000..56786cd
--- /dev/null
+++ b/integration/storage/base.py
@@ -0,0 +1,391 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gzip
+import io
+import os
+import random
+import re
+import socket
+import string
+import sys
+import tempfile
+import time
+import unittest
+
+import requests
+
+try:
+    import docker
+except ImportError:
+    docker = None
+
+from libcloud.common.types import LibcloudError
+from libcloud.storage import providers, types
+
+
+MB = 1024 * 1024
+
+
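+# The shared test cases are nested inside a holder class so that unittest
+# discovery only runs the concrete subclasses, never these bases directly.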
+class Integration:
+    class TestBase(unittest.TestCase):
+        provider = None
+        account = None
+        secret = None
+
+        def setUp(self):
+            for required in 'provider', 'account', 'secret':
+                value = getattr(self, required, None)
+                if value is None:
+                    raise unittest.SkipTest('config {} not set'.format(required))
+
+            kwargs = {'key': self.account, 'secret': self.secret}
+
+            for optional in 'host', 'port', 'secure':
+                value = getattr(self, optional, None)
+                if value is not None:
+                    kwargs[optional] = value
+
+            driver_class = providers.get_driver(self.provider)
+            self.driver = driver_class(**kwargs)
+
+        def tearDown(self):
+            for container in self.driver.list_containers():
+                for obj in container.list_objects():
+                    try:
+                        obj.delete()
+                    except LibcloudError as ex:
+                        print(
+                            'Unable to delete object {} in container {}: {}. '
+                            'Delete it manually.'
+                            .format(obj.name, container.name, ex),
+                            file=sys.stderr
+                        )
+
+                try:
+                    container.delete()
+                except LibcloudError as ex:
+                    print(
+                        'Unable to delete container {}: {}. '
+                        'Delete it manually.'
+                        .format(container.name, ex),
+                        file=sys.stderr
+                    )
+
+        def test_containers(self):
+            # make a new container
+            container_name = random_container_name()
+            container = self.driver.create_container(container_name)
+            self.assertEqual(container.name, container_name)
+            container = self.driver.get_container(container_name)
+            self.assertEqual(container.name, container_name)
+
+            # check that an existing container can't be re-created
+            with self.assertRaises(types.ContainerAlreadyExistsError):
+                self.driver.create_container(container_name)
+
+            # check that the new container can be listed
+            containers = self.driver.list_containers()
+            self.assertEqual([c.name for c in containers], [container_name])
+
+            # delete the container
+            self.driver.delete_container(container)
+
+            # check that a deleted container can't be looked up
+            with self.assertRaises(types.ContainerDoesNotExistError):
+                self.driver.get_container(container_name)
+
+            # check that the container is deleted
+            containers = self.driver.list_containers()
+            self.assertEqual([c.name for c in containers], [])
+
+        def _test_objects(self, do_upload, do_download, size=1 * MB):
+            content = os.urandom(size)
+            blob_name = 'testblob'
+            container = self.driver.create_container(random_container_name())
+
+            # upload a file
+            obj = do_upload(container, blob_name, content)
+            self.assertEqual(obj.name, blob_name)
+            obj = self.driver.get_object(container.name, blob_name)
+
+            # check that the file can be listed
+            blobs = self.driver.list_container_objects(container)
+            self.assertEqual([blob.name for blob in blobs], [blob_name])
+
+            # upload another file and check it's excluded in prefix listing
+            do_upload(container, blob_name[::-1], content[::-1])
+            blobs = self.driver.list_container_objects(
+                container, prefix=blob_name[0:3]
+            )
+            self.assertEqual([blob.name for blob in blobs], [blob_name])
+
+            # check that the file can be read back
+            self.assertEqual(do_download(obj), content)
+
+            # delete the file
+            self.driver.delete_object(obj)
+
+            # check that a missing file can't be deleted or looked up
+            self.assert_file_is_missing(container, obj)
+
+            # check that the file is deleted
+            blobs = self.driver.list_container_objects(container)
+            self.assertEqual([blob.name for blob in blobs], [blob_name[::-1]])
+
+        def assert_file_is_missing(self, container, obj):
+            with self.assertRaises(types.ObjectDoesNotExistError):
+                self.driver.delete_object(obj)
+            with self.assertRaises(types.ObjectDoesNotExistError):
+                self.driver.get_object(container.name, obj.name)
+
+        def test_objects(self, size=1 * MB):
+            def do_upload(container, blob_name, content):
+                infile = self._create_tempfile(content=content)
+                return self.driver.upload_object(infile, container, blob_name)
+
+            def do_download(obj):
+                outfile = self._create_tempfile()
+                self.driver.download_object(obj, outfile, overwrite_existing=True)
+                with open(outfile, 'rb') as fobj:
+                    return fobj.read()
+
+            self._test_objects(do_upload, do_download, size)
+
+        def test_objects_range_downloads(self):
+            blob_name = 'testblob-range'
+            content = b'0123456789'
+            container = self.driver.create_container(random_container_name())
+
+            obj = self.driver.upload_object(
+                self._create_tempfile(content=content),
+                container,
+                blob_name
+            )
+            self.assertEqual(obj.name, blob_name)
+            self.assertEqual(obj.size, len(content))
+
+            obj = self.driver.get_object(container.name, blob_name)
+            self.assertEqual(obj.name, blob_name)
+            self.assertEqual(obj.size, len(content))
+
+            values = [
+                {'start_bytes': 0, 'end_bytes': 1, 'expected_content': b'0'},
+                {'start_bytes': 1, 'end_bytes': 5, 'expected_content': b'1234'},
+                {'start_bytes': 5, 'end_bytes': None, 'expected_content': b'56789'},
+                {'start_bytes': 5, 'end_bytes': len(content), 'expected_content': b'56789'},
+                {'start_bytes': 0, 'end_bytes': None, 'expected_content': b'0123456789'},
+                {'start_bytes': 0, 'end_bytes': len(content), 'expected_content': b'0123456789'},
+            ]
+
+            for value in values:
+                # 1. download_object_range
+                start_bytes = value['start_bytes']
+                end_bytes = value['end_bytes']
+                outfile = self._create_tempfile()
+
+                result = self.driver.download_object_range(
+                    obj,
+                    outfile,
+                    start_bytes=start_bytes,
+                    end_bytes=end_bytes,
+                    overwrite_existing=True,
+                )
+                self.assertTrue(result)
+
+                with open(outfile, 'rb') as fobj:
+                    downloaded_content = fobj.read()
+
+                if end_bytes is not None:
+                    expected_content = content[start_bytes:end_bytes]
+                else:
+                    expected_content = content[start_bytes:]
+
+                msg = 'Expected "%s", got "%s" for values: %s' % (
+                    expected_content, downloaded_content, str(value)
+                )
+                self.assertEqual(downloaded_content, expected_content, msg)
+                self.assertEqual(downloaded_content, value['expected_content'], msg)
+
+                # 2. download_object_range_as_stream
+                downloaded_content = read_stream(
+                    self.driver.download_object_range_as_stream(
+                        obj, start_bytes=start_bytes, end_bytes=end_bytes
+                    )
+                )
+                self.assertEqual(downloaded_content, expected_content)
+
+        @unittest.skipUnless(os.getenv('LARGE_FILE_SIZE_MB'), 'config not set')
+        def test_objects_large(self):
+            size = int(float(os.environ['LARGE_FILE_SIZE_MB']) * MB)
+            self.test_objects(size)
+
+        def test_objects_stream_io(self):
+            def do_upload(container, blob_name, content):
+                content = io.BytesIO(content)
+                return self.driver.upload_object_via_stream(content, container, blob_name)
+
+            def do_download(obj):
+                return read_stream(self.driver.download_object_as_stream(obj))
+
+            self._test_objects(do_upload, do_download)
+
+        def test_objects_stream_iterable(self):
+            def do_upload(container, blob_name, content):
+                content = iter([content[i:i + 1] for i in range(len(content))])
+                return self.driver.upload_object_via_stream(content, container, blob_name)
+
+            def do_download(obj):
+                return read_stream(self.driver.download_object_as_stream(obj))
+
+            self._test_objects(do_upload, do_download)
+
+        def test_upload_via_stream_with_content_encoding(self):
+            object_name = 'content_encoding.gz'
+            content = gzip.compress(os.urandom(MB // 100))
+            container = self.driver.create_container(random_container_name())
+            self.driver.upload_object_via_stream(
+                iter(content),
+                container,
+                object_name,
+                headers={'Content-Encoding': 'gzip'},
+            )
+
+            obj = self.driver.get_object(container.name, object_name)
+
+            self.assertEqual(obj.extra.get('content_encoding'), 'gzip')
+
+        def test_cdn_url(self):
+            content = os.urandom(MB // 100)
+            container = self.driver.create_container(random_container_name())
+            obj = self.driver.upload_object_via_stream(iter(content), container, 'cdn')
+
+            response = requests.get(self.driver.get_object_cdn_url(obj))
+            response.raise_for_status()
+
+            self.assertEqual(response.content, content)
+
+        def _create_tempfile(self, prefix='', content=b''):
+            fobj, path = tempfile.mkstemp(prefix=prefix, text=False)
+            os.write(fobj, content)
+            os.close(fobj)
+            self.addCleanup(os.remove, path)
+            return path
+
+    class ContainerTestBase(TestBase):
+        image = None
+        version = 'latest'
+        environment = {}
+        command = None
+        ready_message = None
+
+        host = 'localhost'
+        port = None
+        secure = False
+
+        client = None
+        container = None
+        verbose = False
+
+        @classmethod
+        def setUpClass(cls):
+            if docker is None:
+                raise unittest.SkipTest('missing docker library')
+
+            try:
+                cls.client = docker.from_env()
+            except docker.errors.DockerException:
+                raise unittest.SkipTest('unable to create docker client')
+
+            for required in 'image', 'port':
+                value = getattr(cls, required, None)
+                if value is None:
+                    raise unittest.SkipTest('config {} not set'.format(required))
+
+            cls.container = cls.client.containers.run(
+                '{}:{}'.format(cls.image, cls.version),
+                command=cls.command,
+                detach=True,
+                auto_remove=True,
+                ports={cls.port: cls.port},
+                environment=cls.environment,
+            )
+
+            wait_for(cls.port, cls.host)
+
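+            # An open port alone doesn't mean the service is ready; poll the
+            # container logs until the configured ready message appears.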
+            container_ready = cls.ready_message is None
+
+            while not container_ready:
+                time.sleep(1)
+
+                container_ready = any(
+                    cls.ready_message in line
+                    for line in cls.container.logs().splitlines()
+                )
+
+        @classmethod
+        def tearDownClass(cls):
+            if cls.verbose:
+                for line in cls.container.logs().splitlines():
+                    print(line)
+
+            try:
+                cls.container.kill()
+            except docker.errors.DockerException as ex:
+                print(
+                    'Unable to terminate docker container {}: {}. '
+                    'Stop it manually.'
+                    .format(cls.container.short_id, ex),
+                    file=sys.stderr
+                )
+
+
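+# Block until a TCP connection to host:port succeeds, retrying every second
+# and raising TimeoutError once `timeout` seconds have elapsed.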
+def wait_for(port, host='localhost', timeout=10):
+    start_time = time.perf_counter()
+
+    while True:
+        try:
+            with socket.create_connection((host, port), timeout=timeout):
+                break
+        except OSError as ex:
+            if time.perf_counter() - start_time >= timeout:
+                raise TimeoutError(
+                    'Waited too long for the port {} on host {} to start accepting '
+                    'connections.'.format(port, host)
+                ) from ex
+
+            time.sleep(1)
+
+
+def random_string(length, alphabet=string.ascii_lowercase + string.digits):
+    return ''.join(random.choice(alphabet) for _ in range(length))
+
+
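+# Build a name satisfying the strictest backend container-name rules: at most
+# 63 characters, lowercase letters, digits and single hyphens only.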
+def random_container_name(prefix='test'):
+    max_length = 63
+    suffix = random_string(max_length)
+    name = prefix + suffix
+    name = re.sub('[^a-z0-9-]', '-', name)
+    name = re.sub('-+', '-', name)
+    name = name[:max_length]
+    name = name.lower()
+    return name
+
+
+def read_stream(stream):
+    buffer = io.BytesIO()
+    buffer.writelines(stream)
+    buffer.seek(0)
+    return buffer.read()
diff --git a/integration/storage/requirements.txt b/integration/storage/requirements.txt
new file mode 100644
index 0000000..cbdd599
--- /dev/null
+++ b/integration/storage/requirements.txt
@@ -0,0 +1,5 @@
+azure-identity
+azure-mgmt-resource
+azure-mgmt-storage
+docker
+requests
diff --git a/integration/storage/test_azure_blobs.py b/integration/storage/test_azure_blobs.py
new file mode 100644
index 0000000..db1a7ad
--- /dev/null
+++ b/integration/storage/test_azure_blobs.py
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the 'License'); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import os
+import string
+import sys
+import unittest
+
+try:
+    from azure import identity
+    from azure.mgmt import resource
+    from azure.mgmt import storage
+    from azure.mgmt.resource.resources import models as resource_models
+    from azure.mgmt.storage import models as storage_models
+except ImportError as e:
+    print("Failed to import from azure module: %s" % (str(e)))
+    identity = resource = storage = resource_models = storage_models = None
+
+from integration.storage.base import Integration, random_string
+
+DEFAULT_TIMEOUT_SECONDS = 300
+DEFAULT_AZURE_LOCATION = 'EastUS2'
+MAX_STORAGE_ACCOUNT_NAME_LENGTH = 24
+
+
+class AzuriteStorageTest(Integration.ContainerTestBase):
+    provider = 'azure_blobs'
+
+    account = 'devstoreaccount1'
+    secret = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
+
+    image = 'arafato/azurite'
+    port = 10000
+    environment = {'executable': 'blob'}
+    ready_message = b'Azure Blob Storage Emulator listening'
+
+    has_sas_support = False
+
+    def test_cdn_url(self):
+        if not self.has_sas_support:
+            self.skipTest('Storage backend has no account SAS support')
+
+        super().test_cdn_url()
+
+
+class AzuriteV3StorageTest(AzuriteStorageTest):
+    image = 'mcr.microsoft.com/azure-storage/azurite'
+    ready_message = b'Azurite Blob service is successfully listening'
+
+    has_sas_support = True
+
+    def test_upload_via_stream_with_content_encoding(self):
+        self.skipTest('Possible bug in AzuriteV3, see https://github.com/Azure/Azurite/issues/629')
+
+
+class IotedgeStorageTest(Integration.ContainerTestBase):
+    provider = 'azure_blobs'
+
+    account = random_string(10, string.ascii_lowercase)
+    secret = base64.b64encode(random_string(20).encode('ascii')).decode('ascii')
+
+    image = 'mcr.microsoft.com/azure-blob-storage'
+    port = 11002
+    environment = {'LOCAL_STORAGE_ACCOUNT_NAME': account, 'LOCAL_STORAGE_ACCOUNT_KEY': secret}
+    ready_message = b'BlobService - StartAsync completed'
+
+
+class StorageTest(Integration.TestBase):
+    provider = 'azure_blobs'
+
+    kind = storage_models.Kind.STORAGE if storage_models else None
+    access_tier = None  # type: storage_models.AccessTier
+
+    @classmethod
+    def setUpClass(cls):
+        if identity is None:
+            raise unittest.SkipTest('missing azure-identity library')
+
+        if resource is None or resource_models is None:
+            raise unittest.SkipTest('missing azure-mgmt-resource library')
+
+        if storage is None or storage_models is None:
+            raise unittest.SkipTest('missing azure-mgmt-storage library')
+
+        config = {
+            key: os.getenv(key)
+            for key in (
+                'AZURE_TENANT_ID',
+                'AZURE_SUBSCRIPTION_ID',
+                'AZURE_CLIENT_ID',
+                'AZURE_CLIENT_SECRET',
+            )
+        }
+
+        for key, value in config.items():
+            if not value:
+                raise unittest.SkipTest('missing environment variable %s' % key)
+
+        credentials = identity.ClientSecretCredential(
+            tenant_id=config['AZURE_TENANT_ID'],
+            client_id=config['AZURE_CLIENT_ID'],
+            client_secret=config['AZURE_CLIENT_SECRET'],
+        )
+
+        resource_client = resource.ResourceManagementClient(
+            credentials,
+            config['AZURE_SUBSCRIPTION_ID'],
+        )
+
+        storage_client = storage.StorageManagementClient(
+            credentials,
+            config['AZURE_SUBSCRIPTION_ID'],
+        )
+
+        location = os.getenv('AZURE_LOCATION', DEFAULT_AZURE_LOCATION)
+        name = 'libcloud'
+        name += random_string(MAX_STORAGE_ACCOUNT_NAME_LENGTH - len(name))
+        timeout = float(os.getenv('AZURE_TIMEOUT_SECONDS', DEFAULT_TIMEOUT_SECONDS))
+
+        group = resource_client.resource_groups.create_or_update(
+            resource_group_name=name,
+            parameters=resource_models.ResourceGroup(
+                location=location,
+                tags={
+                    'test': cls.__name__,
+                    'run': os.getenv('GITHUB_RUN_ID', '-'),
+                },
+            ),
+            timeout=timeout,
+        )
+
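+        # Register the group cleanup before creating the storage account so the
+        # resource group (and everything in it) is removed even if later
+        # provisioning steps fail.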
+        cls.addClassCleanup(lambda: resource_client.resource_groups
+                            .begin_delete(group.name)
+                            .result(timeout))
+
+        account = storage_client.storage_accounts.begin_create(
+            resource_group_name=group.name,
+            account_name=name,
+            parameters=storage_models.StorageAccountCreateParameters(
+                sku=storage_models.Sku(name=storage_models.SkuName.STANDARD_LRS),
+                access_tier=cls.access_tier,
+                kind=cls.kind,
+                location=location,
+            ),
+        ).result(timeout)
+
+        keys = storage_client.storage_accounts.list_keys(
+            resource_group_name=group.name,
+            account_name=account.name,
+            timeout=timeout,
+        )
+
+        cls.account = account.name
+        cls.secret = keys.keys[0].value
+
+
+class StorageV2Test(StorageTest):
+    kind = storage_models.Kind.STORAGE_V2 if storage_models else None
+
+
+class BlobStorageTest(StorageTest):
+    kind = storage_models.Kind.BLOB_STORAGE if storage_models else None
+    access_tier = storage_models.AccessTier.HOT if storage_models else None
+
+
+if __name__ == '__main__':
+    sys.exit(unittest.main())
diff --git a/integration/storage/test_minio.py b/integration/storage/test_minio.py
new file mode 100644
index 0000000..a6d9af5
--- /dev/null
+++ b/integration/storage/test_minio.py
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the 'License'); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+from integration.storage.base import Integration
+from libcloud.storage import types
+
+
+class MinioTest(Integration.ContainerTestBase):
+    provider = 'minio'
+
+    account = 'minioaccount'
+    secret = 'miniopassword'
+
+    image = 'minio/minio'
+    port = 9000
+    environment = {'MINIO_ROOT_USER': account, 'MINIO_ROOT_PASSWORD': secret}
+    command = ['server', '/data']
+    ready_message = b'IAM initialization complete'
+
+    def test_cdn_url(self):
+        self.skipTest('Not implemented in driver')
+
+    def assert_file_is_missing(self, container, obj):
+        with self.assertRaises(types.ObjectDoesNotExistError):
+            self.driver.get_object(container.name, obj.name)
+
+
+if __name__ == '__main__':
+    sys.exit(unittest.main())
diff --git a/libcloud/common/base.py b/libcloud/common/base.py
index 48b0e22..b944107 100644
--- a/libcloud/common/base.py
+++ b/libcloud/common/base.py
@@ -984,19 +984,6 @@
         self.key = key
         self.secret = secret
         self.secure = secure
-        args = [self.key]
-
-        if self.secret is not None:
-            args.append(self.secret)
-
-        args.append(secure)
-
-        if host is not None:
-            args.append(host)
-
-        if port is not None:
-            args.append(port)
-
         self.api_version = api_version
         self.region = region
 
@@ -1005,8 +992,23 @@
                             'retry_delay': kwargs.pop('retry_delay', None),
                             'backoff': kwargs.pop('backoff', None),
                             'proxy_url': kwargs.pop('proxy_url', None)})
-        self.connection = self.connectionCls(*args, **conn_kwargs)
 
+        args = [self.key]
+
+        if self.secret is not None:
+            args.append(self.secret)
+
+        args.append(secure)
+
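+        # Let a host provided via the connection kwargs (e.g. from a driver's
+        # _ex_connection_class_kwargs()) take precedence over the host argument.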
+        host = conn_kwargs.pop('host', None) or host
+
+        if host is not None:
+            args.append(host)
+
+        if port is not None:
+            args.append(port)
+
+        self.connection = self.connectionCls(*args, **conn_kwargs)
         self.connection.driver = self
         self.connection.connect()
 
diff --git a/libcloud/compute/drivers/equinixmetal.py b/libcloud/compute/drivers/equinixmetal.py
index 2dc93fa..b14e54a 100644
--- a/libcloud/compute/drivers/equinixmetal.py
+++ b/libcloud/compute/drivers/equinixmetal.py
@@ -208,7 +208,12 @@
         result = yield from future
         retval.extend(result)
     return retval""" % resource_type, glob, loc)
-        loop = asyncio.get_event_loop()
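+        # get_event_loop() raises RuntimeError in threads without an event
+        # loop, so create and install a fresh loop before retrying.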
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            asyncio.set_event_loop(asyncio.new_event_loop())
+            loop = asyncio.get_event_loop()
+
         return loop.run_until_complete(loc['_list_async'](loc['self']))
 
     def ex_list_nodes_for_project(self, ex_project_id, include='plan', page=1,
@@ -246,7 +251,7 @@
 
     def create_node(self, name, size, image, location,
                     ex_project_id=None, ip_addresses=[], cloud_init=None,
-                    **kwargs):
+                    disk=None, disk_size=0, **kwargs):
         """
         Create a node.
 
@@ -281,10 +286,10 @@
             error_message = data.object.get('error_message', message)
             raise ValueError('Failed to create node: %s' % (error_message))
         node = self._to_node(data=data.object)
-        if kwargs.get('disk'):
-            self.attach_volume(node, kwargs.get('disk'))
-        if kwargs.get('disk_size'):
-            volume = self.create_volume(size=kwargs.get('disk_size'),
+        if disk:
+            self.attach_volume(node, disk)
+        if disk_size:
+            volume = self.create_volume(size=disk_size,
                                         location=location)
             self.attach_volume(node, volume)
         return node
@@ -433,7 +438,11 @@
         return node
 
     def _to_image(self, data):
-        extra = {'distro': data['distro'], 'version': data['version']}
+        extra = {
+            'distro': data['distro'],
+            'version': data['version'],
+            'supported_sizes': data.get('provisionable_on', [])
+        }
         return NodeImage(id=data['slug'], name=data['name'], extra=extra,
                          driver=self)
 
@@ -443,22 +452,31 @@
                             driver=self, extra=extra)
 
     def _to_size(self, data):
-        cpus = data['specs']['cpus'][0].get('count')
+        try:
+            cpus = data['specs']['cpus'][0].get('count')
+        except KeyError:
+            cpus = None
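+        # Facility hrefs look like '/metal/v1/facilities/<slug>'; strip the
+        # prefix to keep just the region slug.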
+        regions = [region.get('href').replace('/metal/v1/facilities/', '')
+                   for region in data.get('available_in', [])]
         extra = {'description': data['description'], 'line': data['line'],
-                 'cpus': cpus}
-
-        ram = data['specs']['memory']['total']
-        disk = 0
-        for disks in data['specs']['drives']:
-            disk_size = disks['size'].replace('GB', '')
-            if 'TB' in disk_size:
-                disk_size = float(disks['size'].replace('TB', '')) * 1000
-            disk += disks['count'] * int(disk_size)
+                 'cpus': cpus, 'regions': regions}
+        try:
+            ram = int(data['specs']['memory']['total'].replace('GB', '')) * 1024  # noqa
+        except KeyError:
+            ram = None
+        disk = None
+        if data['specs'].get('drives', ''):
+            disk = 0
+            for disks in data['specs']['drives']:
+                disk_size = disks['size'].replace('GB', '')
+                if 'TB' in disk_size:
+                    disk_size = float(disks['size'].replace('TB', '')) * 1000
+                disk += disks['count'] * int(disk_size)
         name = "%s - %s RAM" % (data.get('name'), ram)
         price = data['pricing'].get('hour')
         return NodeSize(id=data['slug'], name=name,
-                        ram=int(ram.replace('GB', '')) * 1024, disk=disk,
-                        bandwidth=0, price=price, extra=extra, driver=self)
+                        ram=ram, disk=disk, bandwidth=0,
+                        price=price, extra=extra, driver=self)
 
     def _to_key_pairs(self, data):
         extra = {'label': data['label'],
diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py
index 414cf75..917d5f9 100644
--- a/libcloud/compute/drivers/gce.py
+++ b/libcloud/compute/drivers/gce.py
@@ -9009,6 +9009,7 @@
         extra['description'] = machine_type.get('description')
         extra['guestCpus'] = machine_type.get('guestCpus')
         extra['creationTimestamp'] = machine_type.get('creationTimestamp')
+        extra['accelerators'] = machine_type.get('accelerators', [])
         try:
             size_name = machine_type['name'][:2]
             location = extra['zone'].name
diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py
index a33f31b..1116c0b 100644
--- a/libcloud/compute/drivers/openstack.py
+++ b/libcloud/compute/drivers/openstack.py
@@ -45,7 +45,7 @@
                                    StorageVolume, VolumeSnapshot)
 from libcloud.compute.base import KeyPair
 from libcloud.compute.types import NodeState, StorageVolumeState, Provider, \
-    VolumeSnapshotState, Type
+    VolumeSnapshotState, Type, LibcloudError
 from libcloud.pricing import get_size_price
 from libcloud.utils.xml import findall
 from libcloud.utils.py3 import ET
@@ -99,6 +99,12 @@
     service_region = 'RegionOne'
 
 
+class OpenStackVolumeV3Connection(OpenStackBaseConnection):
+    service_type = 'volumev3'
+    service_name = 'cinderv3'
+    service_region = 'RegionOne'
+
+
 class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
     """
     Base OpenStack node driver. Should not be used directly.
@@ -120,6 +126,7 @@
         'PASSWORD': NodeState.PENDING,
         'RESCUE': NodeState.PENDING,
         'REBOOT': NodeState.REBOOTING,
+        'RESIZE': NodeState.RECONFIGURING,
         'HARD_REBOOT': NodeState.REBOOTING,
         'SHARE_IP': NodeState.PENDING,
         'SHARE_IP_NO_CONFIG': NodeState.PENDING,
@@ -2741,6 +2748,15 @@
         return json.dumps(data)
 
 
+class OpenStack_2_VolumeV3Connection(OpenStackVolumeV3Connection):
+    responseCls = OpenStack_1_1_Response
+    accept_format = 'application/json'
+    default_content_type = 'application/json; charset=UTF-8'
+
+    def encode_data(self, data):
+        return json.dumps(data)
+
+
 class OpenStack_2_PortInterfaceState(Type):
     """
     Standard states of OpenStack_2_PortInterfaceState
@@ -2794,7 +2810,10 @@
     # API of the cinder service:
     # https://developer.openstack.org/api-ref/block-storage/
     volumev2_connectionCls = OpenStack_2_VolumeV2Connection
+    volumev3_connectionCls = OpenStack_2_VolumeV3Connection
     volumev2_connection = None
+    volumev3_connection = None
+    volume_connection = None
 
     type = Provider.OPENSTACK
 
@@ -2834,6 +2853,11 @@
             kwargs['ex_force_base_url'] = \
                 str(kwargs.pop('ex_force_volume_url',
                                original_ex_force_base_url))
+        # the V3 API
+        self.connectionCls = self.volumev3_connectionCls
+        super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
+        self.volumev3_connection = self.connection
+        # the V2 API
         self.connectionCls = self.volumev2_connectionCls
         super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
         self.volumev2_connection = self.connection
@@ -2874,7 +2898,7 @@
                 allowed_address_pairs=element['allowed_address_pairs'],
                 binding_vnic_type=element['binding:vnic_type'],
                 device_id=element['device_id'],
-                description=element['description'],
+                description=element.get('description', None),
                 device_owner=element['device_owner'],
                 fixed_ips=element['fixed_ips'],
                 mac_address=element['mac_address'],
@@ -3440,6 +3464,21 @@
         )
         return self._to_port(response.object['port'])
 
+    def _get_volume_connection(self):
+        """
+        Get the correct Volume connection (v3 or v2)
+        """
+        if not self.volume_connection:
+            try:
+                # Try to use v3 API first
+                # if the endpoint is not found
+                self.volumev3_connection.get_service_catalog()
+                self.volume_connection = self.volumev3_connection
+            except LibcloudError:
+                # then return the v2 conn
+                self.volume_connection = self.volumev2_connection
+        return self.volume_connection
+
     def list_volumes(self):
         """
         Get a list of Volumes that are available.
@@ -3447,7 +3486,7 @@
         :rtype: ``list`` of :class:`StorageVolume`
         """
         return self._to_volumes(self._paginated_request(
-            '/volumes/detail', 'volumes', self.volumev2_connection))
+            '/volumes/detail', 'volumes', self._get_volume_connection()))
 
     def ex_get_volume(self, volumeId):
         """
@@ -3459,7 +3498,8 @@
         :return: :class:`StorageVolume`
         """
         return self._to_volume(
-            self.volumev2_connection.request('/volumes/%s' % volumeId).object)
+            self._get_volume_connection().request('/volumes/%s' % volumeId)
+                .object)
 
     def create_volume(self, size, name, location=None, snapshot=None,
                       ex_volume_type=None, ex_image_ref=None):
@@ -3513,9 +3553,9 @@
         if snapshot:
             volume['snapshot_id'] = snapshot.id
 
-        resp = self.volumev2_connection.request('/volumes',
-                                                method='POST',
-                                                data={'volume': volume})
+        resp = self._get_volume_connection().request('/volumes',
+                                                     method='POST',
+                                                     data={'volume': volume})
         return self._to_volume(resp.object)
 
     def destroy_volume(self, volume):
@@ -3527,8 +3567,8 @@
 
         :rtype: ``bool``
         """
-        return self.volumev2_connection.request('/volumes/%s' % volume.id,
-                                                method='DELETE').success()
+        return self._get_volume_connection().request('/volumes/%s' % volume.id,
+                                                     method='DELETE').success()
 
     def ex_list_snapshots(self):
         """
@@ -3537,7 +3577,7 @@
         :rtype: ``list`` of :class:`VolumeSnapshot`
         """
         return self._to_snapshots(self._paginated_request(
-            '/snapshots/detail', 'snapshots', self.volumev2_connection))
+            '/snapshots/detail', 'snapshots', self._get_volume_connection()))
 
     def create_volume_snapshot(self, volume, name=None, ex_description=None,
                                ex_force=True):
@@ -3569,8 +3609,8 @@
             data['snapshot']['description'] = ex_description
 
         return self._to_snapshot(
-            self.volumev2_connection.request('/snapshots', method='POST',
-                                             data=data).object)
+            self._get_volume_connection().request('/snapshots', method='POST',
+                                                  data=data).object)
 
     def destroy_volume_snapshot(self, snapshot):
         """
@@ -3581,8 +3621,8 @@
 
         :rtype: ``bool``
         """
-        resp = self.volumev2_connection.request('/snapshots/%s' % snapshot.id,
-                                                method='DELETE')
+        resp = self._get_volume_connection().request(
+            '/snapshots/%s' % snapshot.id, method='DELETE')
         return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
 
     def ex_list_security_groups(self):
diff --git a/libcloud/compute/drivers/outscale.py b/libcloud/compute/drivers/outscale.py
index 5b84095..5a59803 100644
--- a/libcloud/compute/drivers/outscale.py
+++ b/libcloud/compute/drivers/outscale.py
@@ -7709,6 +7709,328 @@
             return response.json()["VpnConnections"]
         return response.json()
 
+    def ex_create_certificate_authority(
+        self,
+        ca_perm: str,
+        description: str = None,
+        dry_run: bool = False
+    ):
+        """
+        Creates a Client Certificate Authority (CA).
+
+        :param      ca_perm: The CA in PEM format. (required)
+        :type       ca_perm: ``str``
+
+        :param      description: The description of the CA.
+        :type       description: ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: the created Ca.
+        :rtype: ``dict``
+        """
+        action = "CreateCa"
+        data = {"DryRun": dry_run, "CaPerm": ca_perm}
+        if description is not None:
+            data.update({"Description": description})
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["Ca"]
+        return response.json()
+
+    def ex_delete_certificate_authority(
+        self,
+        ca_id: str,
+        dry_run: bool = False
+    ):
+        """
+        Deletes a specified Client Certificate Authority (CA).
+
+        :param      ca_id: The ID of the CA you want to delete. (required)
+        :type       ca_id: ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: ``True`` if the deletion succeeded, otherwise the request
+        result.
+        :rtype: ``bool`` or ``dict``
+        """
+        action = "DeleteCa"
+        data = {"DryRun": dry_run, "CaId": ca_id}
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return True
+        return response.json()
+
+    def ex_read_certificate_authorities(
+        self,
+        ca_fingerprints: List[str] = None,
+        ca_ids: List[str] = None,
+        descriptions: List[str] = None,
+        dry_run: bool = False
+    ):
+        """
+        Returns information about one or more of your Client Certificate
+        Authorities (CAs).
+
+        :param      ca_fingerprints: The fingerprints of the CAs.
+        :type       ca_fingerprints: ``list`` of ``str``
+
+        :param      ca_ids: The IDs of the CAs.
+        :type       ca_ids: ``list`` of ``str``
+
+        :param      descriptions: The descriptions of the CAs.
+        :type       descriptions: ``list`` of ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: a list of all CAs matching the given filters.
+        :rtype: ``list`` of ``dict``
+        """
+        action = "ReadCas"
+        data = {"DryRun": dry_run, "Filters": {}}
+        if ca_fingerprints is not None:
+            data["Filters"].update({"CaFingerprints": ca_fingerprints})
+        if ca_ids is not None:
+            data["Filters"].update({"CaIds": ca_ids})
+        if descriptions is not None:
+            data["Filters"].update({"Descriptions": descriptions})
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["Cas"]
+        return response.json()
+
+    def ex_update_certificate_authority(
+        self,
+        ca_id: str,
+        description: str = None,
+        dry_run: bool = False
+    ):
+        """
+        Modifies the specified attribute of a Client Certificate Authority
+        (CA).
+
+        :param      ca_id: The ID of the CA. (required)
+        :type       ca_id: ``str``
+
+        :param      description: The description of the CA.
+        :type       description: ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: the updated CA or the request result.
+        :rtype: ``dict``
+        """
+        action = "UpdateCa"
+        data = {"DryRun": dry_run, "CaId": ca_id}
+        if description is not None:
+            data.update({"Description": description})
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["Ca"]
+        return response.json()
+
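As a hedged illustration (not part of this patch), the CA helpers above could
be exercised as follows; the driver instance, the file name, and the 'CaId'
response field are assumptions based on the request keys used above:

    # assumes an already constructed OutscaleNodeDriver instance `driver`
    with open('ca.pem') as f:
        ca = driver.ex_create_certificate_authority(
            ca_perm=f.read(), description='build CA')

    cas = driver.ex_read_certificate_authorities(ca_ids=[ca['CaId']])
    driver.ex_update_certificate_authority(
        ca_id=ca['CaId'], description='renewed CA')
    driver.ex_delete_certificate_authority(ca_id=ca['CaId'])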
+    def ex_create_api_access_rule(
+        self,
+        description: str = None,
+        ip_ranges: List[str] = None,
+        ca_ids: List[str] = None,
+        cns: List[str] = None,
+        dry_run: bool = False,
+    ):
+        """
+        Creates an API access rule, which allows access to the API from
+        your account.
+        You must specify at least the ca_ids or the ip_ranges parameter.
+
+        :param      description: The description of the new rule.
+        :type       description: ``str``
+
+        :param      ip_ranges: One or more IP ranges, in CIDR notation
+        (for example, 192.0.2.0/16).
+        :type       ip_ranges: ``list`` of ``str``
+
+        :param      ca_ids: One or more IDs of Client Certificate Authorities
+        (CAs).
+        :type       ca_ids: ``list`` of ``str``
+
+        :param      cns: One or more Client Certificate Common Names (CNs).
+        If this parameter is specified, you must also specify the ca_ids
+        parameter.
+        :type       cns: ``list`` of ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: a dict containing the API access rule created.
+        :rtype: ``dict``
+        """
+
+        if not ca_ids and not ip_ranges:
+            raise ValueError(
+                "Either ca_ids or ip_ranges argument must be provided.")
+
+        action = "CreateApiAccessRule"
+        data = {"DryRun": dry_run}
+        if description is not None:
+            data["Description"] = description
+        if ip_ranges is not None:
+            data["IpRanges"] = ip_ranges
+        if ca_ids is not None:
+            data["CaIds"] = ca_ids
+        if cns is not None:
+            data["Cns"] = cns
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["ApiAccessRule"]
+        return response.json()
+
+    def ex_delete_api_access_rule(
+        self,
+        api_access_rule_id: str,
+        dry_run: bool = False,
+    ):
+        """
+        Deletes an API access rule.
+        You cannot delete the last remaining API access rule.
+
+        :param      api_access_rule_id: The ID of the targeted rule
+        (required).
+        :type       api_access_rule_id: ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: ``True`` if the deletion succeeded, otherwise the request
+        result.
+        :rtype: ``bool`` or ``dict``
+        """
+        action = "DeleteApiAccessRule"
+        data = {"ApiAccessRuleId": api_access_rule_id, "DryRun": dry_run}
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return True
+        return response.json()
+
+    def ex_read_api_access_rules(
+        self,
+        api_access_rules_ids: List[str] = None,
+        ca_ids: List[str] = None,
+        cns: List[str] = None,
+        descriptions: List[str] = None,
+        ip_ranges: List[str] = None,
+        dry_run: bool = False,
+    ):
+        """
+        Returns information about one or more API access rules.
+
+        :param      api_access_rules_ids: A list of rule IDs used to filter
+        the request.
+        :type       api_access_rules_ids: ``list`` of ``str``
+
+        :param      ca_ids: A list of CA IDs used to filter the request.
+        :type       ca_ids: ``list`` of ``str``
+
+        :param      cns: A list of CNs used to filter the request.
+        :type       cns: ``list`` of ``str``
+
+        :param      descriptions: A list of descriptions used to filter
+        the request.
+        :type       descriptions: ``list`` of ``str``
+
+        :param      ip_ranges: A list of IP ranges, in CIDR notation
+        (for example, 192.0.2.0/16), used to filter the request.
+        :type       ip_ranges: ``list`` of ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: a list of API access rules if successful, otherwise the
+        request result.
+        :rtype: ``list`` of ``dict`` or ``dict``
+        """
+
+        action = "ReadApiAccessRules"
+        filters = {}
+        if api_access_rules_ids is not None:
+            filters["ApiAccessRulesIds"] = api_access_rules_ids
+        if ca_ids is not None:
+            filters["CaIds"] = ca_ids
+        if cns is not None:
+            filters["Cns"] = cns
+        if descriptions is not None:
+            filters["Descriptions"] = descriptions
+        if ip_ranges is not None:
+            filters["IpRanges"] = ip_ranges
+        data = {"Filters": filters, "DryRun": dry_run}
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["ApiAccessRules"]
+        return response.json()
+
+    def ex_update_api_access_rule(
+        self,
+        api_access_rule_id: str,
+        ca_ids: List[str] = None,
+        cns: List[str] = None,
+        description: str = None,
+        ip_ranges: List[str] = None,
+        dry_run: bool = False,
+    ):
+        """
+        Updates an API access rule.
+        The new rule you specify fully replaces the old rule. Therefore,
+        for a parameter that is not specified, any previously set value
+        is deleted.
+
+        :param      api_access_rule_id: The ID of the rule to update
+        (required).
+        :type       api_access_rule_id: ``str``
+
+        :param      ca_ids: One or more IDs of Client Certificate Authorities
+        (CAs).
+        :type       ca_ids: ``list`` of ``str``
+
+        :param      cns: One or more Client Certificate Common Names (CNs).
+        If this parameter is specified, you must also specify the ca_ids
+        parameter.
+        :type       cns: ``list`` of ``str``
+
+        :param      description: The new description of the rule.
+        :type       description: ``str``
+
+        :param      ip_ranges: One or more IP ranges, in CIDR notation
+        (for example, 192.0.2.0/16).
+        :type       ip_ranges: ``list`` of ``str``
+
+        :param      dry_run: If true, checks whether you have the required
+        permissions to perform the action.
+        :type       dry_run: ``bool``
+
+        :return: the updated API access rules if successful, otherwise the
+        request result.
+        :rtype: ``list`` of ``dict`` or ``dict``
+        """
+
+        action = "UpdateApiAccessRule"
+        data = {"DryRun": dry_run, "ApiAccessRuleId": api_access_rule_id}
+        if description is not None:
+            data["Description"] = description
+        if ip_ranges is not None:
+            data["IpRanges"] = ip_ranges
+        if ca_ids is not None:
+            data["CaIds"] = ca_ids
+        if cns is not None:
+            data["Cns"] = cns
+        response = self._call_api(action, json.dumps(data))
+        if response.status_code == 200:
+            return response.json()["ApiAccessRules"]
+        return response.json()
+
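Similarly, a hedged sketch of the access rule helpers; the values are
placeholders, the 'ApiAccessRuleId' response field is an assumption mirroring
the request key, and creation requires at least one of ca_ids or ip_ranges:

    rule = driver.ex_create_api_access_rule(
        description='office', ip_ranges=['192.0.2.0/24'])

    rule_id = rule['ApiAccessRuleId']
    driver.ex_read_api_access_rules(api_access_rules_ids=[rule_id])
    driver.ex_update_api_access_rule(
        api_access_rule_id=rule_id,
        ip_ranges=['192.0.2.0/24', '198.51.100.0/24'])
    driver.ex_delete_api_access_rule(api_access_rule_id=rule_id)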
     def _get_outscale_endpoint(self, region: str, version: str, action: str):
         return "https://api.{}.{}/api/{}/{}".format(
             region,
diff --git a/libcloud/dns/drivers/cloudflare.py b/libcloud/dns/drivers/cloudflare.py
index 84bb5bd..af6b36e 100644
--- a/libcloud/dns/drivers/cloudflare.py
+++ b/libcloud/dns/drivers/cloudflare.py
@@ -20,7 +20,8 @@
 import itertools
 import json
 
-from libcloud.common.base import JsonResponse, ConnectionUserAndKey
+from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
+from libcloud.common.base import JsonResponse
 from libcloud.common.types import InvalidCredsError, LibcloudError
 from libcloud.dns.base import DNSDriver, Zone, Record
 from libcloud.dns.types import Provider, RecordType
@@ -146,11 +147,16 @@
             raise exception_class(**kwargs)
 
 
-class CloudFlareDNSConnection(ConnectionUserAndKey):
+class BaseDNSConnection(object):
     host = API_HOST
     secure = True
     responseCls = CloudFlareDNSResponse
 
+    def encode_data(self, data):
+        return json.dumps(data)
+
+
+class GlobalAPIKeyDNSConnection(BaseDNSConnection, ConnectionUserAndKey):
     def add_default_headers(self, headers):
         headers['Content-Type'] = 'application/json'
         headers['X-Auth-Email'] = self.user_id
@@ -158,15 +164,20 @@
 
         return headers
 
-    def encode_data(self, data):
-        return json.dumps(data)
+
+class TokenDNSConnection(BaseDNSConnection, ConnectionKey):
+    def add_default_headers(self, headers):
+        headers['Content-Type'] = 'application/json'
+        headers['Authorization'] = 'Bearer %s' % self.key
+
+        return headers
 
 
 class CloudFlareDNSDriver(DNSDriver):
     type = Provider.CLOUDFLARE
     name = 'CloudFlare DNS'
     website = 'https://www.cloudflare.com'
-    connectionCls = CloudFlareDNSConnection
+    connectionCls = GlobalAPIKeyDNSConnection
 
     RECORD_TYPE_MAP = {
         RecordType.A: 'A',
@@ -184,6 +195,16 @@
     RECORDS_PAGE_SIZE = 100
     MEMBERSHIPS_PAGE_SIZE = 50
 
+    def __init__(self, key, secret=None, secure=True, host=None, port=None,
+                 **kwargs):
+
+        if secret is None:
+            self.connectionCls = TokenDNSConnection
+
+        super(CloudFlareDNSDriver, self).__init__(
+            key=key, secret=secret, secure=secure,
+            host=host, port=port, **kwargs)
+
     def iterate_zones(self):
         def _iterate_zones(params):
             url = '{}/zones'.format(API_BASE)
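A minimal sketch (not part of this patch) of the two resulting authentication
modes; the credentials are placeholders:

    from libcloud.dns.providers import get_driver
    from libcloud.dns.types import Provider

    cls = get_driver(Provider.CLOUDFLARE)

    # email + global API key selects GlobalAPIKeyDNSConnection
    key_driver = cls('user@example.com', 'global-api-key')

    # a lone API token (no secret) selects TokenDNSConnection
    token_driver = cls('api-token')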
diff --git a/libcloud/dns/drivers/digitalocean.py b/libcloud/dns/drivers/digitalocean.py
index e393b05..9e88b57 100644
--- a/libcloud/dns/drivers/digitalocean.py
+++ b/libcloud/dns/drivers/digitalocean.py
@@ -171,15 +171,15 @@
             try:
                 params['priority'] = extra['priority']
             except KeyError:
-                params['priority'] = 'null'
+                params['priority'] = None
             try:
                 params['port'] = extra['port']
             except KeyError:
-                params['port'] = 'null'
+                params['port'] = None
             try:
                 params['weight'] = extra['weight']
             except KeyError:
-                params['weight'] = 'null'
+                params['weight'] = None
 
             if 'ttl' in extra:
                 params['ttl'] = extra['ttl']
@@ -230,15 +230,15 @@
             try:
                 params['priority'] = extra['priority']
             except KeyError:
-                params['priority'] = 'null'
+                params['priority'] = None
             try:
                 params['port'] = extra['port']
             except KeyError:
-                params['port'] = 'null'
+                params['port'] = None
             try:
                 params['weight'] = extra['weight']
             except KeyError:
-                params['weight'] = 'null'
+                params['weight'] = None
 
             if 'ttl' in extra:
                 params['ttl'] = extra['ttl']
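The change above matters because the string 'null' survives JSON encoding as
a literal string, while None becomes a JSON null; a quick illustration:

    import json

    json.dumps({'priority': 'null'})  # '{"priority": "null"}'
    json.dumps({'priority': None})    # '{"priority": null}'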
diff --git a/libcloud/storage/drivers/azure_blobs.py b/libcloud/storage/drivers/azure_blobs.py
index 3393784..8cebc73 100644
--- a/libcloud/storage/drivers/azure_blobs.py
+++ b/libcloud/storage/drivers/azure_blobs.py
@@ -68,6 +68,9 @@
 )
 
 AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net'
+AZURE_STORAGE_HOST_SUFFIX_CHINA = 'blob.core.chinacloudapi.cn'
+AZURE_STORAGE_HOST_SUFFIX_GOVERNMENT = 'blob.core.usgovcloudapi.net'
+AZURE_STORAGE_HOST_SUFFIX_PRIVATELINK = 'privatelink.blob.core.windows.net'
 
 AZURE_STORAGE_CDN_URL_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
 
@@ -173,11 +176,12 @@
     these deployments, the parameter ``account_prefix`` must be set on the
     connection. This is done by instantiating the driver with arguments such
     as ``host='somewhere.tld'`` and ``key='theaccount'``. To specify a custom
-    host without an account prefix, e.g. for use-cases where the custom host
-    implements an auditing proxy or similar, the driver can be instantiated
-    with ``host='theaccount.somewhere.tld'`` and ``key=''``.
+    host without an account prefix, e.g. to connect to Azure Government or
+    Azure China, the driver can be instantiated with the appropriate storage
+    endpoint suffix, e.g. ``host='blob.core.usgovcloudapi.net'`` and
+    ``key='theaccount'``.
 
-    :param account_prefix: Optional prefix identifying the sotrage account.
+    :param account_prefix: Optional prefix identifying the storage account.
                            Used when connecting to a custom deployment of the
                            storage service like Azurite or IoT Edge Storage.
     :type account_prefix: ``str``
@@ -206,7 +210,7 @@
 
     def __init__(self, key, secret=None, secure=True, host=None, port=None,
                  **kwargs):
-        self._host_argument_set = bool(host)
+        self._host = host
 
         # B64decode() this key and keep it, so that we don't have to do
         # so for every request. Minor performance improvement
@@ -217,15 +221,34 @@
                                                       port=port, **kwargs)
 
     def _ex_connection_class_kwargs(self):
-        result = {}
+        # if the user didn't provide a custom host value, assume we're
+        # targeting the default Azure Storage endpoints
+        if self._host is None:
+            return {'host': '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX)}
 
-        # host argument has precedence
-        if not self._host_argument_set:
-            result['host'] = '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX)
+        # connecting to a special storage region like Azure Government or
+        # Azure China requires setting a custom storage endpoint but we
+        # still use the same scheme to identify a specific account as for
+        # the standard storage endpoint
+        try:
+            host_suffix = next(
+                host_suffix
+                for host_suffix in (
+                    AZURE_STORAGE_HOST_SUFFIX_CHINA,
+                    AZURE_STORAGE_HOST_SUFFIX_GOVERNMENT,
+                    AZURE_STORAGE_HOST_SUFFIX_PRIVATELINK,
+                )
+                if self._host.endswith(host_suffix)
+            )
+        except StopIteration:
+            pass
         else:
-            result['account_prefix'] = self.key
+            return {'host': '%s.%s' % (self.key, host_suffix)}
 
-        return result
+        # if the host isn't targeting one of the special storage regions, it
+        # must be pointing to Azurite or IoT Edge Storage so switch to prefix
+        # identification
+        return {'account_prefix': self.key}
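A hedged sketch (not part of this patch) of how the resolution above plays
out; the account names and key are placeholders:

    from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver

    # no host given: default public endpoint, qualified by account
    d1 = AzureBlobsStorageDriver('myaccount', 'deadbeafcafebabe==')
    # d1.connection.host == 'myaccount.blob.core.windows.net'

    # a special-region suffix: still qualified by account
    d2 = AzureBlobsStorageDriver(
        'myaccount', 'deadbeafcafebabe==',
        host='blob.core.usgovcloudapi.net')
    # d2.connection.host == 'myaccount.blob.core.usgovcloudapi.net'

    # any other host (e.g. Azurite): account prefix mode
    d3 = AzureBlobsStorageDriver(
        'myaccount', 'deadbeafcafebabe==',
        host='localhost', port=10000, secure=False)
    # d3.connection.account_prefix == 'myaccount'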
 
     def _xml_to_container(self, node):
         """
@@ -803,6 +826,7 @@
         response = self._commit_blocks(object_path=object_path,
                                        chunks=chunks,
                                        lease=lease,
+                                       headers=headers,
                                        meta_data=meta_data,
                                        content_type=content_type,
                                        data_hash=data_hash,
@@ -822,7 +846,7 @@
             'bytes_transferred': bytes_transferred,
         }
 
-    def _commit_blocks(self, object_path, chunks, lease,
+    def _commit_blocks(self, object_path, chunks, lease, headers,
                        meta_data, content_type, data_hash,
                        object_name, file_path):
         """
@@ -837,7 +861,7 @@
 
         data = tostring(root)
         params = {'comp': 'blocklist'}
-        headers = {}
+        headers = headers or {}
 
         lease.update_headers(headers)
         lease.renew()
@@ -857,6 +881,8 @@
 
         headers['Content-Length'] = len(data)
 
+        headers = self._fix_headers(headers)
+
         response = self.connection.request(object_path, data=data,
                                            params=params, headers=headers,
                                            method='PUT')
@@ -929,6 +955,31 @@
 
         return False
 
+    def _fix_headers(self, headers):
+        """
+        Update common HTTP headers to their equivalent in Azure Storage
+
+        :param headers: The headers dictionary to be updated
+        :type headers: ``dict``
+        """
+        to_fix = (
+            'cache-control',
+            'content-encoding',
+            'content-language',
+        )
+
+        fixed = {}
+
+        for key, value in headers.items():
+            key_lower = key.lower()
+
+            if key_lower in to_fix:
+                fixed['x-ms-blob-%s' % key_lower] = value
+            else:
+                fixed[key] = value
+
+        return fixed
+
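Illustratively (not part of this patch), given a driver instance the
normalization maps the common HTTP headers to their blob-scoped Azure
equivalents and passes everything else through unchanged:

    driver._fix_headers({'Content-Encoding': 'gzip', 'X-Foo': 'bar'})
    # => {'x-ms-blob-content-encoding': 'gzip', 'X-Foo': 'bar'}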
     def _update_metadata(self, headers, meta_data):
         """
         Update the given metadata in the headers
diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py
index a19d5c6..706e3a5 100644
--- a/libcloud/storage/drivers/s3.py
+++ b/libcloud/storage/drivers/s3.py
@@ -1065,6 +1065,9 @@
                  'etag': headers['etag']}
         meta_data = {}
 
+        if 'content-encoding' in headers:
+            extra['content_encoding'] = headers['content-encoding']
+
         if 'last-modified' in headers:
             extra['last_modified'] = headers['last-modified']
 
diff --git a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py
index 8c35e13..31cb7b1 100644
--- a/libcloud/test/common/test_openstack_identity.py
+++ b/libcloud/test/common/test_openstack_identity.py
@@ -610,7 +610,7 @@
         catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
                                           auth_version='2.0')
         entries = catalog.get_entries()
-        self.assertEqual(len(entries), 9)
+        self.assertEqual(len(entries), 10)
 
         entry = [e for e in entries if e.service_name == 'cloudServers'][0]
         self.assertEqual(entry.service_type, 'compute')
@@ -678,7 +678,7 @@
         service_types = catalog.get_service_types()
         self.assertEqual(service_types, ['compute', 'image', 'network',
                                          'object-store', 'rax:object-cdn',
-                                         'volumev2'])
+                                         'volumev2', 'volumev3'])
 
         service_types = catalog.get_service_types(region='ORD')
         self.assertEqual(service_types, ['rax:object-cdn'])
@@ -692,7 +692,7 @@
                                           auth_version='2.0')
 
         service_names = catalog.get_service_names()
-        self.assertEqual(service_names, ['cinderv2', 'cloudFiles',
+        self.assertEqual(service_names, ['cinderv2', 'cinderv3', 'cloudFiles',
                                          'cloudFilesCDN', 'cloudServers',
                                          'cloudServersOpenStack',
                                          'cloudServersPreprod',
diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json
index 7cbae6f..14a77a5 100644
--- a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json
+++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json
@@ -16,6 +16,26 @@
       "zone": "us-central1-a"
     },
     {
+      "creationTimestamp": "2016-04-25T13:32:49.088-07:00",
+      "description": "12 vCPU and 85 GB RAM",
+      "guestCpus": 12,
+      "id": "1133568312750571514",
+      "imageSpaceGb": 10,
+      "kind": "compute#machineType",
+      "maximumPersistentDisks": 16,
+      "maximumPersistentDisksSizeGb": "10240",
+      "memoryMb": 87040,
+      "name": "a2-highgpu-1g",
+      "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/a2-highgpu-1g",
+      "accelerators": [
+       {
+          "guestAcceleratorType": "nvidia-tesla-v100",
+          "guestAcceleratorCount": 1
+       }
+      ],
+      "zone": "europe-west1-a"
+    },
+    {
       "creationTimestamp": "2013-04-25T13:32:45.550-07:00",
       "description": "1 vCPU (shared physical core) and 1.7 GB RAM",
       "guestCpus": 1,
diff --git a/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json b/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json
index 306cfda..70c8ba8 100644
--- a/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json
+++ b/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json
@@ -145,6 +145,28 @@
             {
                 "endpoints": [
                     {
+                        "region": "RegionOne",
+                        "tenantId": "1337",
+                        "publicURL": "https://test_endpoint.com/v3/1337",
+                        "versionInfo": "https://test_endpoint.com/v3/",
+                        "versionList": "https://test_endpoint.com/",
+                        "versionId": "2"
+                    },
+                    {
+                        "region": "fr1",
+                        "tenantId": "1337",
+                        "publicURL": "https://test_endpoint.com/v3/1337",
+                        "versionInfo": "https://test_endpoint.com/v3/",
+                        "versionList": "https://test_endpoint.com/",
+                        "versionId": "2"
+                    }
+                ],
+                "name": "cinderv3",
+                "type": "volumev3"
+            },
+            {
+                "endpoints": [
+                    {
                         "region": "DFW",
                         "tenantId": "613469",
                         "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337",
diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_ports_v2.json b/libcloud/test/compute/fixtures/openstack_v1.1/_ports_v2.json
index e31f8f3..61f2703 100644
--- a/libcloud/test/compute/fixtures/openstack_v1.1/_ports_v2.json
+++ b/libcloud/test/compute/fixtures/openstack_v1.1/_ports_v2.json
@@ -153,7 +153,6 @@
         {
             "status": "DOWN", 
             "extra_dhcp_opts": [], 
-            "description": "testport", 
             "allowed_address_pairs": [], 
             "tags": [], 
             "network_id": "123c8a8c-6427-4e8f-a805-2035365f4d43", 
diff --git a/libcloud/test/compute/test_equinixmetal.py b/libcloud/test/compute/test_equinixmetal.py
index 42d87b7..98979a7 100644
--- a/libcloud/test/compute/test_equinixmetal.py
+++ b/libcloud/test/compute/test_equinixmetal.py
@@ -61,7 +61,7 @@
         self.assertEqual(node.extra['billing_cycle'], 'hourly')
         self.assertEqual(node.extra['locked'], False)
         self.assertEqual(node.size.id, 'baremetal_1')
-        self.assertEqual(node.size.name, 'Type 1 - 16GB RAM')
+        self.assertEqual(node.size.name, 'Type 1 - 16384 RAM')
         self.assertEqual(node.size.ram, 16384)
         self.assertEqual(node.size.disk, 240)
         self.assertEqual(node.size.price, 0.4)
diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py
index 6a4cb56..4653043 100644
--- a/libcloud/test/compute/test_gce.py
+++ b/libcloud/test/compute/test_gce.py
@@ -729,10 +729,15 @@
     def test_list_sizes(self):
         sizes = self.driver.list_sizes()
         sizes_all = self.driver.list_sizes('all')
-        self.assertEqual(len(sizes), 22)
+        self.assertEqual(len(sizes), 23)
         self.assertEqual(len(sizes_all), 100)
         self.assertEqual(sizes[0].name, 'f1-micro')
         self.assertEqual(sizes[0].extra['zone'].name, 'us-central1-a')
+        self.assertEqual(sizes[1].name, 'a2-highgpu-1g')
+        self.assertEqual(len(sizes[1].extra['accelerators']), 1)
+        self.assertEqual(
+            sizes[1].extra['accelerators'][0]['guestAcceleratorType'],
+            "nvidia-tesla-v100")
         names = [s.name for s in sizes_all]
         self.assertEqual(names.count('n1-standard-1'), 5)
 
diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py
index 4d49818..fe6f298 100644
--- a/libcloud/test/compute/test_openstack.py
+++ b/libcloud/test/compute/test_openstack.py
@@ -1689,10 +1689,15 @@
         # normally authentication happens lazily, but we force it here
         self.driver.volumev2_connection._populate_hosts_and_request_paths()
 
+        self.driver_klass.volumev3_connectionCls.conn_class = OpenStack_2_0_MockHttp
+        self.driver_klass.volumev3_connectionCls.auth_url = "https://auth.api.example.com"
+        # normally authentication happens lazily, but we force it here
+        self.driver.volumev3_connection._populate_hosts_and_request_paths()
+
     def test__paginated_request_single_page(self):
         snapshots = self.driver._paginated_request(
             '/snapshots/detail', 'snapshots',
-            self.driver.volumev2_connection
+            self.driver._get_volume_connection()
         )['snapshots']
 
         self.assertEqual(len(snapshots), 3)
@@ -1701,7 +1706,7 @@
     def test__paginated_request_two_pages(self):
         snapshots = self.driver._paginated_request(
             '/snapshots/detail?unit_test=paginate', 'snapshots',
-            self.driver.volumev2_connection
+            self.driver._get_volume_connection()
         )['snapshots']
 
         self.assertEqual(len(snapshots), 6)
@@ -1721,7 +1726,7 @@
         with pytest.raises(OpenStackException):
             self.driver._paginated_request(
                 '/snapshots/detail?unit_test=pagination_loop', 'snapshots',
-                self.driver.volumev2_connection
+                self.driver._get_volume_connection()
             )
 
     def test_ex_force_auth_token_passed_to_connection(self):
@@ -2048,31 +2053,31 @@
         })
 
     def test_create_volume_passes_location_to_request_only_if_not_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(1, 'test', location='mylocation')
             name, args, kwargs = mock_request.mock_calls[0]
             self.assertEqual(kwargs["data"]["volume"]["availability_zone"], "mylocation")
 
     def test_create_volume_does_not_pass_location_to_request_if_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(1, 'test')
             name, args, kwargs = mock_request.mock_calls[0]
             self.assertFalse("availability_zone" in kwargs["data"]["volume"])
 
     def test_create_volume_passes_volume_type_to_request_only_if_not_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(1, 'test', ex_volume_type='myvolumetype')
             name, args, kwargs = mock_request.mock_calls[0]
             self.assertEqual(kwargs["data"]["volume"]["volume_type"], "myvolumetype")
 
     def test_create_volume_does_not_pass_volume_type_to_request_if_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(1, 'test')
             name, args, kwargs = mock_request.mock_calls[0]
             self.assertFalse("volume_type" in kwargs["data"]["volume"])
 
     def test_create_volume_passes_image_ref_to_request_only_if_not_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(
                 1, 'test', ex_image_ref='353c4bd2-b28f-4857-9b7b-808db4397d03')
             name, args, kwargs = mock_request.mock_calls[0]
@@ -2081,7 +2086,7 @@
                 "353c4bd2-b28f-4857-9b7b-808db4397d03")
 
     def test_create_volume_does_not_pass_image_ref_to_request_if_none(self):
-        with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+        with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
             self.driver.create_volume(1, 'test')
             name, args, kwargs = mock_request.mock_calls[0]
             self.assertFalse("imageRef" in kwargs["data"]["volume"])
@@ -2089,7 +2094,7 @@
     def test_ex_create_snapshot_does_not_post_optional_parameters_if_none(self):
         volume = self.driver.list_volumes()[0]
         with patch.object(self.driver, '_to_snapshot'):
-            with patch.object(self.driver.volumev2_connection, 'request') as mock_request:
+            with patch.object(self.driver._get_volume_connection(), 'request') as mock_request:
                 self.driver.create_volume_snapshot(volume,
                                                    name=None,
                                                    ex_description=None,
@@ -2709,16 +2714,16 @@
             body = self.fixtures.load('_v2_0__subnets.json')
             return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_volumes_detail(self, method, url, body, headers):
+    def _v3_1337_volumes_detail(self, method, url, body, headers):
         body = self.fixtures.load('_v2_0__volumes.json')
         return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_volumes(self, method, url, body, headers):
+    def _v3_1337_volumes(self, method, url, body, headers):
         if method == 'POST':
             body = self.fixtures.load('_v2_0__volume.json')
             return (httplib.CREATED, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers):
+    def _v3_1337_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers):
         if method == 'GET':
             body = self.fixtures.load('_v2_0__volume.json')
             return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
@@ -2726,7 +2731,7 @@
             body = ''
             return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_volumes_abc6a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers):
+    def _v3_1337_volumes_abc6a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers):
         if method == 'GET':
             body = self.fixtures.load('_v2_0__volume_abc6a3a1_c4ce_40f6_9b9f_07a61508938d.json')
             return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
@@ -2734,7 +2739,7 @@
             body = ''
             return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_snapshots_detail(self, method, url, body, headers):
+    def _v3_1337_snapshots_detail(self, method, url, body, headers):
         if ('unit_test=paginate' in url and 'marker' not in url) or \
                 'unit_test=pagination_loop' in url:
             body = self.fixtures.load('_v2_0__snapshots_paginate_start.json')
@@ -2742,12 +2747,12 @@
             body = self.fixtures.load('_v2_0__snapshots.json')
         return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_snapshots(self, method, url, body, headers):
+    def _v3_1337_snapshots(self, method, url, body, headers):
         if method == 'POST':
             body = self.fixtures.load('_v2_0__snapshot.json')
             return (httplib.CREATED, body, self.json_content_headers, httplib.responses[httplib.OK])
 
-    def _v2_1337_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5(self, method, url, body, headers):
+    def _v3_1337_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5(self, method, url, body, headers):
         if method == 'GET':
             body = self.fixtures.load('_v2_0__snapshot.json')
             return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
diff --git a/libcloud/test/compute/test_ovh.py b/libcloud/test/compute/test_ovh.py
index f79c2b7..77f8110 100644
--- a/libcloud/test/compute/test_ovh.py
+++ b/libcloud/test/compute/test_ovh.py
@@ -231,6 +231,9 @@
                                        location=location)
         self.assertEqual(node.name, 'test_vm')
 
+    def test_resizing_node(self):
+        self.assertTrue(self.driver.NODE_STATE_MAP['RESIZE'])
+
     def test_destroy_node(self):
         node = self.driver.list_nodes()[0]
         self.driver.destroy_node(node)
diff --git a/libcloud/test/dns/test_cloudflare.py b/libcloud/test/dns/test_cloudflare.py
index 125edf2..5f97c55 100644
--- a/libcloud/test/dns/test_cloudflare.py
+++ b/libcloud/test/dns/test_cloudflare.py
@@ -21,6 +21,8 @@
 from libcloud.test import unittest
 
 from libcloud.dns.drivers.cloudflare import CloudFlareDNSDriver
+from libcloud.dns.drivers.cloudflare import GlobalAPIKeyDNSConnection
+from libcloud.dns.drivers.cloudflare import TokenDNSConnection
 from libcloud.dns.drivers.cloudflare import ZONE_EXTRA_ATTRIBUTES
 from libcloud.dns.drivers.cloudflare import RECORD_EXTRA_ATTRIBUTES
 from libcloud.dns.types import RecordType
@@ -42,6 +44,14 @@
         CloudFlareMockHttp.use_param = 'a'
         self.driver = CloudFlareDNSDriver(*DNS_PARAMS_CLOUDFLARE)
 
+    def test_auth_key(self):
+        driver = CloudFlareDNSDriver('user@example.com', 'key')
+        self.assertEqual(driver.connectionCls, GlobalAPIKeyDNSConnection)
+
+    def test_auth_token(self):
+        driver = CloudFlareDNSDriver('sometoken')
+        self.assertEqual(driver.connectionCls, TokenDNSConnection)
+
     def test_list_record_types(self):
         record_types = self.driver.list_record_types()
         self.assertEqual(len(record_types), 9)
diff --git a/libcloud/test/storage/test_azure_blobs.py b/libcloud/test/storage/test_azure_blobs.py
index 9d3eb3c..23d38ec 100644
--- a/libcloud/test/storage/test_azure_blobs.py
+++ b/libcloud/test/storage/test_azure_blobs.py
@@ -935,6 +935,52 @@
         self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net')
         self.assertEqual(host3, 'test.foo.bar.com')
 
+    def test_normalize_http_headers(self):
+        driver = self.driver_type('fakeaccount1', 'deadbeafcafebabe==')
+
+        headers = driver._fix_headers({
+            # should be normalized to include x-ms-blob prefix
+            'Content-Encoding': 'gzip',
+            'content-language': 'en-us',
+            # should be passed through
+            'x-foo': 'bar',
+        })
+
+        self.assertEqual(headers, {
+            'x-ms-blob-content-encoding': 'gzip',
+            'x-ms-blob-content-language': 'en-us',
+            'x-foo': 'bar',
+        })
+
+    def test_storage_driver_host_govcloud(self):
+        driver1 = self.driver_type(
+            'fakeaccount1', 'deadbeafcafebabe==',
+            host='blob.core.usgovcloudapi.net')
+        driver2 = self.driver_type(
+            'fakeaccount2', 'deadbeafcafebabe==',
+            host='fakeaccount2.blob.core.usgovcloudapi.net')
+
+        host1 = driver1.connection.host
+        host2 = driver2.connection.host
+        account_prefix_1 = driver1.connection.account_prefix
+        account_prefix_2 = driver2.connection.account_prefix
+
+        self.assertEqual(host1, 'fakeaccount1.blob.core.usgovcloudapi.net')
+        self.assertEqual(host2, 'fakeaccount2.blob.core.usgovcloudapi.net')
+        self.assertIsNone(account_prefix_1)
+        self.assertIsNone(account_prefix_2)
+
+    def test_storage_driver_host_azurite(self):
+        driver = self.driver_type(
+            'fakeaccount1', 'deadbeafcafebabe==',
+            host='localhost', port=10000, secure=False)
+
+        host = driver.connection.host
+        account_prefix = driver.connection.account_prefix
+
+        self.assertEqual(host, 'localhost')
+        self.assertEqual(account_prefix, 'fakeaccount1')
+
 
 class AzuriteBlobsTests(AzureBlobsTests):
     driver_args = STORAGE_AZURITE_BLOBS_PARAMS
diff --git a/tox.ini b/tox.ini
index 4403ee1..7fff156 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,9 +1,9 @@
 [tox]
-envlist = py{pypy3.5,3.5,3.6,3.7,3.8,3.9},checks,lint,pylint,mypy,docs,coverage
+envlist = py{pypy3.5,3.5,3.6,3.7,3.8,3.9},checks,lint,pylint,mypy,docs,coverage,integration-storage
 skipsdist = true
 
 [testenv]
-passenv = TERM CI GITHUB_*
+passenv = TERM CI GITHUB_* DOCKER_*
 deps =
     -r{toxinidir}/requirements-tests.txt
     fasteners
@@ -14,7 +14,7 @@
     pypypy3: pypy3
     py3.5: python3.5
     py3.6: python3.6
-    {py3.7,docs,checks,lint,pylint,mypy,coverage,docs,py3.7-dist,py3.7-dist-wheel}: python3.7
+    {py3.7,docs,checks,lint,pylint,mypy,coverage,docs,integration-storage,py3.7-dist,py3.7-dist-wheel}: python3.7
     {py3.8,py3.8-windows}: python3.8
     {py3.9}: python3.9
 setenv =
@@ -198,7 +198,7 @@
 commands = flake8 libcloud/
            flake8 --max-line-length=160 libcloud/test/
            flake8 demos/
-           flake8 integration/
+           flake8 --max-line-length=160 integration/
            flake8 scripts/
            flake8 --ignore=E402,E902,W503,W504 docs/examples/
            flake8 --ignore=E402,E902,W503,W504 --max-line-length=160 contrib/
@@ -212,10 +212,24 @@
     bash ./scripts/check_file_names.sh
     python ./scripts/check_asf_license_headers.py .
 
-[testenv:integration]
-deps = -r{toxinidir}/integration/requirements.txt
+[testenv:integration-compute]
+deps = -r{toxinidir}/integration/compute/requirements.txt
 
-commands = python -m integration
+commands = python -m integration.compute
+
+[testenv:integration-storage]
+passenv = AZURE_CLIENT_SECRET
+
+setenv =
+  AZURE_CLIENT_ID=16cd65a3-dfa2-4272-bcdb-842cbbedb1b7
+  AZURE_TENANT_ID=982317c6-fb7e-4e92-abcd-196557e41c5b
+  AZURE_SUBSCRIPTION_ID=d6d608a6-e0c8-42ae-a548-2f41793709d2
+
+deps =
+    pytest==5.3.2
+    -r{toxinidir}/integration/storage/requirements.txt
+
+commands = pytest -vv -s --durations=10 integration/storage
 
 [testenv:coverage]
 deps =
@@ -224,7 +238,7 @@
     paramiko==2.7.1
     pyopenssl==19.1.0
     python-dateutil
-    libvirt-python==5.9.0
+    libvirt-python==5.10.0
     fasteners
 setenv =
   CRYPTOGRAPHY_ALLOW_OPENSSL_102=1
@@ -237,7 +251,7 @@
     -r{toxinidir}/requirements-tests.txt
     paramiko==2.7.1
     pyopenssl==19.1.0
-    libvirt-python==5.9.0
+    libvirt-python==5.10.0
     fasteners
 setenv =
   CRYPTOGRAPHY_ALLOW_OPENSSL_102=1