Merge branch 'jmac/virtual_directories' into juerg/googlecas
diff --git a/.coveragerc b/.coveragerc
index 6014b7f..739d1d0 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -6,6 +6,9 @@
 omit =
   # Omit profiling helper module
   */buildstream/_profile.py
+  # Omit generated code
+  */buildstream/*_pb2.py
+  */buildstream/*_pb2_grpc.py
   */.eggs/*
 
 [report]
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 0ba4a1c..8274158 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,4 +1,4 @@
-image: buildstream/testsuite-debian:8-master-57-be5a863
+image: buildstream/testsuite-debian:8-master-88-4d92c106
 
 cache:
   key: "$CI_JOB_NAME-"
@@ -92,18 +92,18 @@
   <<: *linux-tests
 
 tests-debian-9:
-  image: buildstream/buildstream-debian:master-81-caa5241
+  image: buildstream/buildstream-debian:master-88-4d92c106
   <<: *linux-tests
 
 tests-fedora-27:
-  image: buildstream/buildstream-fedora:master-56-5d7ee17
+  image: buildstream/buildstream-fedora:master-88-4d92c106
   <<: *linux-tests
 
 
 tests-unix:
   # Use fedora here, to a) run a test on fedora and b) ensure that we
   # can get rid of ostree - this is not possible with debian-8
-  image: buildstream/buildstream-fedora:master-56-5d7ee17
+  image: buildstream/buildstream-fedora:master-88-4d92c106
   stage: test
   variables:
     BST_FORCE_BACKEND: "unix"
diff --git a/.gitlab/issue_templates/bst_bug.md b/.gitlab/issue_templates/bst_bug.md
new file mode 100644
index 0000000..e40c308
--- /dev/null
+++ b/.gitlab/issue_templates/bst_bug.md
@@ -0,0 +1,36 @@
+## Summary
+
+[//]: # (Summarize the bug encountered concisely)
+
+
+## Steps to reproduce
+
+[//]: # (How one can reproduce the issue - this is very important)
+
+
+## What is the current bug behavior?
+
+[//]: # (What actually happens)
+
+
+## What is the expected correct behavior?
+
+[//]: # (What you should see instead)
+
+
+## Relevant logs and/or screenshots
+
+[//]: # (Paste any relevant logs - please use code blocks ``` to format console output, logs, and code, as they are hard to read otherwise. You can also add a Snippet and link it here. Check the Markdown guide on GitLab for further tips)
+
+## Possible fixes
+
+[//]: # (If you can, link to the line of code that might be responsible for the problem)
+
+## Other relevant information
+
+[//]: # (Add the BuildStream version. Replace the "x" below with "1", "2", or the appropriate version. Add more than one version if necessary)
+
+* BuildStream version affected: /milestone %BuildStream_v1.x
+
+----
+/label ~bug
diff --git a/.gitlab/merge_request_templates/bst_merge_request.md b/.gitlab/merge_request_templates/bst_merge_request.md
new file mode 100644
index 0000000..f58487b
--- /dev/null
+++ b/.gitlab/merge_request_templates/bst_merge_request.md
@@ -0,0 +1,19 @@
+## Description
+
+[//]: # (Provide a description of the MR including what it resolves)
+
+
+[//]: # (Proposed changes)
+
+Changes proposed in this merge request:
+* Change 1
+* Change 2
+* Change 3
+
+[//]: # (Task or bug number that this MR solves preceded by #)
+
+This merge request, when approved, will close issue/bug:
+
+## CHANGELOG/Release Notes 
+
+[//]: # (Optional)
diff --git a/.pylintrc b/.pylintrc
index c383093..a70476a 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -11,7 +11,7 @@
 
 # Add files or directories matching the regex patterns to the blacklist. The
 # regex matches against base names, not paths.
-ignore-patterns=
+ignore-patterns=.*_pb2.py,.*_pb2_grpc.py
 
 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
@@ -190,7 +190,7 @@
 # (useful for modules/projects where namespaces are manipulated during runtime
 # and thus existing member attributes cannot be deduced by static analysis. It
 # supports qualified module names, as well as Unix pattern matching.
-ignored-modules=pkg_resources,gi.repository
+ignored-modules=pkg_resources,gi.repository,grpc,google.*,buildstream.buildstream_pb2,buildstream.buildstream_pb2_grpc
 
 # Show a hint with possible names when a member name was not found. The aspect
 # of finding the hint is based on edit distance.
diff --git a/NEWS b/NEWS
index dc1fdcf..3fa1821 100644
--- a/NEWS
+++ b/NEWS
@@ -1,4 +1,18 @@
 =================
+buildstream 1.1.4
+=================
+
+  o `bst workspace` commands and `bst track` will substitute their
+    source elements when performing those operations, e.g. performing
+    `bst track` on a filter element will track the sources on the
+    element that it depends on (if it has sources).
+
+  o Switch to Remote Execution CAS-based artifact cache on all platforms.
+
+    Artifact servers need to be migrated.
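+
+    An illustrative user configuration for such a cache, using the new
+    TLS options (the URL, port and file paths are examples only):
+
+      artifacts:
+        url: https://cache.example.com:11001
+        push: true
+        server-cert: server.crt
+        client-key: client.key
+        client-cert: client.crt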
+
+
+=================
 buildstream 1.1.3
 =================
 
diff --git a/buildstream/_artifactcache/artifactcache.py b/buildstream/_artifactcache/artifactcache.py
index 9cc2815..1a0d14f 100644
--- a/buildstream/_artifactcache/artifactcache.py
+++ b/buildstream/_artifactcache/artifactcache.py
@@ -36,22 +36,38 @@
 #     push (bool): Whether we should attempt to push artifacts to this cache,
 #                  in addition to pulling from it.
 #
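+#     server_cert (str): Optional path to a PEM-encoded server certificate
+#                        used to verify the remote when using TLS.
+#     client_key (str): Optional path to a PEM-encoded private client key
+#                       for TLS client authentication.
+#     client_cert (str): Optional path to a PEM-encoded public client
+#                        certificate for TLS client authentication.
+#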
-class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push')):
+class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')):
 
     # _new_from_config_node
     #
     # Creates an ArtifactCacheSpec() from a YAML loaded node
     #
     @staticmethod
-    def _new_from_config_node(spec_node):
-        _yaml.node_validate(spec_node, ['url', 'push'])
+    def _new_from_config_node(spec_node, basedir=None):
+        _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert'])
         url = _yaml.node_get(spec_node, str, 'url')
         push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
         if not url:
             provenance = _yaml.node_get_provenance(spec_node)
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}: empty artifact cache URL".format(provenance))
-        return ArtifactCacheSpec(url, push)
+
+        server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
+        if server_cert and basedir:
+            server_cert = os.path.join(basedir, server_cert)
+
+        client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
+        if client_key and basedir:
+            client_key = os.path.join(basedir, client_key)
+
+        client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
+        if client_cert and basedir:
+            client_cert = os.path.join(basedir, client_cert)
+
+        return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
+
+
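+# Only 'url' and 'push' are mandatory; the TLS-related fields default to None.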
+ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
 
 
 # An ArtifactCache manages artifacts.
@@ -139,6 +155,7 @@
     #
     # Args:
     #   config_node (dict): The config block, which may contain the 'artifacts' key
+    #   basedir (str): The base directory for relative paths
     #
     # Returns:
     #   A list of ArtifactCacheSpec instances.
@@ -147,15 +164,15 @@
     #   LoadError, if the config block contains invalid keys.
     #
     @staticmethod
-    def specs_from_config_node(config_node):
+    def specs_from_config_node(config_node, basedir=None):
         cache_specs = []
 
         artifacts = config_node.get('artifacts', [])
         if isinstance(artifacts, Mapping):
-            cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts))
+            cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts, basedir))
         elif isinstance(artifacts, list):
             for spec_node in artifacts:
-                cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node))
+                cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node, basedir))
         else:
             provenance = _yaml.node_get_provenance(config_node, key='artifacts')
             raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
@@ -357,7 +374,7 @@
     #
     def _initialize_remotes(self):
         def remote_failed(url, error):
-            self._message(MessageType.WARN, "Failed to fetch remote refs from {}: {}".format(url, error))
+            self._message(MessageType.WARN, "Failed to initialize remote {}: {}".format(url, error))
 
         with self.context.timed_activity("Initializing remote caches", silent_nested=True):
             self.initialize_remotes(on_failure=remote_failed)
diff --git a/buildstream/_artifactcache/cascache.py b/buildstream/_artifactcache/cascache.py
new file mode 100644
index 0000000..880d93b
--- /dev/null
+++ b/buildstream/_artifactcache/cascache.py
@@ -0,0 +1,709 @@
+#!/usr/bin/env python3
+#
+#  Copyright (C) 2018 Codethink Limited
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+import hashlib
+import itertools
+import multiprocessing
+import os
+import signal
+import stat
+import tempfile
+from urllib.parse import urlparse
+
+import grpc
+
+from google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from google.devtools.remoteexecution.v1test import remote_execution_pb2, remote_execution_pb2_grpc
+from buildstream import buildstream_pb2, buildstream_pb2_grpc
+
+from .. import _signals, utils
+from .._exceptions import ArtifactError
+
+from . import ArtifactCache
+
+
+# A CASCache manages artifacts in a CAS repository as specified in the
+# Remote Execution API.
+#
+# Args:
+#     context (Context): The BuildStream context
+#     enable_push (bool): Whether pushing is allowed by the platform
+#
+# Pushing is explicitly disabled by the platform in some cases,
+# like when we are falling back to functioning without using
+# user namespaces.
+#
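+# The local repository is stored under 'artifactdir/cas' with the
+# following layout (see objpath() and _refpath() below):
+#
+#     objects/<xx>/<rest of SHA-256 hash>   content-addressed blobs
+#     refs/heads/<ref>                      serialized Digest messages naming a tree
+#     tmp/                                  staging area for new objects
+#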
+class CASCache(ArtifactCache):
+
+    def __init__(self, context, *, enable_push=True):
+        super().__init__(context)
+
+        self.casdir = os.path.join(context.artifactdir, 'cas')
+        os.makedirs(os.path.join(self.casdir, 'tmp'), exist_ok=True)
+
+        self._enable_push = enable_push
+
+        # Per-project list of _CASRemote instances.
+        self._remotes = {}
+
+        self._has_fetch_remotes = False
+        self._has_push_remotes = False
+
+    ################################################
+    #     Implementation of abstract methods       #
+    ################################################
+    def contains(self, element, key):
+        refpath = self._refpath(self.get_artifact_fullname(element, key))
+
+        # This assumes that the repository doesn't have any dangling pointers
+        return os.path.exists(refpath)
+
+    def extract(self, element, key):
+        ref = self.get_artifact_fullname(element, key)
+
+        tree = self.resolve_ref(ref)
+
+        dest = os.path.join(self.extractdir, element._get_project().name, element.normal_name, tree.hash)
+        if os.path.isdir(dest):
+            # artifact has already been extracted
+            return dest
+
+        os.makedirs(self.extractdir, exist_ok=True)
+
+        with tempfile.TemporaryDirectory(prefix='tmp', dir=self.extractdir) as tmpdir:
+            checkoutdir = os.path.join(tmpdir, ref)
+            self._checkout(checkoutdir, tree)
+
+            os.makedirs(os.path.dirname(dest), exist_ok=True)
+            try:
+                os.rename(checkoutdir, dest)
+            except OSError as e:
+                # With rename, it's possible to get either ENOTEMPTY or EEXIST
+                # in the case that the destination path is a non-empty directory.
+                #
+                # If rename fails with these errors, another process beat
+                # us to it so just ignore.
+                if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
+                    raise ArtifactError("Failed to extract artifact for ref '{}': {}"
+                                        .format(ref, e)) from e
+
+        return dest
+
+    def commit(self, element, content, keys):
+        refs = [self.get_artifact_fullname(element, key) for key in keys]
+
+        tree = self._create_tree(content)
+
+        for ref in refs:
+            self.set_ref(ref, tree)
+
+    def can_diff(self):
+        return True
+
+    def diff(self, element, key_a, key_b, *, subdir=None):
+        ref_a = self.get_artifact_fullname(element, key_a)
+        ref_b = self.get_artifact_fullname(element, key_b)
+
+        tree_a = self.resolve_ref(ref_a)
+        tree_b = self.resolve_ref(ref_b)
+
+        if subdir:
+            tree_a = self._get_subdir(tree_a, subdir)
+            tree_b = self._get_subdir(tree_b, subdir)
+
+        added = []
+        removed = []
+        modified = []
+
+        self._diff_trees(tree_a, tree_b, added=added, removed=removed, modified=modified)
+
+        return modified, removed, added
+
+    def initialize_remotes(self, *, on_failure=None):
+        # Copy the list, as project-specific specs are appended to it below
+        remote_specs = list(self.global_remote_specs)
+
+        for project in self.project_remote_specs:
+            remote_specs += self.project_remote_specs[project]
+
+        remote_specs = list(utils._deduplicate(remote_specs))
+
+        remotes = {}
+        q = multiprocessing.Queue()
+        for remote_spec in remote_specs:
+            # Use subprocess to avoid creation of gRPC threads in main BuildStream process
+            p = multiprocessing.Process(target=self._initialize_remote, args=(remote_spec, q))
+
+            try:
+                # Keep SIGINT blocked in the child process
+                with _signals.blocked([signal.SIGINT], ignore=False):
+                    p.start()
+
+                error = q.get()
+                p.join()
+            except KeyboardInterrupt:
+                utils._kill_process_tree(p.pid)
+                raise
+
+            if error and on_failure:
+                on_failure(remote_spec.url, error)
+            elif error:
+                raise ArtifactError(error)
+            else:
+                self._has_fetch_remotes = True
+                if remote_spec.push:
+                    self._has_push_remotes = True
+
+                remotes[remote_spec.url] = _CASRemote(remote_spec)
+
+        for project in self.context.get_projects():
+            remote_specs = self.global_remote_specs
+            if project in self.project_remote_specs:
+                remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
+
+            project_remotes = []
+
+            for remote_spec in remote_specs:
+                # Errors are already handled in the loop above,
+                # skip unreachable remotes here.
+                if remote_spec.url not in remotes:
+                    continue
+
+                remote = remotes[remote_spec.url]
+                project_remotes.append(remote)
+
+            self._remotes[project] = project_remotes
+
+    def has_fetch_remotes(self, *, element=None):
+        if not self._has_fetch_remotes:
+            # No project has fetch remotes
+            return False
+        elif element is None:
+            # At least one (sub)project has fetch remotes
+            return True
+        else:
+            # Check whether the specified element's project has fetch remotes
+            remotes_for_project = self._remotes[element._get_project()]
+            return bool(remotes_for_project)
+
+    def has_push_remotes(self, *, element=None):
+        if not self._has_push_remotes or not self._enable_push:
+            # No project has push remotes
+            return False
+        elif element is None:
+            # At least one (sub)project has push remotes
+            return True
+        else:
+            # Check whether the specified element's project has push remotes
+            remotes_for_project = self._remotes[element._get_project()]
+            return any(remote.spec.push for remote in remotes_for_project)
+
+    def pull(self, element, key, *, progress=None):
+        ref = self.get_artifact_fullname(element, key)
+
+        project = element._get_project()
+
+        for remote in self._remotes[project]:
+            try:
+                remote.init()
+
+                request = buildstream_pb2.GetArtifactRequest()
+                request.key = ref
+                response = remote.artifact_cache.GetArtifact(request)
+
+                tree = remote_execution_pb2.Digest()
+                tree.hash = response.artifact.hash
+                tree.size_bytes = response.artifact.size_bytes
+
+                self._fetch_directory(remote, tree)
+
+                self.set_ref(ref, tree)
+
+                # no need to pull from additional remotes
+                return True
+
+            except grpc.RpcError as e:
+                if e.code() != grpc.StatusCode.NOT_FOUND:
+                    raise
+
+        return False
+
+    def link_key(self, element, oldkey, newkey):
+        oldref = self.get_artifact_fullname(element, oldkey)
+        newref = self.get_artifact_fullname(element, newkey)
+
+        tree = self.resolve_ref(oldref)
+
+        self.set_ref(newref, tree)
+
+    def push(self, element, keys):
+        refs = [self.get_artifact_fullname(element, key) for key in keys]
+
+        project = element._get_project()
+
+        push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+        pushed = False
+
+        for remote in push_remotes:
+            remote.init()
+
+            for ref in refs:
+                tree = self.resolve_ref(ref)
+
+                # Check whether ref is already on the server in which case
+                # there is no need to push the artifact
+                try:
+                    request = buildstream_pb2.GetArtifactRequest()
+                    request.key = ref
+                    response = remote.artifact_cache.GetArtifact(request)
+
+                    if response.artifact.hash == tree.hash and response.artifact.size_bytes == tree.size_bytes:
+                        # ref is already on the server with the same tree
+                        continue
+
+                except grpc.RpcError as e:
+                    if e.code() != grpc.StatusCode.NOT_FOUND:
+                        raise
+
+                missing_blobs = {}
+                required_blobs = self._required_blobs(tree)
+
+                # Limit size of FindMissingBlobs request
+                for required_blobs_group in _grouper(required_blobs, 512):
+                    request = remote_execution_pb2.FindMissingBlobsRequest()
+
+                    for required_digest in required_blobs_group:
+                        d = request.blob_digests.add()
+                        d.hash = required_digest.hash
+                        d.size_bytes = required_digest.size_bytes
+
+                    response = remote.cas.FindMissingBlobs(request)
+                    for digest in response.missing_blob_digests:
+                        d = remote_execution_pb2.Digest()
+                        d.hash = digest.hash
+                        d.size_bytes = digest.size_bytes
+                        missing_blobs[d.hash] = d
+
+                # Upload any blobs missing on the server
+                for digest in missing_blobs.values():
+                    def request_stream():
+                        resource_name = os.path.join(digest.hash, str(digest.size_bytes))
+                        with open(self.objpath(digest), 'rb') as f:
+                            assert os.fstat(f.fileno()).st_size == digest.size_bytes
+                            offset = 0
+                            finished = False
+                            remaining = digest.size_bytes
+                            while not finished:
+                                chunk_size = min(remaining, 64 * 1024)
+                                remaining -= chunk_size
+
+                                request = bytestream_pb2.WriteRequest()
+                                request.write_offset = offset
+                                # max. 64 kB chunks
+                                request.data = f.read(chunk_size)
+                                request.resource_name = resource_name
+                                request.finish_write = remaining <= 0
+                                yield request
+                                offset += chunk_size
+                                finished = request.finish_write
+                    response = remote.bytestream.Write(request_stream())
+
+                request = buildstream_pb2.UpdateArtifactRequest()
+                request.keys.append(ref)
+                request.artifact.hash = tree.hash
+                request.artifact.size_bytes = tree.size_bytes
+                remote.artifact_cache.UpdateArtifact(request)
+
+                pushed = True
+
+        return pushed
+
+    ################################################
+    #                API Private Methods           #
+    ################################################
+
+    # objpath():
+    #
+    # Return the path of an object based on its digest.
+    #
+    # Args:
+    #     digest (Digest): The digest of the object
+    #
+    # Returns:
+    #     (str): The path of the object
+    #
+    def objpath(self, digest):
+        return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
+
+    # add_object():
+    #
+    # Hash and write object to CAS.
+    #
+    # Args:
+    #     digest (Digest): An optional Digest object to populate
+    #     path (str): Path to file to add
+    #     buffer (bytes): Byte buffer to add
+    #
+    # Returns:
+    #     (Digest): The digest of the added object
+    #
+    # Either `path` or `buffer` must be passed, but not both.
+    #
+    def add_object(self, *, digest=None, path=None, buffer=None):
+        # Exactly one of the two parameters has to be specified
+        assert (path is None) != (buffer is None)
+
+        if digest is None:
+            digest = remote_execution_pb2.Digest()
+
+        try:
+            h = hashlib.sha256()
+            # Always write out new file to avoid corruption if input file is modified
+            with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as out:
+                if path:
+                    with open(path, 'rb') as f:
+                        for chunk in iter(lambda: f.read(4096), b""):
+                            h.update(chunk)
+                            out.write(chunk)
+                else:
+                    h.update(buffer)
+                    out.write(buffer)
+
+                out.flush()
+
+                digest.hash = h.hexdigest()
+                digest.size_bytes = os.fstat(out.fileno()).st_size
+
+                # Place file at final location
+                objpath = self.objpath(digest)
+                os.makedirs(os.path.dirname(objpath), exist_ok=True)
+                os.link(out.name, objpath)
+
+        except FileExistsError:
+            # We can ignore the failed link() if the object is already in the repo.
+            pass
+
+        except OSError as e:
+            raise ArtifactError("Failed to hash object: {}".format(e)) from e
+
+        return digest
+
+    # set_ref():
+    #
+    # Create or replace a ref.
+    #
+    # Args:
+    #     ref (str): The name of the ref
+    #     tree (Digest): The digest to store in the ref
+    #
+    def set_ref(self, ref, tree):
+        refpath = self._refpath(ref)
+        os.makedirs(os.path.dirname(refpath), exist_ok=True)
+        with utils.save_file_atomic(refpath, 'wb') as f:
+            f.write(tree.SerializeToString())
+
+    # resolve_ref():
+    #
+    # Resolve a ref to a digest.
+    #
+    # Args:
+    #     ref (str): The name of the ref
+    #
+    # Returns:
+    #     (Digest): The digest stored in the ref
+    #
+    def resolve_ref(self, ref):
+        refpath = self._refpath(ref)
+
+        try:
+            with open(refpath, 'rb') as f:
+                digest = remote_execution_pb2.Digest()
+                digest.ParseFromString(f.read())
+                return digest
+
+        except FileNotFoundError as e:
+            raise ArtifactError("Attempt to access unavailable artifact: {}".format(e)) from e
+
+    ################################################
+    #             Local Private Methods            #
+    ################################################
+    def _checkout(self, dest, tree):
+        os.makedirs(dest, exist_ok=True)
+
+        directory = remote_execution_pb2.Directory()
+
+        with open(self.objpath(tree), 'rb') as f:
+            directory.ParseFromString(f.read())
+
+        for filenode in directory.files:
+            # regular file, create hardlink
+            fullpath = os.path.join(dest, filenode.name)
+            os.link(self.objpath(filenode.digest), fullpath)
+
+            if filenode.is_executable:
+                os.chmod(fullpath, stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP |
+                         stat.S_IROTH | stat.S_IXOTH)
+
+        for dirnode in directory.directories:
+            fullpath = os.path.join(dest, dirnode.name)
+            self._checkout(fullpath, dirnode.digest)
+
+        for symlinknode in directory.symlinks:
+            # symlink
+            fullpath = os.path.join(dest, symlinknode.name)
+            os.symlink(symlinknode.target, fullpath)
+
+    def _refpath(self, ref):
+        return os.path.join(self.casdir, 'refs', 'heads', ref)
+
+    def _create_tree(self, path, *, digest=None):
+        directory = remote_execution_pb2.Directory()
+
+        for name in sorted(os.listdir(path)):
+            full_path = os.path.join(path, name)
+            mode = os.lstat(full_path).st_mode
+            if stat.S_ISDIR(mode):
+                dirnode = directory.directories.add()
+                dirnode.name = name
+                self._create_tree(full_path, digest=dirnode.digest)
+            elif stat.S_ISREG(mode):
+                filenode = directory.files.add()
+                filenode.name = name
+                self.add_object(path=full_path, digest=filenode.digest)
+                filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+            elif stat.S_ISLNK(mode):
+                symlinknode = directory.symlinks.add()
+                symlinknode.name = name
+                symlinknode.target = os.readlink(full_path)
+            else:
+                raise ArtifactError("Unsupported file type for {}".format(full_path))
+
+        return self.add_object(digest=digest, buffer=directory.SerializeToString())
+
+    def _get_subdir(self, tree, subdir):
+        head, name = os.path.split(subdir)
+        if head:
+            tree = self._get_subdir(tree, head)
+
+        directory = remote_execution_pb2.Directory()
+
+        with open(self.objpath(tree), 'rb') as f:
+            directory.ParseFromString(f.read())
+
+        for dirnode in directory.directories:
+            if dirnode.name == name:
+                return dirnode.digest
+
+        raise ArtifactError("Subdirectory {} not found".format(name))
+
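+    # _diff_trees():
+    #
+    # Walk two directory trees in parallel, collecting the paths of added,
+    # removed and modified files. Directory entries are stored in sorted
+    # order (see _create_tree()), so the two lists can be merged like
+    # sorted sequences.
+    #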
+    def _diff_trees(self, tree_a, tree_b, *, added, removed, modified, path=""):
+        dir_a = remote_execution_pb2.Directory()
+        dir_b = remote_execution_pb2.Directory()
+
+        if tree_a:
+            with open(self.objpath(tree_a), 'rb') as f:
+                dir_a.ParseFromString(f.read())
+        if tree_b:
+            with open(self.objpath(tree_b), 'rb') as f:
+                dir_b.ParseFromString(f.read())
+
+        a = 0
+        b = 0
+        while a < len(dir_a.files) or b < len(dir_b.files):
+            if b < len(dir_b.files) and (a >= len(dir_a.files) or
+                                         dir_a.files[a].name > dir_b.files[b].name):
+                added.append(os.path.join(path, dir_b.files[b].name))
+                b += 1
+            elif a < len(dir_a.files) and (b >= len(dir_b.files) or
+                                           dir_b.files[b].name > dir_a.files[a].name):
+                removed.append(os.path.join(path, dir_a.files[a].name))
+                a += 1
+            else:
+                # File exists in both directories
+                if dir_a.files[a].digest.hash != dir_b.files[b].digest.hash:
+                    modified.append(os.path.join(path, dir_a.files[a].name))
+                a += 1
+                b += 1
+
+        a = 0
+        b = 0
+        while a < len(dir_a.directories) or b < len(dir_b.directories):
+            if b < len(dir_b.directories) and (a >= len(dir_a.directories) or
+                                               dir_a.directories[a].name > dir_b.directories[b].name):
+                self._diff_trees(None, dir_b.directories[b].digest,
+                                 added=added, removed=removed, modified=modified,
+                                 path=os.path.join(path, dir_b.directories[b].name))
+                b += 1
+            elif a < len(dir_a.directories) and (b >= len(dir_b.directories) or
+                                                 dir_b.directories[b].name > dir_a.directories[a].name):
+                self._diff_trees(dir_a.directories[a].digest, None,
+                                 added=added, removed=removed, modified=modified,
+                                 path=os.path.join(path, dir_a.directories[a].name))
+                a += 1
+            else:
+                # Subdirectory exists in both directories
+                if dir_a.directories[a].digest.hash != dir_b.directories[b].digest.hash:
+                    self._diff_trees(dir_a.directories[a].digest, dir_b.directories[b].digest,
+                                     added=added, removed=removed, modified=modified,
+                                     path=os.path.join(path, dir_a.directories[a].name))
+                a += 1
+                b += 1
+
+    def _initialize_remote(self, remote_spec, q):
+        try:
+            remote = _CASRemote(remote_spec)
+            remote.init()
+
+            request = buildstream_pb2.StatusRequest()
+            response = remote.artifact_cache.Status(request)
+
+            if remote_spec.push and not response.allow_updates:
+                q.put('Artifact server does not allow push')
+            else:
+                # No error
+                q.put(None)
+
+        except Exception as e:               # pylint: disable=broad-except
+            # Whatever happens, we need to return it to the calling process
+            #
+            q.put(str(e))
+
+    def _required_blobs(self, tree):
+        # parse directory, and recursively add blobs
+        d = remote_execution_pb2.Digest()
+        d.hash = tree.hash
+        d.size_bytes = tree.size_bytes
+        yield d
+
+        directory = remote_execution_pb2.Directory()
+
+        with open(self.objpath(tree), 'rb') as f:
+            directory.ParseFromString(f.read())
+
+        for filenode in directory.files:
+            d = remote_execution_pb2.Digest()
+            d.hash = filenode.digest.hash
+            d.size_bytes = filenode.digest.size_bytes
+            yield d
+
+        for dirnode in directory.directories:
+            yield from self._required_blobs(dirnode.digest)
+
+    def _fetch_blob(self, remote, digest, out):
+        resource_name = os.path.join(digest.hash, str(digest.size_bytes))
+        request = bytestream_pb2.ReadRequest()
+        request.resource_name = resource_name
+        request.read_offset = 0
+        for response in remote.bytestream.Read(request):
+            out.write(response.data)
+
+        out.flush()
+        assert digest.size_bytes == os.fstat(out.fileno()).st_size
+
+    def _fetch_directory(self, remote, tree):
+        objpath = self.objpath(tree)
+        if os.path.exists(objpath):
+            # already in local cache
+            return
+
+        with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as out:
+            self._fetch_blob(remote, tree, out)
+
+            directory = remote_execution_pb2.Directory()
+
+            with open(out.name, 'rb') as f:
+                directory.ParseFromString(f.read())
+
+            for filenode in directory.files:
+                fileobjpath = self.objpath(filenode.digest)
+                if os.path.exists(fileobjpath):
+                    # already in local cache
+                    continue
+
+                with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as f:
+                    self._fetch_blob(remote, filenode.digest, f)
+
+                    digest = self.add_object(path=f.name)
+                    assert digest.hash == filenode.digest.hash
+
+            for dirnode in directory.directories:
+                self._fetch_directory(remote, dirnode.digest)
+
+            # place directory blob only in final location when we've downloaded
+            # all referenced blobs to avoid dangling references in the repository
+            digest = self.add_object(path=out.name)
+            assert digest.hash == tree.hash
+
+
+# Represents a single remote CAS cache.
+#
+class _CASRemote():
+    def __init__(self, spec):
+        self.spec = spec
+        self._initialized = False
+        self.channel = None
+        self.bytestream = None
+        self.cas = None
+        self.artifact_cache = None
+
+    def init(self):
+        if not self._initialized:
+            url = urlparse(self.spec.url)
+            if url.scheme == 'http':
+                port = url.port or 80
+                self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port))
+            elif url.scheme == 'https':
+                port = url.port or 443
+
+                if self.spec.server_cert:
+                    with open(self.spec.server_cert, 'rb') as f:
+                        server_cert_bytes = f.read()
+                else:
+                    server_cert_bytes = None
+
+                if self.spec.client_key:
+                    with open(self.spec.client_key, 'rb') as f:
+                        client_key_bytes = f.read()
+                else:
+                    client_key_bytes = None
+
+                if self.spec.client_cert:
+                    with open(self.spec.client_cert, 'rb') as f:
+                        client_cert_bytes = f.read()
+                else:
+                    client_cert_bytes = None
+
+                credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
+                                                           private_key=client_key_bytes,
+                                                           certificate_chain=client_cert_bytes)
+                self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials)
+            else:
+                raise ArtifactError("Unsupported URL: {}".format(self.spec.url))
+
+            self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
+            self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
+            self.artifact_cache = buildstream_pb2_grpc.ArtifactCacheStub(self.channel)
+
+            self._initialized = True
+
+
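+# _grouper():
+#
+# Split an iterator into chunks of at most n elements, yielding each chunk
+# as a lazy iterator. Used to limit the size of FindMissingBlobs requests.
+#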
+def _grouper(iterable, n):
+    # pylint: disable=stop-iteration-return
+    while True:
+        yield itertools.chain([next(iterable)], itertools.islice(iterable, n - 1))
diff --git a/buildstream/_artifactcache/casserver.py b/buildstream/_artifactcache/casserver.py
new file mode 100644
index 0000000..59ba7fe
--- /dev/null
+++ b/buildstream/_artifactcache/casserver.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python3
+#
+#  Copyright (C) 2018 Codethink Limited
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+from concurrent import futures
+import os
+import signal
+import sys
+import tempfile
+
+import click
+import grpc
+
+from google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from google.devtools.remoteexecution.v1test import remote_execution_pb2, remote_execution_pb2_grpc
+from buildstream import buildstream_pb2, buildstream_pb2_grpc
+
+from .._exceptions import ArtifactError
+from .._context import Context
+
+from .cascache import CASCache
+
+
+# create_server():
+#
+# Create gRPC CAS artifact server as specified in the Remote Execution API.
+#
+# Args:
+#     repo (str): Path to CAS repository
+#     enable_push (bool): Whether to allow blob uploads and artifact updates
+#
+def create_server(repo, *, enable_push):
+    context = Context()
+    context.artifactdir = repo
+
+    artifactcache = CASCache(context)
+
+    # Use max_workers default from Python 3.5+
+    max_workers = (os.cpu_count() or 1) * 5
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers))
+
+    bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
+        _ByteStreamServicer(artifactcache, enable_push=enable_push), server)
+
+    remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
+        _ContentAddressableStorageServicer(artifactcache), server)
+
+    buildstream_pb2_grpc.add_ArtifactCacheServicer_to_server(
+        _ArtifactCacheServicer(artifactcache, enable_push=enable_push), server)
+
+    return server
+
+
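+# server_main():
+#
+# Command-line entry point for the artifact server. An illustrative
+# invocation (the port number and file paths are examples; the installed
+# executable name depends on packaging):
+#
+#   <artifact-server> --port 11001 --server-key server.key \
+#       --server-cert server.crt --enable-push /srv/artifacts
+#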
+@click.command(short_help="CAS Artifact Server")
+@click.option('--port', '-p', type=click.INT, required=True, help="Port number")
+@click.option('--server-key', help="Private server key for TLS (PEM-encoded)")
+@click.option('--server-cert', help="Public server certificate for TLS (PEM-encoded)")
+@click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
+@click.option('--enable-push', default=False, is_flag=True,
+              help="Allow clients to upload blobs and update artifact cache")
+@click.argument('repo')
+def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
+    server = create_server(repo, enable_push=enable_push)
+
+    use_tls = bool(server_key)
+
+    if bool(server_cert) != use_tls:
+        click.echo("ERROR: --server-key and --server-cert are both required for TLS", err=True)
+        sys.exit(-1)
+
+    if client_certs and not use_tls:
+        click.echo("ERROR: --client-certs can only be used with --server-key", err=True)
+        sys.exit(-1)
+
+    if use_tls:
+        # Read public/private key pair
+        with open(server_key, 'rb') as f:
+            server_key_bytes = f.read()
+        with open(server_cert, 'rb') as f:
+            server_cert_bytes = f.read()
+
+        if client_certs:
+            with open(client_certs, 'rb') as f:
+                client_certs_bytes = f.read()
+        else:
+            client_certs_bytes = None
+
+        credentials = grpc.ssl_server_credentials([(server_key_bytes, server_cert_bytes)],
+                                                  root_certificates=client_certs_bytes,
+                                                  require_client_auth=bool(client_certs))
+        server.add_secure_port('[::]:{}'.format(port), credentials)
+    else:
+        server.add_insecure_port('[::]:{}'.format(port))
+
+    # Run artifact server
+    server.start()
+    try:
+        while True:
+            signal.pause()
+    except KeyboardInterrupt:
+        server.stop(0)
+
+
+class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
+    def __init__(self, cas, *, enable_push):
+        super().__init__()
+        self.cas = cas
+        self.enable_push = enable_push
+
+    def Read(self, request, context):
+        resource_name = request.resource_name
+        client_digest = _digest_from_resource_name(resource_name)
+        assert request.read_offset <= client_digest.size_bytes
+
+        with open(self.cas.objpath(client_digest), 'rb') as f:
+            assert os.fstat(f.fileno()).st_size == client_digest.size_bytes
+            if request.read_offset > 0:
+                f.seek(request.read_offset)
+
+            remaining = client_digest.size_bytes - request.read_offset
+            while remaining > 0:
+                chunk_size = min(remaining, 64 * 1024)
+                remaining -= chunk_size
+
+                response = bytestream_pb2.ReadResponse()
+                # max. 64 kB chunks
+                response.data = f.read(chunk_size)
+                yield response
+
+    def Write(self, request_iterator, context):
+        response = bytestream_pb2.WriteResponse()
+
+        if not self.enable_push:
+            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+            return response
+
+        offset = 0
+        finished = False
+        resource_name = None
+        with tempfile.NamedTemporaryFile(dir=os.path.join(self.cas.casdir, 'tmp')) as out:
+            for request in request_iterator:
+                assert not finished
+                assert request.write_offset == offset
+                if resource_name is None:
+                    # First request
+                    resource_name = request.resource_name
+                    client_digest = _digest_from_resource_name(resource_name)
+                elif request.resource_name:
+                    # If it is set on subsequent calls, it **must** match the value of the first request.
+                    assert request.resource_name == resource_name
+                out.write(request.data)
+                offset += len(request.data)
+                if request.finish_write:
+                    assert client_digest.size_bytes == offset
+                    out.flush()
+                    digest = self.cas.add_object(path=out.name)
+                    assert digest.hash == client_digest.hash
+                    finished = True
+
+        assert finished
+
+        response.committed_size = offset
+        return response
+
+
+class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
+    def __init__(self, cas):
+        super().__init__()
+        self.cas = cas
+
+    def FindMissingBlobs(self, request, context):
+        response = remote_execution_pb2.FindMissingBlobsResponse()
+        for digest in request.blob_digests:
+            if not _has_object(self.cas, digest):
+                d = response.missing_blob_digests.add()
+                d.hash = digest.hash
+                d.size_bytes = digest.size_bytes
+        return response
+
+
+class _ArtifactCacheServicer(buildstream_pb2_grpc.ArtifactCacheServicer):
+    def __init__(self, cas, *, enable_push):
+        super().__init__()
+        self.cas = cas
+        self.enable_push = enable_push
+
+    def GetArtifact(self, request, context):
+        response = buildstream_pb2.GetArtifactResponse()
+
+        try:
+            tree = self.cas.resolve_ref(request.key)
+
+            response.artifact.hash = tree.hash
+            response.artifact.size_bytes = tree.size_bytes
+        except ArtifactError:
+            context.set_code(grpc.StatusCode.NOT_FOUND)
+
+        return response
+
+    def UpdateArtifact(self, request, context):
+        response = buildstream_pb2.UpdateArtifactResponse()
+
+        if not self.enable_push:
+            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+            return response
+
+        for key in request.keys:
+            self.cas.set_ref(key, request.artifact)
+
+        return response
+
+    def Status(self, request, context):
+        response = buildstream_pb2.StatusResponse()
+
+        response.allow_updates = self.enable_push
+
+        return response
+
+
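+# Parse a bytestream resource name of the form '<hash>/<size_bytes>', as
+# produced by the client in cascache.py, into a Digest message.
+#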
+def _digest_from_resource_name(resource_name):
+    parts = resource_name.split('/')
+    assert len(parts) == 2
+    digest = remote_execution_pb2.Digest()
+    digest.hash = parts[0]
+    digest.size_bytes = int(parts[1])
+    return digest
+
+
+def _has_object(cas, digest):
+    objpath = cas.objpath(digest)
+    return os.path.exists(objpath)
diff --git a/buildstream/_artifactcache/ostreecache.py b/buildstream/_artifactcache/ostreecache.py
deleted file mode 100644
index c802fc2..0000000
--- a/buildstream/_artifactcache/ostreecache.py
+++ /dev/null
@@ -1,378 +0,0 @@
-#!/usr/bin/env python3
-#
-#  Copyright (C) 2017-2018 Codethink Limited
-#
-#  This program is free software; you can redistribute it and/or
-#  modify it under the terms of the GNU Lesser General Public
-#  License as published by the Free Software Foundation; either
-#  version 2 of the License, or (at your option) any later version.
-#
-#  This library is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
-#  Lesser General Public License for more details.
-#
-#  You should have received a copy of the GNU Lesser General Public
-#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-#  Authors:
-#        Jürg Billeter <juerg.billeter@codethink.co.uk>
-
-import multiprocessing
-import os
-import signal
-import tempfile
-
-from .. import _ostree, _signals, utils
-from .._exceptions import ArtifactError
-from .._ostree import OSTreeError
-
-from . import ArtifactCache
-from .pushreceive import initialize_push_connection
-from .pushreceive import push as push_artifact
-from .pushreceive import PushException
-
-
-# An OSTreeCache manages artifacts in an OSTree repository
-#
-# Args:
-#     context (Context): The BuildStream context
-#     project (Project): The BuildStream project
-#     enable_push (bool): Whether pushing is allowed by the platform
-#
-# Pushing is explicitly disabled by the platform in some cases,
-# like when we are falling back to functioning without using
-# user namespaces.
-#
-class OSTreeCache(ArtifactCache):
-
-    def __init__(self, context, *, enable_push):
-        super().__init__(context)
-
-        self.enable_push = enable_push
-
-        ostreedir = os.path.join(context.artifactdir, 'ostree')
-        self.repo = _ostree.ensure(ostreedir, False)
-
-        # Per-project list of OSTreeRemote instances.
-        self._remotes = {}
-
-        self._has_fetch_remotes = False
-        self._has_push_remotes = False
-
-    ################################################
-    #     Implementation of abstract methods       #
-    ################################################
-    def has_fetch_remotes(self, *, element=None):
-        if not self._has_fetch_remotes:
-            # No project has push remotes
-            return False
-        elif element is None:
-            # At least one (sub)project has fetch remotes
-            return True
-        else:
-            # Check whether the specified element's project has fetch remotes
-            remotes_for_project = self._remotes[element._get_project()]
-            return bool(remotes_for_project)
-
-    def has_push_remotes(self, *, element=None):
-        if not self._has_push_remotes:
-            # No project has push remotes
-            return False
-        elif element is None:
-            # At least one (sub)project has push remotes
-            return True
-        else:
-            # Check whether the specified element's project has push remotes
-            remotes_for_project = self._remotes[element._get_project()]
-            return any(remote.spec.push for remote in remotes_for_project)
-
-    def contains(self, element, key):
-        ref = self.get_artifact_fullname(element, key)
-        return _ostree.exists(self.repo, ref)
-
-    def extract(self, element, key):
-        ref = self.get_artifact_fullname(element, key)
-
-        # resolve ref to checksum
-        rev = _ostree.checksum(self.repo, ref)
-
-        # Extracting a nonexistent artifact is a bug
-        assert rev, "Artifact missing for {}".format(ref)
-
-        dest = os.path.join(self.extractdir, element._get_project().name, element.normal_name, rev)
-        if os.path.isdir(dest):
-            # artifact has already been extracted
-            return dest
-
-        os.makedirs(self.extractdir, exist_ok=True)
-        with tempfile.TemporaryDirectory(prefix='tmp', dir=self.extractdir) as tmpdir:
-
-            checkoutdir = os.path.join(tmpdir, ref)
-
-            _ostree.checkout(self.repo, checkoutdir, rev, user=True)
-
-            os.makedirs(os.path.dirname(dest), exist_ok=True)
-            try:
-                os.rename(checkoutdir, dest)
-            except OSError as e:
-                # With rename, it's possible to get either ENOTEMPTY or EEXIST
-                # in the case that the destination path is a not empty directory.
-                #
-                # If rename fails with these errors, another process beat
-                # us to it so just ignore.
-                if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
-                    raise ArtifactError("Failed to extract artifact for ref '{}': {}"
-                                        .format(ref, e)) from e
-
-        return dest
-
-    def commit(self, element, content, keys):
-        refs = [self.get_artifact_fullname(element, key) for key in keys]
-
-        try:
-            _ostree.commit(self.repo, content, refs)
-        except OSTreeError as e:
-            raise ArtifactError("Failed to commit artifact: {}".format(e)) from e
-
-    def can_diff(self):
-        return True
-
-    def diff(self, element, key_a, key_b, *, subdir=None):
-        _, a, _ = self.repo.read_commit(self.get_artifact_fullname(element, key_a))
-        _, b, _ = self.repo.read_commit(self.get_artifact_fullname(element, key_b))
-
-        if subdir:
-            a = a.get_child(subdir)
-            b = b.get_child(subdir)
-
-            subpath = a.get_path()
-        else:
-            subpath = '/'
-
-        modified, removed, added = _ostree.diff_dirs(a, b)
-
-        modified = [os.path.relpath(item.target.get_path(), subpath) for item in modified]
-        removed = [os.path.relpath(item.get_path(), subpath) for item in removed]
-        added = [os.path.relpath(item.get_path(), subpath) for item in added]
-
-        return modified, removed, added
-
-    def pull(self, element, key, *, progress=None):
-        project = element._get_project()
-
-        ref = self.get_artifact_fullname(element, key)
-
-        for remote in self._remotes[project]:
-            try:
-                # fetch the artifact from highest priority remote using the specified cache key
-                remote_name = self._ensure_remote(self.repo, remote.pull_url)
-                _ostree.fetch(self.repo, remote=remote_name, ref=ref, progress=progress)
-                return True
-            except OSTreeError:
-                # Try next remote
-                continue
-
-        return False
-
-    def link_key(self, element, oldkey, newkey):
-        oldref = self.get_artifact_fullname(element, oldkey)
-        newref = self.get_artifact_fullname(element, newkey)
-
-        # resolve ref to checksum
-        rev = _ostree.checksum(self.repo, oldref)
-
-        # create additional ref for the same checksum
-        _ostree.set_ref(self.repo, newref, rev)
-
-    def push(self, element, keys):
-        any_pushed = False
-
-        project = element._get_project()
-
-        push_remotes = [r for r in self._remotes[project] if r.spec.push]
-
-        if not push_remotes:
-            raise ArtifactError("Push is not enabled for any of the configured remote artifact caches.")
-
-        refs = [self.get_artifact_fullname(element, key) for key in keys]
-
-        for remote in push_remotes:
-            any_pushed |= self._push_to_remote(remote, element, refs)
-
-        return any_pushed
-
-    def initialize_remotes(self, *, on_failure=None):
-        remote_specs = self.global_remote_specs.copy()
-
-        for project in self.project_remote_specs:
-            remote_specs.extend(self.project_remote_specs[project])
-
-        remote_specs = list(utils._deduplicate(remote_specs))
-
-        remote_results = {}
-
-        # Callback to initialize one remote in a 'multiprocessing' subprocess.
-        #
-        # We cannot do this in the main process because of the way the tasks
-        # run by the main scheduler calls into libostree using
-        # fork()-without-exec() subprocesses. OSTree fetch operations in
-        # subprocesses hang if fetch operations were previously done in the
-        # main process.
-        #
-        def child_action(url, q):
-            try:
-                push_url, pull_url = self._initialize_remote(url)
-                q.put((None, push_url, pull_url))
-            except Exception as e:               # pylint: disable=broad-except
-                # Whatever happens, we need to return it to the calling process
-                #
-                q.put((str(e), None, None, None))
-
-        # Kick off all the initialization jobs one by one.
-        #
-        # Note that we cannot use multiprocessing.Pool here because it's not
-        # possible to pickle local functions such as child_action().
-        #
-        q = multiprocessing.Queue()
-        for remote_spec in remote_specs:
-            p = multiprocessing.Process(target=child_action, args=(remote_spec.url, q))
-
-            try:
-
-                # Keep SIGINT blocked in the child process
-                with _signals.blocked([signal.SIGINT], ignore=False):
-                    p.start()
-
-                error, push_url, pull_url = q.get()
-                p.join()
-            except KeyboardInterrupt:
-                utils._kill_process_tree(p.pid)
-                raise
-
-            if error and on_failure:
-                on_failure(remote_spec.url, error)
-            elif error:
-                raise ArtifactError(error)
-            else:
-                if remote_spec.push and push_url:
-                    self._has_push_remotes = True
-                if pull_url:
-                    self._has_fetch_remotes = True
-
-                remote_results[remote_spec.url] = (push_url, pull_url)
-
-        # Prepare push_urls and pull_urls for each project
-        for project in self.context.get_projects():
-            remote_specs = self.global_remote_specs
-            if project in self.project_remote_specs:
-                remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
-
-            remotes = []
-
-            for remote_spec in remote_specs:
-                # Errors are already handled in the loop above,
-                # skip unreachable remotes here.
-                if remote_spec.url not in remote_results:
-                    continue
-
-                push_url, pull_url = remote_results[remote_spec.url]
-
-                if remote_spec.push and not push_url:
-                    raise ArtifactError("Push enabled but not supported by repo at: {}".format(remote_spec.url))
-
-                remote = _OSTreeRemote(remote_spec, pull_url, push_url)
-                remotes.append(remote)
-
-            self._remotes[project] = remotes
-
-    ################################################
-    #             Local Private Methods            #
-    ################################################
-
-    # _initialize_remote():
-    #
-    # Do protocol-specific initialization necessary to use a given OSTree
-    # remote.
-    #
-    # The SSH protocol that we use only supports pushing so initializing these
-    # involves contacting the remote to find out the corresponding pull URL.
-    #
-    # Args:
-    #     url (str): URL of the remote
-    #
-    # Returns:
-    #     (str, str): the pull URL and push URL for the remote
-    #
-    # Raises:
-    #     ArtifactError: if there was an error
-    def _initialize_remote(self, url):
-        if url.startswith('ssh://'):
-            try:
-                push_url = url
-                pull_url = initialize_push_connection(url)
-            except PushException as e:
-                raise ArtifactError(e) from e
-        elif url.startswith('/'):
-            push_url = pull_url = 'file://' + url
-        elif url.startswith('file://'):
-            push_url = pull_url = url
-        elif url.startswith('http://') or url.startswith('https://'):
-            push_url = None
-            pull_url = url
-        else:
-            raise ArtifactError("Unsupported URL: {}".format(url))
-
-        return push_url, pull_url
-
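# A minimal sketch, independent of the cache class above, of the same URL
# scheme dispatch: ssh:// remotes are push URLs whose pull URL must be
# discovered over the wire, local paths and file:// URLs serve both roles,
# and http(s) URLs are pull-only.  The function name and example URLs are
# illustrative only.
def classify_remote(url):
    if url.startswith('ssh://'):
        return url, None                  # pull URL is discovered from the remote
    if url.startswith('/'):
        return 'file://' + url, 'file://' + url
    if url.startswith('file://'):
        return url, url
    if url.startswith(('http://', 'https://')):
        return None, url
    raise ValueError("Unsupported URL: {}".format(url))

assert classify_remote('/srv/cache') == ('file:///srv/cache', 'file:///srv/cache')
assert classify_remote('https://cache.example.com/') == (None, 'https://cache.example.com/')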
-    # _ensure_remote():
-    #
-    # Ensure that our OSTree repo has a remote configured for the given URL.
-    # Note that SSH access to remotes is not handled by libostree itself.
-    #
-    # Args:
-    #     repo (OSTree.Repo): an OSTree repository
-    #     pull_url (str): the URL where libostree can pull from the remote
-    #
-    # Returns:
-    #     (str): the name of the remote, which can be passed to various other
-    #            operations implemented by the _ostree module.
-    #
-    # Raises:
-    #     OSTreeError: if there was a problem reported by libostree
-    def _ensure_remote(self, repo, pull_url):
-        remote_name = utils.url_directory_name(pull_url)
-        _ostree.configure_remote(repo, remote_name, pull_url)
-        return remote_name
-
-    def _push_to_remote(self, remote, element, refs):
-        with utils._tempdir(dir=self.context.artifactdir, prefix='push-repo-') as temp_repo_dir:
-
-            with element.timed_activity("Preparing compressed archive"):
-                # First create a temporary archive-z2 repository; we can
-                # only use ostree-push with an archive-z2 local repo.
-                temp_repo = _ostree.ensure(temp_repo_dir, True)
-
-                # Now push the ref we want to push into our temporary archive-z2 repo
-                for ref in refs:
-                    _ostree.fetch(temp_repo, remote=self.repo.get_path().get_uri(), ref=ref)
-
-            with element.timed_activity("Sending artifact"), \
-                element._output_file() as output_file:
-                try:
-                    pushed = push_artifact(temp_repo.get_path().get_path(),
-                                           remote.push_url,
-                                           refs, output_file)
-                except PushException as e:
-                    raise ArtifactError("Failed to push artifact {}: {}".format(refs, e)) from e
-
-            return pushed
-
-
-# Represents a single remote OSTree cache.
-#
-class _OSTreeRemote():
-    def __init__(self, spec, pull_url, push_url):
-        self.spec = spec
-        self.pull_url = pull_url
-        self.push_url = push_url
diff --git a/buildstream/_artifactcache/pushreceive.py b/buildstream/_artifactcache/pushreceive.py
deleted file mode 100644
index 777065e..0000000
--- a/buildstream/_artifactcache/pushreceive.py
+++ /dev/null
@@ -1,812 +0,0 @@
-#!/usr/bin/python3
-
-# Push OSTree commits to a remote repo, based on Dan Nicholson's ostree-push
-#
-# Copyright (C) 2015  Dan Nicholson <nicholson@endlessm.com>
-# Copyright (C) 2017  Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import logging
-import multiprocessing
-import os
-import re
-import subprocess
-import sys
-import shutil
-import tarfile
-import tempfile
-from enum import Enum
-from urllib.parse import urlparse
-
-import click
-import gi
-
-from .. import _signals  # nopep8
-from .._profile import Topics, profile_start, profile_end
-
-gi.require_version('OSTree', '1.0')
-# pylint: disable=wrong-import-position,wrong-import-order
-from gi.repository import GLib, Gio, OSTree  # nopep8
-
-
-PROTO_VERSION = 1
-HEADER_SIZE = 5
-
-
-# An error occurred
-class PushException(Exception):
-    pass
-
-
-# Trying to commit a ref which already exists in the remote
-class PushExistsException(Exception):
-    pass
-
-
-class PushCommandType(Enum):
-    info = 0
-    update = 1
-    putobjects = 2
-    status = 3
-    done = 4
-
-
-def python_to_msg_byteorder(python_byteorder=sys.byteorder):
-    if python_byteorder == 'little':
-        return 'l'
-    elif python_byteorder == 'big':
-        return 'B'
-    else:
-        raise PushException('Unrecognized system byteorder {}'
-                            .format(python_byteorder))
-
-
-def msg_to_python_byteorder(msg_byteorder):
-    if msg_byteorder == 'l':
-        return 'little'
-    elif msg_byteorder == 'B':
-        return 'big'
-    else:
-        raise PushException('Unrecognized message byteorder {}'
-                            .format(msg_byteorder))
-
-
-def ostree_object_path(repo, obj):
-    repodir = repo.get_path().get_path()
-    return os.path.join(repodir, 'objects', obj[0:2], obj[2:])
-
-
-class PushCommand(object):
-    def __init__(self, cmdtype, args):
-        self.cmdtype = cmdtype
-        self.args = args
-        self.validate(self.cmdtype, self.args)
-        self.variant = GLib.Variant('a{sv}', self.args)
-
-    @staticmethod
-    def validate(command, args):
-        if not isinstance(command, PushCommandType):
-            raise PushException('Message command must be PushCommandType')
-        if not isinstance(args, dict):
-            raise PushException('Message args must be dict')
-        # Ensure all values are variants for a{sv} vardict
-        for val in args.values():
-            if not isinstance(val, GLib.Variant):
-                raise PushException('Message args values must be '
-                                    'GLib.Variant')
-
-
-class PushMessageWriter(object):
-    def __init__(self, file, byteorder=sys.byteorder):
-        self.file = file
-        self.byteorder = byteorder
-        self.msg_byteorder = python_to_msg_byteorder(self.byteorder)
-
-    def encode_header(self, cmdtype, size):
-        header = self.msg_byteorder.encode() + \
-            PROTO_VERSION.to_bytes(1, self.byteorder) + \
-            cmdtype.value.to_bytes(1, self.byteorder) + \
-            size.to_bytes(2, self.byteorder)
-        return header
-
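# A worked example (illustrative values only) of the 5-byte wire header built
# by encode_header() above: a byte-order marker ('l' or 'B'), one protocol
# version byte, one command type byte and a two-byte payload size, all in the
# sender's byte order.  Here: little-endian, version 1, command 'update' (1),
# payload of 300 bytes.
hdr = b'l' + (1).to_bytes(1, 'little') + (1).to_bytes(1, 'little') + (300).to_bytes(2, 'little')
assert len(hdr) == 5                               # HEADER_SIZE
assert int.from_bytes(hdr[3:], 'little') == 300    # payload size round-trips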
-    def encode_message(self, command):
-        if not isinstance(command, PushCommand):
-            raise PushException('Command must be PushCommand')
-        data = command.variant.get_data_as_bytes()
-        size = data.get_size()
-
-        # Build the header
-        header = self.encode_header(command.cmdtype, size)
-
-        return header + data.get_data()
-
-    def write(self, command):
-        msg = self.encode_message(command)
-        self.file.write(msg)
-        self.file.flush()
-
-    def send_hello(self):
-        # The 'hello' message is used to check connectivity and discover the
-        # cache's pull URL. It's actually transmitted as an empty info request.
-        args = {
-            'mode': GLib.Variant('i', 0),
-            'refs': GLib.Variant('a{ss}', {})
-        }
-        command = PushCommand(PushCommandType.info, args)
-        self.write(command)
-
-    def send_info(self, repo, refs, pull_url=None):
-        cmdtype = PushCommandType.info
-        mode = repo.get_mode()
-
-        ref_map = {}
-        for ref in refs:
-            _, checksum = repo.resolve_rev(ref, True)
-            if checksum:
-                _, has_object = repo.has_object(OSTree.ObjectType.COMMIT, checksum, None)
-                if has_object:
-                    ref_map[ref] = checksum
-
-        args = {
-            'mode': GLib.Variant('i', mode),
-            'refs': GLib.Variant('a{ss}', ref_map)
-        }
-
-        # The server sends this so clients can discover the correct pull URL
-        # for this cache without requiring end-users to specify it.
-        if pull_url:
-            args['pull_url'] = GLib.Variant('s', pull_url)
-
-        command = PushCommand(cmdtype, args)
-        self.write(command)
-
-    def send_update(self, refs):
-        cmdtype = PushCommandType.update
-        args = {}
-        for branch, revs in refs.items():
-            args[branch] = GLib.Variant('(ss)', revs)
-        command = PushCommand(cmdtype, args)
-        self.write(command)
-
-    def send_putobjects(self, repo, objects):
-
-        logging.info('Sending {} objects'.format(len(objects)))
-
-        # Send command saying we're going to send a stream of objects
-        cmdtype = PushCommandType.putobjects
-        command = PushCommand(cmdtype, {})
-        self.write(command)
-
-        # Open a TarFile for writing uncompressed tar to a stream
-        tar = tarfile.TarFile.open(mode='w|', fileobj=self.file)
-        for obj in objects:
-
-            logging.info('Sending object {}'.format(obj))
-            objpath = ostree_object_path(repo, obj)
-            stat = os.stat(objpath)
-
-            tar_info = tarfile.TarInfo(obj)
-            tar_info.mtime = stat.st_mtime
-            tar_info.size = stat.st_size
-            with open(objpath, 'rb') as obj_fp:
-                tar.addfile(tar_info, obj_fp)
-
-        # We're done, close the tarfile
-        tar.close()
-
-    def send_status(self, result, message=''):
-        cmdtype = PushCommandType.status
-        args = {
-            'result': GLib.Variant('b', result),
-            'message': GLib.Variant('s', message)
-        }
-        command = PushCommand(cmdtype, args)
-        self.write(command)
-
-    def send_done(self):
-        command = PushCommand(PushCommandType.done, {})
-        self.write(command)
-
-
-class PushMessageReader(object):
-    def __init__(self, file, byteorder=sys.byteorder, tmpdir=None):
-        self.file = file
-        self.byteorder = byteorder
-        self.tmpdir = tmpdir
-
-    def decode_header(self, header):
-        if len(header) != HEADER_SIZE:
-            raise Exception('Header is {:d} bytes, not {:d}'.format(len(header), HEADER_SIZE))
-        order = msg_to_python_byteorder(chr(header[0]))
-        version = int(header[1])
-        if version != PROTO_VERSION:
-            raise Exception('Unsupported protocol version {:d}'.format(version))
-        cmdtype = PushCommandType(int(header[2]))
-        vlen = int.from_bytes(header[3:], order)
-        return order, version, cmdtype, vlen
-
-    def decode_message(self, message, size, order):
-        if len(message) != size:
-            raise Exception('Expected {:d} bytes, but got {:d}'.format(size, len(message)))
-        data = GLib.Bytes.new(message)
-        variant = GLib.Variant.new_from_bytes(GLib.VariantType.new('a{sv}'),
-                                              data, False)
-        if order != self.byteorder:
-            variant = GLib.Variant.byteswap(variant)
-
-        return variant
-
-    def read(self):
-        header = self.file.read(HEADER_SIZE)
-        if not header:
-            # Remote end quit
-            return None, None
-        order, _, cmdtype, size = self.decode_header(header)
-        msg = self.file.read(size)
-        if len(msg) != size:
-            raise PushException('Did not receive full message')
-        args = self.decode_message(msg, size, order)
-
-        return cmdtype, args
-
-    def receive(self, allowed):
-        cmdtype, args = self.read()
-        if cmdtype is None:
-            raise PushException('Expected reply, got none')
-        if cmdtype not in allowed:
-            raise PushException('Unexpected reply type', cmdtype.name)
-        return cmdtype, args.unpack()
-
-    def receive_info(self):
-        _, args = self.receive([PushCommandType.info])
-        return args
-
-    def receive_update(self):
-        _, args = self.receive([PushCommandType.update])
-        return args
-
-    def receive_putobjects(self, repo):
-
-        received_objects = []
-
-        # Open a TarFile for reading uncompressed tar from a stream
-        tar = tarfile.TarFile.open(mode='r|', fileobj=self.file)
-
-        # Extract every tarinfo into the temp location
-        #
-        # This should block while tar.next() reads the next
-        # tar object from the stream.
-        while True:
-            filepos = tar.fileobj.tell()
-            tar_info = tar.next()
-            if not tar_info:
-                # The end-of-stream marker consists of two 512-byte blocks.
-                # Current Python tarfile stops reading after the first block.
-                # Read the second block as well to ensure the stream is at
-                # the right position for following messages.
-                if tar.fileobj.tell() - filepos < 1024:
-                    tar.fileobj.read(512)
-                break
-
-            tar.extract(tar_info, self.tmpdir)
-            received_objects.append(tar_info.name)
-
-        # Finished with this stream
-        tar.close()
-
-        return received_objects
-
-    def receive_status(self):
-        _, args = self.receive([PushCommandType.status])
-        return args
-
-    def receive_done(self):
-        _, args = self.receive([PushCommandType.done])
-        return args
-
-
-def parse_remote_location(remotepath):
-    """Parse remote artifact cache URL that's been specified in our config."""
-    remote_host = remote_user = remote_repo = None
-
-    url = urlparse(remotepath, scheme='file')
-    if url.scheme:
-        if url.scheme not in ['file', 'ssh']:
-            raise PushException('Only URL schemes file and ssh are allowed, '
-                                'not "{}"'.format(url.scheme))
-        remote_host = url.hostname
-        remote_user = url.username
-        remote_repo = url.path
-        remote_port = url.port or 22
-    else:
-        # Scp/git style remote (user@hostname:path)
-        parts = remotepath.split('@', 1)
-        if len(parts) > 1:
-            remote_user = parts[0]
-            remainder = parts[1]
-        else:
-            remote_user = None
-            remainder = parts[0]
-        parts = remainder.split(':', 1)
-        if len(parts) != 2:
-            raise PushException('Remote repository "{}" does not '
-                                'contain a hostname and path separated '
-                                'by ":"'.format(remotepath))
-        remote_host, remote_repo = parts
-        # This form doesn't make it possible to specify a non-standard port.
-        remote_port = 22
-
-    return remote_host, remote_user, remote_repo, remote_port
-
-
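# Usage sketch for parse_remote_location(), with hypothetical host, user and
# repo values: an ssh:// URL yields host, user, repo path and port, while a
# bare path keeps the default 'file' scheme and therefore has no host.
assert parse_remote_location('ssh://artifacts@cache.example.com:222/srv/cache') == \
    ('cache.example.com', 'artifacts', '/srv/cache', 222)
assert parse_remote_location('/srv/cache') == (None, None, '/srv/cache', 22)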
-def ssh_commandline(remote_host, remote_user=None, remote_port=22):
-    if remote_host is None:
-        return []
-
-    ssh_cmd = ['ssh']
-    if remote_user:
-        ssh_cmd += ['-l', remote_user]
-    if remote_port != 22:
-        ssh_cmd += ['-p', str(remote_port)]
-    ssh_cmd += [remote_host]
-    return ssh_cmd
-
-
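# Usage sketch for ssh_commandline(), with hypothetical values: a user and a
# non-default port become ssh options, and a missing host yields an empty
# command line (the local, no-ssh case).
assert ssh_commandline('cache.example.com', 'artifacts', 222) == \
    ['ssh', '-l', 'artifacts', '-p', '222', 'cache.example.com']
assert ssh_commandline(None) == []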
-def foo_run(func, args, stdin_fd, stdout_fd, stderr_fd):
-    sys.stdin = open(stdin_fd, 'r')
-    sys.stdout = open(stdout_fd, 'w')
-    sys.stderr = open(stderr_fd, 'w')
-    func(args)
-
-
-class ProcessWithPipes(object):
-    def __init__(self, func, args, *, stderr=None):
-        r0, w0 = os.pipe()
-        r1, w1 = os.pipe()
-        if stderr is None:
-            r2, w2 = os.pipe()
-        else:
-            w2 = stderr.fileno()
-        self.proc = multiprocessing.Process(target=foo_run, args=(func, args, r0, w1, w2))
-        self.proc.start()
-        self.stdin = open(w0, 'wb')
-        os.close(r0)
-        self.stdout = open(r1, 'rb')
-        os.close(w1)
-        if stderr is None:
-            self.stderr = open(r2, 'rb')
-            os.close(w2)
-
-        # The eventual return code
-        self.returncode = -1
-
-    def wait(self):
-        self.proc.join()
-        self.returncode = self.proc.exitcode
-
-
-class OSTreePusher(object):
-    def __init__(self, repopath, remotepath, branches=None, verbose=False,
-                 debug=False, output=None):
-        self.repopath = repopath
-        self.remotepath = remotepath
-        self.verbose = verbose
-        self.debug = debug
-        self.output = output
-
-        self.remote_host, self.remote_user, self.remote_repo, self.remote_port = \
-            parse_remote_location(remotepath)
-
-        if self.repopath is None:
-            self.repo = OSTree.Repo.new_default()
-        else:
-            self.repo = OSTree.Repo.new(Gio.File.new_for_path(self.repopath))
-        self.repo.open(None)
-
-        # Enumerate branches to push
-        if branches is None:
-            _, self.refs = self.repo.list_refs(None, None)
-        else:
-            self.refs = {}
-            for branch in branches:
-                _, rev = self.repo.resolve_rev(branch, False)
-                self.refs[branch] = rev
-
-        # Start ssh
-        ssh_cmd = ssh_commandline(self.remote_host, self.remote_user, self.remote_port)
-
-        ssh_cmd += ['bst-artifact-receive']
-        if self.verbose:
-            ssh_cmd += ['--verbose']
-        if self.debug:
-            ssh_cmd += ['--debug']
-        if not self.remote_host:
-            ssh_cmd += ['--pull-url', self.remote_repo]
-        ssh_cmd += [self.remote_repo]
-
-        logging.info('Executing {}'.format(' '.join(ssh_cmd)))
-
-        if self.remote_host:
-            self.ssh = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
-                                        stdout=subprocess.PIPE,
-                                        stderr=self.output,
-                                        start_new_session=True)
-        else:
-            self.ssh = ProcessWithPipes(receive_main, ssh_cmd[1:], stderr=self.output)
-
-        self.writer = PushMessageWriter(self.ssh.stdin)
-        self.reader = PushMessageReader(self.ssh.stdout)
-
-    def needed_commits(self, remote, local, needed):
-        parent = local
-        if remote == '0' * 64:
-            # Nonexistent remote branch, use None for convenience
-            remote = None
-        while parent != remote:
-            needed.add(parent)
-            _, commit = self.repo.load_variant_if_exists(OSTree.ObjectType.COMMIT,
-                                                         parent)
-            if commit is None:
-                raise PushException('Shallow history from commit {} does '
-                                    'not contain remote commit {}'.format(local, remote))
-            parent = OSTree.commit_get_parent(commit)
-            if parent is None:
-                break
-        if remote is not None and parent != remote:
-            self.writer.send_done()
-            raise PushExistsException('Remote commit {} not descendant of '
-                                      'commit {}'.format(remote, local))
-
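# A self-contained sketch of the ancestry walk performed by needed_commits()
# above, using a plain parent map in place of an OSTree repo: local commits
# are collected until the remote head (or the root) is reached.  Commit names
# are hypothetical.
parents = {'c3': 'c2', 'c2': 'c1', 'c1': None}
needed, parent, remote_head = set(), 'c3', 'c1'
while parent is not None and parent != remote_head:
    needed.add(parent)
    parent = parents[parent]
assert needed == {'c3', 'c2'}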
-    def needed_objects(self, commits):
-        objects = set()
-        for rev in commits:
-            _, reachable = self.repo.traverse_commit(rev, 0, None)
-            for obj in reachable:
-                objname = OSTree.object_to_string(obj[0], obj[1])
-                if obj[1] == OSTree.ObjectType.FILE:
-                    # Make this a filez since we're archive-z2
-                    objname += 'z'
-                elif obj[1] == OSTree.ObjectType.COMMIT:
-                    # Add in detached metadata
-                    metaobj = objname + 'meta'
-                    metapath = ostree_object_path(self.repo, metaobj)
-                    if os.path.exists(metapath):
-                        objects.add(metaobj)
-
-                    # Add in Endless compat files
-                    for suffix in ['sig', 'sizes2']:
-                        metaobj = obj[0] + '.' + suffix
-                        metapath = ostree_object_path(self.repo, metaobj)
-                        if os.path.exists(metapath):
-                            objects.add(metaobj)
-                objects.add(objname)
-        return objects
-
-    def close(self):
-        self.ssh.stdin.close()
-        return self.ssh.wait()
-
-    def run(self):
-        remote_refs = {}
-        update_refs = {}
-
-        # Send info immediately
-        self.writer.send_info(self.repo, list(self.refs.keys()))
-
-        # Receive remote info
-        logging.info('Receiving repository information')
-        args = self.reader.receive_info()
-        remote_mode = args['mode']
-        if remote_mode != OSTree.RepoMode.ARCHIVE_Z2:
-            raise PushException('Can only push to archive-z2 repos')
-        remote_refs = args['refs']
-        for branch, rev in self.refs.items():
-            remote_rev = remote_refs.get(branch, '0' * 64)
-            if rev != remote_rev:
-                update_refs[branch] = remote_rev, rev
-        if not update_refs:
-            logging.info('Nothing to update')
-            self.writer.send_done()
-            raise PushExistsException('Nothing to update')
-
-        # Send update command
-        logging.info('Sending update request')
-        self.writer.send_update(update_refs)
-
-        # Receive status for update request
-        args = self.reader.receive_status()
-        if not args['result']:
-            self.writer.send_done()
-            raise PushException(args['message'])
-
-        # Collect commits and objects to push
-        commits = set()
-        exc_info = None
-        ref_count = 0
-        for branch, revs in update_refs.items():
-            logging.info('Updating {} {} to {}'.format(branch, revs[0], revs[1]))
-            try:
-                self.needed_commits(revs[0], revs[1], commits)
-                ref_count += 1
-            except PushExistsException:
-                if exc_info is None:
-                    exc_info = sys.exc_info()
-
-        # Re-raise PushExistsException if all refs exist already
-        if ref_count == 0 and exc_info:
-            raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
-
-        logging.info('Enumerating objects to send')
-        objects = self.needed_objects(commits)
-
-        # Send all the objects to receiver, checking status after each
-        self.writer.send_putobjects(self.repo, objects)
-
-        # Inform receiver that all objects have been sent
-        self.writer.send_done()
-
-        # Wait until receiver has completed
-        self.reader.receive_done()
-
-        return self.close()
-
-
-# OSTreeReceiver is on the receiving end of an OSTree push.
-#
-# Args:
-#     repopath (str): On-disk location of the receiving repository.
-#     pull_url (str): Redirection for clients who want to pull, not push.
-#
-class OSTreeReceiver(object):
-    def __init__(self, repopath, pull_url):
-        self.repopath = repopath
-        self.pull_url = pull_url
-
-        if self.repopath is None:
-            self.repo = OSTree.Repo.new_default()
-        else:
-            self.repo = OSTree.Repo.new(Gio.File.new_for_path(self.repopath))
-        self.repo.open(None)
-
-        repo_tmp = os.path.join(self.repopath, 'tmp')
-        self.tmpdir = tempfile.mkdtemp(dir=repo_tmp, prefix='bst-push-')
-        self.writer = PushMessageWriter(sys.stdout.buffer)
-        self.reader = PushMessageReader(sys.stdin.buffer, tmpdir=self.tmpdir)
-
-        # Set a sane umask before writing any objects
-        os.umask(0o0022)
-
-    def close(self):
-        shutil.rmtree(self.tmpdir)
-        sys.stdout.flush()
-        return 0
-
-    def run(self):
-        try:
-            exit_code = self.do_run()
-            self.close()
-            return exit_code
-        except:
-            # BLIND EXCEPT - Just abort if we receive any exception, this
-            # can be a broken pipe, a tarfile read error when the remote
-            # connection is closed, a bug; whatever happens, we want to clean up.
-            self.close()
-            raise
-
-    def do_run(self):
-        # Receive remote info
-        args = self.reader.receive_info()
-        remote_refs = args['refs']
-
-        # Send info back
-        self.writer.send_info(self.repo, list(remote_refs.keys()),
-                              pull_url=self.pull_url)
-
-        # Wait for update or done command
-        cmdtype, args = self.reader.receive([PushCommandType.update,
-                                             PushCommandType.done])
-        if cmdtype == PushCommandType.done:
-            return 0
-        update_refs = args
-
-        profile_names = set()
-        for update_ref in update_refs:
-            # Strip off the SHA256 sum on the right of the reference,
-            # leaving the project and element name
-            project_and_element_name = re.sub(r"/[a-z0-9]+$", '', update_ref)
-            profile_names.add(project_and_element_name)
-
-        profile_name = '_'.join(profile_names)
-        profile_start(Topics.ARTIFACT_RECEIVE, profile_name)
-
-        self.writer.send_status(True)
-
-        # Wait for putobjects or done
-        cmdtype, args = self.reader.receive([PushCommandType.putobjects,
-                                             PushCommandType.done])
-
-        if cmdtype == PushCommandType.done:
-            logging.debug('Received done before any objects, exiting')
-            return 0
-
-        # Receive the actual objects
-        received_objects = self.reader.receive_putobjects(self.repo)
-
-        # Ensure that pusher has sent all objects
-        self.reader.receive_done()
-
-        # If we didn't get any objects, we're done
-        if not received_objects:
-            return 0
-
-        # Got all objects, move them to the object store
-        for obj in received_objects:
-            tmp_path = os.path.join(self.tmpdir, obj)
-            obj_path = ostree_object_path(self.repo, obj)
-            os.makedirs(os.path.dirname(obj_path), exist_ok=True)
-            logging.debug('Renaming {} to {}'.format(tmp_path, obj_path))
-            os.rename(tmp_path, obj_path)
-
-        # Verify that we have the specified commit objects
-        for branch, revs in update_refs.items():
-            _, has_object = self.repo.has_object(OSTree.ObjectType.COMMIT, revs[1], None)
-            if not has_object:
-                raise PushException('Missing commit {} for ref {}'.format(revs[1], branch))
-
-        # Finally, update the refs
-        for branch, revs in update_refs.items():
-            logging.debug('Setting ref {} to {}'.format(branch, revs[1]))
-            self.repo.set_ref_immediate(None, branch, revs[1], None)
-
-        # Inform pusher that everything is in place
-        self.writer.send_done()
-
-        profile_end(Topics.ARTIFACT_RECEIVE, profile_name)
-
-        return 0
-
-
-# initialize_push_connection()
-#
-# Test that we can connect to the remote bst-artifact-receive program, and
-# receive the pull URL for this artifact cache.
-#
-# We don't want to make the user wait until the first artifact has been built
-# to discover that they actually cannot push, so this should be called early.
-#
-# The SSH push protocol doesn't allow pulling artifacts. We don't want to
-# require users to specify two URLs for a single cache, so we have the push
-# protocol return the corresponding pull URL as part of the 'hello' response.
-#
-# Args:
-#   remote: The ssh remote url to push to
-#
-# Returns:
-#   (str): The URL that should be used for pushing to this cache.
-#
-# Raises:
-#   PushException if there was an issue connecting to the remote.
-def initialize_push_connection(remote):
-    remote_host, remote_user, remote_repo, remote_port = parse_remote_location(remote)
-    ssh_cmd = ssh_commandline(remote_host, remote_user, remote_port)
-
-    if remote_host:
-        # We need a short timeout here because if 'remote' isn't reachable at
-        # all, the process will hang until the connection times out.
-        ssh_cmd += ['-oConnectTimeout=3']
-
-    ssh_cmd += ['bst-artifact-receive', remote_repo]
-
-    if remote_host:
-        ssh = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
-                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    else:
-        ssh_cmd += ['--pull-url', remote_repo]
-        ssh = ProcessWithPipes(receive_main, ssh_cmd[1:])
-
-    writer = PushMessageWriter(ssh.stdin)
-    reader = PushMessageReader(ssh.stdout)
-
-    try:
-        writer.send_hello()
-        args = reader.receive_info()
-        writer.send_done()
-
-        if 'pull_url' in args:
-            pull_url = args['pull_url']
-        else:
-            raise PushException(
-                "Remote cache did not tell us its pull URL. This cache probably "
-                "requires updating to a newer version of `bst-artifact-receive`.")
-    except PushException as protocol_error:
-        # If we get a read error on the wire, let's first see if SSH reported
-        # an error such as 'Permission denied'. If so this will be much more
-        # useful to the user than the "Expected reply, got none" sort of
-        # message that reader.receive_info() will have raised.
-        ssh.wait()
-        if ssh.returncode != 0:
-            ssh_error = ssh.stderr.read().decode('unicode-escape').strip()
-            raise PushException("{}".format(ssh_error))
-        else:
-            raise protocol_error
-
-    return pull_url
-
-
-# push()
-#
-# Run the pusher in process, with logging going to the output file
-#
-# Args:
-#   repo: The local repository path
-#   remote: The ssh remote url to push to
-#   branches: The refs to push
-#   output: The output where logging should go
-#
-# Returns:
-#   (bool): True if the remote was updated, False if it already existed
-#           and no update was required
-#
-# Raises:
-#   PushException if there was an error
-#
-def push(repo, remote, branches, output):
-
-    logging.basicConfig(format='%(module)s: %(levelname)s: %(message)s',
-                        level=logging.INFO, stream=output)
-
-    pusher = OSTreePusher(repo, remote, branches, True, False, output=output)
-
-    def terminate_push():
-        pusher.close()
-
-    with _signals.terminator(terminate_push):
-        try:
-            pusher.run()
-            return True
-        except ConnectionError as e:
-            # Connection attempt failed or connection was terminated unexpectedly
-            terminate_push()
-            raise PushException("Connection failed") from e
-        except PushException:
-            terminate_push()
-            raise
-        except PushExistsException:
-            # If the commit already existed, just bail out
-            # on the push and don't bother re-raising the error
-            logging.info("Ref {} was already present in remote {}".format(branches, remote))
-            terminate_push()
-            return False
-
-
-@click.command(short_help="Receive pushed artifacts over ssh")
-@click.option('--verbose', '-v', is_flag=True, default=False, help="Verbose mode")
-@click.option('--debug', '-d', is_flag=True, default=False, help="Debug mode")
-@click.option('--pull-url', type=str, required=True,
-              help="Clients who try to pull over SSH will be redirected here")
-@click.argument('repo')
-def receive_main(verbose, debug, pull_url, repo):
-    """A BuildStream sister program for receiving artifacts send to a shared artifact cache
-    """
-    loglevel = logging.WARNING
-    if verbose:
-        loglevel = logging.INFO
-    if debug:
-        loglevel = logging.DEBUG
-    logging.basicConfig(format='%(module)s: %(levelname)s: %(message)s',
-                        level=loglevel, stream=sys.stderr)
-
-    receiver = OSTreeReceiver(repo, pull_url)
-    return receiver.run()
diff --git a/buildstream/_artifactcache/tarcache.py b/buildstream/_artifactcache/tarcache.py
deleted file mode 100644
index 10ae9d0..0000000
--- a/buildstream/_artifactcache/tarcache.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/env python3
-#
-#  Copyright (C) 2017 Codethink Limited
-#
-#  This program is free software; you can redistribute it and/or
-#  modify it under the terms of the GNU Lesser General Public
-#  License as published by the Free Software Foundation; either
-#  version 2 of the License, or (at your option) any later version.
-#
-#  This library is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
-#  Lesser General Public License for more details.
-#
-#  You should have received a copy of the GNU Lesser General Public
-#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-#  Authors:
-#        Tristan Maat <tristan.maat@codethink.co.uk>
-
-import os
-import shutil
-import tarfile
-import subprocess
-
-from .. import utils, ProgramNotFoundError
-from .._exceptions import ArtifactError
-
-from . import ArtifactCache
-
-
-class TarCache(ArtifactCache):
-
-    def __init__(self, context):
-
-        super().__init__(context)
-
-        self.tardir = os.path.join(context.artifactdir, 'tar')
-        os.makedirs(self.tardir, exist_ok=True)
-
-    ################################################
-    #     Implementation of abstract methods       #
-    ################################################
-    def contains(self, element, key):
-        path = os.path.join(self.tardir, _tarpath(element, key))
-        return os.path.isfile(path)
-
-    def commit(self, element, content, keys):
-        os.makedirs(os.path.join(self.tardir, element._get_project().name, element.normal_name), exist_ok=True)
-
-        with utils._tempdir() as temp:
-            for key in keys:
-                ref = _tarpath(element, key)
-
-                refdir = os.path.join(temp, key)
-                shutil.copytree(content, refdir, symlinks=True)
-
-                _Tar.archive(os.path.join(self.tardir, ref), key, temp)
-
-    def extract(self, element, key):
-
-        fullname = self.get_artifact_fullname(element, key)
-        path = _tarpath(element, key)
-
-        # Extracting a nonexistent artifact is a bug
-        assert os.path.isfile(os.path.join(self.tardir, path)), "Artifact missing for {}".format(fullname)
-
-        # If the destination already exists, the artifact has been extracted
-        dest = os.path.join(self.extractdir, fullname)
-        if os.path.isdir(dest):
-            return dest
-
-        os.makedirs(self.extractdir, exist_ok=True)
-
-        with utils._tempdir(dir=self.extractdir) as tmpdir:
-            _Tar.extract(os.path.join(self.tardir, path), tmpdir)
-
-            os.makedirs(os.path.join(self.extractdir, element._get_project().name, element.normal_name),
-                        exist_ok=True)
-            try:
-                os.rename(os.path.join(tmpdir, key), dest)
-            except OSError as e:
-                # With rename, it's possible to get either ENOTEMPTY or EEXIST
-                # in the case that the destination path is a non-empty directory.
-                #
-                # If rename fails with these errors, another process beat
-                # us to it so just ignore.
-                if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
-                    raise ArtifactError("Failed to extract artifact '{}': {}"
-                                        .format(fullname, e)) from e
-
-        return dest
-
-
-# _tarpath()
-#
-# Generate a relative tarball path for a given element and its cache key
-#
-# Args:
-#    element (Element): The Element object
-#    key (str): The element's cache key
-#
-# Returns:
-#    (str): The relative path to use for storing tarballs
-#
-def _tarpath(element, key):
-    project = element._get_project()
-    return os.path.join(project.name, element.normal_name, key + '.tar.bz2')
-
-
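# Usage sketch for _tarpath(), using hypothetical stand-ins for an Element and
# its Project (the real objects come from BuildStream core): artifacts are laid
# out as <project>/<element normal name>/<cache key>.tar.bz2 under the cache's
# 'tar' directory.
import os
from types import SimpleNamespace

_project = SimpleNamespace(name='myproject')
_element = SimpleNamespace(normal_name='base-sdk', _get_project=lambda: _project)
assert _tarpath(_element, '4ac0ffee') == os.path.join('myproject', 'base-sdk', '4ac0ffee.tar.bz2')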
-# A helper class that contains tar archive/extract functions
-class _Tar():
-
-    # archive()
-    #
-    # Attempt to create the given tarball with the `tar` command,
-    # falling back to python's `tarfile` if this fails.
-    #
-    # Args:
-    #     location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
-    #     cwd (str): The cwd
-    #
-    # This is done since AIX tar does not support 2G+ files.
-    #
-    @classmethod
-    def archive(cls, location, content, cwd=os.getcwd()):
-
-        try:
-            cls._archive_with_tar(location, content, cwd)
-            return
-        except tarfile.TarError:
-            pass
-        except ProgramNotFoundError:
-            pass
-
-        # If the host tar did not complete successfully, fall back to
-        # python's tarfile implementation, since it's hard to detect
-        # the specific failure modes of specific tar implementations.
-
-        try:
-            cls._archive_with_python(location, content, cwd)
-        except tarfile.TarError as e:
-            raise ArtifactError("Failed to archive {}: {}"
-                                .format(location, e)) from e
-
-    # extract()
-    #
-    # Attempt to extract the given tarfile with the `tar` command,
-    # falling back to python's `tarfile` if this fails.
-    #
-    # Args:
-    #     location (str): The path to the tar to extract
-    #     dest (str): The destination path to extract to
-    #
-    # This is done since python tarfile extraction is horrendously
-    # slow (2 hrs+ for base images).
-    #
-    @classmethod
-    def extract(cls, location, dest):
-
-        try:
-            cls._extract_with_tar(location, dest)
-            return
-        except tarfile.TarError:
-            pass
-        except ProgramNotFoundError:
-            pass
-
-        try:
-            cls._extract_with_python(location, dest)
-        except tarfile.TarError as e:
-            raise ArtifactError("Failed to extract {}: {}"
-                                .format(location, e)) from e
-
-    # _get_host_tar()
-    #
-    # Get the host tar command.
-    #
-    # Raises:
-    #     ProgramNotFoundError: If the tar executable cannot be
-    #                           located
-    #
-    @classmethod
-    def _get_host_tar(cls):
-        tar_cmd = None
-
-        for potential_tar_cmd in ['gtar', 'tar']:
-            try:
-                tar_cmd = utils.get_host_tool(potential_tar_cmd)
-                break
-            except ProgramNotFoundError:
-                continue
-
-        # If we still couldn't find tar, raise ProgramNotFoundError
-        if tar_cmd is None:
-            raise ProgramNotFoundError("Did not find tar in PATH: {}"
-                                       .format(os.environ.get('PATH')))
-
-        return tar_cmd
-
-    # _archive_with_tar()
-    #
-    # Archive with an implementation of the `tar` command
-    #
-    # Args:
-    #     location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
-    #     cwd (str): The cwd
-    #
-    # Raises:
-    #     TarError: If an error occurs during archiving
-    #     ProgramNotFoundError: If the tar executable cannot be
-    #                           located
-    #
-    @classmethod
-    def _archive_with_tar(cls, location, content, cwd):
-        tar_cmd = cls._get_host_tar()
-
-        process = subprocess.Popen(
-            [tar_cmd, 'jcaf', location, content],
-            cwd=cwd,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE
-        )
-
-        _, err = process.communicate()
-        if process.poll() != 0:
-            # Clean up in case the command failed in a broken state
-            try:
-                os.remove(location)
-            except FileNotFoundError:
-                pass
-
-            raise tarfile.TarError("Failed to archive '{}': {}"
-                                   .format(content, err.decode('utf8')))
-
-    # _archive_with_python()
-    #
-    # Archive with the python `tarfile` module
-    #
-    # Args:
-    #     location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
-    #     cwd (str): The cwd
-    #
-    # Raises:
-    #     TarError: If an error occurs during archiving
-    #
-    @classmethod
-    def _archive_with_python(cls, location, content, cwd):
-        with tarfile.open(location, mode='w:bz2') as tar:
-            tar.add(os.path.join(cwd, content), arcname=content)
-
-    # _extract_with_tar()
-    #
-    # Extract with an implementation of the `tar` command
-    #
-    # Args:
-    #     location (str): The path to the tar to extract
-    #     dest (str): The destination path to extract to
-    #
-    # Raises:
-    #     TarError: If an error occurs during extraction
-    #
-    @classmethod
-    def _extract_with_tar(cls, location, dest):
-        tar_cmd = cls._get_host_tar()
-
-        # Some tar implementations do not support '-C'
-        process = subprocess.Popen(
-            [tar_cmd, 'jxf', location],
-            cwd=dest,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE
-        )
-
-        _, err = process.communicate()
-        if process.poll() != 0:
-            raise tarfile.TarError("Failed to extract '{}': {}"
-                                   .format(location, err.decode('utf8')))
-
-    # _extract_with_python()
-    #
-    # Extract with the python `tarfile` module
-    #
-    # Args:
-    #     location (str): The path to the tar to extract
-    #     dest (str): The destination path to extract to
-    #
-    # Raises:
-    #     TarError: If an error occurs during extraction
-    #
-    @classmethod
-    def _extract_with_python(cls, location, dest):
-        with tarfile.open(location) as tar:
-            tar.extractall(path=dest)
diff --git a/buildstream/_frontend/cli.py b/buildstream/_frontend/cli.py
index c321fa9..41e97cb 100644
--- a/buildstream/_frontend/cli.py
+++ b/buildstream/_frontend/cli.py
@@ -625,6 +625,11 @@
             click.echo('No open workspaces to close', err=True)
             sys.exit(0)
 
+        if all_:
+            elements = [element_name for element_name, _ in app.project.workspaces.list()]
+
+        elements = app.stream.redirect_element_names(elements)
+
         # Check that the workspaces in question exist
         nonexisting = []
         for element_name in elements:
@@ -638,8 +643,6 @@
                 click.echo('Aborting', err=True)
                 sys.exit(-1)
 
-        if all_:
-            elements = [element_name for element_name, _ in app.project.workspaces.list()]
         for element_name in elements:
             app.stream.workspace_close(element_name, remove_dir=remove_dir)
 
@@ -669,13 +672,6 @@
         if all_ and not app.stream.workspace_exists():
             raise AppError("No open workspaces to reset")
 
-        nonexisting = []
-        for element_name in elements:
-            if not app.stream.workspace_exists(element_name):
-                nonexisting.append(element_name)
-        if nonexisting:
-            raise AppError("Workspace does not exist", detail="\n".join(nonexisting))
-
         if app.interactive and not soft:
             if not click.confirm('This will remove all your changes, are you sure?'):
                 click.echo('Aborting', err=True)
diff --git a/buildstream/_frontend/linuxapp.py b/buildstream/_frontend/linuxapp.py
index 436a619..92586bc 100644
--- a/buildstream/_frontend/linuxapp.py
+++ b/buildstream/_frontend/linuxapp.py
@@ -29,6 +29,6 @@
 
     def notify(self, title, text):
 
-        term = os.environ('TERM')
+        term = os.environ['TERM']
         if term in ('xterm', 'vte'):
             click.echo("\033]777;notify;{};{}\007".format(title, text))
diff --git a/buildstream/_ostree.py b/buildstream/_ostree.py
index dfa7567..6fee37d 100644
--- a/buildstream/_ostree.py
+++ b/buildstream/_ostree.py
@@ -27,7 +27,6 @@
 # pylint: disable=bad-exception-context,catching-non-exception
 
 import os
-from collections import namedtuple
 
 import gi
 from gi.repository.GLib import Variant, VariantDict
@@ -117,80 +116,6 @@
         raise OSTreeError("Failed to checkout commit '{}': {}".format(commit_, e.message)) from e
 
 
-# commit():
-#
-# Commit built artifact to cache.
-#
-# Files are all recorded with uid/gid 0
-#
-# Args:
-#    repo (OSTree.Repo): The repo
-#    dir_ (str): The source directory to commit to the repo
-#    refs (list): A list of symbolic references (tag) for the commit
-#
-def commit(repo, dir_, refs):
-
-    def commit_filter(repo, path, file_info):
-
-        # For now, just set everything in the repo as uid/gid 0
-        #
-        # In the future we'll want to extract virtualized file
-        # attributes from a fuse layer and use that.
-        #
-        file_info.set_attribute_uint32('unix::uid', 0)
-        file_info.set_attribute_uint32('unix::gid', 0)
-
-        return OSTree.RepoCommitFilterResult.ALLOW
-
-    commit_modifier = OSTree.RepoCommitModifier.new(
-        OSTree.RepoCommitModifierFlags.NONE, commit_filter)
-
-    repo.prepare_transaction()
-    try:
-        # add tree to repository
-        mtree = OSTree.MutableTree.new()
-        repo.write_directory_to_mtree(Gio.File.new_for_path(dir_),
-                                      mtree, commit_modifier)
-        _, root = repo.write_mtree(mtree)
-
-        # create root commit object, no parent, no branch
-        _, rev = repo.write_commit(None, None, None, None, root)
-
-        # create refs
-        for ref in refs:
-            repo.transaction_set_ref(None, ref, rev)
-
-        # complete repo transaction
-        repo.commit_transaction(None)
-    except GLib.GError as e:
-
-        # Reraise any error as a buildstream error
-        repo.abort_transaction()
-        raise OSTreeError(e.message) from e
-
-
-# set_ref():
-#
-# Set symbolic reference to specified revision.
-#
-# Args:
-#    repo (OSTree.Repo): The repo
-#    ref (str): A symbolic reference (tag) for the commit
-#    rev (str): Commit checksum
-#
-def set_ref(repo, ref, rev):
-
-    repo.prepare_transaction()
-    try:
-        repo.transaction_set_ref(None, ref, rev)
-
-        # complete repo transaction
-        repo.commit_transaction(None)
-    except:
-        repo.abort_transaction()
-        raise
-
-
 # exists():
 #
 # Checks whether a given commit or symbolic ref exists and
@@ -244,172 +169,6 @@
     return checksum_
 
 
-OSTREE_GIO_FAST_QUERYINFO = ("standard::name,standard::type,standard::size,"
-                             "standard::is-symlink,standard::symlink-target,"
-                             "unix::device,unix::inode,unix::mode,unix::uid,"
-                             "unix::gid,unix::rdev")
-
-
-DiffItem = namedtuple('DiffItem', ['src', 'src_info',
-                                   'target', 'target_info',
-                                   'src_checksum', 'target_checksum'])
-
-
-# diff_dirs():
-#
-# Compute the difference between directory a and b as 3 separate sets
-# of OSTree.DiffItem.
-#
-# This is more-or-less a direct port of OSTree.diff_dirs (which cannot
-# be used via PyGObject), but does not support options.
-#
-# Args:
-#    a (Gio.File): The first directory for the comparison.
-#    b (Gio.File): The second directory for the comparison.
-#
-# Returns:
-#    (modified, removed, added)
-#
-def diff_dirs(a, b):
-    # get_file_checksum():
-    #
-    # Helper to compute the checksum of an arbitrary file (different
-    # objects have different methods to compute these).
-    #
-    def get_file_checksum(f, f_info):
-        if isinstance(f, OSTree.RepoFile):
-            return f.get_checksum()
-        else:
-            contents = None
-            if f_info.get_file_type() == Gio.FileType.REGULAR:
-                contents = f.read()
-
-            csum = OSTree.checksum_file_from_input(f_info, None, contents,
-                                                   OSTree.ObjectType.FILE)
-            return OSTree.checksum_from_bytes(csum)
-
-    # diff_files():
-    #
-    # Helper to compute a diff between two files.
-    #
-    def diff_files(a, a_info, b, b_info):
-        checksum_a = get_file_checksum(a, a_info)
-        checksum_b = get_file_checksum(b, b_info)
-
-        if checksum_a != checksum_b:
-            return DiffItem(a, a_info, b, b_info, checksum_a, checksum_b)
-
-        return None
-
-    # diff_add_dir_recurse():
-    #
-    # Helper to collect all files in a directory recursively.
-    #
-    def diff_add_dir_recurse(d):
-        added = []
-
-        dir_enum = d.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
-                                        Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-
-        for child_info in dir_enum:
-            name = child_info.get_name()
-            child = d.get_child(name)
-            added.append(child)
-
-            if child_info.get_file_type() == Gio.FileType.DIRECTORY:
-                added.extend(diff_add_dir_recurse(child))
-
-        return added
-
-    modified = []
-    removed = []
-    added = []
-
-    child_a_info = a.query_info(OSTREE_GIO_FAST_QUERYINFO,
-                                Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-    child_b_info = b.query_info(OSTREE_GIO_FAST_QUERYINFO,
-                                Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-
-    # If both are directories and have the same checksum, we know that
-    # none of the underlying files changed, so we can save time.
-    if (child_a_info.get_file_type() == Gio.FileType.DIRECTORY and
-            child_b_info.get_file_type() == Gio.FileType.DIRECTORY and
-            isinstance(a, OSTree.RepoFileClass) and
-            isinstance(b, OSTree.RepoFileClass)):
-        if a.tree_get_contents_checksum() == b.tree_get_contents_checksum():
-            return modified, removed, added
-
-    # We walk through 'a' first
-    dir_enum = a.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
-                                    Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-    for child_a_info in dir_enum:
-        name = child_a_info.get_name()
-
-        child_a = a.get_child(name)
-        child_a_type = child_a_info.get_file_type()
-
-        try:
-            child_b = b.get_child(name)
-            child_b_info = child_b.query_info(OSTREE_GIO_FAST_QUERYINFO,
-                                              Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-        except GLib.Error as e:
-            # If the file does not exist in b, it has been removed
-            if e.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
-                removed.append(child_a)
-                continue
-            else:
-                raise
-
-        # If the files differ but are of different types, we report a
-        # modification, saving a bit of time because we won't need a
-        # checksum
-        child_b_type = child_b_info.get_file_type()
-        if child_a_type != child_b_type:
-            diff_item = DiffItem(child_a, child_a_info,
-                                 child_b, child_b_info,
-                                 None, None)
-            modified.append(diff_item)
-        # Finally, we compute checksums and compare the file contents directly
-        else:
-            diff_item = diff_files(child_a, child_a_info, child_b, child_b_info)
-
-            if diff_item:
-                modified.append(diff_item)
-
-            # If the files are both directories, we recursively use
-            # this function to find differences - saving time if they
-            # are equal.
-            if child_a_type == Gio.FileType.DIRECTORY:
-                subdir = diff_dirs(child_a, child_b)
-                modified.extend(subdir[0])
-                removed.extend(subdir[1])
-                added.extend(subdir[2])
-
-    # Now we walk through 'b' to find any files that were added
-    dir_enum = b.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
-                                    Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-    for child_b_info in dir_enum:
-        name = child_b_info.get_name()
-
-        child_b = b.get_child(name)
-
-        try:
-            child_a = a.get_child(name)
-            child_a_info = child_a.query_info(OSTREE_GIO_FAST_QUERYINFO,
-                                              Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-        except GLib.Error as e:
-            # If the file does not exist in 'a', it was added.
-            if e.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
-                added.append(child_b)
-                if child_b_info.get_file_type() == Gio.FileType.DIRECTORY:
-                    added.extend(diff_add_dir_recurse(child_b))
-                continue
-            else:
-                raise
-
-    return modified, removed, added
-
-
 # fetch()
 #
 # Fetch new objects from a remote, if configured
diff --git a/buildstream/_pipeline.py b/buildstream/_pipeline.py
index 04979bc..ba27ca6 100644
--- a/buildstream/_pipeline.py
+++ b/buildstream/_pipeline.py
@@ -212,11 +212,19 @@
     # use in the result, this function reports a list that is appropriate for
     # the selected option.
     #
-    def get_selection(self, targets, mode):
+    def get_selection(self, targets, mode, *, silent=True):
 
         elements = None
         if mode == PipelineSelection.NONE:
-            elements = targets
+            # Redirect and log if permitted
+            elements = []
+            for t in targets:
+                new_elm = t._get_source_element()
+                if new_elm != t and not silent:
+                    self._message(MessageType.INFO, "Element '{}' redirected to '{}'"
+                                  .format(t.name, new_elm.name))
+                if new_elm not in elements:
+                    elements.append(new_elm)
         elif mode == PipelineSelection.PLAN:
             elements = self.plan(targets)
         else:
diff --git a/buildstream/_platform/linux.py b/buildstream/_platform/linux.py
index 26dafb9..f620f25 100644
--- a/buildstream/_platform/linux.py
+++ b/buildstream/_platform/linux.py
@@ -22,7 +22,7 @@
 
 from .. import _site
 from .. import utils
-from .._artifactcache.ostreecache import OSTreeCache
+from .._artifactcache.cascache import CASCache
 from .._message import Message, MessageType
 from ..sandbox import SandboxBwrap
 
@@ -37,7 +37,7 @@
 
         self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
         self._user_ns_available = self._check_user_ns_available(context)
-        self._artifact_cache = OSTreeCache(context, enable_push=self._user_ns_available)
+        self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
 
     @property
     def artifactcache(self):
diff --git a/buildstream/_platform/unix.py b/buildstream/_platform/unix.py
index 6d7b463..e9c62a4 100644
--- a/buildstream/_platform/unix.py
+++ b/buildstream/_platform/unix.py
@@ -20,7 +20,7 @@
 
 import os
 
-from .._artifactcache.tarcache import TarCache
+from .._artifactcache.cascache import CASCache
 from .._exceptions import PlatformError
 from ..sandbox import SandboxChroot
 
@@ -32,7 +32,7 @@
     def __init__(self, context, project):
 
         super().__init__(context, project)
-        self._artifact_cache = TarCache(context)
+        self._artifact_cache = CASCache(context)
 
         # Not necessarily 100% reliable, but we want to fail early.
         if os.geteuid() != 0:
diff --git a/buildstream/_project.py b/buildstream/_project.py
index 5344e95..87f14ee 100644
--- a/buildstream/_project.py
+++ b/buildstream/_project.py
@@ -296,7 +296,7 @@
         #
 
         # Load artifacts pull/push configuration for this project
-        self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config)
+        self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config, self.directory)
 
         # Workspace configurations
         self.workspaces = Workspaces(self)
diff --git a/buildstream/_signals.py b/buildstream/_signals.py
index 9749e0d..06849c9 100644
--- a/buildstream/_signals.py
+++ b/buildstream/_signals.py
@@ -20,6 +20,7 @@
 import os
 import signal
 import sys
+import threading
 import traceback
 from contextlib import contextmanager, ExitStack
 from collections import deque
@@ -72,6 +73,11 @@
 def terminator(terminate_func):
     global terminator_stack                   # pylint: disable=global-statement
 
+    # Signal handling only works in the main thread
+    if threading.current_thread() != threading.main_thread():
+        yield
+        return
+
     outermost = False if terminator_stack else True
 
     terminator_stack.append(terminate_func)
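
The guard added above matters because CPython only allows signal handlers to be installed from the main thread; anywhere else, ``signal.signal()`` raises ``ValueError``, so the ``terminator()`` context manager simply yields without registering anything. A minimal, standalone sketch (not part of this patch) demonstrating that behaviour:

.. code:: python

   import signal
   import threading

   def try_install_handler():
       try:
           # Installing a handler is only permitted in the main thread;
           # elsewhere CPython raises ValueError.
           signal.signal(signal.SIGTERM, lambda signum, frame: None)
           print("handler installed (main thread)")
       except ValueError as e:
           print("cannot install handler here:", e)

   try_install_handler()                             # main thread: succeeds
   t = threading.Thread(target=try_install_handler)  # worker thread: ValueError
   t.start()
   t.join()
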
diff --git a/buildstream/_stream.py b/buildstream/_stream.py
index 1c2494f..4749bf6 100644
--- a/buildstream/_stream.py
+++ b/buildstream/_stream.py
@@ -514,6 +514,13 @@
 
         elements, track_elements = self._load(targets, track_targets)
 
+        nonexisting = []
+        for element in elements:
+            if not self.workspace_exists(element.name):
+                nonexisting.append(element.name)
+        if nonexisting:
+            raise StreamError("Workspace does not exist", detail="\n".join(nonexisting))
+
         # Do the tracking first
         if track_first:
             self._fetch(elements, track_elements=track_elements)
@@ -663,6 +670,37 @@
             self._collect_sources(tempdir, tar_location,
                                   target.normal_name, compression)
 
+    # redirect_element_names()
+    #
+    # Takes a list of element names and returns a list in which each name is
+    # redirected to the name of its source element if the element file exists,
+    # and passed through unchanged if it does not.
+    #
+    # Args:
+    #    elements (list of str): The element names to redirect
+    #
+    # Returns:
+    #    (list of str): The element names after redirecting
+    #
+    def redirect_element_names(self, elements):
+        element_dir = self._project.element_path
+        load_elements = []
+        output_elements = set()
+
+        for e in elements:
+            element_path = os.path.join(element_dir, e)
+            if os.path.exists(element_path):
+                load_elements.append(e)
+            else:
+                output_elements.add(e)
+        if load_elements:
+            loaded_elements, _ = self._load(load_elements, ())
+
+            for e in loaded_elements:
+                output_elements.add(e.name)
+
+        return list(output_elements)
+
     #############################################################
     #                 Scheduler API forwarding                  #
     #############################################################
@@ -803,7 +841,7 @@
         # Now move on to loading primary selection.
         #
         self._pipeline.resolve_elements(elements)
-        selected = self._pipeline.get_selection(elements, selection)
+        selected = self._pipeline.get_selection(elements, selection, silent=False)
         selected = self._pipeline.except_elements(elements,
                                                   selected,
                                                   except_elements)
diff --git a/buildstream/buildstream.proto b/buildstream/buildstream.proto
new file mode 100644
index 0000000..3d2cb42
--- /dev/null
+++ b/buildstream/buildstream.proto
@@ -0,0 +1,78 @@
+syntax = "proto3";
+
+package buildstream;
+
+import "google/api/annotations.proto";
+import "google/devtools/remoteexecution/v1test/remote_execution.proto";
+
+service ArtifactCache {
+  // Retrieve a cached artifact.
+  //
+  // Errors:
+  // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+  rpc GetArtifact(GetArtifactRequest) returns (GetArtifactResponse) {
+    option (google.api.http) = { get: "/v1test/{instance_name=**}/buildstream/artifacts/{key}" };
+  }
+
+  // Associate a cache key with a CAS build artifact.
+  //
+  // Errors:
+  // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+  //   entry to the cache.
+  rpc UpdateArtifact(UpdateArtifactRequest) returns (UpdateArtifactResponse) {
+    option (google.api.http) = { put: "/v1test/{instance_name=**}/buildstream/artifacts/{key}" body: "artifact" };
+  }
+
+  rpc Status(StatusRequest) returns (StatusResponse) {
+    option (google.api.http) = { put: "/v1test/{instance_name=**}/buildstream/artifacts:status" };
+  }
+}
+
+message GetArtifactRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The BuildStream cache key.
+  string key = 2;
+}
+
+message GetArtifactResponse {
+  // The digest of the artifact [Directory][google.devtools.remoteexecution.v1test.Directory].
+  google.devtools.remoteexecution.v1test.Digest artifact = 1;
+}
+
+message UpdateArtifactRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The BuildStream cache key.
+  repeated string keys = 2;
+
+  // The digest of the artifact [Directory][google.devtools.remoteexecution.v1test.Directory]
+  // to store in the cache.
+  google.devtools.remoteexecution.v1test.Digest artifact = 3;
+}
+
+message UpdateArtifactResponse {
+}
+
+message StatusRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+}
+
+message StatusResponse {
+  bool allow_updates = 1;
+}
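
For illustration only, here is a hedged sketch of how a client could talk to this ArtifactCache service through the generated Python stub (shipped below as ``buildstream_pb2_grpc``). The endpoint and cache key are made-up placeholders, and an insecure channel is used purely for brevity; real deployments would use TLS as described in the artifacts documentation further down.

.. code:: python

   import grpc

   from buildstream import buildstream_pb2, buildstream_pb2_grpc

   # Placeholder endpoint and cache key, purely for illustration.
   channel = grpc.insecure_channel('artifacts.example.com:11001')
   stub = buildstream_pb2_grpc.ArtifactCacheStub(channel)

   # Ask whether the remote permits pushing artifacts.
   status = stub.Status(buildstream_pb2.StatusRequest())
   print("push allowed:", status.allow_updates)

   # Look up the CAS Directory digest stored for a BuildStream cache key.
   request = buildstream_pb2.GetArtifactRequest(key='some-cache-key')
   try:
       response = stub.GetArtifact(request)
       print("artifact digest:", response.artifact.hash, response.artifact.size_bytes)
   except grpc.RpcError as e:
       if e.code() == grpc.StatusCode.NOT_FOUND:
           print("artifact not in cache")
       else:
           raise
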
diff --git a/buildstream/buildstream_pb2.py b/buildstream/buildstream_pb2.py
new file mode 100644
index 0000000..8f10201
--- /dev/null
+++ b/buildstream/buildstream_pb2.py
@@ -0,0 +1,325 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: buildstream/buildstream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.devtools.remoteexecution.v1test import remote_execution_pb2 as google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='buildstream/buildstream.proto',
+  package='buildstream',
+  syntax='proto3',
+  serialized_pb=_b('\n\x1d\x62uildstream/buildstream.proto\x12\x0b\x62uildstream\x1a\x1cgoogle/api/annotations.proto\x1a=google/devtools/remoteexecution/v1test/remote_execution.proto\"8\n\x12GetArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"W\n\x13GetArtifactResponse\x12@\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"~\n\x15UpdateArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12@\n\x08\x61rtifact\x18\x03 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"\x18\n\x16UpdateArtifactResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xcd\x03\n\rArtifactCache\x12\x90\x01\n\x0bGetArtifact\x12\x1f.buildstream.GetArtifactRequest\x1a .buildstream.GetArtifactResponse\">\x82\xd3\xe4\x93\x02\x38\x12\x36/v1test/{instance_name=**}/buildstream/artifacts/{key}\x12\xa3\x01\n\x0eUpdateArtifact\x12\".buildstream.UpdateArtifactRequest\x1a#.buildstream.UpdateArtifactResponse\"H\x82\xd3\xe4\x93\x02\x42\x1a\x36/v1test/{instance_name=**}/buildstream/artifacts/{key}:\x08\x61rtifact\x12\x82\x01\n\x06Status\x12\x1a.buildstream.StatusRequest\x1a\x1b.buildstream.StatusResponse\"?\x82\xd3\xe4\x93\x02\x39\x1a\x37/v1test/{instance_name=**}/buildstream/artifacts:statusb\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.DESCRIPTOR,])
+
+
+
+
+_GETARTIFACTREQUEST = _descriptor.Descriptor(
+  name='GetArtifactRequest',
+  full_name='buildstream.GetArtifactRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='buildstream.GetArtifactRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='buildstream.GetArtifactRequest.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=139,
+  serialized_end=195,
+)
+
+
+_GETARTIFACTRESPONSE = _descriptor.Descriptor(
+  name='GetArtifactResponse',
+  full_name='buildstream.GetArtifactResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='artifact', full_name='buildstream.GetArtifactResponse.artifact', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=197,
+  serialized_end=284,
+)
+
+
+_UPDATEARTIFACTREQUEST = _descriptor.Descriptor(
+  name='UpdateArtifactRequest',
+  full_name='buildstream.UpdateArtifactRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='buildstream.UpdateArtifactRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='keys', full_name='buildstream.UpdateArtifactRequest.keys', index=1,
+      number=2, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='artifact', full_name='buildstream.UpdateArtifactRequest.artifact', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=286,
+  serialized_end=412,
+)
+
+
+_UPDATEARTIFACTRESPONSE = _descriptor.Descriptor(
+  name='UpdateArtifactResponse',
+  full_name='buildstream.UpdateArtifactResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=414,
+  serialized_end=438,
+)
+
+
+_STATUSREQUEST = _descriptor.Descriptor(
+  name='StatusRequest',
+  full_name='buildstream.StatusRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='buildstream.StatusRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=440,
+  serialized_end=478,
+)
+
+
+_STATUSRESPONSE = _descriptor.Descriptor(
+  name='StatusResponse',
+  full_name='buildstream.StatusResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='allow_updates', full_name='buildstream.StatusResponse.allow_updates', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=480,
+  serialized_end=519,
+)
+
+_GETARTIFACTRESPONSE.fields_by_name['artifact'].message_type = google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2._DIGEST
+_UPDATEARTIFACTREQUEST.fields_by_name['artifact'].message_type = google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2._DIGEST
+DESCRIPTOR.message_types_by_name['GetArtifactRequest'] = _GETARTIFACTREQUEST
+DESCRIPTOR.message_types_by_name['GetArtifactResponse'] = _GETARTIFACTRESPONSE
+DESCRIPTOR.message_types_by_name['UpdateArtifactRequest'] = _UPDATEARTIFACTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateArtifactResponse'] = _UPDATEARTIFACTRESPONSE
+DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
+DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+GetArtifactRequest = _reflection.GeneratedProtocolMessageType('GetArtifactRequest', (_message.Message,), dict(
+  DESCRIPTOR = _GETARTIFACTREQUEST,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.GetArtifactRequest)
+  ))
+_sym_db.RegisterMessage(GetArtifactRequest)
+
+GetArtifactResponse = _reflection.GeneratedProtocolMessageType('GetArtifactResponse', (_message.Message,), dict(
+  DESCRIPTOR = _GETARTIFACTRESPONSE,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.GetArtifactResponse)
+  ))
+_sym_db.RegisterMessage(GetArtifactResponse)
+
+UpdateArtifactRequest = _reflection.GeneratedProtocolMessageType('UpdateArtifactRequest', (_message.Message,), dict(
+  DESCRIPTOR = _UPDATEARTIFACTREQUEST,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.UpdateArtifactRequest)
+  ))
+_sym_db.RegisterMessage(UpdateArtifactRequest)
+
+UpdateArtifactResponse = _reflection.GeneratedProtocolMessageType('UpdateArtifactResponse', (_message.Message,), dict(
+  DESCRIPTOR = _UPDATEARTIFACTRESPONSE,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.UpdateArtifactResponse)
+  ))
+_sym_db.RegisterMessage(UpdateArtifactResponse)
+
+StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), dict(
+  DESCRIPTOR = _STATUSREQUEST,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.StatusRequest)
+  ))
+_sym_db.RegisterMessage(StatusRequest)
+
+StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
+  DESCRIPTOR = _STATUSRESPONSE,
+  __module__ = 'buildstream.buildstream_pb2'
+  # @@protoc_insertion_point(class_scope:buildstream.StatusResponse)
+  ))
+_sym_db.RegisterMessage(StatusResponse)
+
+
+
+_ARTIFACTCACHE = _descriptor.ServiceDescriptor(
+  name='ArtifactCache',
+  full_name='buildstream.ArtifactCache',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=522,
+  serialized_end=983,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='GetArtifact',
+    full_name='buildstream.ArtifactCache.GetArtifact',
+    index=0,
+    containing_service=None,
+    input_type=_GETARTIFACTREQUEST,
+    output_type=_GETARTIFACTRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0028\0226/v1test/{instance_name=**}/buildstream/artifacts/{key}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='UpdateArtifact',
+    full_name='buildstream.ArtifactCache.UpdateArtifact',
+    index=1,
+    containing_service=None,
+    input_type=_UPDATEARTIFACTREQUEST,
+    output_type=_UPDATEARTIFACTRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002B\0326/v1test/{instance_name=**}/buildstream/artifacts/{key}:\010artifact')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Status',
+    full_name='buildstream.ArtifactCache.Status',
+    index=2,
+    containing_service=None,
+    input_type=_STATUSREQUEST,
+    output_type=_STATUSRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0029\0327/v1test/{instance_name=**}/buildstream/artifacts:status')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_ARTIFACTCACHE)
+
+DESCRIPTOR.services_by_name['ArtifactCache'] = _ARTIFACTCACHE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/buildstream_pb2_grpc.py b/buildstream/buildstream_pb2_grpc.py
new file mode 100644
index 0000000..21d914a
--- /dev/null
+++ b/buildstream/buildstream_pb2_grpc.py
@@ -0,0 +1,87 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream import buildstream_pb2 as buildstream_dot_buildstream__pb2
+
+
+class ArtifactCacheStub(object):
+  # missing associated documentation comment in .proto file
+  pass
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.GetArtifact = channel.unary_unary(
+        '/buildstream.ArtifactCache/GetArtifact',
+        request_serializer=buildstream_dot_buildstream__pb2.GetArtifactRequest.SerializeToString,
+        response_deserializer=buildstream_dot_buildstream__pb2.GetArtifactResponse.FromString,
+        )
+    self.UpdateArtifact = channel.unary_unary(
+        '/buildstream.ArtifactCache/UpdateArtifact',
+        request_serializer=buildstream_dot_buildstream__pb2.UpdateArtifactRequest.SerializeToString,
+        response_deserializer=buildstream_dot_buildstream__pb2.UpdateArtifactResponse.FromString,
+        )
+    self.Status = channel.unary_unary(
+        '/buildstream.ArtifactCache/Status',
+        request_serializer=buildstream_dot_buildstream__pb2.StatusRequest.SerializeToString,
+        response_deserializer=buildstream_dot_buildstream__pb2.StatusResponse.FromString,
+        )
+
+
+class ArtifactCacheServicer(object):
+  # missing associated documentation comment in .proto file
+  pass
+
+  def GetArtifact(self, request, context):
+    """Retrieve a cached artifact.
+
+    Errors:
+    * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def UpdateArtifact(self, request, context):
+    """Associate a cache key with a CAS build artifact.
+
+    Errors:
+    * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+    entry to the cache.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Status(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_ArtifactCacheServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'GetArtifact': grpc.unary_unary_rpc_method_handler(
+          servicer.GetArtifact,
+          request_deserializer=buildstream_dot_buildstream__pb2.GetArtifactRequest.FromString,
+          response_serializer=buildstream_dot_buildstream__pb2.GetArtifactResponse.SerializeToString,
+      ),
+      'UpdateArtifact': grpc.unary_unary_rpc_method_handler(
+          servicer.UpdateArtifact,
+          request_deserializer=buildstream_dot_buildstream__pb2.UpdateArtifactRequest.FromString,
+          response_serializer=buildstream_dot_buildstream__pb2.UpdateArtifactResponse.SerializeToString,
+      ),
+      'Status': grpc.unary_unary_rpc_method_handler(
+          servicer.Status,
+          request_deserializer=buildstream_dot_buildstream__pb2.StatusRequest.FromString,
+          response_serializer=buildstream_dot_buildstream__pb2.StatusResponse.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'buildstream.ArtifactCache', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
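
As a rough sketch of the server side, the generated ``add_ArtifactCacheServicer_to_server()`` helper above registers a servicer on a ``grpc.server``. The real server lives in ``buildstream._artifactcache.casserver`` (see the ``bst-artifact-server`` documentation below); the servicer here is a deliberately trivial stand-in that advertises an empty, pull-only cache.

.. code:: python

   import time
   from concurrent import futures

   import grpc

   from buildstream import buildstream_pb2, buildstream_pb2_grpc

   class DummyArtifactCache(buildstream_pb2_grpc.ArtifactCacheServicer):
       # Illustrative stand-in only: an empty cache that refuses pushes.
       def Status(self, request, context):
           return buildstream_pb2.StatusResponse(allow_updates=False)

       def GetArtifact(self, request, context):
           context.set_code(grpc.StatusCode.NOT_FOUND)
           context.set_details("artifact '{}' not cached".format(request.key))
           return buildstream_pb2.GetArtifactResponse()

   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
   buildstream_pb2_grpc.add_ArtifactCacheServicer_to_server(DummyArtifactCache(), server)
   server.add_insecure_port('[::]:11001')
   server.start()
   try:
       while True:
           time.sleep(3600)
   except KeyboardInterrupt:
       server.stop(0)
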
diff --git a/buildstream/element.py b/buildstream/element.py
index ec80693..a146675 100644
--- a/buildstream/element.py
+++ b/buildstream/element.py
@@ -1858,6 +1858,18 @@
             for dep in self.dependencies(Scope.ALL):
                 dep._set_log_handle(logfile, False)
 
+    # Returns the element from whose sources this element is ultimately derived.
+    #
+    # This is intended to be used to redirect commands that operate on an
+    # element to the element from whose sources it is ultimately derived.
+    #
+    # For example, element A is a build element depending on source foo,
+    # element B is a filter element that depends on element A. The source
+    # element of B is A, since B depends on A, and A has sources.
+    #
+    def _get_source_element(self):
+        return self
+
     #############################################################
     #                   Private Local Methods                   #
     #############################################################
@@ -1872,17 +1884,11 @@
         if self.__tracking_scheduled:
             return
 
-        # Determine overall consistency of the element
-        consistency = Consistency.CACHED
-        for source in self.__sources:
-            source._update_state()
-            source_consistency = source._get_consistency()
-            consistency = min(consistency, source_consistency)
-        self.__consistency = consistency
+        self.__consistency = Consistency.CACHED
+        workspace = self._get_workspace()
 
         # Special case for workspaces
-        workspace = self._get_workspace()
-        if workspace and self.__consistency > Consistency.INCONSISTENT:
+        if workspace:
 
             # A workspace is considered inconsistent in the case
             # that it's directory went missing
@@ -1890,6 +1896,13 @@
             fullpath = workspace.get_absolute_path()
             if not os.path.exists(fullpath):
                 self.__consistency = Consistency.INCONSISTENT
+        else:
+
+            # Determine overall consistency of the element
+            for source in self.__sources:
+                source._update_state()
+                source_consistency = source._get_consistency()
+                self.__consistency = min(self.__consistency, source_consistency)
 
     # __calculate_cache_key():
     #
diff --git a/buildstream/plugins/elements/filter.py b/buildstream/plugins/elements/filter.py
index 1c1a59b..8ce16ff 100644
--- a/buildstream/plugins/elements/filter.py
+++ b/buildstream/plugins/elements/filter.py
@@ -32,6 +32,10 @@
 runtime dependencies forward from this filter element onto its reverse
 dependencies.
 
+When workspaces are opened, closed or reset on this element, or this
+element is tracked, instead of erroring due to a lack of sources, this
+element will transparently pass on the command to its sole build-dependency.
+
 The default configuration and possible options are as such:
   .. literalinclude:: ../../../buildstream/plugins/elements/filter.yaml
      :language: yaml
@@ -103,6 +107,13 @@
                                    exclude=self.exclude, orphans=self.include_orphans)
         return ""
 
+    def _get_source_element(self):
+        # Filter elements act as proxies for their sole build-dependency
+        build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
+        assert len(build_deps) == 1
+        output_elm = build_deps[0]._get_source_element()
+        return output_elm
+
 
 def setup():
     return FilterElement
diff --git a/buildstream/plugins/elements/junction.py b/buildstream/plugins/elements/junction.py
index df7faf3..81fd574 100644
--- a/buildstream/plugins/elements/junction.py
+++ b/buildstream/plugins/elements/junction.py
@@ -70,6 +70,22 @@
 they cannot be built or staged. It also means that another element cannot
 depend on a junction element itself.
 
+.. note::
+
+   BuildStream does not implicitly track junction elements. This means
+   that if we were to invoke: `bst build --track-all ELEMENT` on an element
+   which uses a junction element, the ref of the junction element
+   will not automatically be updated if a more recent version exists.
+
+   Therefore, if you require the most up-to-date version of a subproject,
+   you must explicitly track the junction element by invoking:
+   `bst track JUNCTION_ELEMENT`.
+
+   Furthermore, elements within the subproject are also not tracked by default.
+   For this, we must specify the `--track-cross-junctions` option. This option
+   must be preceded by `--track ELEMENT` or `--track-all`.
+
+
 Sources
 -------
 ``bst show`` does not implicitly fetch junction sources if they haven't been
diff --git a/doc/Makefile b/doc/Makefile
index c894a13..557dc55 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -65,7 +65,7 @@
 templates:
 	mkdir -p source/elements
 	mkdir -p source/sources
-	$(SPHINXAPIDOC) --force --separate --module-first --no-headings -o source $(CURDIR)/../buildstream
+	$(SPHINXAPIDOC) --force --separate --module-first --no-headings -o source $(CURDIR)/../buildstream $(CURDIR)/../buildstream/*_pb2*.py
 	$(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/elements,elements)
 	$(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/sources,sources)
 
diff --git a/doc/source/artifacts.rst b/doc/source/artifacts.rst
index e0808e8..8e533c8 100644
--- a/doc/source/artifacts.rst
+++ b/doc/source/artifacts.rst
@@ -25,12 +25,11 @@
 define its own cache, it may be useful to have a local mirror of its cache, or
 you may have a reason to share artifacts privately.
 
-Remote artifact caches are identified by their URL. There are currently three
+Remote artifact caches are identified by their URL. There are currently two
 supported protocols:
 
-* ``http``: Pull-only access, without transport-layer security
-* ``https``: Pull-only access, with transport-layer security
-* ``ssh``: Push access, authenticated via SSH
+* ``http``: Pull and push access, without transport-layer security
+* ``https``: Pull and push access, with transport-layer security
 
 BuildStream allows you to configure as many caches as you like, and will query
 them in a specific order:
@@ -54,17 +53,23 @@
 
 Setting up the user
 ~~~~~~~~~~~~~~~~~~~
-A specific user is not needed for downloading artifacts, but since we
-are going to use ssh to upload the artifacts, you will want a dedicated
-user to own the artifact cache.
+A specific user is not needed; however, a dedicated user to own the
+artifact cache is recommended.
 
 .. code:: bash
 
    useradd artifacts
 
+The recommended approach is to run two instances of the artifact server on different ports.
+One instance has push disabled and doesn't require client authentication.
+The other instance has push enabled and requires client authentication.
 
-Installing the receiver
-~~~~~~~~~~~~~~~~~~~~~~~
+Alternatively, you can set up a reverse proxy and handle authentication
+and authorization there.
+
+
+Installing the server
+~~~~~~~~~~~~~~~~~~~~~
 You will also need to install BuildStream on the artifact server in order
 to receive uploaded artifacts over ssh. Follow the instructions for installing
 BuildStream :ref:`here <install>`
@@ -74,10 +79,10 @@
 checkout directory.
 
 Otherwise, some tinkering is required to ensure BuildStream is available
-in ``PATH`` when it's companion ``bst-artifact-receive`` program is run
+in ``PATH`` when its companion ``bst-artifact-server`` program is run
 remotely.
 
-You can install only the artifact receiver companion program without
+You can install only the artifact server companion program without
 requiring BuildStream's more exigent dependencies by setting the
 ``BST_ARTIFACTS_ONLY`` environment variable at install time, like so:
 
@@ -86,81 +91,57 @@
     BST_ARTIFACTS_ONLY=1 pip3 install .
 
 
-Initializing the cache
-~~~~~~~~~~~~~~~~~~~~~~
-Now that you have a dedicated user to own the artifact cache, change
-to that user, and create the artifact cache ostree repository directly
-in it's home directory as such:
+Command reference
+~~~~~~~~~~~~~~~~~
+
+.. click:: buildstream._artifactcache.casserver:server_main
+   :prog: bst-artifact-server
+
+
+Key pair for the server
+~~~~~~~~~~~~~~~~~~~~~~~
+
+For TLS you need a key pair for the server. The following example creates
+a self-signed certificate and key, which requires clients to have a copy of
+the server certificate (e.g., in the project directory).
+You can also use a key pair obtained from a trusted certificate authority instead.
 
 .. code:: bash
 
-   ostree init --mode archive-z2 --repo artifacts
+    openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=artifacts.com" -out server.crt -keyout server.key
 
-This should result in an artifact cache residing at the path ``/home/artifacts/artifacts``
+
+Authenticating users
+~~~~~~~~~~~~~~~~~~~~
+To give a user permission to upload artifacts,
+create a TLS key pair on the client.
+
+.. code:: bash
+
+    openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -out client.crt -keyout client.key
+
+Copy the public client certificate ``client.crt`` to the server and then append it
+to the list of authorized client certificates, like so:
+
+.. code:: bash
+
+   cat client.crt >> /home/artifacts/authorized.crt
 
 
 Serve the cache over https
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
-This part should be pretty simple, you can do this with various technologies, all
-we really require is that you make the artifacts available over https (you can use
-http but until we figure out using gpg signed ostree commits for the artifacts, it's
-better to serve over https).
 
-Here is an example, note that you must have a certificate **pem** file to use, as
-is the case for hosting anything over https.
-
-.. code:: python
-
-   import http.server, ssl, os
-
-   # Maybe use a custom port, especially if you are serving
-   # other web pages on the same computer
-   server_address = ('localhost', 443)
-   artifact_path = '/home/artifacts'
-
-   # The http server will serve from it's current
-   # working directory
-   os.chdir(artifact_path)
-
-   # Create Server
-   httpd = http.server.HTTPServer(
-       server_address,
-       http.server.SimpleHTTPRequestHandler)
-
-   # Add ssl
-   httpd.socket = ssl.wrap_socket(httpd.socket,
-                                  server_side=True,
-                                  certfile='localhost.pem',
-                                  ssl_version=ssl.PROTOCOL_TLSv1)
-
-   # Run it
-   httpd.serve_forever()
-
-
-Configure and run sshd
-~~~~~~~~~~~~~~~~~~~~~~
-You will need to run the sshd service to allow uploading artifacts.
-
-For this you will want something like the following in your ``/etc/ssh/sshd_config``
+Public instance without push:
 
 .. code:: bash
 
-   # Allow ssh logins/commands with the artifacts user
-   AllowUsers artifacts
+    bst-artifact-server --port 11001 --server-key server.key --server-cert server.crt /home/artifacts/artifacts
 
-   # Some specifics for the artifacts user
-   Match user artifacts
+Instance with push and requiring client authentication:
 
-        # Dont allow password authentication for artifacts user
-	#
-        PasswordAuthentication no
+.. code:: bash
 
-        # Also lets dedicate this login for only running the
-	# bst-artifact-receive program, note that the full
-	# command must be specified here; 'artifacts' is
-	# the HOME relative path to the artifact cache.
-	# The exact pull URL must also be specified.
-        ForceCommand bst-artifact-receive --pull-url https://example.com/artifacts --verbose artifacts
+    bst-artifact-server --port 11002 --server-key server.key --server-cert server.crt --client-certs authorized.crt --enable-push /home/artifacts/artifacts
 
 
 User configuration
@@ -172,6 +153,8 @@
 host is reachable on the internet as ``artifacts.com`` (for example),
 then a user can use the following user configuration:
 
+Pull-only:
+
 .. code:: yaml
 
    #
@@ -179,22 +162,27 @@
    #
    artifacts:
 
-     url: https://artifacts.com/artifacts
+     url: https://artifacts.com:11001
 
-     # Alternative form if you have push access to the cache
-     #url: ssh://artifacts@artifacts.com:22200/artifacts
-     #push: true
+     # Optional server certificate if not trusted by system root certificates
+     server-cert: server.crt
 
+Pull and push:
 
-Authenticating users
-~~~~~~~~~~~~~~~~~~~~
-In order to give permission to a given user to upload
-artifacts, simply use the regular ``ssh`` method.
+.. code:: yaml
 
-First obtain the user's public ssh key, and add it
-to the authorized keys, like so:
+   #
+   #    Artifacts
+   #
+   artifacts:
 
-.. code:: bash
+     url: https://artifacts.com:11002
 
-   cat user_id_rsa.pub >> /home/artifacts/.ssh/authorized_keys
+     # Optional server certificate if not trusted by system root certificates
+     server-cert: server.crt
 
+     # Optional client key pair for authentication
+     client-key: client.key
+     client-cert: client.crt
+
+     push: true
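
The ``server-cert``, ``client-key`` and ``client-cert`` options above correspond to an ordinary TLS-authenticated gRPC channel on the client side. As a hedged sketch (the file names are the placeholders from this page, not a fixed BuildStream API), such a channel could be constructed like this:

.. code:: python

   import grpc

   # Placeholder paths matching the configuration examples above.
   with open('server.crt', 'rb') as f:
       server_cert = f.read()
   with open('client.key', 'rb') as f:
       client_key = f.read()
   with open('client.crt', 'rb') as f:
       client_cert = f.read()

   credentials = grpc.ssl_channel_credentials(root_certificates=server_cert,
                                              private_key=client_key,
                                              certificate_chain=client_cert)
   channel = grpc.secure_channel('artifacts.com:11002', credentials)
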
diff --git a/google/__init__.py b/google/__init__.py
new file mode 100644
index 0000000..3ad9513
--- /dev/null
+++ b/google/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/google/api/__init__.py b/google/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/api/__init__.py
diff --git a/google/api/annotations.proto b/google/api/annotations.proto
new file mode 100644
index 0000000..85c361b
--- /dev/null
+++ b/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
diff --git a/google/api/annotations_pb2.py b/google/api/annotations_pb2.py
new file mode 100644
index 0000000..d81bbc5
--- /dev/null
+++ b/google/api/annotations_pb2.py
@@ -0,0 +1,46 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/api/annotations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import http_pb2 as google_dot_api_dot_http__pb2
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/api/annotations.proto',
+  package='google.api',
+  syntax='proto3',
+  serialized_pb=_b('\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_http__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
+
+
+HTTP_FIELD_NUMBER = 72295728
+http = _descriptor.FieldDescriptor(
+  name='http', full_name='google.api.http', index=0,
+  number=72295728, type=11, cpp_type=10, label=1,
+  has_default_value=False, default_value=None,
+  message_type=None, enum_type=None, containing_type=None,
+  is_extension=True, extension_scope=None,
+  options=None, file=DESCRIPTOR)
+
+DESCRIPTOR.extensions_by_name['http'] = http
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+http.message_type = google_dot_api_dot_http__pb2._HTTPRULE
+google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
diff --git a/google/api/annotations_pb2_grpc.py b/google/api/annotations_pb2_grpc.py
new file mode 100644
index 0000000..a894352
--- /dev/null
+++ b/google/api/annotations_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/google/api/http.proto b/google/api/http.proto
new file mode 100644
index 0000000..78d515d
--- /dev/null
+++ b/google/api/http.proto
@@ -0,0 +1,313 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+  // A list of HTTP configuration rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated HttpRule rules = 1;
+
+  // When set to true, URL path parmeters will be fully URI-decoded except in
+  // cases of single segment matches in reserved expansion, where "%2F" will be
+  // left encoded.
+  //
+  // The default behavior is to not decode RFC 6570 reserved characters in multi
+  // segment matches.
+  bool fully_decode_reserved_expansion = 2;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as an
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind.  The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // mapped to the URL
+//       SubMessage sub = 2;    // `sub.subfield` is url-mapped
+//     }
+//     message Message {
+//       string text = 1; // content of the resource
+//     }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+//     http:
+//       rules:
+//         - selector: <proto_package_name>.Messaging.GetMessage
+//           get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidrectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo`  | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http).get = "/v1/messages/{message_id}";
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // mapped to the URL
+//       int64 revision = 2;    // becomes a parameter
+//       SubMessage sub = 3;    // `sub.subfield` becomes a parameter
+//     }
+//
+//
+// This enables a HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           put: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body.  This enables the following alternative definition of
+// the update method:
+//
+//     service Messaging {
+//       rpc UpdateMessage(Message) returns (Message) {
+//         option (google.api.http) = {
+//           put: "/v1/messages/{message_id}"
+//           body: "*"
+//         };
+//       }
+//     }
+//     message Message {
+//       string message_id = 1;
+//       string text = 2;
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice of
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get: "/v1/messages/{message_id}"
+//           additional_bindings {
+//             get: "/v1/users/{user_id}/messages/{message_id}"
+//           }
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string message_id = 1;
+//       string user_id = 2;
+//     }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+message HttpRule {
+  // Selects methods to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Determines the URL pattern is matched by this rules. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Used for listing and getting information about resources.
+    string get = 2;
+
+    // Used for updating a resource.
+    string put = 3;
+
+    // Used for creating a resource.
+    string post = 4;
+
+    // Used for deleting a resource.
+    string delete = 5;
+
+    // Used for updating a resource.
+    string patch = 6;
+
+    // The custom pattern is used for specifying an HTTP method that is not
+    // included in the `pattern` field, such as HEAD, or "*" to leave the
+    // HTTP method unspecified for this rule. The wild-card rule is useful
+    // for services that provide content to Web (HTML) clients.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP body, or
+  // `*` for mapping all fields not captured by the path pattern to the HTTP
+  // body. NOTE: the referred field must not be a repeated field and must be
+  // present at the top-level of request message type.
+  string body = 7;
+
+  // Additional HTTP bindings for the selector. Nested bindings must
+  // not contain an `additional_bindings` field themselves (that is,
+  // the nesting may only be one level deep).
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
diff --git a/google/api/http_pb2.py b/google/api/http_pb2.py
new file mode 100644
index 0000000..aad9ddb
--- /dev/null
+++ b/google/api/http_pb2.py
@@ -0,0 +1,243 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/api/http.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/api/http.proto',
+  package='google.api',
+  syntax='proto3',
+  serialized_pb=_b('\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
+)
+
+
+
+
+_HTTP = _descriptor.Descriptor(
+  name='Http',
+  full_name='google.api.Http',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='rules', full_name='google.api.Http.rules', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fully_decode_reserved_expansion', full_name='google.api.Http.fully_decode_reserved_expansion', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=37,
+  serialized_end=121,
+)
+
+
+_HTTPRULE = _descriptor.Descriptor(
+  name='HttpRule',
+  full_name='google.api.HttpRule',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='selector', full_name='google.api.HttpRule.selector', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='get', full_name='google.api.HttpRule.get', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='put', full_name='google.api.HttpRule.put', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='post', full_name='google.api.HttpRule.post', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='delete', full_name='google.api.HttpRule.delete', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='patch', full_name='google.api.HttpRule.patch', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='custom', full_name='google.api.HttpRule.custom', index=6,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='body', full_name='google.api.HttpRule.body', index=7,
+      number=7, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=8,
+      number=11, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='pattern', full_name='google.api.HttpRule.pattern',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=124,
+  serialized_end=358,
+)
+
+
+_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
+  name='CustomHttpPattern',
+  full_name='google.api.CustomHttpPattern',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='path', full_name='google.api.CustomHttpPattern.path', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=360,
+  serialized_end=407,
+)
+
+_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
+_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
+_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['get'])
+_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['put'])
+_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['post'])
+_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['delete'])
+_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['patch'])
+_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+  _HTTPRULE.fields_by_name['custom'])
+_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+DESCRIPTOR.message_types_by_name['Http'] = _HTTP
+DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
+DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), dict(
+  DESCRIPTOR = _HTTP,
+  __module__ = 'google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.Http)
+  ))
+_sym_db.RegisterMessage(Http)
+
+HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), dict(
+  DESCRIPTOR = _HTTPRULE,
+  __module__ = 'google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.HttpRule)
+  ))
+_sym_db.RegisterMessage(HttpRule)
+
+CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), dict(
+  DESCRIPTOR = _CUSTOMHTTPPATTERN,
+  __module__ = 'google.api.http_pb2'
+  # @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
+  ))
+_sym_db.RegisterMessage(CustomHttpPattern)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
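The generated `http_pb2` module above registers the `Http`, `HttpRule` and `CustomHttpPattern` message classes. A minimal sketch of what using one of them looks like (the selector and URL template below are illustrative placeholders, not taken from this patch):

```python
# Sketch only: builds an HttpRule from the classes generated above.
# The selector and URL template are illustrative placeholders.
from google.api import http_pb2

rule = http_pb2.HttpRule(
    selector='google.bytestream.ByteStream.Read',  # hypothetical fully-qualified RPC name
    get='/v1/{resource_name=**}',                  # sets one member of the 'pattern' oneof
)
print(rule.WhichOneof('pattern'))  # -> 'get'
```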
diff --git a/google/api/http_pb2_grpc.py b/google/api/http_pb2_grpc.py
new file mode 100644
index 0000000..a894352
--- /dev/null
+++ b/google/api/http_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/google/bytestream/__init__.py b/google/bytestream/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/bytestream/__init__.py
diff --git a/google/bytestream/bytestream.proto b/google/bytestream/bytestream.proto
new file mode 100644
index 0000000..85e386f
--- /dev/null
+++ b/google/bytestream/bytestream.proto
@@ -0,0 +1,181 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bytestream;
+
+import "google/api/annotations.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream";
+option java_outer_classname = "ByteStreamProto";
+option java_package = "com.google.bytestream";
+
+
+// #### Introduction
+//
+// The Byte Stream API enables a client to read and write a stream of bytes to
+// and from a resource. Resources have names, and these names are supplied in
+// the API calls below to identify the resource that is being read from or
+// written to.
+//
+// All implementations of the Byte Stream API export the interface defined here:
+//
+// * `Read()`: Reads the contents of a resource.
+//
+// * `Write()`: Writes the contents of a resource. The client can call `Write()`
+//   multiple times with the same resource and can check the status of the write
+//   by calling `QueryWriteStatus()`.
+//
+// #### Service parameters and metadata
+//
+// The ByteStream API provides no direct way to access/modify any metadata
+// associated with the resource.
+//
+// #### Errors
+//
+// The errors returned by the service are in the Google canonical error space.
+service ByteStream {
+  // `Read()` is used to retrieve the contents of a resource as a sequence
+  // of bytes. The bytes are returned in a sequence of responses, and the
+  // responses are delivered as the results of a server-side streaming RPC.
+  rpc Read(ReadRequest) returns (stream ReadResponse);
+
+  // `Write()` is used to send the contents of a resource as a sequence of
+  // bytes. The bytes are sent in a sequence of request protos of a client-side
+  // streaming RPC.
+  //
+  // A `Write()` action is resumable. If there is an error or the connection is
+  // broken during the `Write()`, the client should check the status of the
+  // `Write()` by calling `QueryWriteStatus()` and continue writing from the
+  // returned `committed_size`. This may be less than the amount of data the
+  // client previously sent.
+  //
+  // Calling `Write()` on a resource name that was previously written and
+  // finalized could cause an error, depending on whether the underlying service
+  // allows over-writing of previously written resources.
+  //
+  // When the client closes the request channel, the service will respond with
+  // a `WriteResponse`. The service will not view the resource as `complete`
+  // until the client has sent a `WriteRequest` with `finish_write` set to
+  // `true`. Sending any requests on a stream after sending a request with
+  // `finish_write` set to `true` will cause an error. The client **should**
+  // check the `WriteResponse` it receives to determine how much data the
+  // service was able to commit and whether the service views the resource as
+  // `complete` or not.
+  rpc Write(stream WriteRequest) returns (WriteResponse);
+
+  // `QueryWriteStatus()` is used to find the `committed_size` for a resource
+  // that is being written, which can then be used as the `write_offset` for
+  // the next `Write()` call.
+  //
+  // If the resource does not exist (i.e., the resource has been deleted, or the
+  // first `Write()` has not yet reached the service), this method returns the
+  // error `NOT_FOUND`.
+  //
+  // The client **may** call `QueryWriteStatus()` at any time to determine how
+  // much data has been processed for this resource. This is useful if the
+  // client is buffering data and needs to know which data can be safely
+  // evicted. For any sequence of `QueryWriteStatus()` calls for a given
+  // resource name, the sequence of returned `committed_size` values will be
+  // non-decreasing.
+  rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse);
+}
+
+// Request object for ByteStream.Read.
+message ReadRequest {
+  // The name of the resource to read.
+  string resource_name = 1;
+
+  // The offset for the first byte to return in the read, relative to the start
+  // of the resource.
+  //
+  // A `read_offset` that is negative or greater than the size of the resource
+  // will cause an `OUT_OF_RANGE` error.
+  int64 read_offset = 2;
+
+  // The maximum number of `data` bytes the server is allowed to return in the
+  // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that
+  // there is no limit, and a negative `read_limit` will cause an error.
+  //
+  // If the stream returns fewer bytes than allowed by the `read_limit` and no
+  // error occurred, the stream includes all data from the `read_offset` to the
+  // end of the resource.
+  int64 read_limit = 3;
+}
+
+// Response object for ByteStream.Read.
+message ReadResponse {
+  // A portion of the data for the resource. The service **may** leave `data`
+  // empty for any given `ReadResponse`. This enables the service to inform the
+  // client that the request is still live while it is running an operation to
+  // generate more data.
+  bytes data = 10;
+}
+
+// Request object for ByteStream.Write.
+message WriteRequest {
+  // The name of the resource to write. This **must** be set on the first
+  // `WriteRequest` of each `Write()` action. If it is set on subsequent calls,
+  // it **must** match the value of the first request.
+  string resource_name = 1;
+
+  // The offset from the beginning of the resource at which the data should be
+  // written. It is required on all `WriteRequest`s.
+  //
+  // In the first `WriteRequest` of a `Write()` action, it indicates
+  // the initial offset for the `Write()` call. The value **must** be equal to
+  // the `committed_size` that a call to `QueryWriteStatus()` would return.
+  //
+  // On subsequent calls, this value **must** be set and **must** be equal to
+  // the sum of the first `write_offset` and the sizes of all `data` bundles
+  // sent previously on this stream.
+  //
+  // An incorrect value will cause an error.
+  int64 write_offset = 2;
+
+  // If `true`, this indicates that the write is complete. Sending any
+  // `WriteRequest`s subsequent to one in which `finish_write` is `true` will
+  // cause an error.
+  bool finish_write = 3;
+
+  // A portion of the data for the resource. The client **may** leave `data`
+  // empty for any given `WriteRequest`. This enables the client to inform the
+  // service that the request is still live while it is running an operation to
+  // generate more data.
+  bytes data = 10;
+}
+
+// Response object for ByteStream.Write.
+message WriteResponse {
+  // The number of bytes that have been processed for the given resource.
+  int64 committed_size = 1;
+}
+
+// Request object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusRequest {
+  // The name of the resource whose write status is being requested.
+  string resource_name = 1;
+}
+
+// Response object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusResponse {
+  // The number of bytes that have been processed for the given resource.
+  int64 committed_size = 1;
+
+  // `complete` is `true` only if the client has sent a `WriteRequest` with
+  // `finish_write` set to true, and the server has processed that request.
+  bool complete = 2;
+}
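The `Write()` flow described above maps onto a sequence of `WriteRequest` messages: the first carries `resource_name` and the initial `write_offset` (equal to the `committed_size` reported by `QueryWriteStatus()`), later requests carry further `data` at increasing offsets, and the last one sets `finish_write`. A rough sketch of building that sequence with the `bytestream_pb2` module generated in the next file (resource name, chunk size and payload handling are illustrative, not part of this patch):

```python
# Rough sketch of the WriteRequest sequence for one resumable Write() call.
# Assumes the bytestream_pb2 module generated below; chunking is illustrative.
from google.bytestream import bytestream_pb2

def write_requests(resource_name, data, committed_size=0, chunk_size=64 * 1024):
    """Yield WriteRequests for `data`, resuming after `committed_size` bytes."""
    remaining = data[committed_size:]
    # Always yield at least one request so finish_write is delivered.
    chunks = [remaining[i:i + chunk_size]
              for i in range(0, len(remaining), chunk_size)] or [b'']
    offset = committed_size
    for n, chunk in enumerate(chunks):
        yield bytestream_pb2.WriteRequest(
            resource_name=resource_name if n == 0 else '',  # required only on the first request
            write_offset=offset,
            data=chunk,
            finish_write=(n == len(chunks) - 1),            # set only on the last request
        )
        offset += len(chunk)
```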
diff --git a/google/bytestream/bytestream_pb2.py b/google/bytestream/bytestream_pb2.py
new file mode 100644
index 0000000..a213f40
--- /dev/null
+++ b/google/bytestream/bytestream_pb2.py
@@ -0,0 +1,353 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/bytestream/bytestream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/bytestream/bytestream.proto',
+  package='google.bytestream',
+  syntax='proto3',
+  serialized_pb=_b('\n\"google/bytestream/bytestream.proto\x12\x11google.bytestream\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"M\n\x0bReadRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x13\n\x0bread_offset\x18\x02 \x01(\x03\x12\x12\n\nread_limit\x18\x03 \x01(\x03\"\x1c\n\x0cReadResponse\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"_\n\x0cWriteRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x14\n\x0cwrite_offset\x18\x02 \x01(\x03\x12\x14\n\x0c\x66inish_write\x18\x03 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"\'\n\rWriteResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\"0\n\x17QueryWriteStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"D\n\x18QueryWriteStatusResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x32\x92\x02\n\nByteStream\x12I\n\x04Read\x12\x1e.google.bytestream.ReadRequest\x1a\x1f.google.bytestream.ReadResponse0\x01\x12L\n\x05Write\x12\x1f.google.bytestream.WriteRequest\x1a .google.bytestream.WriteResponse(\x01\x12k\n\x10QueryWriteStatus\x12*.google.bytestream.QueryWriteStatusRequest\x1a+.google.bytestream.QueryWriteStatusResponseBe\n\x15\x63om.google.bytestreamB\x0f\x42yteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestreamb\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
+
+
+
+
+_READREQUEST = _descriptor.Descriptor(
+  name='ReadRequest',
+  full_name='google.bytestream.ReadRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='resource_name', full_name='google.bytestream.ReadRequest.resource_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='read_offset', full_name='google.bytestream.ReadRequest.read_offset', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='read_limit', full_name='google.bytestream.ReadRequest.read_limit', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=119,
+  serialized_end=196,
+)
+
+
+_READRESPONSE = _descriptor.Descriptor(
+  name='ReadResponse',
+  full_name='google.bytestream.ReadResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='data', full_name='google.bytestream.ReadResponse.data', index=0,
+      number=10, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=198,
+  serialized_end=226,
+)
+
+
+_WRITEREQUEST = _descriptor.Descriptor(
+  name='WriteRequest',
+  full_name='google.bytestream.WriteRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='resource_name', full_name='google.bytestream.WriteRequest.resource_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='write_offset', full_name='google.bytestream.WriteRequest.write_offset', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='finish_write', full_name='google.bytestream.WriteRequest.finish_write', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='google.bytestream.WriteRequest.data', index=3,
+      number=10, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=228,
+  serialized_end=323,
+)
+
+
+_WRITERESPONSE = _descriptor.Descriptor(
+  name='WriteResponse',
+  full_name='google.bytestream.WriteResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='committed_size', full_name='google.bytestream.WriteResponse.committed_size', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=325,
+  serialized_end=364,
+)
+
+
+_QUERYWRITESTATUSREQUEST = _descriptor.Descriptor(
+  name='QueryWriteStatusRequest',
+  full_name='google.bytestream.QueryWriteStatusRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='resource_name', full_name='google.bytestream.QueryWriteStatusRequest.resource_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=366,
+  serialized_end=414,
+)
+
+
+_QUERYWRITESTATUSRESPONSE = _descriptor.Descriptor(
+  name='QueryWriteStatusResponse',
+  full_name='google.bytestream.QueryWriteStatusResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='committed_size', full_name='google.bytestream.QueryWriteStatusResponse.committed_size', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='complete', full_name='google.bytestream.QueryWriteStatusResponse.complete', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=416,
+  serialized_end=484,
+)
+
+DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST
+DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE
+DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST
+DESCRIPTOR.message_types_by_name['WriteResponse'] = _WRITERESPONSE
+DESCRIPTOR.message_types_by_name['QueryWriteStatusRequest'] = _QUERYWRITESTATUSREQUEST
+DESCRIPTOR.message_types_by_name['QueryWriteStatusResponse'] = _QUERYWRITESTATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict(
+  DESCRIPTOR = _READREQUEST,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.ReadRequest)
+  ))
+_sym_db.RegisterMessage(ReadRequest)
+
+ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict(
+  DESCRIPTOR = _READRESPONSE,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.ReadResponse)
+  ))
+_sym_db.RegisterMessage(ReadResponse)
+
+WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict(
+  DESCRIPTOR = _WRITEREQUEST,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.WriteRequest)
+  ))
+_sym_db.RegisterMessage(WriteRequest)
+
+WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), dict(
+  DESCRIPTOR = _WRITERESPONSE,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.WriteResponse)
+  ))
+_sym_db.RegisterMessage(WriteResponse)
+
+QueryWriteStatusRequest = _reflection.GeneratedProtocolMessageType('QueryWriteStatusRequest', (_message.Message,), dict(
+  DESCRIPTOR = _QUERYWRITESTATUSREQUEST,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusRequest)
+  ))
+_sym_db.RegisterMessage(QueryWriteStatusRequest)
+
+QueryWriteStatusResponse = _reflection.GeneratedProtocolMessageType('QueryWriteStatusResponse', (_message.Message,), dict(
+  DESCRIPTOR = _QUERYWRITESTATUSRESPONSE,
+  __module__ = 'google.bytestream.bytestream_pb2'
+  # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusResponse)
+  ))
+_sym_db.RegisterMessage(QueryWriteStatusResponse)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.google.bytestreamB\017ByteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestream'))
+
+_BYTESTREAM = _descriptor.ServiceDescriptor(
+  name='ByteStream',
+  full_name='google.bytestream.ByteStream',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=487,
+  serialized_end=761,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='Read',
+    full_name='google.bytestream.ByteStream.Read',
+    index=0,
+    containing_service=None,
+    input_type=_READREQUEST,
+    output_type=_READRESPONSE,
+    options=None,
+  ),
+  _descriptor.MethodDescriptor(
+    name='Write',
+    full_name='google.bytestream.ByteStream.Write',
+    index=1,
+    containing_service=None,
+    input_type=_WRITEREQUEST,
+    output_type=_WRITERESPONSE,
+    options=None,
+  ),
+  _descriptor.MethodDescriptor(
+    name='QueryWriteStatus',
+    full_name='google.bytestream.ByteStream.QueryWriteStatus',
+    index=2,
+    containing_service=None,
+    input_type=_QUERYWRITESTATUSREQUEST,
+    output_type=_QUERYWRITESTATUSRESPONSE,
+    options=None,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_BYTESTREAM)
+
+DESCRIPTOR.services_by_name['ByteStream'] = _BYTESTREAM
+
+# @@protoc_insertion_point(module_scope)
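The message classes registered above support the standard protobuf serialization API. A quick round-trip sketch (the resource name is an arbitrary example):

```python
# Sketch: round-trip a ReadRequest through the protobuf wire format.
# The resource name is an arbitrary example, not defined by this patch.
from google.bytestream import bytestream_pb2

req = bytestream_pb2.ReadRequest(
    resource_name='example-instance/blobs/4a73bc9d03.../65534',
    read_offset=0,
    read_limit=0,  # zero means "no limit" per the .proto comment
)
wire = req.SerializeToString()
parsed = bytestream_pb2.ReadRequest.FromString(wire)
assert parsed.resource_name == req.resource_name
```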
diff --git a/google/bytestream/bytestream_pb2_grpc.py b/google/bytestream/bytestream_pb2_grpc.py
new file mode 100644
index 0000000..063f54a
--- /dev/null
+++ b/google/bytestream/bytestream_pb2_grpc.py
@@ -0,0 +1,160 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from google.bytestream import bytestream_pb2 as google_dot_bytestream_dot_bytestream__pb2
+
+
+class ByteStreamStub(object):
+  """#### Introduction
+
+  The Byte Stream API enables a client to read and write a stream of bytes to
+  and from a resource. Resources have names, and these names are supplied in
+  the API calls below to identify the resource that is being read from or
+  written to.
+
+  All implementations of the Byte Stream API export the interface defined here:
+
+  * `Read()`: Reads the contents of a resource.
+
+  * `Write()`: Writes the contents of a resource. The client can call `Write()`
+  multiple times with the same resource and can check the status of the write
+  by calling `QueryWriteStatus()`.
+
+  #### Service parameters and metadata
+
+  The ByteStream API provides no direct way to access/modify any metadata
+  associated with the resource.
+
+  #### Errors
+
+  The errors returned by the service are in the Google canonical error space.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.Read = channel.unary_stream(
+        '/google.bytestream.ByteStream/Read',
+        request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
+        response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
+        )
+    self.Write = channel.stream_unary(
+        '/google.bytestream.ByteStream/Write',
+        request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
+        response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
+        )
+    self.QueryWriteStatus = channel.unary_unary(
+        '/google.bytestream.ByteStream/QueryWriteStatus',
+        request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
+        response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
+        )
+
+
+class ByteStreamServicer(object):
+  """#### Introduction
+
+  The Byte Stream API enables a client to read and write a stream of bytes to
+  and from a resource. Resources have names, and these names are supplied in
+  the API calls below to identify the resource that is being read from or
+  written to.
+
+  All implementations of the Byte Stream API export the interface defined here:
+
+  * `Read()`: Reads the contents of a resource.
+
+  * `Write()`: Writes the contents of a resource. The client can call `Write()`
+  multiple times with the same resource and can check the status of the write
+  by calling `QueryWriteStatus()`.
+
+  #### Service parameters and metadata
+
+  The ByteStream API provides no direct way to access/modify any metadata
+  associated with the resource.
+
+  #### Errors
+
+  The errors returned by the service are in the Google canonical error space.
+  """
+
+  def Read(self, request, context):
+    """`Read()` is used to retrieve the contents of a resource as a sequence
+    of bytes. The bytes are returned in a sequence of responses, and the
+    responses are delivered as the results of a server-side streaming RPC.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Write(self, request_iterator, context):
+    """`Write()` is used to send the contents of a resource as a sequence of
+    bytes. The bytes are sent in a sequence of request protos of a client-side
+    streaming RPC.
+
+    A `Write()` action is resumable. If there is an error or the connection is
+    broken during the `Write()`, the client should check the status of the
+    `Write()` by calling `QueryWriteStatus()` and continue writing from the
+    returned `committed_size`. This may be less than the amount of data the
+    client previously sent.
+
+    Calling `Write()` on a resource name that was previously written and
+    finalized could cause an error, depending on whether the underlying service
+    allows over-writing of previously written resources.
+
+    When the client closes the request channel, the service will respond with
+    a `WriteResponse`. The service will not view the resource as `complete`
+    until the client has sent a `WriteRequest` with `finish_write` set to
+    `true`. Sending any requests on a stream after sending a request with
+    `finish_write` set to `true` will cause an error. The client **should**
+    check the `WriteResponse` it receives to determine how much data the
+    service was able to commit and whether the service views the resource as
+    `complete` or not.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def QueryWriteStatus(self, request, context):
+    """`QueryWriteStatus()` is used to find the `committed_size` for a resource
+    that is being written, which can then be used as the `write_offset` for
+    the next `Write()` call.
+
+    If the resource does not exist (i.e., the resource has been deleted, or the
+    first `Write()` has not yet reached the service), this method returns the
+    error `NOT_FOUND`.
+
+    The client **may** call `QueryWriteStatus()` at any time to determine how
+    much data has been processed for this resource. This is useful if the
+    client is buffering data and needs to know which data can be safely
+    evicted. For any sequence of `QueryWriteStatus()` calls for a given
+    resource name, the sequence of returned `committed_size` values will be
+    non-decreasing.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_ByteStreamServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'Read': grpc.unary_stream_rpc_method_handler(
+          servicer.Read,
+          request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
+          response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
+      ),
+      'Write': grpc.stream_unary_rpc_method_handler(
+          servicer.Write,
+          request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
+          response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
+      ),
+      'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
+          servicer.QueryWriteStatus,
+          request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
+          response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'google.bytestream.ByteStream', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
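A minimal client-side sketch against the stub defined above (the endpoint and resource names are placeholders; error handling is omitted):

```python
# Minimal client sketch for the ByteStream stub defined above.
# Endpoint and resource names are placeholders, not part of this patch.
import grpc

from google.bytestream import bytestream_pb2, bytestream_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = bytestream_pb2_grpc.ByteStreamStub(channel)

# Read() is server-streaming: concatenate the data chunks it yields.
read_request = bytestream_pb2.ReadRequest(
    resource_name='example-instance/blobs/4a73bc9d03.../65534')
blob = b''.join(response.data for response in stub.Read(read_request))

# QueryWriteStatus() reports how much of a pending Write() has been committed.
status = stub.QueryWriteStatus(bytestream_pb2.QueryWriteStatusRequest(
    resource_name='example-instance/uploads/1234/blobs/4a73bc9d03.../65534'))
print(status.committed_size, status.complete)
```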
diff --git a/google/devtools/__init__.py b/google/devtools/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/devtools/__init__.py
diff --git a/google/devtools/remoteexecution/__init__.py b/google/devtools/remoteexecution/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/devtools/remoteexecution/__init__.py
diff --git a/google/devtools/remoteexecution/v1test/__init__.py b/google/devtools/remoteexecution/v1test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/devtools/remoteexecution/v1test/__init__.py
diff --git a/google/devtools/remoteexecution/v1test/remote_execution.proto b/google/devtools/remoteexecution/v1test/remote_execution.proto
new file mode 100644
index 0000000..a1d8155
--- /dev/null
+++ b/google/devtools/remoteexecution/v1test/remote_execution.proto
@@ -0,0 +1,982 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.remoteexecution.v1test;
+
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.RemoteExecution.V1Test";
+option go_package = "google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test;remoteexecution";
+option java_multiple_files = true;
+option java_outer_classname = "RemoteExecutionProto";
+option java_package = "com.google.devtools.remoteexecution.v1test";
+option objc_class_prefix = "REX";
+
+
+// The Remote Execution API is used to execute an
+// [Action][google.devtools.remoteexecution.v1test.Action] on the remote
+// workers.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Execution {
+  // Execute an action remotely.
+  //
+  // In order to execute an action, the client must first upload all of the
+  // inputs, as well as the
+  // [Command][google.devtools.remoteexecution.v1test.Command] to run, into the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  // It then calls `Execute` with an
+  // [Action][google.devtools.remoteexecution.v1test.Action] referring to them.
+  // The server will run the action and eventually return the result.
+  //
+  // The input `Action`'s fields MUST meet the various canonicalization
+  // requirements specified in the documentation for their types so that it has
+  // the same digest as other logically equivalent `Action`s. The server MAY
+  // enforce the requirements and return errors if a non-canonical input is
+  // received. It MAY also proceed without verifying some or all of the
+  // requirements, such as for performance reasons. If the server does not
+  // verify the requirement, then it will treat the `Action` as distinct from
+  // another logically equivalent action if they hash differently.
+  //
+  // Returns a [google.longrunning.Operation][google.longrunning.Operation]
+  // describing the resulting execution, with eventual `response`
+  // [ExecuteResponse][google.devtools.remoteexecution.v1test.ExecuteResponse].
+  // The `metadata` on the operation is of type
+  // [ExecuteOperationMetadata][google.devtools.remoteexecution.v1test.ExecuteOperationMetadata].
+  //
+  // To query the operation, you can use the
+  // [Operations API][google.longrunning.Operations.GetOperation]. If you wish
+  // to allow the server to stream operations updates, rather than requiring
+  // client polling, you can use the
+  // [Watcher API][google.watcher.v1.Watcher.Watch] with the Operation's `name`
+  // as the `target`.
+  //
+  // When using the Watcher API, the initial `data` will be the `Operation` at
+  // the time of the request. Updates will be provided periodically by the
+  // server until the `Operation` completes, at which point the response message
+  // will (assuming no error) be at `data.response`.
+  //
+  // The server NEED NOT implement other methods or functionality of the
+  // Operation and Watcher APIs.
+  //
+  // Errors discovered during creation of the `Operation` will be reported
+  // as gRPC Status errors, while errors that occurred while running the
+  // action will be reported in the `status` field of the `ExecuteResponse`. The
+  // server MUST NOT set the `error` field of the `Operation` proto.
+  // The possible errors include:
+  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+  // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+  //   action requested, such as a missing input or command or no worker being
+  //   available. The client may be able to fix the errors and retry.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+  //   the action.
+  // * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+  //   occupied (and the server does not support a queue), the action could not
+  //   be started. The client should retry.
+  // * `INTERNAL`: An internal error occurred in the execution engine or the
+  //   worker.
+  // * `DEADLINE_EXCEEDED`: The execution timed out.
+  //
+  // In the case of a missing input or command, the server SHOULD additionally
+  // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+  // where, for each requested blob not present in the CAS, there is a
+  // `Violation` with a `type` of `MISSING` and a `subject` of
+  // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+  rpc Execute(ExecuteRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = { post: "/v1test/{instance_name=**}/actions:execute" body: "*" };
+  }
+}
+
+// The action cache API is used to query whether a given action has already been
+// performed and, if so, retrieve its result. Unlike the
+// [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage],
+// which addresses blobs by their own content, the action cache addresses the
+// [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] by a
+// digest of the encoded [Action][google.devtools.remoteexecution.v1test.Action]
+// which produced them.
+//
+// The lifetime of entries in the action cache is implementation-specific, but
+// the server SHOULD assume that more recently used entries are more likely to
+// be used again. Additionally, action cache implementations SHOULD ensure that
+// any blobs referenced in the
+// [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]
+// are still valid when returning a result.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ActionCache {
+  // Retrieve a cached execution result.
+  //
+  // Errors:
+  // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+  rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
+    option (google.api.http) = { get: "/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
+  }
+
+  // Upload a new execution result.
+  //
+  // This method is intended for servers which implement the distributed cache
+  // independently of the
+  // [Execution][google.devtools.remoteexecution.v1test.Execution] API. As a
+  // result, it is OPTIONAL for servers to implement.
+  //
+  // Errors:
+  // * `NOT_IMPLEMENTED`: This method is not supported by the server.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+  //   entry to the cache.
+  rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
+    option (google.api.http) = { put: "/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
+  }
+}
+
+// The CAS (content-addressable storage) is used to store the inputs to and
+// outputs from the execution service. Each piece of content is addressed by the
+// digest of its binary data.
+//
+// Most of the binary data stored in the CAS is opaque to the execution engine,
+// and is only used as a communication medium. In order to build an
+// [Action][google.devtools.remoteexecution.v1test.Action],
+// however, the client will need to also upload the
+// [Command][google.devtools.remoteexecution.v1test.Command] and input root
+// [Directory][google.devtools.remoteexecution.v1test.Directory] for the Action.
+// The Command and Directory messages must be marshalled to wire format and then
+// uploaded under the hash as with any other piece of content. In practice, the
+// input root directory is likely to refer to other Directories in its
+// hierarchy, which must also each be uploaded on their own.
+//
+// For small file uploads the client should group them together and call
+// [BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]
+// on chunks of no more than 10 MiB. For large uploads, the client must use the
+// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+// where `instance_name` is as described in the next paragraph, `uuid` is a
+// version 4 UUID generated by the client, and `hash` and `size` are the
+// [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob. The
+// `uuid` is used only to avoid collisions when multiple clients try to upload
+// the same file (or the same client tries to upload the file multiple times at
+// once on different threads), so the client MAY reuse the `uuid` for uploading
+// different blobs. The `resource_name` may optionally have a trailing filename
+// (or other metadata) for a client to use if it is storing URLs, as in
+// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+// after the `size` is ignored.
+//
+// A single server MAY support multiple instances of the execution system, each
+// with their own workers, storage, cache, etc. The exact relationship between
+// instances is up to the server. If the server does, then the `instance_name`
+// is an identifier, possibly containing multiple path segments, used to
+// distinguish between the various instances on the server, in a manner defined
+// by the server. For servers which do not support multiple instances, then the
+// `instance_name` is the empty path and the leading slash is omitted, so that
+// the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+//
+// When attempting an upload, if another client has already completed the upload
+// (which may occur in the middle of a single upload if another client uploads
+// the same blob concurrently), the request will terminate immediately with
+// a response whose `committed_size` is the full size of the uploaded file
+// (regardless of how much data was transmitted by the client). If the client
+// completes the upload but the
+// [Digest][google.devtools.remoteexecution.v1test.Digest] does not match, an
+// `INVALID_ARGUMENT` error will be returned. In either case, the client should
+// not attempt to retry the upload.
+//
+// For downloading blobs, the client must use the
+// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+// `instance_name` is the instance name (see above), and `hash` and `size` are
+// the [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob.
+//
+// The lifetime of entries in the CAS is implementation specific, but it SHOULD
+// be long enough to allow for newly-added and recently looked-up entries to be
+// used in subsequent calls (e.g. to
+// [Execute][google.devtools.remoteexecution.v1test.Execution.Execute]).
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ContentAddressableStorage {
+  // Determine if blobs are present in the CAS.
+  //
+  // Clients can use this API before uploading blobs to determine which ones are
+  // already present in the CAS and do not need to be uploaded again.
+  //
+  // There are no method-specific errors.
+  rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
+    option (google.api.http) = { post: "/v1test/{instance_name=**}/blobs:findMissing" body: "*" };
+  }
+
+  // Upload many blobs at once.
+  //
+  // The client MUST NOT upload blobs with a combined total size of more than 10
+  // MiB using this API. Such requests should either be split into smaller
+  // chunks or uploaded using the
+  // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+  //
+  // This request is equivalent to calling [UpdateBlob][] on each individual
+  // blob, in parallel. The requests may succeed or fail independently.
+  //
+  // Errors:
+  // * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of
+  //   data.
+  //
+  // Individual requests may return the following errors, additionally:
+  // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+  // * `INVALID_ARGUMENT`: The
+  // [Digest][google.devtools.remoteexecution.v1test.Digest] does not match the
+  // provided data.
+  rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
+    option (google.api.http) = { post: "/v1test/{instance_name=**}/blobs:batchUpdate" body: "*" };
+  }
+
+  // Fetch the entire directory tree rooted at a node.
+  //
+  // This request must be targeted at a
+  // [Directory][google.devtools.remoteexecution.v1test.Directory] stored in the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]
+  // (CAS). The server will enumerate the `Directory` tree recursively and
+  // return every node descended from the root.
+  // The exact traversal order is unspecified and, unless retrieving subsequent
+  // pages from an earlier request, is not guaranteed to be stable across
+  // multiple invocations of `GetTree`.
+  //
+  // If part of the tree is missing from the CAS, the server will return the
+  // portion present and omit the rest.
+  //
+  // * `NOT_FOUND`: The requested tree root is not present in the CAS.
+  rpc GetTree(GetTreeRequest) returns (GetTreeResponse) {
+    option (google.api.http) = { get: "/v1test/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
+  }
+}
+
+// An `Action` captures all the information about an execution which is required
+// to reproduce it.
+//
+// `Action`s are the core component of the [Execution] service. A single
+// `Action` represents a repeatable action that can be performed by the
+// execution service. `Action`s can be succinctly identified by the digest of
+// their wire format encoding and, once an `Action` has been executed, will be
+// cached in the action cache. Future requests can then use the cached result
+// rather than needing to run afresh.
+//
+// When a server completes execution of an
+// [Action][google.devtools.remoteexecution.v1test.Action], it MAY choose to
+// cache the [result][google.devtools.remoteexecution.v1test.ActionResult] in
+// the [ActionCache][google.devtools.remoteexecution.v1test.ActionCache] unless
+// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
+// default, future calls to [Execute][] the same `Action` will also serve their
+// results from the cache. Clients must take care to understand the caching
+// behaviour. Ideally, all `Action`s will be reproducible so that serving a
+// result from cache is always desirable and correct.
+message Action {
+  // The digest of the [Command][google.devtools.remoteexecution.v1test.Command]
+  // to run, which MUST be present in the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  Digest command_digest = 1;
+
+  // The digest of the root
+  // [Directory][google.devtools.remoteexecution.v1test.Directory] for the input
+  // files. The files in the directory tree are available in the correct
+  // location on the build machine before the command is executed. The root
+  // directory, as well as every subdirectory and content blob referred to, MUST
+  // be in the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  Digest input_root_digest = 2;
+
+  // A list of the output files that the client expects to retrieve from the
+  // action. Only the listed files, as well as directories listed in
+  // `output_directories`, will be returned to the client as output.
+  // Other files that may be created during command execution are discarded.
+  //
+  // The paths are specified using forward slashes (`/`) as path separators,
+  // even if the execution platform natively uses a different separator. The
+  // path MUST NOT include a trailing slash.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  repeated string output_files = 3;
+
+  // A list of the output directories that the client expects to retrieve from
+  // the action. Only the contents of the indicated directories (recursively
+  // including the contents of their subdirectories) will be
+  // returned, as well as files listed in `output_files`. Other files that may
+  // be created during command execution are discarded.
+  //
+  // The paths are specified using forward slashes (`/`) as path separators,
+  // even if the execution platform natively uses a different separator. The
+  // path MUST NOT include a trailing slash, unless the path is `"/"` (which,
+  // although not recommended, can be used to capture the entire working
+  // directory tree, including inputs).
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  repeated string output_directories = 4;
+
+  // The platform requirements for the execution environment. The server MAY
+  // choose to execute the action on any worker satisfying the requirements, so
+  // the client SHOULD ensure that running the action on any such worker will
+  // have the same result.
+  Platform platform = 5;
+
+  // A timeout after which the execution should be killed. If the timeout is
+  // absent, then the client is specifying that the execution should continue
+  // as long as the server will let it. The server SHOULD impose a timeout if
+  // the client does not specify one, however, if the client does specify a
+  // timeout that is longer than the server's maximum timeout, the server MUST
+  // reject the request.
+  //
+  // The timeout is a part of the
+  // [Action][google.devtools.remoteexecution.v1test.Action] message, and
+  // therefore two `Actions` with different timeouts are different, even if they
+  // are otherwise identical. This is because, if they were not, running an
+  // `Action` with a lower timeout than is required might result in a cache hit
+  // from an execution run with a longer timeout, hiding the fact that the
+  // timeout is too short. By encoding it directly in the `Action`, a lower
+  // timeout will result in a cache miss and the execution timeout will fail
+  // immediately, rather than whenever the cache entry gets evicted.
+  google.protobuf.Duration timeout = 6;
+
+  // If true, then the `Action`'s result cannot be cached.
+  bool do_not_cache = 7;
+}
+
+// A `Command` is the actual command executed by a worker running an
+// [Action][google.devtools.remoteexecution.v1test.Action].
+//
+// Except as otherwise required, the environment (such as which system
+// libraries or binaries are available, and what filesystems are mounted where)
+// is defined by and specific to the implementation of the remote execution API.
+message Command {
+  // An `EnvironmentVariable` is one variable to set in the running program's
+  // environment.
+  message EnvironmentVariable {
+    // The variable name.
+    string name = 1;
+
+    // The variable value.
+    string value = 2;
+  }
+
+  // The arguments to the command. The first argument must be the path to the
+  // executable, which must be either a relative path, in which case it is
+  // evaluated with respect to the input root, or an absolute path. The `PATH`
+  // environment variable, or similar functionality on other systems, is not
+  // used to determine which executable to run.
+  //
+  // The working directory will always be the input root.
+  repeated string arguments = 1;
+
+  // The environment variables to set when running the program. The worker may
+  // provide its own default environment variables; these defaults can be
+  // overridden using this field. Additional variables can also be specified.
+  //
+  // In order to ensure that equivalent `Command`s always hash to the same
+  // value, the environment variables MUST be lexicographically sorted by name.
+  // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+  repeated EnvironmentVariable environment_variables = 2;
+}
+
+// A `Platform` is a set of requirements, such as hardware, operating system, or
+// compiler toolchain, for an
+// [Action][google.devtools.remoteexecution.v1test.Action]'s execution
+// environment. A `Platform` is represented as a series of key-value pairs
+// representing the properties that are required of the platform.
+//
+// This message is currently being redeveloped since it is an overly simplistic
+// model of platforms.
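+//
+// As a non-normative illustration (property names are defined by the server;
+// the names and values below are hypothetical), a `Platform` with its
+// properties sorted by name could look like:
+//
+// ```json
+// // (Platform proto)
+// {
+//   properties: [
+//     { name: "ISA", value: "x86-64" },
+//     { name: "OSFamily", value: "linux" }
+//   ]
+// }
+// ```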
+message Platform {
+  // A single property for the environment. The server is responsible for
+  // specifying the property `name`s that it accepts. If an unknown `name` is
+  // provided in the requirements for an
+  // [Action][google.devtools.remoteexecution.v1test.Action], the server SHOULD
+  // reject the execution request. If permitted by the server, the same `name`
+  // may occur multiple times.
+  //
+  // The server is also responsible for specifying the interpretation of
+  // property `value`s. For instance, a property describing how much RAM must be
+  // available may be interpreted as allowing a worker with 16GB to fulfill a
+  // request for 8GB, while a property describing the OS environment on which
+  // the action must be performed may require an exact match with the worker's
+  // OS.
+  //
+  // The server MAY use the `value` of one or more properties to determine how
+  // it sets up the execution environment, such as by making specific system
+  // files available to the worker.
+  message Property {
+    // The property name.
+    string name = 1;
+
+    // The property value.
+    string value = 2;
+  }
+
+  // The properties that make up this platform. In order to ensure that
+  // equivalent `Platform`s always hash to the same value, the properties MUST
+  // be lexicographically sorted by name, and then by value. Sorting of strings
+  // is done by code point, equivalently, by the UTF-8 bytes.
+  repeated Property properties = 1;
+}
+
+// A `Directory` represents a directory node in a file tree, containing zero or
+// more children [FileNodes][google.devtools.remoteexecution.v1test.FileNode],
+// [DirectoryNodes][google.devtools.remoteexecution.v1test.DirectoryNode] and
+// [SymlinkNodes][google.devtools.remoteexecution.v1test.SymlinkNode].
+// Each `Node` contains its name in the directory, either the digest of its
+// content (either a file blob or a `Directory` proto) or a symlink target, as
+// well as possibly some metadata about the file or directory.
+//
+// In order to ensure that two equivalent directory trees hash to the same
+// value, the following restrictions MUST be obeyed when constructing
+// a `Directory`:
+//   - Every child in the directory must have a path of exactly one segment.
+//     Multiple levels of directory hierarchy may not be collapsed.
+//   - Each child in the directory must have a unique path segment (file name).
+//   - The files, directories, and symlinks in the directory must each be sorted
+//     in lexicographical order by path. The path strings must be sorted by code
+//     point, equivalently, by UTF-8 bytes.
+//
+// A `Directory` that obeys the restrictions is said to be in canonical form.
+//
+// As an example, the following could be used for a file named `bar` and a
+// directory named `foo` with an executable file named `baz` (hashes shortened
+// for readability):
+//
+// ```json
+// // (Directory proto)
+// {
+//   files: [
+//     {
+//       name: "bar",
+//       digest: {
+//         hash: "4a73bc9d03...",
+//         size: 65534
+//       }
+//     }
+//   ],
+//   directories: [
+//     {
+//       name: "foo",
+//       digest: {
+//         hash: "4cf2eda940...",
+//         size: 43
+//       }
+//     }
+//   ]
+// }
+//
+// // (Directory proto with hash "4cf2eda940..." and size 43)
+// {
+//   files: [
+//     {
+//       name: "baz",
+//       digest: {
+//         hash: "b2c941073e...",
+//         size: 1294,
+//       },
+//       is_executable: true
+//     }
+//   ]
+// }
+// ```
+message Directory {
+  // The files in the directory.
+  repeated FileNode files = 1;
+
+  // The subdirectories in the directory.
+  repeated DirectoryNode directories = 2;
+
+  // The symlinks in the directory.
+  repeated SymlinkNode symlinks = 3;
+}
+
+// A `FileNode` represents a single file and associated metadata.
+message FileNode {
+  // The name of the file.
+  string name = 1;
+
+  // The digest of the file's content.
+  Digest digest = 2;
+
+  // True if file is executable, false otherwise.
+  bool is_executable = 4;
+}
+
+// A `DirectoryNode` represents a child of a
+// [Directory][google.devtools.remoteexecution.v1test.Directory] which is itself
+// a `Directory` and its associated metadata.
+message DirectoryNode {
+  // The name of the directory.
+  string name = 1;
+
+  // The digest of the
+  // [Directory][google.devtools.remoteexecution.v1test.Directory] object
+  // represented. See [Digest][google.devtools.remoteexecution.v1test.Digest]
+  // for information about how to take the digest of a proto message.
+  Digest digest = 2;
+}
+
+// A `SymlinkNode` represents a symbolic link.
+message SymlinkNode {
+  // The name of the symlink.
+  string name = 1;
+
+  // The target path of the symlink.
+  string target = 2;
+}
+
+// A content digest. A digest for a given blob consists of the size of the blob
+// and its hash. The hash algorithm to use is defined by the server, but servers
+// SHOULD use SHA-256.
+//
+// The size is considered to be an integral part of the digest and cannot be
+// separated. That is, even if the `hash` field is correctly specified but
+// `size_bytes` is not, the server MUST reject the request.
+//
+// The reason for including the size in the digest is as follows: in a great
+// many cases, the server needs to know the size of the blob it is about to work
+// with prior to starting an operation with it, such as flattening Merkle tree
+// structures or streaming it to a worker. Technically, the server could
+// implement a separate metadata store, but this results in a significantly more
+// complicated implementation as opposed to having the client specify the size
+// up-front (or storing the size along with the digest in every message where
+// digests are embedded). This does mean that the API leaks some implementation
+// details of (what we consider to be) a reasonable server implementation, but
+// we consider this to be a worthwhile tradeoff.
+//
+// When a `Digest` is used to refer to a proto message, it always refers to the
+// message in binary encoded form. To ensure consistent hashing, clients and
+// servers MUST ensure that they serialize messages according to the following
+// rules, even if there are alternate valid encodings for the same message.
+// - Fields are serialized in tag order.
+// - There are no unknown fields.
+// - There are no duplicate fields.
+// - Fields are serialized according to the default semantics for their type.
+//
+// Most protocol buffer implementations will always follow these rules when
+// serializing, but care should be taken to avoid shortcuts. For instance,
+// concatenating two messages to merge them may produce duplicate fields.
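+//
+// As a concrete illustration, with SHA-256 the digest of the empty blob is:
+//
+// ```json
+// // (Digest proto)
+// {
+//   hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+//   size_bytes: 0
+// }
+// ```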
+message Digest {
+  // The hash. In the case of SHA-256, it will always be a lowercase hex string
+  // exactly 64 characters long.
+  string hash = 1;
+
+  // The size of the blob, in bytes.
+  int64 size_bytes = 2;
+}
+
+// An ActionResult represents the result of an
+// [Action][google.devtools.remoteexecution.v1test.Action] being run.
+message ActionResult {
+  // The output files of the action. For each output file requested, if the
+  // corresponding file existed after the action completed, a single entry will
+  // be present in the output list.
+  //
+  // If the action does not produce the requested output, or produces a
+  // directory where a regular file is expected or vice versa, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputFile output_files = 2;
+
+  // The output directories of the action. For each output directory requested,
+  // if the corresponding directory existed after the action completed, a single
+  // entry will be present in the output list, which will contain the digest of
+  // a [Tree][google.devtools.remoteexecution.v1test.Tree] message containing
+  // the directory tree.
+  repeated OutputDirectory output_directories = 3;
+
+  // The exit code of the command.
+  int32 exit_code = 4;
+
+  // The standard output buffer of the action. The server will determine, based
+  // on the size of the buffer, whether to return it in raw form or to return
+  // a digest in `stdout_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume it will get one of
+  // the raw buffer or a digest on any given request and should be prepared to
+  // handle either.
+  bytes stdout_raw = 5;
+
+  // The digest for a blob containing the standard output of the action, which
+  // can be retrieved from the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  // See `stdout_raw` for when this will be set.
+  Digest stdout_digest = 6;
+
+  // The standard error buffer of the action. The server will determine, based
+  // on the size of the buffer, whether to return it in raw form or to return
+  // a digest in `stderr_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume it will get one of
+  // the raw buffer or a digest on any given request and should be prepared to
+  // handle either.
+  bytes stderr_raw = 7;
+
+  // The digest for a blob containing the standard error of the action, which
+  // can be retrieved from the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  // See `stderr_raw` for when this will be set.
+  Digest stderr_digest = 8;
+}
+
+// An `OutputFile` is similar to a
+// [FileNode][google.devtools.remoteexecution.v1test.FileNode], but it is
+// tailored for output as part of an `ActionResult`. It allows a full file path
+// rather than only a name, and allows the server to include content inline.
+//
+// `OutputFile` is binary-compatible with `FileNode`.
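+//
+// Reusing the `baz` file from the `Directory` example above (hashes shortened
+// for readability, and assuming `foo/baz` was listed in the `Action`'s
+// `output_files`), an `OutputFile` could look like:
+//
+// ```json
+// // (OutputFile proto)
+// {
+//   path: "foo/baz",
+//   digest: {
+//     hash: "b2c941073e...",
+//     size_bytes: 1294
+//   },
+//   is_executable: true
+// }
+// ```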
+message OutputFile {
+  // The full path of the file relative to the input root, including the
+  // filename. The path separator is a forward slash `/`.
+  string path = 1;
+
+  // The digest of the file's content.
+  Digest digest = 2;
+
+  // The raw content of the file.
+  //
+  // This field may be used by the server to provide the content of a file
+  // inline in an
+  // [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] and
+  // avoid requiring that the client make a separate call to
+  // [ContentAddressableStorage.GetBlob] to retrieve it.
+  //
+  // The client SHOULD NOT assume that it will get raw content with any request,
+  // and should always be prepared to retrieve it via `digest`.
+  bytes content = 3;
+
+  // True if file is executable, false otherwise.
+  bool is_executable = 4;
+}
+
+// A `Tree` contains all the
+// [Directory][google.devtools.remoteexecution.v1test.Directory] protos in a
+// single directory Merkle tree, compressed into one message.
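+//
+// As a non-normative sketch, the `bar`/`foo`/`baz` example from the
+// `Directory` message above could be packed into a single `Tree` (hashes
+// shortened for readability):
+//
+// ```json
+// // (Tree proto)
+// {
+//   root: {
+//     files: [
+//       { name: "bar", digest: { hash: "4a73bc9d03...", size_bytes: 65534 } }
+//     ],
+//     directories: [
+//       { name: "foo", digest: { hash: "4cf2eda940...", size_bytes: 43 } }
+//     ]
+//   },
+//   children: [
+//     {
+//       files: [
+//         {
+//           name: "baz",
+//           digest: { hash: "b2c941073e...", size_bytes: 1294 },
+//           is_executable: true
+//         }
+//       ]
+//     }
+//   ]
+// }
+// ```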
+message Tree {
+  // The root directory in the tree.
+  Directory root = 1;
+
+  // All the child directories: the directories referred to by the root and,
+  // recursively, all its children. In order to reconstruct the directory tree,
+  // the client must take the digests of each of the child directories and then
+  // build up a tree starting from the `root`.
+  repeated Directory children = 2;
+}
+
+// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
+// directory's full contents rather than a single file.
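+//
+// As an illustrative, non-normative sketch (the path and tree digest below are
+// hypothetical), an `OutputDirectory` could look like:
+//
+// ```json
+// // (OutputDirectory proto)
+// {
+//   path: "build/output",
+//   tree_digest: {
+//     hash: "8d7f3c1ab2...",
+//     size_bytes: 172
+//   }
+// }
+// ```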
+message OutputDirectory {
+  // The full path of the directory relative to the input root, including the
+  // directory name. The path separator is a forward slash `/`.
+  string path = 1;
+
+  // DEPRECATED: This field should no longer be used.
+  Digest digest = 2;
+
+  // The digest of the encoded
+  // [Tree][google.devtools.remoteexecution.v1test.Tree] proto containing the
+  // directory's contents.
+  Digest tree_digest = 3;
+}
+
+// A request message for
+// [Execution.Execute][google.devtools.remoteexecution.v1test.Execution.Execute].
+message ExecuteRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The action to be performed.
+  Action action = 2;
+
+  // If true, the action will be executed anew even if its result was already
+  // present in the cache. If false, the result may be served from the
+  // [ActionCache][google.devtools.remoteexecution.v1test.ActionCache].
+  bool skip_cache_lookup = 3;
+
+  // DEPRECATED: This field should be ignored by clients and servers and will be
+  // removed.
+  int32 total_input_file_count = 4;
+
+  // DEPRECATED: This field should be ignored by clients and servers and will be
+  // removed.
+  int64 total_input_file_bytes = 5;
+}
+
+// A `LogFile` is a log stored in the CAS.
+message LogFile {
+  // The digest of the log contents.
+  Digest digest = 1;
+
+  // This is a hint as to the purpose of the log, and is set to true if the log
+  // is human-readable text that can be usefully displayed to a user, and false
+  // otherwise. For instance, if a command-line client wishes to print the
+  // server logs to the terminal for a failed action, this allows it to avoid
+  // displaying a binary file.
+  bool human_readable = 2;
+}
+
+// The response message for
+// [Execution.Execute][google.devtools.remoteexecution.v1test.Execution.Execute],
+// which will be contained in the [response
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteResponse {
+  // The result of the action.
+  ActionResult result = 1;
+
+  // True if the result was served from cache, false if it was executed.
+  bool cached_result = 2;
+
+  // If the status has a code other than `OK`, it indicates that the action did
+  // not finish execution. For example, if the operation times out during
+  // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
+  // use this field for errors in execution, rather than the error field on the
+  // `Operation` object.
+  //
+  // If the status code is other than `OK`, then the result MUST NOT be cached.
+  // For an error status, the `result` field is optional; the server may
+  // populate the output-, stdout-, and stderr-related fields if it has any
+  // information available, such as the stdout and stderr of a timed-out action.
+  google.rpc.Status status = 3;
+
+  // An optional list of additional log outputs the server wishes to provide. A
+  // server can use this to return execution-specific logs however it wishes.
+  // This is intended primarily to make it easier for users to debug issues that
+  // may be outside of the actual job execution, such as by identifying the
+  // worker executing the action or by providing logs from the worker's setup
+  // phase. The keys SHOULD be human readable so that a client can display them
+  // to a user.
+  map<string, LogFile> server_logs = 4;
+}
+
+// Metadata about an ongoing
+// [execution][google.devtools.remoteexecution.v1test.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.metadata] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+  // The current stage of execution.
+  enum Stage {
+    UNKNOWN = 0;
+
+    // Checking the result against the cache.
+    CACHE_CHECK = 1;
+
+    // Currently idle, awaiting a free machine to execute.
+    QUEUED = 2;
+
+    // Currently being executed by a worker.
+    EXECUTING = 3;
+
+    // Finished execution.
+    COMPLETED = 4;
+  }
+
+  Stage stage = 1;
+
+  // The digest of the [Action][google.devtools.remoteexecution.v1test.Action]
+  // being executed.
+  Digest action_digest = 2;
+
+  // If set, the client can use this name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard output.
+  string stdout_stream_name = 3;
+
+  // If set, the client can use this name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard error.
+  string stderr_stream_name = 4;
+}
+
+// A request message for
+// [ActionCache.GetActionResult][google.devtools.remoteexecution.v1test.ActionCache.GetActionResult].
+message GetActionResultRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the [Action][google.devtools.remoteexecution.v1test.Action]
+  // whose result is requested.
+  Digest action_digest = 2;
+}
+
+// A request message for
+// [ActionCache.UpdateActionResult][google.devtools.remoteexecution.v1test.ActionCache.UpdateActionResult].
+message UpdateActionResultRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the [Action][google.devtools.remoteexecution.v1test.Action]
+  // whose result is being uploaded.
+  Digest action_digest = 2;
+
+  // The [ActionResult][google.devtools.remoteexecution.v1test.ActionResult]
+  // to store in the cache.
+  ActionResult action_result = 3;
+}
+
+// A request message for
+// [ContentAddressableStorage.FindMissingBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // A list of the blobs to check.
+  repeated Digest blob_digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.FindMissingBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsResponse {
+  // A list of the requested blobs that are *not* present in the storage.
+  repeated Digest missing_blob_digests = 2;
+}
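+
+// As a non-normative illustration of the find-missing flow (the instance name
+// is hypothetical; the digests are reused from the `Directory` example above,
+// hashes shortened), a client might check two blobs and learn that only the
+// second needs to be uploaded:
+//
+// ```json
+// // (FindMissingBlobsRequest)
+// {
+//   instance_name: "main",
+//   blob_digests: [
+//     { hash: "4a73bc9d03...", size_bytes: 65534 },
+//     { hash: "b2c941073e...", size_bytes: 1294 }
+//   ]
+// }
+//
+// // (FindMissingBlobsResponse)
+// {
+//   missing_blob_digests: [
+//     { hash: "b2c941073e...", size_bytes: 1294 }
+//   ]
+// }
+// ```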
+
+// A single request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs].
+message UpdateBlobRequest {
+  // The digest of the blob. This MUST be the digest of `data`.
+  Digest content_digest = 1;
+
+  // The raw binary data.
+  bytes data = 2;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The individual upload requests.
+  repeated UpdateBlobRequest requests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsResponse {
+  // A response corresponding to a single blob that the client tried to upload.
+  message Response {
+    // The digest to which this response corresponds.
+    Digest blob_digest = 1;
+
+    // The result of attempting to upload that blob.
+    google.rpc.Status status = 2;
+  }
+
+  // The responses to the requests.
+  repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.GetTree][google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree].
+message GetTreeRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the root, which must be an encoded
+  // [Directory][google.devtools.remoteexecution.v1test.Directory] message
+  // stored in the
+  // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+  Digest root_digest = 2;
+
+  // A maximum page size to request. If present, the server will return no more
+  // than this many items. Regardless of whether a page size is specified, the
+  // server may place its own limit on the number of items to be returned and
+  // require the client to retrieve more items using a subsequent request.
+  int32 page_size = 3;
+
+  // A page token, which must be a value received in a previous
+  // [GetTreeResponse][google.devtools.remoteexecution.v1test.GetTreeResponse].
+  // If present, the server will use it to return the following page of results.
+  string page_token = 4;
+}
+
+// A response message for
+// [ContentAddressableStorage.GetTree][google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree].
+message GetTreeResponse {
+  // The directories descended from the requested root.
+  repeated Directory directories = 1;
+
+  // If present, signifies that there are more results which the client can
+  // retrieve by passing this as the page_token in a subsequent
+  // [request][google.devtools.remoteexecution.v1test.GetTreeRequest].
+  // If empty, signifies that this is the last page of results.
+  string next_page_token = 2;
+}
+
+// Details for the tool used to call the API.
+message ToolDetails {
+  // Name of the tool, e.g. bazel.
+  string tool_name = 1;
+
+  // Version of the tool used for the request, e.g. 5.0.3.
+  string tool_version = 2;
+}
+
+// Optional metadata to attach to any RPC request to tell the server about the
+// external context of the request. The server may use this for logging or other
+// purposes. To use it, the client attaches the header to the call using the
+// canonical proto serialization:
+// name: google.devtools.remoteexecution.v1test.requestmetadata-bin
+// contents: the base64 encoded binary RequestMetadata message.
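+//
+// As a non-normative sketch (all identifier values below are hypothetical), a
+// `RequestMetadata` message sent by a build tool could look like:
+//
+// ```json
+// // (RequestMetadata proto)
+// {
+//   tool_details: { tool_name: "bazel", tool_version: "5.0.3" },
+//   action_id: "compile-foo-cc",
+//   tool_invocation_id: "build-foo-test-1",
+//   correlated_invocations_id: "postsubmit-run-42"
+// }
+// ```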
+message RequestMetadata {
+  // The details for the tool invoking the requests.
+  ToolDetails tool_details = 1;
+
+  // An identifier that ties multiple requests to the same action.
+  // For example, multiple requests to the CAS, Action Cache, and Execution
+  // API are used in order to compile foo.cc.
+  string action_id = 2;
+
+  // An identifier that ties multiple actions together to a final result.
+  // For example, multiple actions are required to build and run foo_test.
+  string tool_invocation_id = 3;
+
+  // An identifier to tie multiple tool invocations together. For example,
+  // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+  string correlated_invocations_id = 4;
+}
diff --git a/google/devtools/remoteexecution/v1test/remote_execution_pb2.py b/google/devtools/remoteexecution/v1test/remote_execution_pb2.py
new file mode 100644
index 0000000..b45d4c9
--- /dev/null
+++ b/google/devtools/remoteexecution/v1test/remote_execution_pb2.py
@@ -0,0 +1,1809 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/devtools/remoteexecution/v1test/remote_execution.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
+from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/devtools/remoteexecution/v1test/remote_execution.proto',
+  package='google.devtools.remoteexecution.v1test',
+  syntax='proto3',
+  serialized_pb=_b('\n=google/devtools/remoteexecution/v1test/remote_execution.proto\x12&google.devtools.remoteexecution.v1test\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xd3\x02\n\x06\x41\x63tion\x12\x46\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12I\n\x11input_root_digest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12\x42\n\x08platform\x18\x05 \x01(\x0b\x32\x30.google.devtools.remoteexecution.v1test.Platform\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08\"\xb4\x01\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12\x62\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32\x43.google.devtools.remoteexecution.v1test.Command.EnvironmentVariable\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x82\x01\n\x08Platform\x12M\n\nproperties\x18\x01 \x03(\x0b\x32\x39.google.devtools.remoteexecution.v1test.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdf\x01\n\tDirectory\x12?\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x30.google.devtools.remoteexecution.v1test.FileNode\x12J\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32\x35.google.devtools.remoteexecution.v1test.DirectoryNode\x12\x45\n\x08symlinks\x18\x03 \x03(\x0b\x32\x33.google.devtools.remoteexecution.v1test.SymlinkNode\"o\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12>\n\x06\x64igest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08\"]\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12>\n\x06\x64igest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xf6\x02\n\x0c\x41\x63tionResult\x12H\n\x0coutput_files\x18\x02 \x03(\x0b\x32\x32.google.devtools.remoteexecution.v1test.OutputFile\x12S\n\x12output_directories\x18\x03 \x03(\x0b\x32\x37.google.devtools.remoteexecution.v1test.OutputDirectory\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12\x45\n\rstdout_digest\x18\x06 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x12\n\nstderr_raw\x18\x07 \x01(\x0c\x12\x45\n\rstderr_digest\x18\x08 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"\x82\x01\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12>\n\x06\x64igest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\x0c\x12\x15\n\ris_executable\x18\x04 \x01(\x08\"\x8c\x01\n\x04Tree\x12?\n\x04root\x18\x01 \x01(\x0b\x32\x31.google.devtools.remoteexecution.v1test.Directory\x12\x43\n\x08\x63hildren\x18\x02 \x03(\x0b\x32\x31.google.devtools.remoteexecution.v1test.Directory\"\xa4\x01\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12>\n\x06\x64igest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x43\n\x0btree_digest\x18\x03 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"\xc2\x01\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\x06\x61\x63tion\x18\x02 
\x01(\x0b\x32..google.devtools.remoteexecution.v1test.Action\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12\x1e\n\x16total_input_file_count\x18\x04 \x01(\x05\x12\x1e\n\x16total_input_file_bytes\x18\x05 \x01(\x03\"a\n\x07LogFile\x12>\n\x06\x64igest\x18\x01 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xd4\x02\n\x0f\x45xecuteResponse\x12\x44\n\x06result\x18\x01 \x01(\x0b\x32\x34.google.devtools.remoteexecution.v1test.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\\\n\x0bserver_logs\x18\x04 \x03(\x0b\x32G.google.devtools.remoteexecution.v1test.ExecuteResponse.ServerLogsEntry\x1a\x62\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12>\n\x05value\x18\x02 \x01(\x0b\x32/.google.devtools.remoteexecution.v1test.LogFile:\x02\x38\x01\"\xc1\x02\n\x18\x45xecuteOperationMetadata\x12U\n\x05stage\x18\x01 \x01(\x0e\x32\x46.google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.Stage\x12\x45\n\raction_digest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"v\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x45\n\raction_digest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"\xc6\x01\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x45\n\raction_digest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12K\n\raction_result\x18\x03 \x01(\x0b\x32\x34.google.devtools.remoteexecution.v1test.ActionResult\"v\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x44\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"h\n\x18\x46indMissingBlobsResponse\x12L\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\"i\n\x11UpdateBlobRequest\x12\x46\n\x0e\x63ontent_digest\x18\x01 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"}\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12K\n\x08requests\x18\x02 \x03(\x0b\x32\x39.google.devtools.remoteexecution.v1test.UpdateBlobRequest\"\xed\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12\\\n\tresponses\x18\x01 \x03(\x0b\x32I.google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response\x1as\n\x08Response\x12\x43\n\x0b\x62lob_digest\x18\x01 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\x93\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x43\n\x0broot_digest\x18\x02 \x01(\x0b\x32..google.devtools.remoteexecution.v1test.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"r\n\x0fGetTreeResponse\x12\x46\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32\x31.google.devtools.remoteexecution.v1test.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xae\x01\n\x0fRequestMetadata\x12I\n\x0ctool_details\x18\x01 \x01(\x0b\x32\x33.google.devtools.remoteexecution.v1test.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 
\x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t2\xa5\x01\n\tExecution\x12\x97\x01\n\x07\x45xecute\x12\x36.google.devtools.remoteexecution.v1test.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"5\x82\xd3\xe4\x93\x02/\"*/v1test/{instance_name=**}/actions:execute:\x01*2\xfa\x03\n\x0b\x41\x63tionCache\x12\xe9\x01\n\x0fGetActionResult\x12>.google.devtools.remoteexecution.v1test.GetActionResultRequest\x1a\x34.google.devtools.remoteexecution.v1test.ActionResult\"`\x82\xd3\xe4\x93\x02Z\x12X/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xfe\x01\n\x12UpdateActionResult\x12\x41.google.devtools.remoteexecution.v1test.UpdateActionResultRequest\x1a\x34.google.devtools.remoteexecution.v1test.ActionResult\"o\x82\xd3\xe4\x93\x02i\x1aX/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x98\x05\n\x19\x43ontentAddressableStorage\x12\xce\x01\n\x10\x46indMissingBlobs\x12?.google.devtools.remoteexecution.v1test.FindMissingBlobsRequest\x1a@.google.devtools.remoteexecution.v1test.FindMissingBlobsResponse\"7\x82\xd3\xe4\x93\x02\x31\",/v1test/{instance_name=**}/blobs:findMissing:\x01*\x12\xce\x01\n\x10\x42\x61tchUpdateBlobs\x12?.google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest\x1a@.google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse\"7\x82\xd3\xe4\x93\x02\x31\",/v1test/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xd8\x01\n\x07GetTree\x12\x36.google.devtools.remoteexecution.v1test.GetTreeRequest\x1a\x37.google.devtools.remoteexecution.v1test.GetTreeResponse\"\\\x82\xd3\xe4\x93\x02V\x12T/v1test/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTreeB\xc1\x01\n*com.google.devtools.remoteexecution.v1testB\x14RemoteExecutionProtoP\x01ZUgoogle.golang.org/genproto/googleapis/devtools/remoteexecution/v1test;remoteexecution\xa2\x02\x03REX\xaa\x02\x1dGoogle.RemoteExecution.V1Testb\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+
+
+_EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
+  name='Stage',
+  full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.Stage',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CACHE_CHECK', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='QUEUED', index=2, number=2,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='EXECUTING', index=3, number=3,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='COMPLETED', index=4, number=4,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=3112,
+  serialized_end=3191,
+)
+_sym_db.RegisterEnumDescriptor(_EXECUTEOPERATIONMETADATA_STAGE)
+
+
+_ACTION = _descriptor.Descriptor(
+  name='Action',
+  full_name='google.devtools.remoteexecution.v1test.Action',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='command_digest', full_name='google.devtools.remoteexecution.v1test.Action.command_digest', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='input_root_digest', full_name='google.devtools.remoteexecution.v1test.Action.input_root_digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_files', full_name='google.devtools.remoteexecution.v1test.Action.output_files', index=2,
+      number=3, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_directories', full_name='google.devtools.remoteexecution.v1test.Action.output_directories', index=3,
+      number=4, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='platform', full_name='google.devtools.remoteexecution.v1test.Action.platform', index=4,
+      number=5, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='timeout', full_name='google.devtools.remoteexecution.v1test.Action.timeout', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='do_not_cache', full_name='google.devtools.remoteexecution.v1test.Action.do_not_cache', index=6,
+      number=7, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=230,
+  serialized_end=569,
+)
+
+
+_COMMAND_ENVIRONMENTVARIABLE = _descriptor.Descriptor(
+  name='EnvironmentVariable',
+  full_name='google.devtools.remoteexecution.v1test.Command.EnvironmentVariable',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.devtools.remoteexecution.v1test.Command.EnvironmentVariable.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='google.devtools.remoteexecution.v1test.Command.EnvironmentVariable.value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=702,
+  serialized_end=752,
+)
+
+_COMMAND = _descriptor.Descriptor(
+  name='Command',
+  full_name='google.devtools.remoteexecution.v1test.Command',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='arguments', full_name='google.devtools.remoteexecution.v1test.Command.arguments', index=0,
+      number=1, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='environment_variables', full_name='google.devtools.remoteexecution.v1test.Command.environment_variables', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_COMMAND_ENVIRONMENTVARIABLE, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=572,
+  serialized_end=752,
+)
+
+
+_PLATFORM_PROPERTY = _descriptor.Descriptor(
+  name='Property',
+  full_name='google.devtools.remoteexecution.v1test.Platform.Property',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.devtools.remoteexecution.v1test.Platform.Property.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='google.devtools.remoteexecution.v1test.Platform.Property.value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=846,
+  serialized_end=885,
+)
+
+_PLATFORM = _descriptor.Descriptor(
+  name='Platform',
+  full_name='google.devtools.remoteexecution.v1test.Platform',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='properties', full_name='google.devtools.remoteexecution.v1test.Platform.properties', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_PLATFORM_PROPERTY, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=755,
+  serialized_end=885,
+)
+
+
+_DIRECTORY = _descriptor.Descriptor(
+  name='Directory',
+  full_name='google.devtools.remoteexecution.v1test.Directory',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='files', full_name='google.devtools.remoteexecution.v1test.Directory.files', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='directories', full_name='google.devtools.remoteexecution.v1test.Directory.directories', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='symlinks', full_name='google.devtools.remoteexecution.v1test.Directory.symlinks', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=888,
+  serialized_end=1111,
+)
+
+
+_FILENODE = _descriptor.Descriptor(
+  name='FileNode',
+  full_name='google.devtools.remoteexecution.v1test.FileNode',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.devtools.remoteexecution.v1test.FileNode.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='digest', full_name='google.devtools.remoteexecution.v1test.FileNode.digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='is_executable', full_name='google.devtools.remoteexecution.v1test.FileNode.is_executable', index=2,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1113,
+  serialized_end=1224,
+)
+
+
+_DIRECTORYNODE = _descriptor.Descriptor(
+  name='DirectoryNode',
+  full_name='google.devtools.remoteexecution.v1test.DirectoryNode',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.devtools.remoteexecution.v1test.DirectoryNode.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='digest', full_name='google.devtools.remoteexecution.v1test.DirectoryNode.digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1226,
+  serialized_end=1319,
+)
+
+
+_SYMLINKNODE = _descriptor.Descriptor(
+  name='SymlinkNode',
+  full_name='google.devtools.remoteexecution.v1test.SymlinkNode',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.devtools.remoteexecution.v1test.SymlinkNode.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='target', full_name='google.devtools.remoteexecution.v1test.SymlinkNode.target', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1321,
+  serialized_end=1364,
+)
+
+
+_DIGEST = _descriptor.Descriptor(
+  name='Digest',
+  full_name='google.devtools.remoteexecution.v1test.Digest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='hash', full_name='google.devtools.remoteexecution.v1test.Digest.hash', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='size_bytes', full_name='google.devtools.remoteexecution.v1test.Digest.size_bytes', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1366,
+  serialized_end=1408,
+)
+
+
+_ACTIONRESULT = _descriptor.Descriptor(
+  name='ActionResult',
+  full_name='google.devtools.remoteexecution.v1test.ActionResult',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='output_files', full_name='google.devtools.remoteexecution.v1test.ActionResult.output_files', index=0,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_directories', full_name='google.devtools.remoteexecution.v1test.ActionResult.output_directories', index=1,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='exit_code', full_name='google.devtools.remoteexecution.v1test.ActionResult.exit_code', index=2,
+      number=4, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stdout_raw', full_name='google.devtools.remoteexecution.v1test.ActionResult.stdout_raw', index=3,
+      number=5, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stdout_digest', full_name='google.devtools.remoteexecution.v1test.ActionResult.stdout_digest', index=4,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stderr_raw', full_name='google.devtools.remoteexecution.v1test.ActionResult.stderr_raw', index=5,
+      number=7, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stderr_digest', full_name='google.devtools.remoteexecution.v1test.ActionResult.stderr_digest', index=6,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1411,
+  serialized_end=1785,
+)
+
+
+_OUTPUTFILE = _descriptor.Descriptor(
+  name='OutputFile',
+  full_name='google.devtools.remoteexecution.v1test.OutputFile',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='path', full_name='google.devtools.remoteexecution.v1test.OutputFile.path', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='digest', full_name='google.devtools.remoteexecution.v1test.OutputFile.digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='content', full_name='google.devtools.remoteexecution.v1test.OutputFile.content', index=2,
+      number=3, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='is_executable', full_name='google.devtools.remoteexecution.v1test.OutputFile.is_executable', index=3,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1788,
+  serialized_end=1918,
+)
+
+
+_TREE = _descriptor.Descriptor(
+  name='Tree',
+  full_name='google.devtools.remoteexecution.v1test.Tree',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='root', full_name='google.devtools.remoteexecution.v1test.Tree.root', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='children', full_name='google.devtools.remoteexecution.v1test.Tree.children', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1921,
+  serialized_end=2061,
+)
+
+
+_OUTPUTDIRECTORY = _descriptor.Descriptor(
+  name='OutputDirectory',
+  full_name='google.devtools.remoteexecution.v1test.OutputDirectory',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='path', full_name='google.devtools.remoteexecution.v1test.OutputDirectory.path', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='digest', full_name='google.devtools.remoteexecution.v1test.OutputDirectory.digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='tree_digest', full_name='google.devtools.remoteexecution.v1test.OutputDirectory.tree_digest', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2064,
+  serialized_end=2228,
+)
+
+
+_EXECUTEREQUEST = _descriptor.Descriptor(
+  name='ExecuteRequest',
+  full_name='google.devtools.remoteexecution.v1test.ExecuteRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.ExecuteRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action', full_name='google.devtools.remoteexecution.v1test.ExecuteRequest.action', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='skip_cache_lookup', full_name='google.devtools.remoteexecution.v1test.ExecuteRequest.skip_cache_lookup', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='total_input_file_count', full_name='google.devtools.remoteexecution.v1test.ExecuteRequest.total_input_file_count', index=3,
+      number=4, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='total_input_file_bytes', full_name='google.devtools.remoteexecution.v1test.ExecuteRequest.total_input_file_bytes', index=4,
+      number=5, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2231,
+  serialized_end=2425,
+)
+
+
+_LOGFILE = _descriptor.Descriptor(
+  name='LogFile',
+  full_name='google.devtools.remoteexecution.v1test.LogFile',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='digest', full_name='google.devtools.remoteexecution.v1test.LogFile.digest', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='human_readable', full_name='google.devtools.remoteexecution.v1test.LogFile.human_readable', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2427,
+  serialized_end=2524,
+)
+
+
+_EXECUTERESPONSE_SERVERLOGSENTRY = _descriptor.Descriptor(
+  name='ServerLogsEntry',
+  full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.ServerLogsEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.ServerLogsEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.ServerLogsEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2769,
+  serialized_end=2867,
+)
+
+_EXECUTERESPONSE = _descriptor.Descriptor(
+  name='ExecuteResponse',
+  full_name='google.devtools.remoteexecution.v1test.ExecuteResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='result', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.result', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='cached_result', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.cached_result', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.status', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='server_logs', full_name='google.devtools.remoteexecution.v1test.ExecuteResponse.server_logs', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_EXECUTERESPONSE_SERVERLOGSENTRY, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2527,
+  serialized_end=2867,
+)
+
+
+_EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
+  name='ExecuteOperationMetadata',
+  full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='stage', full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.stage', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action_digest', full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.action_digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stdout_stream_name', full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.stdout_stream_name', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='stderr_stream_name', full_name='google.devtools.remoteexecution.v1test.ExecuteOperationMetadata.stderr_stream_name', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _EXECUTEOPERATIONMETADATA_STAGE,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2870,
+  serialized_end=3191,
+)
+
+
+_GETACTIONRESULTREQUEST = _descriptor.Descriptor(
+  name='GetActionResultRequest',
+  full_name='google.devtools.remoteexecution.v1test.GetActionResultRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.GetActionResultRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action_digest', full_name='google.devtools.remoteexecution.v1test.GetActionResultRequest.action_digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3193,
+  serialized_end=3311,
+)
+
+
+_UPDATEACTIONRESULTREQUEST = _descriptor.Descriptor(
+  name='UpdateActionResultRequest',
+  full_name='google.devtools.remoteexecution.v1test.UpdateActionResultRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.UpdateActionResultRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action_digest', full_name='google.devtools.remoteexecution.v1test.UpdateActionResultRequest.action_digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action_result', full_name='google.devtools.remoteexecution.v1test.UpdateActionResultRequest.action_result', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3314,
+  serialized_end=3512,
+)
+
+
+_FINDMISSINGBLOBSREQUEST = _descriptor.Descriptor(
+  name='FindMissingBlobsRequest',
+  full_name='google.devtools.remoteexecution.v1test.FindMissingBlobsRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.FindMissingBlobsRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='blob_digests', full_name='google.devtools.remoteexecution.v1test.FindMissingBlobsRequest.blob_digests', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3514,
+  serialized_end=3632,
+)
+
+
+_FINDMISSINGBLOBSRESPONSE = _descriptor.Descriptor(
+  name='FindMissingBlobsResponse',
+  full_name='google.devtools.remoteexecution.v1test.FindMissingBlobsResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='missing_blob_digests', full_name='google.devtools.remoteexecution.v1test.FindMissingBlobsResponse.missing_blob_digests', index=0,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3634,
+  serialized_end=3738,
+)
+
+
+_UPDATEBLOBREQUEST = _descriptor.Descriptor(
+  name='UpdateBlobRequest',
+  full_name='google.devtools.remoteexecution.v1test.UpdateBlobRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='content_digest', full_name='google.devtools.remoteexecution.v1test.UpdateBlobRequest.content_digest', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='google.devtools.remoteexecution.v1test.UpdateBlobRequest.data', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3740,
+  serialized_end=3845,
+)
+
+
+_BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
+  name='BatchUpdateBlobsRequest',
+  full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='requests', full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest.requests', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3847,
+  serialized_end=3972,
+)
+
+
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
+  name='Response',
+  full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='blob_digest', full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response.blob_digest', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response.status', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4097,
+  serialized_end=4212,
+)
+
+_BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
+  name='BatchUpdateBlobsResponse',
+  full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='responses', full_name='google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.responses', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_BATCHUPDATEBLOBSRESPONSE_RESPONSE, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3975,
+  serialized_end=4212,
+)
+
+
+_GETTREEREQUEST = _descriptor.Descriptor(
+  name='GetTreeRequest',
+  full_name='google.devtools.remoteexecution.v1test.GetTreeRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='instance_name', full_name='google.devtools.remoteexecution.v1test.GetTreeRequest.instance_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='root_digest', full_name='google.devtools.remoteexecution.v1test.GetTreeRequest.root_digest', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page_size', full_name='google.devtools.remoteexecution.v1test.GetTreeRequest.page_size', index=2,
+      number=3, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page_token', full_name='google.devtools.remoteexecution.v1test.GetTreeRequest.page_token', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4215,
+  serialized_end=4362,
+)
+
+
+_GETTREERESPONSE = _descriptor.Descriptor(
+  name='GetTreeResponse',
+  full_name='google.devtools.remoteexecution.v1test.GetTreeResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='directories', full_name='google.devtools.remoteexecution.v1test.GetTreeResponse.directories', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='next_page_token', full_name='google.devtools.remoteexecution.v1test.GetTreeResponse.next_page_token', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4364,
+  serialized_end=4478,
+)
+
+
+_TOOLDETAILS = _descriptor.Descriptor(
+  name='ToolDetails',
+  full_name='google.devtools.remoteexecution.v1test.ToolDetails',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='tool_name', full_name='google.devtools.remoteexecution.v1test.ToolDetails.tool_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='tool_version', full_name='google.devtools.remoteexecution.v1test.ToolDetails.tool_version', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4480,
+  serialized_end=4534,
+)
+
+
+_REQUESTMETADATA = _descriptor.Descriptor(
+  name='RequestMetadata',
+  full_name='google.devtools.remoteexecution.v1test.RequestMetadata',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='tool_details', full_name='google.devtools.remoteexecution.v1test.RequestMetadata.tool_details', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='action_id', full_name='google.devtools.remoteexecution.v1test.RequestMetadata.action_id', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='tool_invocation_id', full_name='google.devtools.remoteexecution.v1test.RequestMetadata.tool_invocation_id', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='correlated_invocations_id', full_name='google.devtools.remoteexecution.v1test.RequestMetadata.correlated_invocations_id', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4537,
+  serialized_end=4711,
+)
+
+_ACTION.fields_by_name['command_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['input_root_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['platform'].message_type = _PLATFORM
+_ACTION.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_COMMAND_ENVIRONMENTVARIABLE.containing_type = _COMMAND
+_COMMAND.fields_by_name['environment_variables'].message_type = _COMMAND_ENVIRONMENTVARIABLE
+_PLATFORM_PROPERTY.containing_type = _PLATFORM
+_PLATFORM.fields_by_name['properties'].message_type = _PLATFORM_PROPERTY
+_DIRECTORY.fields_by_name['files'].message_type = _FILENODE
+_DIRECTORY.fields_by_name['directories'].message_type = _DIRECTORYNODE
+_DIRECTORY.fields_by_name['symlinks'].message_type = _SYMLINKNODE
+_FILENODE.fields_by_name['digest'].message_type = _DIGEST
+_DIRECTORYNODE.fields_by_name['digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['output_files'].message_type = _OUTPUTFILE
+_ACTIONRESULT.fields_by_name['output_directories'].message_type = _OUTPUTDIRECTORY
+_ACTIONRESULT.fields_by_name['stdout_digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['stderr_digest'].message_type = _DIGEST
+_OUTPUTFILE.fields_by_name['digest'].message_type = _DIGEST
+_TREE.fields_by_name['root'].message_type = _DIRECTORY
+_TREE.fields_by_name['children'].message_type = _DIRECTORY
+_OUTPUTDIRECTORY.fields_by_name['digest'].message_type = _DIGEST
+_OUTPUTDIRECTORY.fields_by_name['tree_digest'].message_type = _DIGEST
+_EXECUTEREQUEST.fields_by_name['action'].message_type = _ACTION
+_LOGFILE.fields_by_name['digest'].message_type = _DIGEST
+_EXECUTERESPONSE_SERVERLOGSENTRY.fields_by_name['value'].message_type = _LOGFILE
+_EXECUTERESPONSE_SERVERLOGSENTRY.containing_type = _EXECUTERESPONSE
+_EXECUTERESPONSE.fields_by_name['result'].message_type = _ACTIONRESULT
+_EXECUTERESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_EXECUTERESPONSE.fields_by_name['server_logs'].message_type = _EXECUTERESPONSE_SERVERLOGSENTRY
+_EXECUTEOPERATIONMETADATA.fields_by_name['stage'].enum_type = _EXECUTEOPERATIONMETADATA_STAGE
+_EXECUTEOPERATIONMETADATA.fields_by_name['action_digest'].message_type = _DIGEST
+_EXECUTEOPERATIONMETADATA_STAGE.containing_type = _EXECUTEOPERATIONMETADATA
+_GETACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_result'].message_type = _ACTIONRESULT
+_FINDMISSINGBLOBSREQUEST.fields_by_name['blob_digests'].message_type = _DIGEST
+_FINDMISSINGBLOBSRESPONSE.fields_by_name['missing_blob_digests'].message_type = _DIGEST
+_UPDATEBLOBREQUEST.fields_by_name['content_digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSREQUEST.fields_by_name['requests'].message_type = _UPDATEBLOBREQUEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['blob_digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.containing_type = _BATCHUPDATEBLOBSRESPONSE
+_BATCHUPDATEBLOBSRESPONSE.fields_by_name['responses'].message_type = _BATCHUPDATEBLOBSRESPONSE_RESPONSE
+_GETTREEREQUEST.fields_by_name['root_digest'].message_type = _DIGEST
+_GETTREERESPONSE.fields_by_name['directories'].message_type = _DIRECTORY
+_REQUESTMETADATA.fields_by_name['tool_details'].message_type = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['Action'] = _ACTION
+DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
+DESCRIPTOR.message_types_by_name['Platform'] = _PLATFORM
+DESCRIPTOR.message_types_by_name['Directory'] = _DIRECTORY
+DESCRIPTOR.message_types_by_name['FileNode'] = _FILENODE
+DESCRIPTOR.message_types_by_name['DirectoryNode'] = _DIRECTORYNODE
+DESCRIPTOR.message_types_by_name['SymlinkNode'] = _SYMLINKNODE
+DESCRIPTOR.message_types_by_name['Digest'] = _DIGEST
+DESCRIPTOR.message_types_by_name['ActionResult'] = _ACTIONRESULT
+DESCRIPTOR.message_types_by_name['OutputFile'] = _OUTPUTFILE
+DESCRIPTOR.message_types_by_name['Tree'] = _TREE
+DESCRIPTOR.message_types_by_name['OutputDirectory'] = _OUTPUTDIRECTORY
+DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
+DESCRIPTOR.message_types_by_name['LogFile'] = _LOGFILE
+DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
+DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata'] = _EXECUTEOPERATIONMETADATA
+DESCRIPTOR.message_types_by_name['GetActionResultRequest'] = _GETACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateActionResultRequest'] = _UPDATEACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsRequest'] = _FINDMISSINGBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsResponse'] = _FINDMISSINGBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['UpdateBlobRequest'] = _UPDATEBLOBREQUEST
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsRequest'] = _BATCHUPDATEBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsResponse'] = _BATCHUPDATEBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['GetTreeRequest'] = _GETTREEREQUEST
+DESCRIPTOR.message_types_by_name['GetTreeResponse'] = _GETTREERESPONSE
+DESCRIPTOR.message_types_by_name['ToolDetails'] = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['RequestMetadata'] = _REQUESTMETADATA
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), dict(
+  DESCRIPTOR = _ACTION,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Action)
+  ))
+_sym_db.RegisterMessage(Action)
+
+Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
+
+  EnvironmentVariable = _reflection.GeneratedProtocolMessageType('EnvironmentVariable', (_message.Message,), dict(
+    DESCRIPTOR = _COMMAND_ENVIRONMENTVARIABLE,
+    __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+    # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Command.EnvironmentVariable)
+    ))
+  ,
+  DESCRIPTOR = _COMMAND,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Command)
+  ))
+_sym_db.RegisterMessage(Command)
+_sym_db.RegisterMessage(Command.EnvironmentVariable)
+
+Platform = _reflection.GeneratedProtocolMessageType('Platform', (_message.Message,), dict(
+
+  Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
+    DESCRIPTOR = _PLATFORM_PROPERTY,
+    __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+    # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Platform.Property)
+    ))
+  ,
+  DESCRIPTOR = _PLATFORM,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Platform)
+  ))
+_sym_db.RegisterMessage(Platform)
+_sym_db.RegisterMessage(Platform.Property)
+
+Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Message,), dict(
+  DESCRIPTOR = _DIRECTORY,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Directory)
+  ))
+_sym_db.RegisterMessage(Directory)
+
+FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), dict(
+  DESCRIPTOR = _FILENODE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.FileNode)
+  ))
+_sym_db.RegisterMessage(FileNode)
+
+DirectoryNode = _reflection.GeneratedProtocolMessageType('DirectoryNode', (_message.Message,), dict(
+  DESCRIPTOR = _DIRECTORYNODE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.DirectoryNode)
+  ))
+_sym_db.RegisterMessage(DirectoryNode)
+
+SymlinkNode = _reflection.GeneratedProtocolMessageType('SymlinkNode', (_message.Message,), dict(
+  DESCRIPTOR = _SYMLINKNODE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.SymlinkNode)
+  ))
+_sym_db.RegisterMessage(SymlinkNode)
+
+Digest = _reflection.GeneratedProtocolMessageType('Digest', (_message.Message,), dict(
+  DESCRIPTOR = _DIGEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Digest)
+  ))
+_sym_db.RegisterMessage(Digest)
+
+ActionResult = _reflection.GeneratedProtocolMessageType('ActionResult', (_message.Message,), dict(
+  DESCRIPTOR = _ACTIONRESULT,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ActionResult)
+  ))
+_sym_db.RegisterMessage(ActionResult)
+
+OutputFile = _reflection.GeneratedProtocolMessageType('OutputFile', (_message.Message,), dict(
+  DESCRIPTOR = _OUTPUTFILE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.OutputFile)
+  ))
+_sym_db.RegisterMessage(OutputFile)
+
+Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), dict(
+  DESCRIPTOR = _TREE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.Tree)
+  ))
+_sym_db.RegisterMessage(Tree)
+
+OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_message.Message,), dict(
+  DESCRIPTOR = _OUTPUTDIRECTORY,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.OutputDirectory)
+  ))
+_sym_db.RegisterMessage(OutputDirectory)
+
+ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), dict(
+  DESCRIPTOR = _EXECUTEREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ExecuteRequest)
+  ))
+_sym_db.RegisterMessage(ExecuteRequest)
+
+LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), dict(
+  DESCRIPTOR = _LOGFILE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.LogFile)
+  ))
+_sym_db.RegisterMessage(LogFile)
+
+ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict(
+
+  ServerLogsEntry = _reflection.GeneratedProtocolMessageType('ServerLogsEntry', (_message.Message,), dict(
+    DESCRIPTOR = _EXECUTERESPONSE_SERVERLOGSENTRY,
+    __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+    # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ExecuteResponse.ServerLogsEntry)
+    ))
+  ,
+  DESCRIPTOR = _EXECUTERESPONSE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ExecuteResponse)
+  ))
+_sym_db.RegisterMessage(ExecuteResponse)
+_sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry)
+
+ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), dict(
+  DESCRIPTOR = _EXECUTEOPERATIONMETADATA,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ExecuteOperationMetadata)
+  ))
+_sym_db.RegisterMessage(ExecuteOperationMetadata)
+
+GetActionResultRequest = _reflection.GeneratedProtocolMessageType('GetActionResultRequest', (_message.Message,), dict(
+  DESCRIPTOR = _GETACTIONRESULTREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.GetActionResultRequest)
+  ))
+_sym_db.RegisterMessage(GetActionResultRequest)
+
+UpdateActionResultRequest = _reflection.GeneratedProtocolMessageType('UpdateActionResultRequest', (_message.Message,), dict(
+  DESCRIPTOR = _UPDATEACTIONRESULTREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.UpdateActionResultRequest)
+  ))
+_sym_db.RegisterMessage(UpdateActionResultRequest)
+
+FindMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FindMissingBlobsRequest', (_message.Message,), dict(
+  DESCRIPTOR = _FINDMISSINGBLOBSREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.FindMissingBlobsRequest)
+  ))
+_sym_db.RegisterMessage(FindMissingBlobsRequest)
+
+FindMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FindMissingBlobsResponse', (_message.Message,), dict(
+  DESCRIPTOR = _FINDMISSINGBLOBSRESPONSE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.FindMissingBlobsResponse)
+  ))
+_sym_db.RegisterMessage(FindMissingBlobsResponse)
+
+UpdateBlobRequest = _reflection.GeneratedProtocolMessageType('UpdateBlobRequest', (_message.Message,), dict(
+  DESCRIPTOR = _UPDATEBLOBREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.UpdateBlobRequest)
+  ))
+_sym_db.RegisterMessage(UpdateBlobRequest)
+
+BatchUpdateBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsRequest', (_message.Message,), dict(
+  DESCRIPTOR = _BATCHUPDATEBLOBSREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest)
+  ))
+_sym_db.RegisterMessage(BatchUpdateBlobsRequest)
+
+BatchUpdateBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsResponse', (_message.Message,), dict(
+
+  Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
+    DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE_RESPONSE,
+    __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+    # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response)
+    ))
+  ,
+  DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse)
+  ))
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse)
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse.Response)
+
+GetTreeRequest = _reflection.GeneratedProtocolMessageType('GetTreeRequest', (_message.Message,), dict(
+  DESCRIPTOR = _GETTREEREQUEST,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.GetTreeRequest)
+  ))
+_sym_db.RegisterMessage(GetTreeRequest)
+
+GetTreeResponse = _reflection.GeneratedProtocolMessageType('GetTreeResponse', (_message.Message,), dict(
+  DESCRIPTOR = _GETTREERESPONSE,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.GetTreeResponse)
+  ))
+_sym_db.RegisterMessage(GetTreeResponse)
+
+ToolDetails = _reflection.GeneratedProtocolMessageType('ToolDetails', (_message.Message,), dict(
+  DESCRIPTOR = _TOOLDETAILS,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.ToolDetails)
+  ))
+_sym_db.RegisterMessage(ToolDetails)
+
+RequestMetadata = _reflection.GeneratedProtocolMessageType('RequestMetadata', (_message.Message,), dict(
+  DESCRIPTOR = _REQUESTMETADATA,
+  __module__ = 'google.devtools.remoteexecution.v1test.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:google.devtools.remoteexecution.v1test.RequestMetadata)
+  ))
+_sym_db.RegisterMessage(RequestMetadata)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n*com.google.devtools.remoteexecution.v1testB\024RemoteExecutionProtoP\001ZUgoogle.golang.org/genproto/googleapis/devtools/remoteexecution/v1test;remoteexecution\242\002\003REX\252\002\035Google.RemoteExecution.V1Test'))
+_EXECUTERESPONSE_SERVERLOGSENTRY.has_options = True
+_EXECUTERESPONSE_SERVERLOGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+
+_EXECUTION = _descriptor.ServiceDescriptor(
+  name='Execution',
+  full_name='google.devtools.remoteexecution.v1test.Execution',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=4714,
+  serialized_end=4879,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='Execute',
+    full_name='google.devtools.remoteexecution.v1test.Execution.Execute',
+    index=0,
+    containing_service=None,
+    input_type=_EXECUTEREQUEST,
+    output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002/\"*/v1test/{instance_name=**}/actions:execute:\001*')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_EXECUTION)
+
+DESCRIPTOR.services_by_name['Execution'] = _EXECUTION
+
+
+_ACTIONCACHE = _descriptor.ServiceDescriptor(
+  name='ActionCache',
+  full_name='google.devtools.remoteexecution.v1test.ActionCache',
+  file=DESCRIPTOR,
+  index=1,
+  options=None,
+  serialized_start=4882,
+  serialized_end=5388,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='GetActionResult',
+    full_name='google.devtools.remoteexecution.v1test.ActionCache.GetActionResult',
+    index=0,
+    containing_service=None,
+    input_type=_GETACTIONRESULTREQUEST,
+    output_type=_ACTIONRESULT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002Z\022X/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='UpdateActionResult',
+    full_name='google.devtools.remoteexecution.v1test.ActionCache.UpdateActionResult',
+    index=1,
+    containing_service=None,
+    input_type=_UPDATEACTIONRESULTREQUEST,
+    output_type=_ACTIONRESULT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002i\032X/v1test/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_ACTIONCACHE)
+
+DESCRIPTOR.services_by_name['ActionCache'] = _ACTIONCACHE
+
+
+_CONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
+  name='ContentAddressableStorage',
+  full_name='google.devtools.remoteexecution.v1test.ContentAddressableStorage',
+  file=DESCRIPTOR,
+  index=2,
+  options=None,
+  serialized_start=5391,
+  serialized_end=6055,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='FindMissingBlobs',
+    full_name='google.devtools.remoteexecution.v1test.ContentAddressableStorage.FindMissingBlobs',
+    index=0,
+    containing_service=None,
+    input_type=_FINDMISSINGBLOBSREQUEST,
+    output_type=_FINDMISSINGBLOBSRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\",/v1test/{instance_name=**}/blobs:findMissing:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='BatchUpdateBlobs',
+    full_name='google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs',
+    index=1,
+    containing_service=None,
+    input_type=_BATCHUPDATEBLOBSREQUEST,
+    output_type=_BATCHUPDATEBLOBSRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\",/v1test/{instance_name=**}/blobs:batchUpdate:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='GetTree',
+    full_name='google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree',
+    index=2,
+    containing_service=None,
+    input_type=_GETTREEREQUEST,
+    output_type=_GETTREERESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002V\022T/v1test/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_CONTENTADDRESSABLESTORAGE)
+
+DESCRIPTOR.services_by_name['ContentAddressableStorage'] = _CONTENTADDRESSABLESTORAGE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/google/devtools/remoteexecution/v1test/remote_execution_pb2_grpc.py b/google/devtools/remoteexecution/v1test/remote_execution_pb2_grpc.py
new file mode 100644
index 0000000..cfe2be9
--- /dev/null
+++ b/google/devtools/remoteexecution/v1test/remote_execution_pb2_grpc.py
@@ -0,0 +1,472 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from google.devtools.remoteexecution.v1test import remote_execution_pb2 as google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2
+from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+
+
+class ExecutionStub(object):
+  """The Remote Execution API is used to execute an
+  [Action][google.devtools.remoteexecution.v1test.Action] on the remote
+  workers.
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.Execute = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.Execution/Execute',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
+        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+        )
+
+
+class ExecutionServicer(object):
+  """The Remote Execution API is used to execute an
+  [Action][google.devtools.remoteexecution.v1test.Action] on the remote
+  workers.
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def Execute(self, request, context):
+    """Execute an action remotely.
+
+    In order to execute an action, the client must first upload all of the
+    inputs, as well as the
+    [Command][google.devtools.remoteexecution.v1test.Command] to run, into the
+    [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage].
+    It then calls `Execute` with an
+    [Action][google.devtools.remoteexecution.v1test.Action] referring to them.
+    The server will run the action and eventually return the result.
+
+    The input `Action`'s fields MUST meet the various canonicalization
+    requirements specified in the documentation for their types so that it has
+    the same digest as other logically equivalent `Action`s. The server MAY
+    enforce the requirements and return errors if a non-canonical input is
+    received. It MAY also proceed without verifying some or all of the
+    requirements, such as for performance reasons. If the server does not
+    verify the requirement, then it will treat the `Action` as distinct from
+    another logically equivalent action if they hash differently.
+
+    Returns a [google.longrunning.Operation][google.longrunning.Operation]
+    describing the resulting execution, with eventual `response`
+    [ExecuteResponse][google.devtools.remoteexecution.v1test.ExecuteResponse].
+    The `metadata` on the operation is of type
+    [ExecuteOperationMetadata][google.devtools.remoteexecution.v1test.ExecuteOperationMetadata].
+
+    To query the operation, you can use the
+    [Operations API][google.longrunning.Operations.GetOperation]. If you wish
+    to allow the server to stream operations updates, rather than requiring
+    client polling, you can use the
+    [Watcher API][google.watcher.v1.Watcher.Watch] with the Operation's `name`
+    as the `target`.
+
+    When using the Watcher API, the initial `data` will be the `Operation` at
+    the time of the request. Updates will be provided periodically by the
+    server until the `Operation` completes, at which point the response message
+    will (assuming no error) be at `data.response`.
+
+    The server NEED NOT implement other methods or functionality of the
+    Operation and Watcher APIs.
+
+    Errors discovered during creation of the `Operation` will be reported
+    as gRPC Status errors, while errors that occurred while running the
+    action will be reported in the `status` field of the `ExecuteResponse`. The
+    server MUST NOT set the `error` field of the `Operation` proto.
+    The possible errors include:
+    * `INVALID_ARGUMENT`: One or more arguments are invalid.
+    * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+    action requested, such as a missing input or command or no worker being
+    available. The client may be able to fix the errors and retry.
+    * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+    the action.
+    * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+    occupied (and the server does not support a queue), the action could not
+    be started. The client should retry.
+    * `INTERNAL`: An internal error occurred in the execution engine or the
+    worker.
+    * `DEADLINE_EXCEEDED`: The execution timed out.
+
+    In the case of a missing input or command, the server SHOULD additionally
+    send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+    where, for each requested blob not present in the CAS, there is a
+    `Violation` with a `type` of `MISSING` and a `subject` of
+    `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_ExecutionServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'Execute': grpc.unary_unary_rpc_method_handler(
+          servicer.Execute,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ExecuteRequest.FromString,
+          response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'google.devtools.remoteexecution.v1test.Execution', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
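+
+# Illustrative usage sketch (editor's note, not generated output): a minimal,
+# hypothetical client-side example of driving the Execution service through
+# the generated stub above. The endpoint and digest values are placeholders,
+# and the Action's Command and input tree are assumed to have already been
+# uploaded to the ContentAddressableStorage, as the Execute docstring requires.
+#
+#   import grpc
+#   from google.devtools.remoteexecution.v1test import remote_execution_pb2
+#   from google.devtools.remoteexecution.v1test import remote_execution_pb2_grpc
+#
+#   channel = grpc.insecure_channel('localhost:50051')  # hypothetical endpoint
+#   stub = remote_execution_pb2_grpc.ExecutionStub(channel)
+#   action = remote_execution_pb2.Action(
+#       command_digest=remote_execution_pb2.Digest(hash='...', size_bytes=0),
+#       input_root_digest=remote_execution_pb2.Digest(hash='...', size_bytes=0))
+#   request = remote_execution_pb2.ExecuteRequest(
+#       instance_name='', action=action, skip_cache_lookup=False)
+#   operation = stub.Execute(request)  # returns a google.longrunning Operation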
+
+
+class ActionCacheStub(object):
+  """The action cache API is used to query whether a given action has already been
+  performed and, if so, retrieve its result. Unlike the
+  [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage],
+  which addresses blobs by their own content, the action cache addresses the
+  [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] by a
+  digest of the encoded [Action][google.devtools.remoteexecution.v1test.Action]
+  which produced them.
+
+  The lifetime of entries in the action cache is implementation-specific, but
+  the server SHOULD assume that more recently used entries are more likely to
+  be used again. Additionally, action cache implementations SHOULD ensure that
+  any blobs referenced in the
+  [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]
+  are still valid when returning a result.
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.GetActionResult = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.ActionCache/GetActionResult',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
+        response_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ActionResult.FromString,
+        )
+    self.UpdateActionResult = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.ActionCache/UpdateActionResult',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
+        response_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ActionResult.FromString,
+        )
+
+
+class ActionCacheServicer(object):
+  """The action cache API is used to query whether a given action has already been
+  performed and, if so, retrieve its result. Unlike the
+  [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage],
+  which addresses blobs by their own content, the action cache addresses the
+  [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] by a
+  digest of the encoded [Action][google.devtools.remoteexecution.v1test.Action]
+  which produced them.
+
+  The lifetime of entries in the action cache is implementation-specific, but
+  the server SHOULD assume that more recently used entries are more likely to
+  be used again. Additionally, action cache implementations SHOULD ensure that
+  any blobs referenced in the
+  [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]
+  are still valid when returning a result.
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def GetActionResult(self, request, context):
+    """Retrieve a cached execution result.
+
+    Errors:
+    * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def UpdateActionResult(self, request, context):
+    """Upload a new execution result.
+
+    This method is intended for servers which implement the distributed cache
+    independently of the
+    [Execution][google.devtools.remoteexecution.v1test.Execution] API. As a
+    result, it is OPTIONAL for servers to implement.
+
+    Errors:
+    * `NOT_IMPLEMENTED`: This method is not supported by the server.
+    * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+    entry to the cache.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_ActionCacheServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'GetActionResult': grpc.unary_unary_rpc_method_handler(
+          servicer.GetActionResult,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetActionResultRequest.FromString,
+          response_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ActionResult.SerializeToString,
+      ),
+      'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
+          servicer.UpdateActionResult,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
+          response_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.ActionResult.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'google.devtools.remoteexecution.v1test.ActionCache', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
+
+
+class ContentAddressableStorageStub(object):
+  """The CAS (content-addressable storage) is used to store the inputs to and
+  outputs from the execution service. Each piece of content is addressed by the
+  digest of its binary data.
+
+  Most of the binary data stored in the CAS is opaque to the execution engine,
+  and is only used as a communication medium. In order to build an
+  [Action][google.devtools.remoteexecution.v1test.Action],
+  however, the client will need to also upload the
+  [Command][google.devtools.remoteexecution.v1test.Command] and input root
+  [Directory][google.devtools.remoteexecution.v1test.Directory] for the Action.
+  The Command and Directory messages must be marshalled to wire format and then
+  uploaded under the hash as with any other piece of content. In practice, the
+  input root directory is likely to refer to other Directories in its
+  hierarchy, which must also each be uploaded on their own.
+
+  For small file uploads the client should group them together and call
+  [BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]
+  on chunks of no more than 10 MiB. For large uploads, the client must use the
+  [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+  `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+  where `instance_name` is as described in the next paragraph, `uuid` is a
+  version 4 UUID generated by the client, and `hash` and `size` are the
+  [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob. The
+  `uuid` is used only to avoid collisions when multiple clients try to upload
+  the same file (or the same client tries to upload the file multiple times at
+  once on different threads), so the client MAY reuse the `uuid` for uploading
+  different blobs. The `resource_name` may optionally have a trailing filename
+  (or other metadata) for a client to use if it is storing URLs, as in
+  `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+  after the `size` is ignored.
+
+  A single server MAY support multiple instances of the execution system, each
+  with their own workers, storage, cache, etc. The exact relationship between
+  instances is up to the server. If the server does, then the `instance_name`
+  is an identifier, possibly containing multiple path segments, used to
+  distinguish between the various instances on the server, in a manner defined
+  by the server. For servers which do not support multiple instances, the
+  `instance_name` is the empty path and the leading slash is omitted, so that
+  the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+
+  When attempting an upload, if another client has already completed the upload
+  (which may occur in the middle of a single upload if another client uploads
+  the same blob concurrently), the request will terminate immediately with
+  a response whose `committed_size` is the full size of the uploaded file
+  (regardless of how much data was transmitted by the client). If the client
+  completes the upload but the
+  [Digest][google.devtools.remoteexecution.v1test.Digest] does not match, an
+  `INVALID_ARGUMENT` error will be returned. In either case, the client should
+  not attempt to retry the upload.
+
+  For downloading blobs, the client must use the
+  [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+  a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+  `instance_name` is the instance name (see above), and `hash` and `size` are
+  the [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob.
+
+  The lifetime of entries in the CAS is implementation specific, but it SHOULD
+  be long enough to allow for newly-added and recently looked-up entries to be
+  used in subsequent calls (e.g. to
+  [Execute][google.devtools.remoteexecution.v1test.Execution.Execute]).
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.FindMissingBlobs = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.ContentAddressableStorage/FindMissingBlobs',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
+        response_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
+        )
+    self.BatchUpdateBlobs = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.ContentAddressableStorage/BatchUpdateBlobs',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
+        response_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
+        )
+    self.GetTree = channel.unary_unary(
+        '/google.devtools.remoteexecution.v1test.ContentAddressableStorage/GetTree',
+        request_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
+        response_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetTreeResponse.FromString,
+        )
+
+
+class ContentAddressableStorageServicer(object):
+  """The CAS (content-addressable storage) is used to store the inputs to and
+  outputs from the execution service. Each piece of content is addressed by the
+  digest of its binary data.
+
+  Most of the binary data stored in the CAS is opaque to the execution engine,
+  and is only used as a communication medium. In order to build an
+  [Action][google.devtools.remoteexecution.v1test.Action],
+  however, the client will need to also upload the
+  [Command][google.devtools.remoteexecution.v1test.Command] and input root
+  [Directory][google.devtools.remoteexecution.v1test.Directory] for the Action.
+  The Command and Directory messages must be marshalled to wire format and then
+  uploaded under the hash as with any other piece of content. In practice, the
+  input root directory is likely to refer to other Directories in its
+  hierarchy, which must also each be uploaded on their own.
+
+  For small file uploads the client should group them together and call
+  [BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]
+  on chunks of no more than 10 MiB. For large uploads, the client must use the
+  [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+  `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+  where `instance_name` is as described in the next paragraph, `uuid` is a
+  version 4 UUID generated by the client, and `hash` and `size` are the
+  [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob. The
+  `uuid` is used only to avoid collisions when multiple clients try to upload
+  the same file (or the same client tries to upload the file multiple times at
+  once on different threads), so the client MAY reuse the `uuid` for uploading
+  different blobs. The `resource_name` may optionally have a trailing filename
+  (or other metadata) for a client to use if it is storing URLs, as in
+  `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+  after the `size` is ignored.
+
+  A single server MAY support multiple instances of the execution system, each
+  with their own workers, storage, cache, etc. The exact relationship between
+  instances is up to the server. If the server does, then the `instance_name`
+  is an identifier, possibly containing multiple path segments, used to
+  distinguish between the various instances on the server, in a manner defined
+  by the server. For servers which do not support multiple instances, the
+  `instance_name` is the empty path and the leading slash is omitted, so that
+  the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+
+  When attempting an upload, if another client has already completed the upload
+  (which may occur in the middle of a single upload if another client uploads
+  the same blob concurrently), the request will terminate immediately with
+  a response whose `committed_size` is the full size of the uploaded file
+  (regardless of how much data was transmitted by the client). If the client
+  completes the upload but the
+  [Digest][google.devtools.remoteexecution.v1test.Digest] does not match, an
+  `INVALID_ARGUMENT` error will be returned. In either case, the client should
+  not attempt to retry the upload.
+
+  For downloading blobs, the client must use the
+  [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+  a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+  `instance_name` is the instance name (see above), and `hash` and `size` are
+  the [Digest][google.devtools.remoteexecution.v1test.Digest] of the blob.
+
+  The lifetime of entries in the CAS is implementation specific, but it SHOULD
+  be long enough to allow for newly-added and recently looked-up entries to be
+  used in subsequent calls (e.g. to
+  [Execute][google.devtools.remoteexecution.v1test.Execution.Execute]).
+
+  As with other services in the Remote Execution API, any call may return an
+  error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+  information about when the client should retry the request; clients SHOULD
+  respect the information provided.
+  """
+
+  def FindMissingBlobs(self, request, context):
+    """Determine if blobs are present in the CAS.
+
+    Clients can use this API before uploading blobs to determine which ones are
+    already present in the CAS and do not need to be uploaded again.
+
+    There are no method-specific errors.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def BatchUpdateBlobs(self, request, context):
+    """Upload many blobs at once.
+
+    The client MUST NOT upload blobs with a combined total size of more than 10
+    MiB using this API. Such requests should either be split into smaller
+    chunks or uploaded using the
+    [ByteStream API][google.bytestream.ByteStream], as appropriate.
+
+    This request is equivalent to calling [UpdateBlob][] on each individual
+    blob, in parallel. The requests may succeed or fail independently.
+
+    Errors:
+    * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of
+    data.
+
+    Individual requests may return the following errors, additionally:
+    * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+    * `INVALID_ARGUMENT`: The
+    [Digest][google.devtools.remoteexecution.v1test.Digest] does not match the
+    provided data.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def GetTree(self, request, context):
+    """Fetch the entire directory tree rooted at a node.
+
+    This request must be targeted at a
+    [Directory][google.devtools.remoteexecution.v1test.Directory] stored in the
+    [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]
+    (CAS). The server will enumerate the `Directory` tree recursively and
+    return every node descended from the root.
+    The exact traversal order is unspecified and, unless retrieving subsequent
+    pages from an earlier request, is not guaranteed to be stable across
+    multiple invocations of `GetTree`.
+
+    If part of the tree is missing from the CAS, the server will return the
+    portion present and omit the rest.
+
+    * `NOT_FOUND`: The requested tree root is not present in the CAS.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_ContentAddressableStorageServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
+          servicer.FindMissingBlobs,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
+          response_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
+      ),
+      'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
+          servicer.BatchUpdateBlobs,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
+          response_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
+      ),
+      'GetTree': grpc.unary_unary_rpc_method_handler(
+          servicer.GetTree,
+          request_deserializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetTreeRequest.FromString,
+          response_serializer=google_dot_devtools_dot_remoteexecution_dot_v1test_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'google.devtools.remoteexecution.v1test.ContentAddressableStorage', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
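
As a rough illustration of the client-side flow described in the docstrings above, the sketch below asks the CAS which blobs are missing and builds the ByteStream resource name for one of them. It is only a sketch: the import path of the generated modules, the endpoint, the empty `instance_name` and the use of SHA-256 are assumptions, and the actual upload would go through the ByteStream `Write` method, which is not part of this file.

import hashlib
import uuid

import grpc

# Assumed import path; it depends on where the generated
# remote_execution_pb2(_grpc).py modules live in this tree.
from google.devtools.remoteexecution.v1test import remote_execution_pb2
from google.devtools.remoteexecution.v1test import remote_execution_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')  # assumed endpoint
cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

data = b'example blob'
digest = remote_execution_pb2.Digest(hash=hashlib.sha256(data).hexdigest(),
                                     size_bytes=len(data))

# Ask the server which blobs still need to be uploaded.
response = cas.FindMissingBlobs(
    remote_execution_pb2.FindMissingBlobsRequest(instance_name='',
                                                 blob_digests=[digest]))

if digest in response.missing_blob_digests:
    # A ByteStream Write for this blob would use this resource name; the
    # leading "{instance_name}/" is omitted because instance_name is empty.
    resource_name = 'uploads/{}/blobs/{}/{}'.format(
        uuid.uuid4(), digest.hash, digest.size_bytes)
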
diff --git a/google/longrunning/__init__.py b/google/longrunning/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/longrunning/__init__.py
diff --git a/google/longrunning/operations.proto b/google/longrunning/operations.proto
new file mode 100644
index 0000000..76fef29
--- /dev/null
+++ b/google/longrunning/operations.proto
@@ -0,0 +1,160 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.longrunning;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.LongRunning";
+option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.longrunning";
+option php_namespace = "Google\\LongRunning";
+
+
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes a long time to complete, it can be designed
+// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+// interface to receive the real response asynchronously by polling the
+// operation resource, or pass the operation resource to another API (such as
+// Google Cloud Pub/Sub API) to receive the response.  Any API service that
+// returns long-running operations should implement the `Operations` interface
+// so developers can have a consistent client experience.
+service Operations {
+  // Lists operations that match the specified filter in the request. If the
+  // server doesn't support this method, it returns `UNIMPLEMENTED`.
+  //
+  // NOTE: the `name` binding below allows API services to override the binding
+  // to use different resource name schemes, such as `users/*/operations`.
+  rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
+    option (google.api.http) = { get: "/v1/{name=operations}" };
+  }
+
+  // Gets the latest state of a long-running operation.  Clients can use this
+  // method to poll the operation result at intervals as recommended by the API
+  // service.
+  rpc GetOperation(GetOperationRequest) returns (Operation) {
+    option (google.api.http) = { get: "/v1/{name=operations/**}" };
+  }
+
+  // Deletes a long-running operation. This method indicates that the client is
+  // no longer interested in the operation result. It does not cancel the
+  // operation. If the server doesn't support this method, it returns
+  // `google.rpc.Code.UNIMPLEMENTED`.
+  rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { delete: "/v1/{name=operations/**}" };
+  }
+
+  // Starts asynchronous cancellation on a long-running operation.  The server
+  // makes a best effort to cancel the operation, but success is not
+  // guaranteed.  If the server doesn't support this method, it returns
+  // `google.rpc.Code.UNIMPLEMENTED`.  Clients can use
+  // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+  // other methods to check whether the cancellation succeeded or whether the
+  // operation completed despite cancellation. On successful cancellation,
+  // the operation is not deleted; instead, it becomes an operation with
+  // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+  // corresponding to `Code.CANCELLED`.
+  rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
+  }
+}
+
+// This resource represents a long-running operation that is the result of a
+// network API call.
+message Operation {
+  // The server-assigned name, which is only unique within the same service that
+  // originally returns it. If you use the default HTTP mapping, the
+  // `name` should have the format of `operations/some/unique/name`.
+  string name = 1;
+
+  // Service-specific metadata associated with the operation.  It typically
+  // contains progress information and common metadata such as create time.
+  // Some services might not provide such metadata.  Any method that returns a
+  // long-running operation should document the metadata type, if any.
+  google.protobuf.Any metadata = 2;
+
+  // If the value is `false`, it means the operation is still in progress.
+  // If true, the operation is completed, and either `error` or `response` is
+  // available.
+  bool done = 3;
+
+  // The operation result, which can be either an `error` or a valid `response`.
+  // If `done` == `false`, neither `error` nor `response` is set.
+  // If `done` == `true`, exactly one of `error` or `response` is set.
+  oneof result {
+    // The error result of the operation in case of failure or cancellation.
+    google.rpc.Status error = 4;
+
+    // The normal response of the operation in case of success.  If the original
+    // method returns no data on success, such as `Delete`, the response is
+    // `google.protobuf.Empty`.  If the original method is standard
+    // `Get`/`Create`/`Update`, the response should be the resource.  For other
+    // methods, the response should have the type `XxxResponse`, where `Xxx`
+    // is the original method name.  For example, if the original method name
+    // is `TakeSnapshot()`, the inferred response type is
+    // `TakeSnapshotResponse`.
+    google.protobuf.Any response = 5;
+  }
+}
+
+// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
+message GetOperationRequest {
+  // The name of the operation resource.
+  string name = 1;
+}
+
+// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsRequest {
+  // The name of the operation collection.
+  string name = 4;
+
+  // The standard list filter.
+  string filter = 1;
+
+  // The standard list page size.
+  int32 page_size = 2;
+
+  // The standard list page token.
+  string page_token = 3;
+}
+
+// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsResponse {
+  // A list of operations that matches the specified filter in the request.
+  repeated Operation operations = 1;
+
+  // The standard List next-page token.
+  string next_page_token = 2;
+}
+
+// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
+message CancelOperationRequest {
+  // The name of the operation resource to be cancelled.
+  string name = 1;
+}
+
+// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
+message DeleteOperationRequest {
+  // The name of the operation resource to be deleted.
+  string name = 1;
+}
+
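
For reference, a small sketch of how a client might interpret the `done`/`result` semantics defined above. The `ExecuteResponse` type used to unpack the `response` Any is an assumption here; the concrete response type depends on which API returned the operation.

from google.devtools.remoteexecution.v1test import remote_execution_pb2  # assumed path


def handle_operation(op):
    """Interpret a google.longrunning.Operation fetched from the server."""
    if not op.done:
        return None  # still in progress; poll again later

    # When done is True, exactly one of 'error' or 'response' is set.
    if op.WhichOneof('result') == 'error':
        raise RuntimeError('operation {} failed with code {}: {}'.format(
            op.name, op.error.code, op.error.message))

    result = remote_execution_pb2.ExecuteResponse()
    op.response.Unpack(result)  # 'response' is a google.protobuf.Any
    return result
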
diff --git a/google/longrunning/operations_pb2.py b/google/longrunning/operations_pb2.py
new file mode 100644
index 0000000..a938874
--- /dev/null
+++ b/google/longrunning/operations_pb2.py
@@ -0,0 +1,391 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/longrunning/operations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/longrunning/operations.proto',
+  package='google.longrunning',
+  syntax='proto3',
+  serialized_pb=_b('\n#google/longrunning/operations.proto\x12\x12google.longrunning\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t2\x8c\x04\n\nOperations\x12\x86\x01\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/v1/{name=operations}\x12x\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/v1/{name=operations/**}\x12w\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\" \x82\xd3\xe4\x93\x02\x1a*\x18/v1/{name=operations/**}\x12\x81\x01\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"*\x82\xd3\xe4\x93\x02$\"\x1f/v1/{name=operations/**}:cancel:\x01*B\x94\x01\n\x16\x63om.google.longrunningB\x0fOperationsProtoP\x01Z=google.golang.org/genproto/googleapis/longrunning;longrunning\xaa\x02\x12Google.LongRunning\xca\x02\x12Google\\LongRunningb\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+
+
+
+_OPERATION = _descriptor.Descriptor(
+  name='Operation',
+  full_name='google.longrunning.Operation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.longrunning.Operation.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='metadata', full_name='google.longrunning.Operation.metadata', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='done', full_name='google.longrunning.Operation.done', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='error', full_name='google.longrunning.Operation.error', index=3,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='response', full_name='google.longrunning.Operation.response', index=4,
+      number=5, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='result', full_name='google.longrunning.Operation.result',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=171,
+  serialized_end=339,
+)
+
+
+_GETOPERATIONREQUEST = _descriptor.Descriptor(
+  name='GetOperationRequest',
+  full_name='google.longrunning.GetOperationRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.longrunning.GetOperationRequest.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=341,
+  serialized_end=376,
+)
+
+
+_LISTOPERATIONSREQUEST = _descriptor.Descriptor(
+  name='ListOperationsRequest',
+  full_name='google.longrunning.ListOperationsRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.longrunning.ListOperationsRequest.name', index=0,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='filter', full_name='google.longrunning.ListOperationsRequest.filter', index=1,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page_size', full_name='google.longrunning.ListOperationsRequest.page_size', index=2,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page_token', full_name='google.longrunning.ListOperationsRequest.page_token', index=3,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=378,
+  serialized_end=470,
+)
+
+
+_LISTOPERATIONSRESPONSE = _descriptor.Descriptor(
+  name='ListOperationsResponse',
+  full_name='google.longrunning.ListOperationsResponse',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='operations', full_name='google.longrunning.ListOperationsResponse.operations', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='next_page_token', full_name='google.longrunning.ListOperationsResponse.next_page_token', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=472,
+  serialized_end=572,
+)
+
+
+_CANCELOPERATIONREQUEST = _descriptor.Descriptor(
+  name='CancelOperationRequest',
+  full_name='google.longrunning.CancelOperationRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.longrunning.CancelOperationRequest.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=574,
+  serialized_end=612,
+)
+
+
+_DELETEOPERATIONREQUEST = _descriptor.Descriptor(
+  name='DeleteOperationRequest',
+  full_name='google.longrunning.DeleteOperationRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='google.longrunning.DeleteOperationRequest.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=614,
+  serialized_end=652,
+)
+
+_OPERATION.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_OPERATION.fields_by_name['response'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.oneofs_by_name['result'].fields.append(
+  _OPERATION.fields_by_name['error'])
+_OPERATION.fields_by_name['error'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_OPERATION.oneofs_by_name['result'].fields.append(
+  _OPERATION.fields_by_name['response'])
+_OPERATION.fields_by_name['response'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_LISTOPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
+DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
+DESCRIPTOR.message_types_by_name['GetOperationRequest'] = _GETOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsRequest'] = _LISTOPERATIONSREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsResponse'] = _LISTOPERATIONSRESPONSE
+DESCRIPTOR.message_types_by_name['CancelOperationRequest'] = _CANCELOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['DeleteOperationRequest'] = _DELETEOPERATIONREQUEST
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict(
+  DESCRIPTOR = _OPERATION,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.Operation)
+  ))
+_sym_db.RegisterMessage(Operation)
+
+GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), dict(
+  DESCRIPTOR = _GETOPERATIONREQUEST,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest)
+  ))
+_sym_db.RegisterMessage(GetOperationRequest)
+
+ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), dict(
+  DESCRIPTOR = _LISTOPERATIONSREQUEST,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest)
+  ))
+_sym_db.RegisterMessage(ListOperationsRequest)
+
+ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), dict(
+  DESCRIPTOR = _LISTOPERATIONSRESPONSE,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse)
+  ))
+_sym_db.RegisterMessage(ListOperationsResponse)
+
+CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), dict(
+  DESCRIPTOR = _CANCELOPERATIONREQUEST,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest)
+  ))
+_sym_db.RegisterMessage(CancelOperationRequest)
+
+DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), dict(
+  DESCRIPTOR = _DELETEOPERATIONREQUEST,
+  __module__ = 'google.longrunning.operations_pb2'
+  # @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest)
+  ))
+_sym_db.RegisterMessage(DeleteOperationRequest)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.longrunningB\017OperationsProtoP\001Z=google.golang.org/genproto/googleapis/longrunning;longrunning\252\002\022Google.LongRunning\312\002\022Google\\LongRunning'))
+
+_OPERATIONS = _descriptor.ServiceDescriptor(
+  name='Operations',
+  full_name='google.longrunning.Operations',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=655,
+  serialized_end=1179,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='ListOperations',
+    full_name='google.longrunning.Operations.ListOperations',
+    index=0,
+    containing_service=None,
+    input_type=_LISTOPERATIONSREQUEST,
+    output_type=_LISTOPERATIONSRESPONSE,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/v1/{name=operations}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='GetOperation',
+    full_name='google.longrunning.Operations.GetOperation',
+    index=1,
+    containing_service=None,
+    input_type=_GETOPERATIONREQUEST,
+    output_type=_OPERATION,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\022\030/v1/{name=operations/**}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='DeleteOperation',
+    full_name='google.longrunning.Operations.DeleteOperation',
+    index=2,
+    containing_service=None,
+    input_type=_DELETEOPERATIONREQUEST,
+    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032*\030/v1/{name=operations/**}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='CancelOperation',
+    full_name='google.longrunning.Operations.CancelOperation',
+    index=3,
+    containing_service=None,
+    input_type=_CANCELOPERATIONREQUEST,
+    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002$\"\037/v1/{name=operations/**}:cancel:\001*')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_OPERATIONS)
+
+DESCRIPTOR.services_by_name['Operations'] = _OPERATIONS
+
+# @@protoc_insertion_point(module_scope)
diff --git a/google/longrunning/operations_pb2_grpc.py b/google/longrunning/operations_pb2_grpc.py
new file mode 100644
index 0000000..ecec180
--- /dev/null
+++ b/google/longrunning/operations_pb2_grpc.py
@@ -0,0 +1,132 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+class OperationsStub(object):
+  """Manages long-running operations with an API service.
+
+  When an API method normally takes a long time to complete, it can be designed
+  to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+  interface to receive the real response asynchronously by polling the
+  operation resource, or pass the operation resource to another API (such as
+  Google Cloud Pub/Sub API) to receive the response.  Any API service that
+  returns long-running operations should implement the `Operations` interface
+  so developers can have a consistent client experience.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.ListOperations = channel.unary_unary(
+        '/google.longrunning.Operations/ListOperations',
+        request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
+        response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
+        )
+    self.GetOperation = channel.unary_unary(
+        '/google.longrunning.Operations/GetOperation',
+        request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
+        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+        )
+    self.DeleteOperation = channel.unary_unary(
+        '/google.longrunning.Operations/DeleteOperation',
+        request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
+        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+        )
+    self.CancelOperation = channel.unary_unary(
+        '/google.longrunning.Operations/CancelOperation',
+        request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
+        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+        )
+
+
+class OperationsServicer(object):
+  """Manages long-running operations with an API service.
+
+  When an API method normally takes a long time to complete, it can be designed
+  to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+  interface to receive the real response asynchronously by polling the
+  operation resource, or pass the operation resource to another API (such as
+  Google Cloud Pub/Sub API) to receive the response.  Any API service that
+  returns long-running operations should implement the `Operations` interface
+  so developers can have a consistent client experience.
+  """
+
+  def ListOperations(self, request, context):
+    """Lists operations that match the specified filter in the request. If the
+    server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+    NOTE: the `name` binding below allows API services to override the binding
+    to use different resource name schemes, such as `users/*/operations`.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def GetOperation(self, request, context):
+    """Gets the latest state of a long-running operation.  Clients can use this
+    method to poll the operation result at intervals as recommended by the API
+    service.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def DeleteOperation(self, request, context):
+    """Deletes a long-running operation. This method indicates that the client is
+    no longer interested in the operation result. It does not cancel the
+    operation. If the server doesn't support this method, it returns
+    `google.rpc.Code.UNIMPLEMENTED`.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def CancelOperation(self, request, context):
+    """Starts asynchronous cancellation on a long-running operation.  The server
+    makes a best effort to cancel the operation, but success is not
+    guaranteed.  If the server doesn't support this method, it returns
+    `google.rpc.Code.UNIMPLEMENTED`.  Clients can use
+    [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+    other methods to check whether the cancellation succeeded or whether the
+    operation completed despite cancellation. On successful cancellation,
+    the operation is not deleted; instead, it becomes an operation with
+    an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+    corresponding to `Code.CANCELLED`.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_OperationsServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'ListOperations': grpc.unary_unary_rpc_method_handler(
+          servicer.ListOperations,
+          request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
+          response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
+      ),
+      'GetOperation': grpc.unary_unary_rpc_method_handler(
+          servicer.GetOperation,
+          request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
+          response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+      ),
+      'DeleteOperation': grpc.unary_unary_rpc_method_handler(
+          servicer.DeleteOperation,
+          request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
+          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+      ),
+      'CancelOperation': grpc.unary_unary_rpc_method_handler(
+          servicer.CancelOperation,
+          request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
+          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'google.longrunning.Operations', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
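
A sketch of the polling pattern these stubs enable, for example after `Execution.Execute` has returned an `Operation`. The endpoint and poll interval are placeholders, and a real client would also honour any RetryInfo details the server returns.

import time

from google.longrunning import operations_pb2, operations_pb2_grpc


def wait_for_operation(channel, operation_name, poll_interval=1.0):
    """Poll Operations.GetOperation until the named operation completes."""
    stub = operations_pb2_grpc.OperationsStub(channel)
    while True:
        op = stub.GetOperation(
            operations_pb2.GetOperationRequest(name=operation_name))
        if op.done:
            return op
        time.sleep(poll_interval)


# Usage (endpoint and operation name are placeholders):
# channel = grpc.insecure_channel('localhost:50051')
# op = wait_for_operation(channel, 'operations/some/unique/name')
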
diff --git a/google/rpc/__init__.py b/google/rpc/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google/rpc/__init__.py
diff --git a/google/rpc/status.proto b/google/rpc/status.proto
new file mode 100644
index 0000000..0839ee9
--- /dev/null
+++ b/google/rpc/status.proto
@@ -0,0 +1,92 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` that can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+//     have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+//     `Status` message should be used directly inside batch response, one for
+//     each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+message Status {
+  // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+  int32 code = 1;
+
+  // A developer-facing error message, which should be in English. Any
+  // user-facing error message should be localized and sent in the
+  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+  string message = 2;
+
+  // A list of messages that carry the error details.  There is a common set of
+  // message types for APIs to use.
+  repeated google.protobuf.Any details = 3;
+}
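
To illustrate how `details` carries typed error payloads, for example the PreconditionFailure mentioned in the Execution docstrings earlier in this patch, here is a small sketch; `error_details_pb2` comes from the upstream googleapis-common-protos package rather than from this patch, so treat that import as an assumption.

from google.rpc import error_details_pb2  # assumed: provided by googleapis-common-protos
from google.rpc import status_pb2

status = status_pb2.Status(code=9,  # google.rpc.Code.FAILED_PRECONDITION
                           message='missing input blobs')

failure = error_details_pb2.PreconditionFailure()
violation = failure.violations.add()
violation.type = 'MISSING'
violation.subject = 'blobs/{hash}/{size}'  # digest of the missing blob goes here

# Each entry in 'details' is a google.protobuf.Any wrapping a typed message.
status.details.add().Pack(failure)
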
diff --git a/google/rpc/status_pb2.py b/google/rpc/status_pb2.py
new file mode 100644
index 0000000..6c47723
--- /dev/null
+++ b/google/rpc/status_pb2.py
@@ -0,0 +1,88 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/rpc/status.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/rpc/status.proto',
+  package='google.rpc',
+  syntax='proto3',
+  serialized_pb=_b('\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3')
+  ,
+  dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
+
+
+
+
+_STATUS = _descriptor.Descriptor(
+  name='Status',
+  full_name='google.rpc.Status',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='code', full_name='google.rpc.Status.code', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='message', full_name='google.rpc.Status.message', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='details', full_name='google.rpc.Status.details', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=66,
+  serialized_end=144,
+)
+
+_STATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+DESCRIPTOR.message_types_by_name['Status'] = _STATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), dict(
+  DESCRIPTOR = _STATUS,
+  __module__ = 'google.rpc.status_pb2'
+  # @@protoc_insertion_point(class_scope:google.rpc.Status)
+  ))
+_sym_db.RegisterMessage(Status)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC'))
+# @@protoc_insertion_point(module_scope)
diff --git a/google/rpc/status_pb2_grpc.py b/google/rpc/status_pb2_grpc.py
new file mode 100644
index 0000000..a894352
--- /dev/null
+++ b/google/rpc/status_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/setup.cfg b/setup.cfg
index e0b3c99..d37db78 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,5 +23,7 @@
     */bin/* ALL
     buildstream/_fuse/fuse.py ALL
     .eggs/* ALL
+    *_pb2.py ALL
+    *_pb2_grpc.py ALL
 env =
     D:BST_TEST_SUITE=True
diff --git a/setup.py b/setup.py
index 03a2bda..d891d24 100755
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
     sys.exit(1)
 
 try:
-    from setuptools import setup, find_packages
+    from setuptools import setup, find_packages, Command
     from setuptools.command.easy_install import ScriptWriter
 except ImportError:
     print("BuildStream requires setuptools in order to build. Install it using"
@@ -82,47 +82,6 @@
             exit_bwrap("Bubblewrap too old")
 
 
-##################################################################
-# OSTree version requirements
-##################################################################
-REQUIRED_OSTREE_YEAR = 2017
-REQUIRED_OSTREE_RELEASE = 8
-
-
-def exit_ostree(reason):
-    print(reason +
-          "\nBuildStream requires OSTree >= v{}.{} with Python bindings. "
-          .format(REQUIRED_OSTREE_YEAR, REQUIRED_OSTREE_RELEASE) +
-          "Install it using your package manager (usually ostree or gir1.2-ostree-1.0).")
-    sys.exit(1)
-
-
-def assert_ostree_version():
-    platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform
-    if platform.startswith('linux'):
-        try:
-            import gi
-        except ImportError:
-            print("BuildStream requires PyGObject (aka PyGI). Install it using"
-                  " your package manager (usually pygobject3 or python-gi).")
-            sys.exit(1)
-
-        try:
-            gi.require_version('OSTree', '1.0')
-            from gi.repository import OSTree
-        except ValueError:
-            exit_ostree("OSTree not found")
-
-        try:
-            if OSTree.YEAR_VERSION < REQUIRED_OSTREE_YEAR or \
-               (OSTree.YEAR_VERSION == REQUIRED_OSTREE_YEAR and
-                OSTree.RELEASE_VERSION < REQUIRED_OSTREE_RELEASE):
-                exit_ostree("OSTree v{}.{} is too old."
-                            .format(OSTree.YEAR_VERSION, OSTree.RELEASE_VERSION))
-        except AttributeError:
-            exit_ostree("OSTree is too old.")
-
-
 ###########################################
 # List the pre-built man pages to install #
 ###########################################
@@ -154,13 +113,12 @@
 # So screw it, lets just use an env var.
 bst_install_entry_points = {
     'console_scripts': [
-        'bst-artifact-receive = buildstream._artifactcache.pushreceive:receive_main'
+        'bst-artifact-server = buildstream._artifactcache.casserver:server_main'
     ],
 }
 
 if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
     assert_bwrap()
-    assert_ostree_version()
     bst_install_entry_points['console_scripts'] += [
         'bst = buildstream._frontend:cli'
     ]
@@ -206,12 +164,46 @@
 
 
 #####################################################
+#         gRPC command for code generation          #
+#####################################################
+class BuildGRPC(Command):
+    """Command to generate project *_pb2.py modules from proto files."""
+
+    description = 'build gRPC protobuf modules'
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        try:
+            import grpc_tools.command
+        except ImportError:
+            print("BuildStream requires grpc_tools in order to build gRPC modules.\n"
+                  "Install it via pip (pip3 install grpcio-tools).")
+            exit(1)
+
+        grpc_tools.command.build_package_protos('.')
+
+
+def get_cmdclass():
+    cmdclass = {
+        'build_grpc': BuildGRPC,
+    }
+    cmdclass.update(versioneer.get_cmdclass())
+    return cmdclass
+
+
+#####################################################
 #             Main setup() Invocation               #
 #####################################################
 setup(name='BuildStream',
       # Use versioneer
       version=versioneer.get_version(),
-      cmdclass=versioneer.get_cmdclass(),
+      cmdclass=get_cmdclass(),
 
       description='A framework for modelling build pipelines in YAML',
       license='LGPL',
@@ -243,6 +235,8 @@
           'Click',
           'blessings',
           'jinja2 >= 2.10',
+          'protobuf',
+          'grpcio',
       ],
       entry_points=bst_install_entry_points,
       setup_requires=['pytest-runner'],
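The build_grpc command added above is a thin wrapper around grpcio-tools; a minimal standalone sketch of what it does (assuming grpcio-tools is installed, e.g. via pip3 install grpcio-tools):

# Regenerate the *_pb2.py and *_pb2_grpc.py modules from every .proto file
# found under the project root, exactly as BuildGRPC.run() does above.
from grpc_tools import command

command.build_package_protos('.')

In the normal workflow this is invoked as python3 setup.py build_grpc.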
diff --git a/tests/artifactcache/junctions.py b/tests/artifactcache/junctions.py
index 12423f9..378d007 100644
--- a/tests/artifactcache/junctions.py
+++ b/tests/artifactcache/junctions.py
@@ -2,7 +2,6 @@
 import shutil
 import pytest
 from tests.testutils import cli, create_artifact_share
-from tests.testutils.site import IS_LINUX
 
 from buildstream import _yaml
 
@@ -37,60 +36,53 @@
     _yaml.dump(_yaml.node_sanitize(project_config), filename=project_conf_file)
 
 
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_pull(cli, tmpdir, datafiles):
     project = os.path.join(str(datafiles), 'foo')
     base_project = os.path.join(str(project), 'base')
 
-    share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-foo'))
-    base_share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-base'))
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-foo')) as share,\
+        create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-base')) as base_share:
 
-    # First build it without the artifact cache configured
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    assert result.exit_code == 0
+        # First build it without the artifact cache configured
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        assert result.exit_code == 0
 
-    # Assert that we are now cached locally
-    state = cli.get_element_state(project, 'target.bst')
-    assert state == 'cached'
-    state = cli.get_element_state(base_project, 'target.bst')
-    assert state == 'cached'
+        # Assert that we are now cached locally
+        state = cli.get_element_state(project, 'target.bst')
+        assert state == 'cached'
+        state = cli.get_element_state(base_project, 'target.bst')
+        assert state == 'cached'
 
-    project_set_artifacts(project, share.repo)
-    project_set_artifacts(base_project, base_share.repo)
+        project_set_artifacts(project, share.repo)
+        project_set_artifacts(base_project, base_share.repo)
 
-    # Now try bst push
-    result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst'])
-    assert result.exit_code == 0
+        # Now try bst push
+        result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst'])
+        assert result.exit_code == 0
 
-    # And finally assert that the artifacts are in the right shares
-    assert_shared(cli, share, 'foo', project, 'target.bst')
-    assert_shared(cli, base_share, 'base', base_project, 'target.bst')
+        # And finally assert that the artifacts are in the right shares
+        assert_shared(cli, share, 'foo', project, 'target.bst')
+        assert_shared(cli, base_share, 'base', base_project, 'target.bst')
 
-    # Make sure we update the summary in our artifact shares,
-    # we dont have a real server around to do it
-    #
-    share.update_summary()
-    base_share.update_summary()
+        # Now we've pushed, delete the user's local artifact cache
+        # directory and try to redownload it from the share
+        #
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Now we've pushed, delete the user's local artifact cache
-    # directory and try to redownload it from the share
-    #
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        # Assert that nothing is cached locally anymore
+        state = cli.get_element_state(project, 'target.bst')
+        assert state != 'cached'
+        state = cli.get_element_state(base_project, 'target.bst')
+        assert state != 'cached'
 
-    # Assert that nothing is cached locally anymore
-    state = cli.get_element_state(project, 'target.bst')
-    assert state != 'cached'
-    state = cli.get_element_state(base_project, 'target.bst')
-    assert state != 'cached'
+        # Now try bst pull
+        result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+        assert result.exit_code == 0
 
-    # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
-    assert result.exit_code == 0
-
-    # And assert that they are again in the local cache, without having built
-    state = cli.get_element_state(project, 'target.bst')
-    assert state == 'cached'
-    state = cli.get_element_state(base_project, 'target.bst')
-    assert state == 'cached'
+        # And assert that they are again in the local cache, without having built
+        state = cli.get_element_state(project, 'target.bst')
+        assert state == 'cached'
+        state = cli.get_element_state(base_project, 'target.bst')
+        assert state == 'cached'
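The junction and frontend tests are rewritten to use create_artifact_share() as a context manager, so each test share is torn down automatically when the block exits and the earlier manual update_summary() calls are no longer needed. The real helper lives in tests/testutils and is not part of this hunk; a minimal, hypothetical sketch of the pattern:

# Illustrative only: temporary_share() stands in for create_artifact_share().
import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def temporary_share(prefix='artifactshare-'):
    directory = tempfile.mkdtemp(prefix=prefix)   # set up the share
    try:
        yield directory                           # hand it to the test body
    finally:
        shutil.rmtree(directory)                  # always clean up on exit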
diff --git a/tests/artifactcache/tar.py b/tests/artifactcache/tar.py
deleted file mode 100644
index ef39be3..0000000
--- a/tests/artifactcache/tar.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import os
-import tarfile
-import tempfile
-from contextlib import ExitStack
-
-import pytest
-
-from buildstream._artifactcache.tarcache import _Tar
-from buildstream import utils, ProgramNotFoundError
-
-
-# Test that it 'works' - this may be equivalent to test_archive_no_tar()
-# on some systems.
-def test_archive_default():
-    with ExitStack() as stack:
-        src = stack.enter_context(tempfile.TemporaryDirectory())
-        tar_dir = stack.enter_context(tempfile.TemporaryDirectory())
-        scratch = stack.enter_context(tempfile.TemporaryDirectory())
-        test_file = stack.enter_context(open(os.path.join(src, 'test'), 'a'))
-        test_file.write('Test')
-
-        _Tar.archive(os.path.join(tar_dir, 'test.tar'), '.', src)
-
-        with tarfile.open(os.path.join(tar_dir, 'test.tar')) as tar:
-            tar.extractall(path=scratch)
-
-        assert os.listdir(scratch) == os.listdir(src)
-
-
-def test_archive_no_tar():
-    # Modify the path to exclude 'tar'
-    old_path = os.environ.get('PATH')
-    os.environ['PATH'] = ''
-
-    # Ensure we can't find 'tar' or 'gtar'
-    try:
-        for tar in ['gtar', 'tar']:
-            with pytest.raises(ProgramNotFoundError):
-                utils.get_host_tool(tar)
-
-    # Run the same test as before, this time 'tar' should not be available
-        test_archive_default()
-
-    # Reset the environment
-    finally:
-        os.environ['PATH'] = old_path
-
-
-# Same thing as test_archive_default()
-def test_extract_default():
-    with ExitStack() as stack:
-        src = stack.enter_context(tempfile.TemporaryDirectory())
-        tar_dir = stack.enter_context(tempfile.TemporaryDirectory())
-        scratch = stack.enter_context(tempfile.TemporaryDirectory())
-        test_file = stack.enter_context(open(os.path.join(src, 'test'), 'a'))
-        test_file.write('Test')
-
-        with tarfile.open(os.path.join(tar_dir, 'test.tar'), 'a:') as tar:
-            tar.add(src, 'contents')
-
-        _Tar.extract(os.path.join(tar_dir, 'test.tar'), scratch)
-
-        assert os.listdir(os.path.join(scratch, 'contents')) == os.listdir(src)
-
-
-def test_extract_no_tar():
-    # Modify the path to exclude 'tar'
-    old_path = os.environ.get('PATH')
-    os.environ['PATH'] = ''
-
-    # Ensure we can't find 'tar' or 'gtar'
-    for tar in ['gtar', 'tar']:
-        with pytest.raises(ProgramNotFoundError):
-            utils.get_host_tool(tar)
-
-    # Run the same test as before, this time 'tar' should not be available
-    try:
-        test_extract_default()
-
-    # Reset the environment
-    finally:
-        os.environ['PATH'] = old_path
diff --git a/tests/cachekey/project/elements/build1.expected b/tests/cachekey/project/elements/build1.expected
index ab8adf2..7c5af60 100644
--- a/tests/cachekey/project/elements/build1.expected
+++ b/tests/cachekey/project/elements/build1.expected
@@ -1 +1 @@
-93594f53df6c599598ea9c1d5101a8f7e57bbd82cac521494ce680e6f84de67d
\ No newline at end of file
+3db51572837956b28ffbc4aabdce659b4a1d91dcbb8b75954210346959ed5fa9
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/build2.expected b/tests/cachekey/project/elements/build2.expected
index 9499017..e1bd912 100644
--- a/tests/cachekey/project/elements/build2.expected
+++ b/tests/cachekey/project/elements/build2.expected
@@ -1 +1 @@
-3ae596efed1126d440780ef33d2144a06cb7215a778c4f59b12a2f77fa0ee3b2
\ No newline at end of file
+bcde6fc389b7d8bb7788989b68f68653ab8ed658117012c0611f218f4a585d38
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose1.expected b/tests/cachekey/project/elements/compose1.expected
index e912fbe..86a2a2f 100644
--- a/tests/cachekey/project/elements/compose1.expected
+++ b/tests/cachekey/project/elements/compose1.expected
@@ -1 +1 @@
-d67fccd867504706010f9f36b07cd35b3129e9d79ae287c3dc2bf9ec03e309ea
\ No newline at end of file
+6736bbcc055e1801a19288d3a64b622e0b9223164f8ad2ce842b18a4eaa0cfb9
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose2.expected b/tests/cachekey/project/elements/compose2.expected
index 4c3b901..a811cc4 100644
--- a/tests/cachekey/project/elements/compose2.expected
+++ b/tests/cachekey/project/elements/compose2.expected
@@ -1 +1 @@
-743eaac4f261d389d2c12fb9c8605eb70d5e42c8a0bccadef9f651dd137cedde
\ No newline at end of file
+9294428a0b5c0d44fdb3ab0f883ee87f9e62d51f96c7de1e5e81ed5e3934d403
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose3.expected b/tests/cachekey/project/elements/compose3.expected
index 85843a1..ce28c85 100644
--- a/tests/cachekey/project/elements/compose3.expected
+++ b/tests/cachekey/project/elements/compose3.expected
@@ -1 +1 @@
-5b401864d1d91809f59c258d37f78b410b244fcb20cab4bd0c1da17257515643
\ No newline at end of file
+4f1569b9a6317280e6299f9f7f706a6adcc89603030cde51d529dd6dfe2851be
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose4.expected b/tests/cachekey/project/elements/compose4.expected
index 38060ae..8d95a3d 100644
--- a/tests/cachekey/project/elements/compose4.expected
+++ b/tests/cachekey/project/elements/compose4.expected
@@ -1 +1 @@
-450664eb37302835e3289b95dfb38cab0b24e6c30c4b7b59a5dc1b5a7f1f01e0
\ No newline at end of file
+4c83744bec21c8c38bce2d48396b8df1eb4df7b2f155424016bd012743efd808
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose5.expected b/tests/cachekey/project/elements/compose5.expected
index 2f6307c..183534a 100644
--- a/tests/cachekey/project/elements/compose5.expected
+++ b/tests/cachekey/project/elements/compose5.expected
@@ -1 +1 @@
-fedaf8a315f8a9fb94d11c6f74a409188ff9397eac710e5ba6d9532162bd6973
\ No newline at end of file
+97385aa2192ef0295dd2601e78491d8bdf6b74e98938d0f8011747c2caf3a5c6
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import1.expected b/tests/cachekey/project/elements/import1.expected
index 4669ed4..387da88 100644
--- a/tests/cachekey/project/elements/import1.expected
+++ b/tests/cachekey/project/elements/import1.expected
@@ -1 +1 @@
-20582fab199a8d110fd65b5616f45bc08ae3eccc7bfe8b94ba987f3986b69ce5
\ No newline at end of file
+99c8f61d415de3a6c96e48299fda5554bf4bbaf56bb4b5acd85861ab37ede0c3
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import2.expected b/tests/cachekey/project/elements/import2.expected
index 2b071ac..0893dde 100644
--- a/tests/cachekey/project/elements/import2.expected
+++ b/tests/cachekey/project/elements/import2.expected
@@ -1 +1 @@
-4fcc04697288b0fdc0785b7350c308c3b40177d2ad0ec47ee4e59afbbe7634a9
\ No newline at end of file
+5f5884c5e4bb7066eede3a135e49753ec06b757a30983513a7a4e0cdd2a8f402
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import3.expected b/tests/cachekey/project/elements/import3.expected
index 538daae..6d0fe86 100644
--- a/tests/cachekey/project/elements/import3.expected
+++ b/tests/cachekey/project/elements/import3.expected
@@ -1 +1 @@
-203a3749724d461a237f22ff261870616cedfe34bfb59603c935fd05644059b3
\ No newline at end of file
+e11f93ec629bc3556e15bd374e67a0b5e34350e1e9b1d1f98f8de984a27bbead
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/script1.expected b/tests/cachekey/project/elements/script1.expected
index cf12139..e8d5b24 100644
--- a/tests/cachekey/project/elements/script1.expected
+++ b/tests/cachekey/project/elements/script1.expected
@@ -1 +1 @@
-93de2701d76db777a560e1e531883b7922b07683d4e7c14ea26b0500946f2c62
\ No newline at end of file
+d8388b756de5c8441375ba32cedd9560a65a8f9a85e41038837d342c8fb10004
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/bzr1.expected b/tests/cachekey/project/sources/bzr1.expected
index 0e2a851..ca11c95 100644
--- a/tests/cachekey/project/sources/bzr1.expected
+++ b/tests/cachekey/project/sources/bzr1.expected
@@ -1 +1 @@
-8509b1e54cc11bc2681425a11498037ad3841295c26fec86ff61a6b09d83e10a
\ No newline at end of file
+519ee88fcca7fea091245713ec68baa048e3d876ea22559d4b2035d3d2ab2494
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/git1.expected b/tests/cachekey/project/sources/git1.expected
index 07fc21c..85dc885 100644
--- a/tests/cachekey/project/sources/git1.expected
+++ b/tests/cachekey/project/sources/git1.expected
@@ -1 +1 @@
-c1931acaea82971f1fc243dbe035a228c6103d52e09e618c7eda85f141c726cc
\ No newline at end of file
+a5424aa7cc25f0ada9ac1245b33d55d078559ae6c50b10bea3db9acb964b058c
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/git2.expected b/tests/cachekey/project/sources/git2.expected
index b08e08c..9a643c0 100644
--- a/tests/cachekey/project/sources/git2.expected
+++ b/tests/cachekey/project/sources/git2.expected
@@ -1 +1 @@
-6d1ee891d29e0af504ed59ccd46c653b74946d3778d7e941f4d8b6e68cf3ca50
\ No newline at end of file
+93bf7344c118664f0d7f2b8e5a6731b2a95de6df83ba7fa2a2ab28227b0b3e8b
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/local1.expected b/tests/cachekey/project/sources/local1.expected
index 4669ed4..387da88 100644
--- a/tests/cachekey/project/sources/local1.expected
+++ b/tests/cachekey/project/sources/local1.expected
@@ -1 +1 @@
-20582fab199a8d110fd65b5616f45bc08ae3eccc7bfe8b94ba987f3986b69ce5
\ No newline at end of file
+99c8f61d415de3a6c96e48299fda5554bf4bbaf56bb4b5acd85861ab37ede0c3
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/local2.expected b/tests/cachekey/project/sources/local2.expected
index 4a0796e..598fe73 100644
--- a/tests/cachekey/project/sources/local2.expected
+++ b/tests/cachekey/project/sources/local2.expected
@@ -1 +1 @@
-527685945072d971075edf6e4a06ce7146ef1cd023da0001c6e1613d525c76aa
\ No newline at end of file
+780a7e62bbe5bc0f975ec6cd749de6a85f9080d3628f16f881605801597916a7
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/ostree1.expected b/tests/cachekey/project/sources/ostree1.expected
index 5b4bf12..0e8e830 100644
--- a/tests/cachekey/project/sources/ostree1.expected
+++ b/tests/cachekey/project/sources/ostree1.expected
@@ -1 +1 @@
-b78e79c5ba297cf5cb41d6eaa5f4ca170216c967b84935364d30938021202341
\ No newline at end of file
+9b06b6e0c213a5475d2b0fcfee537c41dbec579e6109e95f7e7aeb0488f079f6
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch1.expected b/tests/cachekey/project/sources/patch1.expected
index a04b8fd..d7cf73c 100644
--- a/tests/cachekey/project/sources/patch1.expected
+++ b/tests/cachekey/project/sources/patch1.expected
@@ -1 +1 @@
-84830ad8577e5fa5a9dab14ce3f995b4dc16699aebc33122aa2dc5fade34528d
\ No newline at end of file
+d5b0f1fa5b4e3e7aa617de303125268c7a7461e415ecf1eccc8aee2cda56897e
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch2.expected b/tests/cachekey/project/sources/patch2.expected
index 3fafb87..56a92dc 100644
--- a/tests/cachekey/project/sources/patch2.expected
+++ b/tests/cachekey/project/sources/patch2.expected
@@ -1 +1 @@
-1d137c65e7f2f9c8a0a74a46461dfe9ba5c675d53a1ff96a4bf15f0889891883
\ No newline at end of file
+6decb6b49e48a5869b2a438254c911423275662aff73348cd95e64148011c097
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch3.expected b/tests/cachekey/project/sources/patch3.expected
index 6a62b70..f1257bb 100644
--- a/tests/cachekey/project/sources/patch3.expected
+++ b/tests/cachekey/project/sources/patch3.expected
@@ -1 +1 @@
-fd1f209c8f44fd629fb5201d6f299c47567b64828235b470b2ff8ff6edba4478
\ No newline at end of file
+ab91e0ab9e167c4e9d31480c96a6a91a47ff27246f4eeff4ce6b671cbd865901
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar1.expected b/tests/cachekey/project/sources/tar1.expected
index 5b52a4c..ab0bd56 100644
--- a/tests/cachekey/project/sources/tar1.expected
+++ b/tests/cachekey/project/sources/tar1.expected
@@ -1 +1 @@
-003d5c53c81ab4bf7e375c4e9704bdbc260473fecb334c9f78ed24ec5c1a908e
\ No newline at end of file
+ccb35d04789b0d83fd93a6c2f8688c4abfe20f5bc77420f63054893450b2a832
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar2.expected b/tests/cachekey/project/sources/tar2.expected
index d823bde..03241f4 100644
--- a/tests/cachekey/project/sources/tar2.expected
+++ b/tests/cachekey/project/sources/tar2.expected
@@ -1 +1 @@
-f501ed7c8df19071712634049fed1a1fb22fbeb6f27973595bc8139e56c6c446
\ No newline at end of file
+441c80ed92c77df8247344337f470ac7ab7fe91d2fe3900b498708b0faeac4b5
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip1.expected b/tests/cachekey/project/sources/zip1.expected
index 64c0655..a3ac93e 100644
--- a/tests/cachekey/project/sources/zip1.expected
+++ b/tests/cachekey/project/sources/zip1.expected
@@ -1 +1 @@
-6a3c3a788c6a6ddae204a013d0622b6c352a91ff31cdf6d652b96ad0ac5eda52
\ No newline at end of file
+be47de64162c9cce0322d0af327092c7afc3a890ba9d6ef92eef016dcced5bae
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip2.expected b/tests/cachekey/project/sources/zip2.expected
index 64bb772..49bd45f 100644
--- a/tests/cachekey/project/sources/zip2.expected
+++ b/tests/cachekey/project/sources/zip2.expected
@@ -1 +1 @@
-50a555bf892822b8f5e4d59b940ba4359afe8e6d01dff013d918a3befd9c3d8f
\ No newline at end of file
+bedd330938f9405e2febcf1de8428b7180eb62ab73f8e31e49871874ae351735
\ No newline at end of file
diff --git a/tests/cachekey/project/target.expected b/tests/cachekey/project/target.expected
index dcb6a66..4f4c7c1 100644
--- a/tests/cachekey/project/target.expected
+++ b/tests/cachekey/project/target.expected
@@ -1 +1 @@
-0de68ec99d39b12857a5350ebfdc7f49fdde9a3457a31b2330896307fb503f7b
\ No newline at end of file
+a408b3e4b6ba4d6a6338bd3153728be89a18b74b13bde554411a4371fda487bc
\ No newline at end of file
diff --git a/tests/frontend/pull.py b/tests/frontend/pull.py
index c3ebe41..0d38909 100644
--- a/tests/frontend/pull.py
+++ b/tests/frontend/pull.py
@@ -2,7 +2,6 @@
 import shutil
 import pytest
 from tests.testutils import cli, create_artifact_share
-from tests.testutils.site import IS_LINUX
 
 # Project directory
 DATA_DIR = os.path.join(
@@ -40,43 +39,42 @@
 #  * `bst build` pushes all build elements to configured 'push' cache
 #  * `bst pull --deps all` downloads everything from cache after local deletion
 #
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_pull_all(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
-    share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
 
-    # First build the target element and push to the remote.
-    cli.configure({
-        'artifacts': {'url': share.repo, 'push': True}
-    })
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
 
-    # Assert that everything is now cached in the remote.
-    share.update_summary()
-    all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
-    for element_name in all_elements:
-        assert_shared(cli, share, project, element_name)
+        # First build the target element and push to the remote.
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True}
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
-    # Now we've pushed, delete the user's local artifact cache
-    # directory and try to redownload it from the share
-    #
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        # Assert that everything is now cached in the remote.
+        all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
+        for element_name in all_elements:
+            assert_shared(cli, share, project, element_name)
 
-    # Assert that nothing is cached locally anymore
-    for element_name in all_elements:
-        assert cli.get_element_state(project, element_name) != 'cached'
+        # Now we've pushed, delete the user's local artifact cache
+        # directory and try to redownload it from the share
+        #
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
-    result.assert_success()
+        # Assert that nothing is cached locally anymore
+        for element_name in all_elements:
+            assert cli.get_element_state(project, element_name) != 'cached'
 
-    # And assert that it's again in the local cache, without having built
-    for element_name in all_elements:
-        assert cli.get_element_state(project, element_name) == 'cached'
+        # Now try bst pull
+        result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+        result.assert_success()
+
+        # And assert that it's again in the local cache, without having built
+        for element_name in all_elements:
+            assert cli.get_element_state(project, element_name) == 'cached'
 
 
 # Tests that:
@@ -84,44 +82,40 @@
 #  * `bst build` pushes all build elements ONLY to configured 'push' cache
 #  * `bst pull` finds artifacts that are available only in the secondary cache
 #
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_pull_secondary_cache(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
 
-    share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
-    share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\
+        create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
 
-    # Build the target and push it to share2 only.
-    cli.configure({
-        'artifacts': [
-            {'url': share1.repo, 'push': False},
-            {'url': share2.repo, 'push': True},
-        ]
-    })
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
+        # Build the target and push it to share2 only.
+        cli.configure({
+            'artifacts': [
+                {'url': share1.repo, 'push': False},
+                {'url': share2.repo, 'push': True},
+            ]
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
 
-    share1.update_summary()
-    share2.update_summary()
+        assert_not_shared(cli, share1, project, 'target.bst')
+        assert_shared(cli, share2, project, 'target.bst')
 
-    assert_not_shared(cli, share1, project, 'target.bst')
-    assert_shared(cli, share2, project, 'target.bst')
+        # Delete the user's local artifact cache.
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Delete the user's local artifact cache.
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        # Assert that the element is not cached anymore.
+        assert cli.get_element_state(project, 'target.bst') != 'cached'
 
-    # Assert that the element is not cached anymore.
-    assert cli.get_element_state(project, 'target.bst') != 'cached'
+        # Now try bst pull
+        result = cli.run(project=project, args=['pull', 'target.bst'])
+        result.assert_success()
 
-    # Now try bst pull
-    result = cli.run(project=project, args=['pull', 'target.bst'])
-    result.assert_success()
-
-    # And assert that it's again in the local cache, without having built,
-    # i.e. we found it in share2.
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+        # And assert that it's again in the local cache, without having built,
+        # i.e. we found it in share2.
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
 
 # Tests that:
@@ -129,150 +123,144 @@
 #  * `bst push --remote` pushes to the given remote, not one from the config
 #  * `bst pull --remote` pulls from the given remote
 #
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_pull_specific_remote(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
 
-    good_share = create_artifact_share(os.path.join(str(tmpdir), 'goodartifactshare'))
-    bad_share = create_artifact_share(os.path.join(str(tmpdir), 'badartifactshare'))
+    with create_artifact_share(os.path.join(str(tmpdir), 'goodartifactshare')) as good_share,\
+        create_artifact_share(os.path.join(str(tmpdir), 'badartifactshare')) as bad_share:
 
-    # Build the target so we have it cached locally only.
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
+        # Build the target so we have it cached locally only.
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
 
-    state = cli.get_element_state(project, 'target.bst')
-    assert state == 'cached'
+        state = cli.get_element_state(project, 'target.bst')
+        assert state == 'cached'
 
-    # Configure the default push location to be bad_share; we will assert that
-    # nothing actually gets pushed there.
-    cli.configure({
-        'artifacts': {'url': bad_share.repo, 'push': True},
-    })
+        # Configure the default push location to be bad_share; we will assert that
+        # nothing actually gets pushed there.
+        cli.configure({
+            'artifacts': {'url': bad_share.repo, 'push': True},
+        })
 
-    # Now try `bst push` to the good_share.
-    result = cli.run(project=project, args=[
-        'push', 'target.bst', '--remote', good_share.repo
-    ])
-    result.assert_success()
+        # Now try `bst push` to the good_share.
+        result = cli.run(project=project, args=[
+            'push', 'target.bst', '--remote', good_share.repo
+        ])
+        result.assert_success()
 
-    good_share.update_summary()
-    bad_share.update_summary()
+        # Assert that all the artifacts are in the share we pushed
+        # to, and not the other.
+        assert_shared(cli, good_share, project, 'target.bst')
+        assert_not_shared(cli, bad_share, project, 'target.bst')
 
-    # Assert that all the artifacts are in the share we pushed
-    # to, and not the other.
-    assert_shared(cli, good_share, project, 'target.bst')
-    assert_not_shared(cli, bad_share, project, 'target.bst')
+        # Now we've pushed, delete the user's local artifact cache
+        # directory and try to redownload it from the good_share.
+        #
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Now we've pushed, delete the user's local artifact cache
-    # directory and try to redownload it from the good_share.
-    #
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        result = cli.run(project=project, args=['pull', 'target.bst', '--remote',
+                                                good_share.repo])
+        result.assert_success()
 
-    result = cli.run(project=project, args=['pull', 'target.bst', '--remote',
-                                            good_share.repo])
-    result.assert_success()
-
-    # And assert that it's again in the local cache, without having built
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+        # And assert that it's again in the local cache, without having built
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
 
 # Tests that:
 #
 #  * In non-strict mode, dependency changes don't block artifact reuse
 #
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_pull_non_strict(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
-    share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-    workspace = os.path.join(str(tmpdir), 'workspace')
 
-    # First build the target element and push to the remote.
-    cli.configure({
-        'artifacts': {'url': share.repo, 'push': True},
-        'projects': {
-            'test': {'strict': False}
-        }
-    })
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        workspace = os.path.join(str(tmpdir), 'workspace')
 
-    # Assert that everything is now cached in the remote.
-    share.update_summary()
-    all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
-    for element_name in all_elements:
-        assert_shared(cli, share, project, element_name)
+        # First build the target element and push to the remote.
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True},
+            'projects': {
+                'test': {'strict': False}
+            }
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
-    # Now we've pushed, delete the user's local artifact cache
-    # directory and try to redownload it from the share
-    #
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        # Assert that everything is now cached in the remote.
+        all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
+        for element_name in all_elements:
+            assert_shared(cli, share, project, element_name)
 
-    # Assert that nothing is cached locally anymore
-    for element_name in all_elements:
-        assert cli.get_element_state(project, element_name) != 'cached'
+        # Now we've pushed, delete the user's local artifact cache
+        # directory and try to redownload it from the share
+        #
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Add a file to force change in strict cache key of import-bin.bst
-    with open(os.path.join(str(project), 'files', 'bin-files', 'usr', 'bin', 'world'), 'w') as f:
-        f.write('world')
+        # Assert that nothing is cached locally anymore
+        for element_name in all_elements:
+            assert cli.get_element_state(project, element_name) != 'cached'
 
-    # Assert that the workspaced element requires a rebuild
-    assert cli.get_element_state(project, 'import-bin.bst') == 'buildable'
-    # Assert that the target is still waiting due to --no-strict
-    assert cli.get_element_state(project, 'target.bst') == 'waiting'
+        # Add a file to force change in strict cache key of import-bin.bst
+        with open(os.path.join(str(project), 'files', 'bin-files', 'usr', 'bin', 'world'), 'w') as f:
+            f.write('world')
 
-    # Now try bst pull
-    result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
-    result.assert_success()
+        # Assert that the workspaced element requires a rebuild
+        assert cli.get_element_state(project, 'import-bin.bst') == 'buildable'
+        # Assert that the target is still waiting due to --no-strict
+        assert cli.get_element_state(project, 'target.bst') == 'waiting'
 
-    # And assert that the target is again in the local cache, without having built
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+        # Now try bst pull
+        result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+        result.assert_success()
+
+        # And assert that the target is again in the local cache, without having built
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
 
 # Regression test for https://gitlab.com/BuildStream/buildstream/issues/202
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_pull_track_non_strict(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
-    share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
 
-    # First build the target element and push to the remote.
-    cli.configure({
-        'artifacts': {'url': share.repo, 'push': True},
-        'projects': {
-            'test': {'strict': False}
-        }
-    })
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
 
-    # Assert that everything is now cached in the remote.
-    share.update_summary()
-    all_elements = {'target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'}
-    for element_name in all_elements:
-        assert_shared(cli, share, project, element_name)
+        # First build the target element and push to the remote.
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True},
+            'projects': {
+                'test': {'strict': False}
+            }
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
-    # Now we've pushed, delete the user's local artifact cache
-    # directory and try to redownload it from the share
-    #
-    artifacts = os.path.join(cli.directory, 'artifacts')
-    shutil.rmtree(artifacts)
+        # Assert that everything is now cached in the remote.
+        all_elements = {'target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'}
+        for element_name in all_elements:
+            assert_shared(cli, share, project, element_name)
 
-    # Assert that nothing is cached locally anymore
-    for element_name in all_elements:
-        assert cli.get_element_state(project, element_name) != 'cached'
+        # Now we've pushed, delete the user's local artifact cache
+        # directory and try to redownload it from the share
+        #
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
 
-    # Now try bst build with tracking and pulling.
-    # Tracking will be skipped for target.bst as it doesn't have any sources.
-    # With the non-strict build plan target.bst immediately enters the pull queue.
-    # However, pulling has to be deferred until the dependencies have been
-    # tracked as the strict cache key needs to be calculated before querying
-    # the caches.
-    result = cli.run(project=project, args=['build', '--track-all', '--all', 'target.bst'])
-    result.assert_success()
-    assert set(result.get_pulled_elements()) == all_elements
+        # Assert that nothing is cached locally anymore
+        for element_name in all_elements:
+            assert cli.get_element_state(project, element_name) != 'cached'
+
+        # Now try bst build with tracking and pulling.
+        # Tracking will be skipped for target.bst as it doesn't have any sources.
+        # With the non-strict build plan target.bst immediately enters the pull queue.
+        # However, pulling has to be deferred until the dependencies have been
+        # tracked as the strict cache key needs to be calculated before querying
+        # the caches.
+        result = cli.run(project=project, args=['build', '--track-all', '--all', 'target.bst'])
+        result.assert_success()
+        assert set(result.get_pulled_elements()) == all_elements
diff --git a/tests/frontend/push.py b/tests/frontend/push.py
index e9c2daa..459c340 100644
--- a/tests/frontend/push.py
+++ b/tests/frontend/push.py
@@ -1,9 +1,7 @@
 import os
-import shutil
 import pytest
 from buildstream._exceptions import ErrorDomain
 from tests.testutils import cli, create_artifact_share
-from tests.testutils.site import IS_LINUX
 
 # Project directory
 DATA_DIR = os.path.join(
@@ -53,145 +51,143 @@
     assert cli.get_element_state(project, 'target.bst') == 'cached'
 
     # Set up two artifact shares.
-    share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
-    share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1:
 
-    # Try pushing with no remotes configured. This should fail.
-    result = cli.run(project=project, args=['push', 'target.bst'])
-    result.assert_main_error(ErrorDomain.STREAM, None)
+        with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
 
-    # Configure bst to pull but not push from a cache and run `bst push`.
-    # This should also fail.
-    cli.configure({
-        'artifacts': {'url': share1.repo, 'push': False},
-    })
-    result = cli.run(project=project, args=['push', 'target.bst'])
-    result.assert_main_error(ErrorDomain.STREAM, None)
+            # Try pushing with no remotes configured. This should fail.
+            result = cli.run(project=project, args=['push', 'target.bst'])
+            result.assert_main_error(ErrorDomain.STREAM, None)
 
-    # Configure bst to push to one of the caches and run `bst push`. This works.
-    cli.configure({
-        'artifacts': [
-            {'url': share1.repo, 'push': False},
-            {'url': share2.repo, 'push': True},
-        ]
-    })
-    result = cli.run(project=project, args=['push', 'target.bst'])
+            # Configure bst to pull but not push from a cache and run `bst push`.
+            # This should also fail.
+            cli.configure({
+                'artifacts': {'url': share1.repo, 'push': False},
+            })
+            result = cli.run(project=project, args=['push', 'target.bst'])
+            result.assert_main_error(ErrorDomain.STREAM, None)
 
-    assert_not_shared(cli, share1, project, 'target.bst')
-    assert_shared(cli, share2, project, 'target.bst')
+            # Configure bst to push to one of the caches and run `bst push`. This works.
+            cli.configure({
+                'artifacts': [
+                    {'url': share1.repo, 'push': False},
+                    {'url': share2.repo, 'push': True},
+                ]
+            })
+            result = cli.run(project=project, args=['push', 'target.bst'])
 
-    # Now try pushing to both (making sure to empty the cache we just pushed
-    # to).
-    shutil.rmtree(share2.directory)
-    share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
-    cli.configure({
-        'artifacts': [
-            {'url': share1.repo, 'push': True},
-            {'url': share2.repo, 'push': True},
-        ]
-    })
-    result = cli.run(project=project, args=['push', 'target.bst'])
+            assert_not_shared(cli, share1, project, 'target.bst')
+            assert_shared(cli, share2, project, 'target.bst')
 
-    assert_shared(cli, share1, project, 'target.bst')
-    assert_shared(cli, share2, project, 'target.bst')
+        # Now try pushing to both shares, using a fresh (and therefore empty) share2.
+
+        with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
+            cli.configure({
+                'artifacts': [
+                    {'url': share1.repo, 'push': True},
+                    {'url': share2.repo, 'push': True},
+                ]
+            })
+            result = cli.run(project=project, args=['push', 'target.bst'])
+
+            assert_shared(cli, share1, project, 'target.bst')
+            assert_shared(cli, share2, project, 'target.bst')
 
 
 # Tests that `bst push --deps all` pushes all dependencies of the given element.
 #
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_all(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
-    share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
 
-    # First build it without the artifact cache configured
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
 
-    # Assert that we are now cached locally
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+        # First build it without the artifact cache configured
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
 
-    # Configure artifact share
-    cli.configure({
-        #
-        # FIXME: This test hangs "sometimes" if we allow
-        #        concurrent push.
-        #
-        #        It's not too bad to ignore since we're
-        #        using the local artifact cache functionality
-        #        only, but it should probably be fixed.
-        #
-        'scheduler': {
-            'pushers': 1
-        },
-        'artifacts': {
-            'url': share.repo,
-            'push': True,
-        }
-    })
+        # Assert that we are now cached locally
+        assert cli.get_element_state(project, 'target.bst') == 'cached'
 
-    # Now try bst push all the deps
-    result = cli.run(project=project, args=[
-        'push', 'target.bst',
-        '--deps', 'all'
-    ])
-    result.assert_success()
+        # Configure artifact share
+        cli.configure({
+            #
+            # FIXME: This test hangs "sometimes" if we allow
+            #        concurrent push.
+            #
+            #        It's not too bad to ignore since we're
+            #        using the local artifact cache functionality
+            #        only, but it should probably be fixed.
+            #
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        })
 
-    # And finally assert that all the artifacts are in the share
-    assert_shared(cli, share, project, 'target.bst')
-    assert_shared(cli, share, project, 'import-bin.bst')
-    assert_shared(cli, share, project, 'import-dev.bst')
-    assert_shared(cli, share, project, 'compose-all.bst')
+        # Now try bst push all the deps
+        result = cli.run(project=project, args=[
+            'push', 'target.bst',
+            '--deps', 'all'
+        ])
+        result.assert_success()
+
+        # And finally assert that all the artifacts are in the share
+        assert_shared(cli, share, project, 'target.bst')
+        assert_shared(cli, share, project, 'import-bin.bst')
+        assert_shared(cli, share, project, 'import-dev.bst')
+        assert_shared(cli, share, project, 'compose-all.bst')
 
 
 # Tests that `bst build` won't push artifacts to the cache it just pulled from.
 #
 # Regression test for https://gitlab.com/BuildStream/buildstream/issues/233.
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
 def test_push_after_pull(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
 
     # Set up two artifact shares.
-    share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
-    share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\
+        create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
 
-    # Set the scene: share1 has the artifact, share2 does not.
-    #
-    cli.configure({
-        'artifacts': {'url': share1.repo, 'push': True},
-    })
+        # Set the scene: share1 has the artifact, share2 does not.
+        #
+        cli.configure({
+            'artifacts': {'url': share1.repo, 'push': True},
+        })
 
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
 
-    share1.update_summary()
-    cli.remove_artifact_from_cache(project, 'target.bst')
+        cli.remove_artifact_from_cache(project, 'target.bst')
 
-    assert_shared(cli, share1, project, 'target.bst')
-    assert_not_shared(cli, share2, project, 'target.bst')
-    assert cli.get_element_state(project, 'target.bst') != 'cached'
+        assert_shared(cli, share1, project, 'target.bst')
+        assert_not_shared(cli, share2, project, 'target.bst')
+        assert cli.get_element_state(project, 'target.bst') != 'cached'
 
-    # Now run the build again. Correct `bst build` behaviour is to download the
-    # artifact from share1 but not push it back again.
-    #
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
-    assert result.get_pulled_elements() == ['target.bst']
-    assert result.get_pushed_elements() == []
+        # Now run the build again. Correct `bst build` behaviour is to download the
+        # artifact from share1 but not push it back again.
+        #
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
+        assert result.get_pulled_elements() == ['target.bst']
+        assert result.get_pushed_elements() == []
 
-    # Delete the artifact locally again.
-    cli.remove_artifact_from_cache(project, 'target.bst')
+        # Delete the artifact locally again.
+        cli.remove_artifact_from_cache(project, 'target.bst')
 
-    # Now we add share2 into the mix as a second push remote. This time,
-    # `bst build` should push to share2 after pulling from share1.
-    cli.configure({
-        'artifacts': [
-            {'url': share1.repo, 'push': True},
-            {'url': share2.repo, 'push': True},
-        ]
-    })
-    result = cli.run(project=project, args=['build', 'target.bst'])
-    result.assert_success()
-    assert result.get_pulled_elements() == ['target.bst']
-    assert result.get_pushed_elements() == ['target.bst']
+        # Now we add share2 into the mix as a second push remote. This time,
+        # `bst build` should push to share2 after pulling from share1.
+        cli.configure({
+            'artifacts': [
+                {'url': share1.repo, 'push': True},
+                {'url': share2.repo, 'push': True},
+            ]
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+        result.assert_success()
+        assert result.get_pulled_elements() == ['target.bst']
+        assert result.get_pushed_elements() == ['target.bst']
diff --git a/tests/frontend/workspace.py b/tests/frontend/workspace.py
index e45696d..90b5061 100644
--- a/tests/frontend/workspace.py
+++ b/tests/frontend/workspace.py
@@ -356,6 +356,38 @@
 
 
 @pytest.mark.datafiles(DATA_DIR)
+def test_buildable_no_ref(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_name = 'workspace-test-no-ref.bst'
+    element_path = os.path.join(project, 'elements')
+
+    # Write out our test target without any source ref
+    repo = create_repo('git', str(tmpdir))
+    element = {
+        'kind': 'import',
+        'sources': [
+            repo.source_config()
+        ]
+    }
+    _yaml.dump(element,
+               os.path.join(element_path,
+                            element_name))
+
+    # Assert that this target is not buildable when no workspace is associated.
+    assert cli.get_element_state(project, element_name) == 'no reference'
+
+    # Now open the workspace. We don't need to checkout the source though.
+    workspace = os.path.join(str(tmpdir), 'workspace-no-ref')
+    os.makedirs(workspace)
+    args = ['workspace', 'open', '--no-checkout', element_name, workspace]
+    result = cli.run(project=project, args=args)
+    result.assert_success()
+
+    # Assert that the target is now buildable.
+    assert cli.get_element_state(project, element_name) == 'buildable'
+
+
+@pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.parametrize("modification", [("addfile"), ("removefile"), ("modifyfile")])
 @pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
 def test_detect_modifications(cli, tmpdir, datafiles, modification, strict):
diff --git a/tests/integration/workspace.py b/tests/integration/workspace.py
index 6eae1ef..102d053 100644
--- a/tests/integration/workspace.py
+++ b/tests/integration/workspace.py
@@ -216,7 +216,6 @@
 
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(not IS_LINUX, reason='Incremental builds are not supported by the unix platform')
 def test_incremental_configure_commands_run_only_once(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     workspace = os.path.join(cli.directory, 'workspace')
diff --git a/tests/plugins/filter.py b/tests/plugins/filter.py
index 3c092c2..45d6794 100644
--- a/tests/plugins/filter.py
+++ b/tests/plugins/filter.py
@@ -1,7 +1,9 @@
 import os
 import pytest
-from tests.testutils.runcli import cli
+import shutil
+from tests.testutils import cli, create_repo, ALL_REPO_KINDS
 from buildstream._exceptions import ErrorDomain
+from buildstream import _yaml
 
 DATA_DIR = os.path.join(
     os.path.dirname(os.path.realpath(__file__)),
@@ -85,3 +87,358 @@
     project = os.path.join(datafiles.dirname, datafiles.basename)
     result = cli.run(project=project, args=['build', 'forbidden-also-rdep.bst'])
     result.assert_main_error(ErrorDomain.ELEMENT, 'filter-bdepend-also-rdepend')
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+def test_filter_workspace_open(datafiles, cli, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    workspace_dir = os.path.join(tmpdir.dirname, tmpdir.basename, "workspace")
+    result = cli.run(project=project, args=['workspace', 'open', 'deps-permitted.bst', workspace_dir])
+    result.assert_success()
+    assert os.path.exists(os.path.join(workspace_dir, "foo"))
+    assert os.path.exists(os.path.join(workspace_dir, "bar"))
+    assert os.path.exists(os.path.join(workspace_dir, "baz"))
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+def test_filter_workspace_build(datafiles, cli, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
+    workspace_dir = os.path.join(tempdir, "workspace")
+    result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir])
+    result.assert_success()
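+    # Add a new file to the open workspace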
+    src = os.path.join(workspace_dir, "foo")
+    dst = os.path.join(workspace_dir, "quux")
+    shutil.copyfile(src, dst)
+    result = cli.run(project=project, args=['build', 'output-orphans.bst'])
+    result.assert_success()
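+    # Check out the built artifact and verify the added file ends up in the output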
+    checkout_dir = os.path.join(tempdir, "checkout")
+    result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir])
+    result.assert_success()
+    assert os.path.exists(os.path.join(checkout_dir, "quux"))
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+def test_filter_workspace_close(datafiles, cli, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
+    workspace_dir = os.path.join(tempdir, "workspace")
+    result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir])
+    result.assert_success()
+    src = os.path.join(workspace_dir, "foo")
+    dst = os.path.join(workspace_dir, "quux")
+    shutil.copyfile(src, dst)
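+    # Close the workspace, then rebuild; the file added in the workspace should not appear in the checkout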
+    result = cli.run(project=project, args=['workspace', 'close', 'deps-permitted.bst'])
+    result.assert_success()
+    result = cli.run(project=project, args=['build', 'output-orphans.bst'])
+    result.assert_success()
+    checkout_dir = os.path.join(tempdir, "checkout")
+    result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir])
+    result.assert_success()
+    assert not os.path.exists(os.path.join(checkout_dir, "quux"))
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+def test_filter_workspace_reset(datafiles, cli, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
+    workspace_dir = os.path.join(tempdir, "workspace")
+    result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir])
+    result.assert_success()
+    src = os.path.join(workspace_dir, "foo")
+    dst = os.path.join(workspace_dir, "quux")
+    shutil.copyfile(src, dst)
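+    # Reset the workspace, then rebuild; the file added in the workspace should be discarded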
+    result = cli.run(project=project, args=['workspace', 'reset', 'deps-permitted.bst'])
+    result.assert_success()
+    result = cli.run(project=project, args=['build', 'output-orphans.bst'])
+    result.assert_success()
+    checkout_dir = os.path.join(tempdir, "checkout")
+    result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir])
+    result.assert_success()
+    assert not os.path.exists(os.path.join(checkout_dir, "quux"))
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
+def test_filter_track(datafiles, cli, tmpdir, kind):
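+    # Tracking a filter element should track the import element it depends on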
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(os.path.join(str(datafiles), "files"))
+    elements_dir = os.path.join(str(tmpdir), "elements")
+    project = str(tmpdir)
+    input_name = "input.bst"
+
+    project_config = {
+        "name": "filter-track-test",
+        "element-path": "elements",
+    }
+    project_file = os.path.join(str(tmpdir), "project.conf")
+    _yaml.dump(project_config, project_file)
+
+    input_config = {
+        "kind": "import",
+        "sources": [repo.source_config()],
+    }
+
+    input_file = os.path.join(elements_dir, input_name)
+    _yaml.dump(input_config, input_file)
+
+    filter1_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter1_file = os.path.join(elements_dir, "filter1.bst")
+    _yaml.dump(filter1_config, filter1_file)
+
+    filter2_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": "filter1.bst", "type": "build"}
+        ]
+    }
+    filter2_file = os.path.join(elements_dir, "filter2.bst")
+    _yaml.dump(filter2_config, filter2_file)
+
+    # Assert that the element has no ref yet and therefore needs tracking
+    assert cli.get_element_state(project, input_name) == 'no reference'
+
+    # Now try to track it
+    result = cli.run(project=project, args=["track", "filter2.bst"])
+    result.assert_success()
+
+    # Now check that a ref field exists
+    new_input = _yaml.load(input_file)
+    assert new_input["sources"][0]["ref"] == ref
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
+def test_filter_track_excepted(datafiles, cli, tmpdir, kind):
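+    # Tracking a filter element with its import element excepted should leave the import element untracked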
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(os.path.join(str(datafiles), "files"))
+    elements_dir = os.path.join(str(tmpdir), "elements")
+    project = str(tmpdir)
+    input_name = "input.bst"
+
+    project_config = {
+        "name": "filter-track-test",
+        "element-path": "elements",
+    }
+    project_file = os.path.join(str(tmpdir), "project.conf")
+    _yaml.dump(project_config, project_file)
+
+    input_config = {
+        "kind": "import",
+        "sources": [repo.source_config()],
+    }
+
+    input_file = os.path.join(elements_dir, input_name)
+    _yaml.dump(input_config, input_file)
+
+    filter1_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter1_file = os.path.join(elements_dir, "filter1.bst")
+    _yaml.dump(filter1_config, filter1_file)
+
+    filter2_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": "filter1.bst", "type": "build"}
+        ]
+    }
+    filter2_file = os.path.join(elements_dir, "filter2.bst")
+    _yaml.dump(filter2_config, filter2_file)
+
+    # Assert that the element has no ref yet and therefore needs tracking
+    assert cli.get_element_state(project, input_name) == 'no reference'
+
+    # Now try to track it
+    result = cli.run(project=project, args=["track", "filter2.bst", "--except", "input.bst"])
+    result.assert_success()
+
+    # Check that no ref was added to the excepted element
+    new_input = _yaml.load(input_file)
+    assert "ref" not in new_input["sources"][0]
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
+def test_filter_track_multi_to_one(datafiles, cli, tmpdir, kind):
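+    # Tracking two filter elements which depend on the same import element should give that element a ref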
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(os.path.join(str(datafiles), "files"))
+    elements_dir = os.path.join(str(tmpdir), "elements")
+    project = str(tmpdir)
+    input_name = "input.bst"
+
+    project_config = {
+        "name": "filter-track-test",
+        "element-path": "elements",
+    }
+    project_file = os.path.join(str(tmpdir), "project.conf")
+    _yaml.dump(project_config, project_file)
+
+    input_config = {
+        "kind": "import",
+        "sources": [repo.source_config()],
+    }
+
+    input_file = os.path.join(elements_dir, input_name)
+    _yaml.dump(input_config, input_file)
+
+    filter1_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter1_file = os.path.join(elements_dir, "filter1.bst")
+    _yaml.dump(filter1_config, filter1_file)
+
+    filter2_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter2_file = os.path.join(elements_dir, "filter2.bst")
+    _yaml.dump(filter2_config, filter2_file)
+
+    # Assert that the element has no ref yet and therefore needs tracking
+    assert cli.get_element_state(project, input_name) == 'no reference'
+
+    # Now try to track it
+    result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst"])
+    result.assert_success()
+
+    # Now check that a ref field exists
+    new_input = _yaml.load(input_file)
+    assert new_input["sources"][0]["ref"] == ref
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
+def test_filter_track_multi(datafiles, cli, tmpdir, kind):
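+    # Tracking two filter elements which depend on separate import elements should track both of them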
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(os.path.join(str(datafiles), "files"))
+    elements_dir = os.path.join(str(tmpdir), "elements")
+    project = str(tmpdir)
+    input_name = "input.bst"
+    input2_name = "input2.bst"
+
+    project_config = {
+        "name": "filter-track-test",
+        "element-path": "elements",
+    }
+    project_file = os.path.join(str(tmpdir), "project.conf")
+    _yaml.dump(project_config, project_file)
+
+    input_config = {
+        "kind": "import",
+        "sources": [repo.source_config()],
+    }
+
+    input_file = os.path.join(elements_dir, input_name)
+    _yaml.dump(input_config, input_file)
+
+    input2_config = dict(input_config)
+    input2_file = os.path.join(elements_dir, input2_name)
+    _yaml.dump(input2_config, input2_file)
+
+    filter1_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter1_file = os.path.join(elements_dir, "filter1.bst")
+    _yaml.dump(filter1_config, filter1_file)
+
+    filter2_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input2_name, "type": "build"}
+        ]
+    }
+    filter2_file = os.path.join(elements_dir, "filter2.bst")
+    _yaml.dump(filter2_config, filter2_file)
+
+    # Assert that the elements have no ref yet and therefore need tracking
+    assert cli.get_element_state(project, input_name) == 'no reference'
+    assert cli.get_element_state(project, input2_name) == 'no reference'
+
+    # Now try to track it
+    result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst"])
+    result.assert_success()
+
+    # Check that both import elements now have a ref
+    new_input = _yaml.load(input_file)
+    assert new_input["sources"][0]["ref"] == ref
+    new_input2 = _yaml.load(input2_file)
+    assert new_input2["sources"][0]["ref"] == ref
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
+def test_filter_track_multi_exclude(datafiles, cli, tmpdir, kind):
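+    # With one import element excluded from tracking, only the other import element should receive a ref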
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(os.path.join(str(datafiles), "files"))
+    elements_dir = os.path.join(str(tmpdir), "elements")
+    project = str(tmpdir)
+    input_name = "input.bst"
+    input2_name = "input2.bst"
+
+    project_config = {
+        "name": "filter-track-test",
+        "element-path": "elements",
+    }
+    project_file = os.path.join(str(tmpdir), "project.conf")
+    _yaml.dump(project_config, project_file)
+
+    input_config = {
+        "kind": "import",
+        "sources": [repo.source_config()],
+    }
+
+    input_file = os.path.join(elements_dir, input_name)
+    _yaml.dump(input_config, input_file)
+
+    input2_config = dict(input_config)
+    input2_file = os.path.join(elements_dir, input2_name)
+    _yaml.dump(input2_config, input2_file)
+
+    filter1_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input_name, "type": "build"}
+        ]
+    }
+    filter1_file = os.path.join(elements_dir, "filter1.bst")
+    _yaml.dump(filter1_config, filter1_file)
+
+    filter2_config = {
+        "kind": "filter",
+        "depends": [
+            {"filename": input2_name, "type": "build"}
+        ]
+    }
+    filter2_file = os.path.join(elements_dir, "filter2.bst")
+    _yaml.dump(filter2_config, filter2_file)
+
+    # Assert that the elements have no ref yet and therefore need tracking
+    assert cli.get_element_state(project, input_name) == 'no reference'
+    assert cli.get_element_state(project, input2_name) == 'no reference'
+
+    # Now try to track it
+    result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst", "--except", input_name])
+    result.assert_success()
+
+    # Check that only the element which was not excluded received a ref
+    new_input = _yaml.load(input_file)
+    assert "ref" not in new_input["sources"][0]
+    new_input2 = _yaml.load(input2_file)
+    assert new_input2["sources"][0]["ref"] == ref
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 8664c69..6b9117b 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -2,10 +2,18 @@
 import pytest
 import subprocess
 import os
+import shutil
+import signal
+
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import pytest_cov
 
 from buildstream import _yaml
-
-from .site import HAVE_OSTREE_CLI
+from buildstream._artifactcache.cascache import CASCache
+from buildstream._artifactcache.casserver import create_server
+from buildstream._context import Context
+from buildstream._exceptions import ArtifactError
 
 
 # ArtifactShare()
@@ -20,11 +28,6 @@
 
     def __init__(self, directory):
 
-        # We need the ostree CLI for tests which use this
-        #
-        if not HAVE_OSTREE_CLI:
-            pytest.skip("ostree cli is not available")
-
         # The working directory for the artifact share (in case it
         # needs to do something outside of its backend's storage folder).
         #
@@ -35,34 +38,42 @@
         # Unless this gets more complicated, just use this directly
         # in tests as a remote artifact push/pull configuration
         #
-        self.repo = os.path.join(self.directory, 'repo')
+        self.repodir = os.path.join(self.directory, 'repo')
 
-        os.makedirs(self.repo)
+        os.makedirs(self.repodir)
 
-        self.init()
-        self.update_summary()
+        context = Context()
+        context.artifactdir = self.repodir
 
-    # init():
-    #
-    # Initializes the artifact share
-    #
-    # Returns:
-    #    (smth): A new ref corresponding to this commit, which can
-    #            be passed as the ref in the Repo.source_config() API.
-    #
-    def init(self):
-        subprocess.call(['ostree', 'init',
-                         '--repo', self.repo,
-                         '--mode', 'archive-z2'])
+        self.cas = CASCache(context)
 
-    # update_summary():
+        q = Queue()
+
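+        # Run the server in a subprocess so it can serve requests while tests run, and be terminated in close()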
+        self.process = Process(target=self.run, args=(q,))
+        self.process.start()
+
+        # Retrieve port from server subprocess
+        port = q.get()
+
+        self.repo = 'http://localhost:{}'.format(port)
+
+    # run():
     #
-    # Ensure that the summary is up to date
+    # Run the artifact server.
     #
-    def update_summary(self):
-        subprocess.call(['ostree', 'summary',
-                         '--update',
-                         '--repo', self.repo])
+    def run(self, q):
+        pytest_cov.embed.cleanup_on_sigterm()
+
+        server = create_server(self.repodir, enable_push=True)
+        port = server.add_insecure_port('localhost:0')
+
+        server.start()
+
+        # Send port to parent
+        q.put(port)
+
+        # Sleep until termination by signal
+        signal.pause()
 
     # has_artifact():
     #
@@ -77,8 +88,8 @@
     #    (bool): True if the artifact exists in the share, otherwise false.
     def has_artifact(self, project_name, element_name, cache_key):
 
-        # NOTE: This should be kept in line with our ostree
-        #       based artifact cache code, the below is the
+        # NOTE: This should be kept in line with our
+        #       artifact cache code, the below is the
         #       same algo for creating an artifact reference
         #
 
@@ -93,18 +104,31 @@
         ])
         artifact_key = '{0}/{1}/{2}'.format(project_name, element_name, cache_key)
 
-        if not subprocess.call(['ostree', 'rev-parse',
-                                '--repo', self.repo,
-                                artifact_key]):
+        try:
+            tree = self.cas.resolve_ref(artifact_key)
             return True
+        except ArtifactError:
+            return False
 
-        return False
+    # close():
+    #
+    # Remove the artifact share.
+    #
+    def close(self):
+        self.process.terminate()
+        self.process.join()
+
+        shutil.rmtree(self.directory)
 
 
 # create_artifact_share()
 #
 # Create an ArtifactShare for use in a test case
 #
+@contextmanager
 def create_artifact_share(directory):
-
-    return ArtifactShare(directory)
+    share = ArtifactShare(directory)
+    try:
+        yield share
+    finally:
+        share.close()
diff --git a/tests/testutils/runcli.py b/tests/testutils/runcli.py
index 658e388..96d4ea4 100644
--- a/tests/testutils/runcli.py
+++ b/tests/testutils/runcli.py
@@ -19,8 +19,6 @@
 #
 from _pytest.capture import MultiCapture, FDCapture
 
-from tests.testutils.site import IS_LINUX
-
 # Import the main cli entrypoint
 from buildstream._frontend import cli as bst_cli
 from buildstream import _yaml
@@ -203,10 +201,7 @@
     def remove_artifact_from_cache(self, project, element_name):
         cache_dir = os.path.join(project, 'cache', 'artifacts')
 
-        if IS_LINUX:
-            cache_dir = os.path.join(cache_dir, 'ostree', 'refs', 'heads')
-        else:
-            cache_dir = os.path.join(cache_dir, 'tar')
+        cache_dir = os.path.join(cache_dir, 'cas', 'refs', 'heads')
 
         cache_dir = os.path.splitext(os.path.join(cache_dir, 'test', element_name))[0]
         shutil.rmtree(cache_dir)