Merge pull request #22326 from akvelon/pg-trigger-example-deploy

[Playground]: Modified WithKeys Playground Example
diff --git a/.asf.yaml b/.asf.yaml
index f5f204b..c5209d2 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -38,6 +38,7 @@
   collaborators:
     - pcoet
     - VladMatyunin
+    - olehborysevych
 
   enabled_merge_buttons:
     squash: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 64e4f71..c37cbab 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -9,7 +9,7 @@
  - [ ] Update `CHANGES.md` with noteworthy changes.
  - [ ] If this contribution is large, please file an Apache [Individual Contributor License Agreement](https://www.apache.org/licenses/icla.pdf).
 
-See the [Contributor Guide](https://beam.apache.org/contribute) for more tips on [how to make review process smoother](https://beam.apache.org/contribute/#make-reviewers-job-easier).
+See the [Contributor Guide](https://beam.apache.org/contribute) for more tips on [how to make review process smoother](https://beam.apache.org/contribute/get-started-contributing/#make-the-reviewers-job-easier).
 
 To check the build health, please visit [https://github.com/apache/beam/blob/master/.test-infra/BUILD_STATUS.md](https://github.com/apache/beam/blob/master/.test-infra/BUILD_STATUS.md)
 
@@ -18,5 +18,6 @@
 [![Build python source distribution and wheels](https://github.com/apache/beam/workflows/Build%20python%20source%20distribution%20and%20wheels/badge.svg?branch=master&event=schedule)](https://github.com/apache/beam/actions?query=workflow%3A%22Build+python+source+distribution+and+wheels%22+branch%3Amaster+event%3Aschedule)
 [![Python tests](https://github.com/apache/beam/workflows/Python%20tests/badge.svg?branch=master&event=schedule)](https://github.com/apache/beam/actions?query=workflow%3A%22Python+Tests%22+branch%3Amaster+event%3Aschedule)
 [![Java tests](https://github.com/apache/beam/workflows/Java%20Tests/badge.svg?branch=master&event=schedule)](https://github.com/apache/beam/actions?query=workflow%3A%22Java+Tests%22+branch%3Amaster+event%3Aschedule)
+[![Go tests](https://github.com/apache/beam/workflows/Go%20tests/badge.svg?branch=master&event=schedule)](https://github.com/apache/beam/actions?query=workflow%3A%22Go+tests%22+branch%3Amaster+event%3Aschedule)
 
 See [CI.md](https://github.com/apache/beam/blob/master/CI.md) for more information about GitHub Actions CI.
diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml
index f2066b1..520b97b 100644
--- a/.github/workflows/build_wheels.yml
+++ b/.github/workflows/build_wheels.yml
@@ -240,7 +240,7 @@
       working-directory: apache-beam-source
       env:
         CIBW_BUILD: ${{ matrix.os_python.python }}
-        CIBW_BEFORE_BUILD: pip install cython
+        CIBW_BEFORE_BUILD: pip install cython && pip install --upgrade setuptools
       run: cibuildwheel --print-build-identifiers && cibuildwheel --output-dir wheelhouse
       shell: bash
     - name: install sha512sum on MacOS
diff --git a/.github/workflows/pr-bot-new-prs.yml b/.github/workflows/pr-bot-new-prs.yml
index e68a703..dd905a7 100644
--- a/.github/workflows/pr-bot-new-prs.yml
+++ b/.github/workflows/pr-bot-new-prs.yml
@@ -29,7 +29,7 @@
 
     steps:
       - uses: actions/checkout@v2
-      - run: npm install
+      - run: npm ci
         working-directory: 'scripts/ci/pr-bot'
 
       # Runs a set of commands using the runners shell
diff --git a/.github/workflows/pr-bot-pr-updates.yml b/.github/workflows/pr-bot-pr-updates.yml
index 426a803..caf5e274 100644
--- a/.github/workflows/pr-bot-pr-updates.yml
+++ b/.github/workflows/pr-bot-pr-updates.yml
@@ -39,7 +39,7 @@
       - uses: actions/checkout@v2
         with:
           ref: 'master'
-      - run: npm install
+      - run: npm ci
         working-directory: 'scripts/ci/pr-bot'
 
       # Runs a set of commands using the runners shell
diff --git a/.github/workflows/pr-bot-prs-needing-attention.yml b/.github/workflows/pr-bot-prs-needing-attention.yml
index 745e0d4..16ea79a 100644
--- a/.github/workflows/pr-bot-prs-needing-attention.yml
+++ b/.github/workflows/pr-bot-prs-needing-attention.yml
@@ -29,7 +29,7 @@
 
     steps:
       - uses: actions/checkout@v2
-      - run: npm install
+      - run: npm ci
         working-directory: 'scripts/ci/pr-bot'
 
       # Runs a set of commands using the runners shell
diff --git a/.github/workflows/pr-bot-update-reviewers.yml b/.github/workflows/pr-bot-update-reviewers.yml
index 701f2f1..70beb37 100644
--- a/.github/workflows/pr-bot-update-reviewers.yml
+++ b/.github/workflows/pr-bot-update-reviewers.yml
@@ -29,7 +29,7 @@
 
     steps:
       - uses: actions/checkout@v2
-      - run: npm install
+      - run: npm ci
         working-directory: 'scripts/ci/pr-bot'
 
       # Runs a set of commands using the runners shell
diff --git a/.github/workflows/reportGenerator.yml b/.github/workflows/reportGenerator.yml
index 05bc4c0..c0e236c 100644
--- a/.github/workflows/reportGenerator.yml
+++ b/.github/workflows/reportGenerator.yml
@@ -28,7 +28,7 @@
     steps:
     - uses: actions/checkout@v3
     - run: |
-        npm install
+        npm ci
         node generateReport.js
       working-directory: 'scripts/ci/issue-report'
       env:
diff --git a/.github/workflows/typescript_tests.yml b/.github/workflows/typescript_tests.yml
index 9345bdb..2b6c21c 100644
--- a/.github/workflows/typescript_tests.yml
+++ b/.github/workflows/typescript_tests.yml
@@ -51,7 +51,7 @@
         uses: actions/setup-node@v2
         with:
           node-version: '16'
-      - run: npm install
+      - run: npm ci
         working-directory: ./sdks/typescript
       - run: npm run build
         working-directory: ./sdks/typescript
diff --git a/.test-infra/jenkins/LoadTestsBuilder.groovy b/.test-infra/jenkins/LoadTestsBuilder.groovy
index 7c5b739..894b7ae 100644
--- a/.test-infra/jenkins/LoadTestsBuilder.groovy
+++ b/.test-infra/jenkins/LoadTestsBuilder.groovy
@@ -60,9 +60,23 @@
 
   static String parseOptions(Map<String, ?> options) {
     options.collect { entry ->
+
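+      // Validate the option name before building the --key[=value] flag below:
+      // names must be a single token with no whitespace.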
+      if (entry.key.matches(".*\\s.*")) {
+        throw new IllegalArgumentException("""
+          Encountered invalid option name '${entry.key}'. Names must not
+          contain whitespace.
+          """)
+      }
+
       // Flags are indicated by null values
       if (entry.value == null) {
         "--${entry.key}"
+      } else if (entry.value.toString().matches(".*\\s.*") &&
+      !entry.value.toString().matches("'[^']*'")) {
+        throw new IllegalArgumentException("""
+          Option '${entry.key}' has an invalid value, '${entry.value}'. Values
+          must not contain whitespace, or they must be wrapped in single quotes.
+          """)
       } else {
         "--${entry.key}=$entry.value".replace('\"', '\\\"').replace('\'', '\\\'')
       }
diff --git a/.test-infra/jenkins/job_LoadTests_Combine_Python.groovy b/.test-infra/jenkins/job_LoadTests_Combine_Python.groovy
index 1416db2..7c4f1ea 100644
--- a/.test-infra/jenkins/job_LoadTests_Combine_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_Combine_Python.groovy
@@ -101,13 +101,13 @@
 def addStreamingOptions(test){
   test.pipelineOptions << [streaming: null,
     // TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
-    experiments: "use_runner_v2, shuffle_mode=appliance"
+    experiments: "use_runner_v2,shuffle_mode=appliance"
   ]
 }
 
 def loadTestJob = { scope, triggeringContext, jobType ->
   scope.description("Runs Python Combine load tests on Dataflow runner in ${jobType} mode")
-  commonJobProperties.setTopLevelMainJobProperties(scope, 'master', 120)
+  commonJobProperties.setTopLevelMainJobProperties(scope, 'master', 720)
 
   def datasetName = loadTestsBuilder.getBigQueryDataset('load_test', triggeringContext)
   for (testConfiguration in loadTestConfigurations(datasetName, jobType)) {
diff --git a/.test-infra/jenkins/job_LoadTests_GBK_Python.groovy b/.test-infra/jenkins/job_LoadTests_GBK_Python.groovy
index 623a245..9a38af4 100644
--- a/.test-infra/jenkins/job_LoadTests_GBK_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_GBK_Python.groovy
@@ -157,7 +157,7 @@
     // See https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#dataflow-runner-v2
     // for more details.
     // TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
-    experiments: 'use_runner_v2, shuffle_mode=appliance',
+    experiments: 'use_runner_v2,shuffle_mode=appliance',
   ]
 }
 
diff --git a/.test-infra/jenkins/job_LoadTests_GBK_Python_reiterate.groovy b/.test-infra/jenkins/job_LoadTests_GBK_Python_reiterate.groovy
index 764bd60..3fa262a 100644
--- a/.test-infra/jenkins/job_LoadTests_GBK_Python_reiterate.groovy
+++ b/.test-infra/jenkins/job_LoadTests_GBK_Python_reiterate.groovy
@@ -87,7 +87,7 @@
     // See https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#dataflow-runner-v2
     // for more details.
     // TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
-    experiments: 'use_runner_v2, shuffle_mode=appliance',
+    experiments: 'use_runner_v2,shuffle_mode=appliance',
   ]
 }
 
diff --git a/.test-infra/jenkins/job_LoadTests_ParDo_Python.groovy b/.test-infra/jenkins/job_LoadTests_ParDo_Python.groovy
index 8d5d965..44e9497 100644
--- a/.test-infra/jenkins/job_LoadTests_ParDo_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_ParDo_Python.groovy
@@ -132,7 +132,7 @@
     // See https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#dataflow-runner-v2
     // for more details.
     // TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
-    experiments: 'use_runner_v2, shuffle_mode=appliance',
+    experiments: 'use_runner_v2,shuffle_mode=appliance',
   ]
 }
 
diff --git a/.test-infra/jenkins/job_LoadTests_SideInput_Python.groovy b/.test-infra/jenkins/job_LoadTests_SideInput_Python.groovy
index e587a81..404d74c 100644
--- a/.test-infra/jenkins/job_LoadTests_SideInput_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_SideInput_Python.groovy
@@ -40,7 +40,7 @@
       num_workers          : 10,
       autoscaling_algorithm: 'NONE',
       // TODO(https://github.com/apache/beam/issues/20806) remove shuffle_mode=appliance with runner v2 once issue is resolved.
-      experiments          : 'use_runner_v2, shuffle_mode=appliance',
+      experiments          : 'use_runner_v2,shuffle_mode=appliance',
     ] << testSpecificOptions
   ]
 }
diff --git a/.test-infra/jenkins/job_PerformanceTests_CdapIO.groovy b/.test-infra/jenkins/job_PerformanceTests_CdapIO.groovy
new file mode 100644
index 0000000..6724c7d
--- /dev/null
+++ b/.test-infra/jenkins/job_PerformanceTests_CdapIO.groovy
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import CommonJobProperties as common
+import Kubernetes
+import InfluxDBCredentialsHelper
+
+String jobName = "beam_PerformanceTests_Cdap"
+
+job(jobName) {
+  common.setTopLevelMainJobProperties(delegate)
+  common.setAutoJob(delegate, 'H H/6 * * *')
+  common.enablePhraseTriggeringFromPullRequest(
+      delegate,
+      'Java CdapIO Performance Test',
+      'Run Java CdapIO Performance Test')
+  InfluxDBCredentialsHelper.useCredentials(delegate)
+
+  String namespace = common.getKubernetesNamespace(jobName)
+  String kubeconfig = common.getKubeconfigLocationForNamespace(namespace)
+  Kubernetes k8s = Kubernetes.create(delegate, kubeconfig, namespace)
+
+  k8s.apply(common.makePathAbsolute("src/.test-infra/kubernetes/postgres/postgres-service-for-local-dev.yml"))
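+  // Capture the external IP of the postgres-for-dev LoadBalancer service and expose it under
+  // LOAD_BALANCER_IP, which the pipeline options below reference as $LOAD_BALANCER_IP.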
+  String postgresHostName = "LOAD_BALANCER_IP"
+  k8s.loadBalancerIP("postgres-for-dev", postgresHostName)
+
+  Map pipelineOptions = [
+    tempRoot             : 'gs://temp-storage-for-perf-tests',
+    project              : 'apache-beam-testing',
+    runner               : 'DataflowRunner',
+    numberOfRecords      : '600000',
+    bigQueryDataset      : 'beam_performance',
+    bigQueryTable        : 'cdapioit_results',
+    influxMeasurement    : 'cdapioit_results',
+    influxDatabase       : InfluxDBCredentialsHelper.InfluxDBDatabaseName,
+    influxHost           : InfluxDBCredentialsHelper.InfluxDBHostUrl,
+    postgresUsername     : 'postgres',
+    postgresPassword     : 'uuinkks',
+    postgresDatabaseName : 'postgres',
+    postgresServerName   : "\$${postgresHostName}",
+    postgresSsl          : false,
+    postgresPort         : '5432',
+    numWorkers           : '5',
+    autoscalingAlgorithm : 'NONE'
+  ]
+
+  steps {
+    gradle {
+      rootBuildScriptDir(common.checkoutDir)
+      common.setGradleSwitches(delegate)
+      switches("--info")
+      switches("-DintegrationTestPipelineOptions=\'${common.joinPipelineOptions(pipelineOptions)}\'")
+      switches("-DintegrationTestRunner=dataflow")
+      tasks(":sdks:java:io:cdap:integrationTest --tests org.apache.beam.sdk.io.cdap.CdapIOIT")
+    }
+  }
+}
diff --git a/.test-infra/jenkins/job_PreCommit_Java_Spark3_Versions.groovy b/.test-infra/jenkins/job_PreCommit_Java_Spark3_Versions.groovy
new file mode 100644
index 0000000..f13c4c0
--- /dev/null
+++ b/.test-infra/jenkins/job_PreCommit_Java_Spark3_Versions.groovy
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import PrecommitJobBuilder
+
+PrecommitJobBuilder builder = new PrecommitJobBuilder(
+    scope: this,
+    nameBase: 'Java_Spark3_Versions',
+    gradleTask: ':runners:spark:3:sparkVersionsTest',
+    gradleSwitches: [
+      '-PdisableSpotlessCheck=true'
+    ], // spotless checked in separate pre-commit
+    triggerPathPatterns: [
+      '^runners/spark/.*$',
+    ],
+    timeoutMins: 120,
+    )
+builder.build {
+  publishers {
+    archiveJunit('**/build/test-results/**/*.xml')
+  }
+}
\ No newline at end of file
diff --git a/CHANGES.md b/CHANGES.md
index 1e445cc..8251948 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -28,7 +28,6 @@
 ## I/Os
 
 * Support for X source added (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
-* Projection Pushdown optimizer is now on by default for streaming, matching the behavior of batch pipelines since 2.38.0. If you encounter a bug with the optimizer, please file an issue and disable the optimizer using pipeline option `--experiments=disable_projection_pushdown`.
 
 ## New Features / Improvements
 
@@ -37,7 +36,6 @@
 ## Breaking Changes
 
 * X behavior was changed ([#X](https://github.com/apache/beam/issues/X)).
-* Projection Pushdown optimizer may break Dataflow upgrade compatibility for optimized pipelines when it removes unused fields. If you need to upgrade and encounter a compatibility issue, disable the optimizer using pipeline option `--experiments=disable_projection_pushdown`.
 
 ## Deprecations
 
@@ -65,11 +63,44 @@
 ## New Features / Improvements
 
 * X feature added (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
+* Added support for Zstd compression to the Python SDK.
+
+## Breaking Changes
+
+* X behavior was changed ([#X](https://github.com/apache/beam/issues/X)).
+
+## Deprecations
+
+* X behavior is deprecated and will be removed in X versions ([#X](https://github.com/apache/beam/issues/X)).
+
+## Bugfixes
+
+* Fixed X (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
+
+## Known Issues
+
+* ([#X](https://github.com/apache/beam/issues/X)).
+
+# [2.41.0] - Unreleased
+
+## Highlights
+
+* New highly anticipated feature X added to Python SDK ([#X](https://github.com/apache/beam/issues/X)).
+* New highly anticipated feature Y added to Java SDK ([#Y](https://github.com/apache/beam/issues/Y)).
+
+## I/Os
+
+* Support for X source added (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
+* Projection Pushdown optimizer is now on by default for streaming, matching the behavior of batch pipelines since 2.38.0. If you encounter a bug with the optimizer, please file an issue and disable the optimizer using pipeline option `--experiments=disable_projection_pushdown`.
+
+## New Features / Improvements
+
+* X feature added (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
 * Previously available in Java sdk, Python sdk now also supports logging level overrides per module. ([#18222](https://github.com/apache/beam/issues/18222)).
 
 ## Breaking Changes
 
 * X behavior was changed ([#X](https://github.com/apache/beam/issues/X)).
+* Projection Pushdown optimizer may break Dataflow upgrade compatibility for optimized pipelines when it removes unused fields. If you need to upgrade and encounter a compatibility issue, disable the optimizer using pipeline option `--experiments=disable_projection_pushdown`.
 
 ## Deprecations
 
@@ -82,6 +113,7 @@
 ## Bugfixes
 
 * Fixed a condition where retrying queries would yield an incorrect cursor in the Java SDK Firestore Connector ([#22089](https://github.com/apache/beam/issues/22089)).
+* Fixed the plumbing of allowed lateness in the Go SDK. Previously the user-set value was ignored and allowed lateness was always set to 0. ([#22474](https://github.com/apache/beam/issues/22474)).
 * Fixed X (Java/Python) ([#X](https://github.com/apache/beam/issues/X)).
 
 ## Known Issues
diff --git a/build.gradle.kts b/build.gradle.kts
index 7ea1889..3ce7a57 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -195,7 +195,6 @@
 
 tasks.register("javaPostCommit") {
   dependsOn(":sdks:java:extensions:google-cloud-platform-core:postCommit")
-  dependsOn(":sdks:java:extensions:timeseries:postCommit")
   dependsOn(":sdks:java:extensions:zetasketch:postCommit")
   dependsOn(":sdks:java:extensions:ml:postCommit")
 }
@@ -220,6 +219,7 @@
   dependsOn(":sdks:java:io:parquet:hadoopVersionsTest")
   dependsOn(":sdks:java:extensions:sorter:hadoopVersionsTest")
   dependsOn(":runners:spark:2:hadoopVersionsTest")
+  dependsOn(":runners:spark:3:hadoopVersionsTest")
 }
 
 tasks.register("sqlPostCommit") {
diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy
index e4d8dd8..d106043 100644
--- a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy
+++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy
@@ -389,7 +389,7 @@
 
     // Automatically use the official release version if we are performing a release
     // otherwise append '-SNAPSHOT'
-    project.version = '2.41.0'
+    project.version = '2.42.0'
     if (!isRelease(project)) {
       project.version += '-SNAPSHOT'
     }
@@ -559,6 +559,7 @@
         checker_qual                                : "org.checkerframework:checker-qual:$checkerframework_version",
         classgraph                                  : "io.github.classgraph:classgraph:$classgraph_version",
         commons_codec                               : "commons-codec:commons-codec:1.15",
+        commons_collections                         : "commons-collections:commons-collections:3.2.2",
         commons_compress                            : "org.apache.commons:commons-compress:1.21",
         commons_csv                                 : "org.apache.commons:commons-csv:1.8",
         commons_io                                  : "commons-io:commons-io:2.6",
@@ -592,7 +593,7 @@
         google_cloud_core_grpc                      : "com.google.cloud:google-cloud-core-grpc", // google_cloud_platform_libraries_bom sets version
         google_cloud_datacatalog_v1beta1            : "com.google.cloud:google-cloud-datacatalog", // google_cloud_platform_libraries_bom sets version
         google_cloud_dataflow_java_proto_library_all: "com.google.cloud.dataflow:google-cloud-dataflow-java-proto-library-all:0.5.160304",
-        google_cloud_datastore_v1_proto_client      : "com.google.cloud.datastore:datastore-v1-proto-client:2.2.10",
+        google_cloud_datastore_v1_proto_client      : "com.google.cloud.datastore:datastore-v1-proto-client:2.9.0",
         google_cloud_firestore                      : "com.google.cloud:google-cloud-firestore", // google_cloud_platform_libraries_bom sets version
         google_cloud_pubsub                         : "com.google.cloud:google-cloud-pubsub", // google_cloud_platform_libraries_bom sets version
         google_cloud_pubsublite                     : "com.google.cloud:google-cloud-pubsublite",  // google_cloud_platform_libraries_bom sets version
@@ -687,7 +688,7 @@
         proto_google_cloud_bigtable_admin_v2        : "com.google.api.grpc:proto-google-cloud-bigtable-admin-v2", // google_cloud_platform_libraries_bom sets version
         proto_google_cloud_bigtable_v2              : "com.google.api.grpc:proto-google-cloud-bigtable-v2", // google_cloud_platform_libraries_bom sets version
         proto_google_cloud_datacatalog_v1beta1      : "com.google.api.grpc:proto-google-cloud-datacatalog-v1beta1", // google_cloud_platform_libraries_bom sets version
-        proto_google_cloud_datastore_v1             : "com.google.api.grpc:proto-google-cloud-datastore-v1:0.93.10", // google_cloud_platform_libraries_bom sets version
+        proto_google_cloud_datastore_v1             : "com.google.api.grpc:proto-google-cloud-datastore-v1:0.100.0", // google_cloud_platform_libraries_bom sets version
         proto_google_cloud_firestore_v1             : "com.google.api.grpc:proto-google-cloud-firestore-v1", // google_cloud_platform_libraries_bom sets version
         proto_google_cloud_pubsub_v1                : "com.google.api.grpc:proto-google-cloud-pubsub-v1", // google_cloud_platform_libraries_bom sets version
         proto_google_cloud_pubsublite_v1            : "com.google.api.grpc:proto-google-cloud-pubsublite-v1", // google_cloud_platform_libraries_bom sets version
@@ -1457,6 +1458,8 @@
           args '-f=0'
           args '-wf=0'
           args '-foe=true'
+          // Allow jmhTest to run concurrently with other jmhTest instances
+          systemProperties(['jmh.ignoreLock' : 'true'])
         }
         project.check.dependsOn jmhTest
       }
@@ -2507,6 +2510,7 @@
         // specific binary here could make gradle delete it while pip will believe
         // the package is fully installed.
         outputs.dirs(project.ext.envdir)
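+        // Never consider this task up to date, so it reruns even when the output directory already exists.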
+        outputs.upToDateWhen { false }
       }
 
       project.ext.pythonSdkDeps = project.files(
diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/GrpcVendoring_1_48_1.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/GrpcVendoring_1_48_1.groovy
new file mode 100644
index 0000000..b2482c0
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/GrpcVendoring_1_48_1.groovy
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.beam.gradle
+
+/**
+ * Utilities for working with our vendored version of gRPC.
+ * 
+ * To update:
+ * 1. Determine the set of io.grpc libraries we want to include, most likely a superset
+ *    of the previous vendored gRPC version.
+ * 2. Use mvn dependency:tree and https://search.maven.org/search?q=g:io.grpc
+ *    to determine the dependency tree. You may need to search for optional dependencies
+ *    and determine if they are needed (e.g. conscrypt).
+ * 3. Validate built artifacts by running linkage tool
+ *    (https://github.com/apache/beam/tree/master/vendor#how-to-validate-the-vendored-dependencies)
+ *    and unit and integration tests in a PR (e.g. https://github.com/apache/beam/pull/16460,
+ *    https://github.com/apache/beam/pull/16459)
+ */
+class GrpcVendoring_1_48_1 {
+  static def grpc_version = "1.48.1"
+
+  // See https://github.com/grpc/grpc-java/blob/v1.48.1/gradle/libs.versions.toml
+  // or https://search.maven.org/search?q=g:io.grpc%201.48.1
+  static def guava_version = "31.1-jre"
+  static def protobuf_version = "3.21.1"
+  static def gson_version = "2.9.0"
+  static def google_auth_version = "1.4.0"
+  static def opencensus_version = "0.31.0"
+  static def conscrypt_version = "2.5.2"
+  static def proto_google_common_protos_version = "2.9.0"
+  static def netty_version = "4.1.77.Final"
+  static def netty_tcnative_version = "2.0.53.Final"
+
+  /** Returns the list of implementation time dependencies. */
+  static List<String> dependencies() {
+    return [
+      "com.google.guava:guava:$guava_version",
+      "com.google.protobuf:protobuf-java:$protobuf_version",
+      "com.google.protobuf:protobuf-java-util:$protobuf_version",
+      "com.google.code.gson:gson:$gson_version",
+      "io.grpc:grpc-auth:$grpc_version",
+      "io.grpc:grpc-core:$grpc_version",
+      "io.grpc:grpc-context:$grpc_version",
+      "io.grpc:grpc-netty:$grpc_version",
+      "io.grpc:grpc-protobuf:$grpc_version",
+      "io.grpc:grpc-stub:$grpc_version",
+      "io.grpc:grpc-testing:$grpc_version",
+      // Use a classifier to ensure we get the jar containing native libraries. In the future
+      // hopefully netty releases a single jar containing native libraries for all architectures.
+      "io.netty:netty-transport-native-epoll:$netty_version:linux-x86_64",
+      "io.netty:netty-tcnative-boringssl-static:$netty_tcnative_version",
+      "com.google.auth:google-auth-library-credentials:$google_auth_version",
+      "com.google.api.grpc:proto-google-common-protos:$proto_google_common_protos_version",
+      "io.opencensus:opencensus-api:$opencensus_version",
+      "io.opencensus:opencensus-contrib-grpc-metrics:$opencensus_version",
+    ]
+  }
+
+  /**
+   * Returns the list of runtime dependencies that should be exported as runtime
+   * dependencies within the vendored jar.
+   */
+  static List<String> runtimeDependencies() {
+    return [
+      'com.google.errorprone:error_prone_annotations:2.14.0',
+      // TODO(BEAM-9288): Enable relocation for conscrypt
+      "org.conscrypt:conscrypt-openjdk-uber:$conscrypt_version"
+    ]
+  }
+
+  /**
+   * Returns the list of test dependencies.
+   */
+  static List<String> testDependencies() {
+    return [
+      'junit:junit:4.12',
+    ]
+  }
+
+  static Map<String, String> relocations() {
+    // The relocation paths below specifically use gRPC and the full version string as
+    // the code relocation prefix. See https://lists.apache.org/thread.html/4c12db35b40a6d56e170cd6fc8bb0ac4c43a99aa3cb7dbae54176815@%3Cdev.beam.apache.org%3E
+    // for further details.
+
+    // To produce the list of necessary relocations, one needs to start with a set of target
+    // packages that one wants to vendor, find all necessary transitive dependencies of that
+    // set and provide relocations for each such that all necessary packages and their
+    // dependencies are relocated. Any optional dependency that doesn't need relocation
+    // must be excluded via an 'exclude' rule. Libraries that use JNI or reflection add further
+    // complexity and have to be handled on a case-by-case basis, by checking whether they support
+    // relocation and how to go about it in any documentation those libraries may provide.
+    // The 'validateShadedJarDoesntLeakNonOrgApacheBeamClasses' check ensures that there are no
+    // classes outside of the 'org.apache.beam' namespace.
+
+    String version = "v1p48p1";
+    String prefix = "org.apache.beam.vendor.grpc.${version}";
+    List<String> packagesToRelocate = [
+      // guava uses the com.google.common and com.google.thirdparty package namespaces
+      "com.google.common",
+      "com.google.thirdparty",
+      "com.google.protobuf",
+      "com.google.gson",
+      "com.google.auth",
+      "com.google.api",
+      "com.google.cloud",
+      "com.google.logging",
+      "com.google.longrunning",
+      "com.google.rpc",
+      "com.google.type",
+      "com.google.geo.type",
+      "io.grpc",
+      "io.netty",
+      "io.opencensus",
+      "io.perfmark",
+    ]
+
+    return packagesToRelocate.collectEntries {
+      [ (it): "${prefix}.${it}" ]
+    } + [
+      // Adapted from https://github.com/grpc/grpc-java/blob/e283f70ad91f99c7fee8b31b605ef12a4f9b1690/netty/shaded/build.gradle#L41
+      // We       "io.netty": "${prefix}.io.netty",have to be careful with these replacements as they must not match any
+      // string in NativeLibraryLoader, else they cause corruption. Note that
+      // this includes concatenation of string literals and constants.
+      'META-INF/native/libnetty': "META-INF/native/liborg_apache_beam_vendor_grpc_${version}_netty",
+      'META-INF/native/netty': "META-INF/native/org_apache_beam_vendor_grpc_${version}_netty",
+      'META-INF/native/lib-netty': "META-INF/native/lib-org-apache-beam-vendor-grpc-${version}-netty",
+    ]
+  }
+
+  /** Returns the list of shading exclusions. */
+  static List<String> exclusions() {
+    return [
+      // Don't include in the vendored jar:
+      // android annotations, errorprone, checkerframework, JDK8 annotations, objenesis, junit,
+      // commons-logging, log4j, slf4j and mockito
+      "android/annotation/**/",
+      "com/google/errorprone/**",
+      "com/google/instrumentation/**",
+      "com/google/j2objc/annotations/**",
+      "io/netty/handler/codec/marshalling/**",
+      "io/netty/handler/codec/spdy/**",
+      "io/netty/handler/codec/compression/JZlib*",
+      "io/netty/handler/codec/compression/Lz4*",
+      "io/netty/handler/codec/compression/Lzf*",
+      "io/netty/handler/codec/compression/Lzma*",
+      "io/netty/handler/codec/protobuf/Protobuf*Nano.class",
+      "io/netty/util/internal/logging/CommonsLogger*",
+      "io/netty/util/internal/logging/LocationAwareSlf4JLogger*",
+      "io/netty/util/internal/logging/Log4JLogger*",
+      "io/netty/util/internal/logging/Log4J2Logger*",
+      "javax/annotation/**",
+      "junit/**",
+      "module-info.class",
+      "org/checkerframework/**",
+      "org/codehaus/mojo/animal_sniffer/**",
+      "org/conscrypt/**",
+      "META-INF/native/libconscrypt**",
+      "META-INF/native/conscrypt**",
+      "org/hamcrest/**",
+      "org/junit/**",
+      "org/mockito/**",
+      "org/objenesis/**",
+    ]
+  }
+
+  /**
+   * Returns a closure containing the dependencies map used for shading gRPC within the main
+   * Apache Beam project.
+   */
+  static Object dependenciesClosure() {
+    return {
+      dependencies().each { implementation it }
+      runtimeDependencies().each { shadow it }
+    }
+  }
+
+  /**
+   * Returns a closure with the code relocation configuration for shading gRPC within the main
+   * Apache Beam project.
+   */
+  static Object shadowClosure() {
+    return {
+      relocations().each { srcNamespace, destNamespace ->
+        relocate srcNamespace, destNamespace
+      }
+      exclusions().each { exclude it }
+    }
+  }
+}
diff --git a/examples/notebooks/beam-ml/run_inference_basic.ipynb b/examples/notebooks/beam-ml/run_inference_basic.ipynb
new file mode 100644
index 0000000..91f8999
--- /dev/null
+++ b/examples/notebooks/beam-ml/run_inference_basic.ipynb
@@ -0,0 +1,1367 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 2,
+  "metadata": {
+    "colab": {
+      "name": "Beam RunInference",
+      "provenance": [],
+      "collapsed_sections": [],
+      "toc_visible": true
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "cells": [
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title ###### Licensed to the Apache Software Foundation (ASF), Version 2.0 (the \"License\")\n",
+        "\n",
+        "# Licensed to the Apache Software Foundation (ASF) under one\n",
+        "# or more contributor license agreements. See the NOTICE file\n",
+        "# distributed with this work for additional information\n",
+        "# regarding copyright ownership. The ASF licenses this file\n",
+        "# to you under the Apache License, Version 2.0 (the\n",
+        "# \"License\"); you may not use this file except in compliance\n",
+        "# with the License. You may obtain a copy of the License at\n",
+        "#\n",
+        "#   http://www.apache.org/licenses/LICENSE-2.0\n",
+        "#\n",
+        "# Unless required by applicable law or agreed to in writing,\n",
+        "# software distributed under the License is distributed on an\n",
+        "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
+        "# KIND, either express or implied. See the License for the\n",
+        "# specific language governing permissions and limitations\n",
+        "# under the License."
+      ],
+      "metadata": {
+        "id": "C1rAsD2L-hSO",
+        "cellView": "form"
+      },
+      "id": "C1rAsD2L-hSO",
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "id": "b6f8f3af-744e-4eaa-8a30-6d03e8e4d21e",
+      "metadata": {
+        "id": "b6f8f3af-744e-4eaa-8a30-6d03e8e4d21e"
+      },
+      "source": [
+        "# RunInference\n",
+        "\n",
+        "<button>\n",
+        "  <a href=\"https://beam.apache.org/documentation/sdks/python-machine-learning/\">\n",
+        "    <img src=\"https://beam.apache.org/images/favicon.ico\" alt=\"Open the docs\" height=\"16\"/>\n",
+        "    Beam RunInference\n",
+        "  </a>\n",
+        "</button>\n",
+        "\n",
+        "In this notebook, we walk through the use of the RunInference transform.\n",
+        "The transform and its accompanying [ModelHandler](https://beam.apache.org/releases/pydoc/current/apache_beam.ml.inference.base.html#apache_beam.ml.inference.base.ModelHandler) classes handle the following tasks:\n",
+        "\n",
+        "\n",
+        "*   Optimizing loading models from popular frameworks.\n",
+        "*   Batching examples in a scalable fashion.\n",
+        "\n",
+        "\n",
+        "This notebook illustrates common RunInference patterns such as the following:\n",
+        "*   Generating predictions using both Pytorch and Scikit-learn.\n",
+        "*   Post processing results after RunInference.\n",
+        "*   Inference with multiple models in the same pipeline.\n",
+        "\n",
+        "The linear regression models used in these samples are trained on data that correspondes to the 5 and 10 times table; that is,`y = 5x` and `y = 10x` respectively."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "299af9bb-b2fc-405c-96e7-ee0a6ae24bdd",
+      "metadata": {
+        "id": "299af9bb-b2fc-405c-96e7-ee0a6ae24bdd"
+      },
+      "source": [
+        "### Dependencies\n",
+        "\n",
+        "The RunInference library is available in Apache Beam version <b>2.40</b> or later.\n",
+        "\n",
+        "Pytorch module is needed to use Pytorch RunInference API. use `pip` to install Pytorch."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# issue: https://github.com/apache/beam/issues/22218. Becuase of the updates to the Google cloud APIs, Beam SDK from 2.34.0 till 2.40.0 has some dependency conflicts. See the issue for more details.\n",
+        "# Workaround to install the apache beam without getting stuck for long time. Runtime might need to restart after this step.\n",
+        "!pip install google-api-core==1.31.6 --quiet\n",
+        "!pip install google-cloud-pubsub==2.13.1 google-cloud-bigquery-storage==2.13.2 --quiet\n",
+        "!pip install apache-beam[gcp,dataframe] --quiet"
+      ],
+      "metadata": {
+        "id": "loxD-rOVchRn",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "661baa2d-6e0f-4478-b7c1-db911593d5ff"
+      },
+      "id": "loxD-rOVchRn",
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
+            "pandas-gbq 0.13.3 requires google-cloud-bigquery[bqstorage,pandas]<2.0.0dev,>=1.11.1, but you have google-cloud-bigquery 2.34.4 which is incompatible.\u001b[0m\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "7f841596-f217-46d2-b64e-1952db4de4cb",
+      "metadata": {
+        "id": "7f841596-f217-46d2-b64e-1952db4de4cb",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "da04ccb9-0801-47f6-ec9e-e87f0ca4569f"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
+            "Requirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (1.12.0+cu113)\n",
+            "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch) (4.1.1)\n"
+          ]
+        }
+      ],
+      "source": [
+        "%pip install torch"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "9a92e3a7-beb6-46ae-a5b0-53c15487de38",
+      "metadata": {
+        "id": "9a92e3a7-beb6-46ae-a5b0-53c15487de38"
+      },
+      "outputs": [],
+      "source": [
+        "import argparse\n",
+        "import csv\n",
+        "import json\n",
+        "import os\n",
+        "import torch\n",
+        "from typing import Tuple\n",
+        "\n",
+        "import apache_beam as beam\n",
+        "import numpy\n",
+        "from apache_beam.io.gcp.bigquery import ReadFromBigQuery\n",
+        "from apache_beam.ml.inference.base import KeyedModelHandler\n",
+        "from apache_beam.ml.inference.base import PredictionResult\n",
+        "from apache_beam.ml.inference.base import RunInference\n",
+        "from apache_beam.dataframe.convert import to_pcollection\n",
+        "from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerTensor\n",
+        "from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerKeyedTensor\n",
+        "from apache_beam.options.pipeline_options import PipelineOptions\n",
+        "\n",
+        "import warnings\n",
+        "warnings.filterwarnings('ignore')"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from google.colab import auth\n",
+        "auth.authenticate_user()"
+      ],
+      "metadata": {
+        "id": "V0E35R5Ka2cE"
+      },
+      "id": "V0E35R5Ka2cE",
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "248458a6-cfd8-474d-ad0e-f37f7ae981ae",
+      "metadata": {
+        "id": "248458a6-cfd8-474d-ad0e-f37f7ae981ae"
+      },
+      "outputs": [],
+      "source": [
+        "# Constants\n",
+        "project = \"<your-project>\"\n",
+        "bucket = \"<your-bucket>\"\n",
+        "\n",
+        "# set the project to avoid warnings.\n",
+        "os.environ['GOOGLE_CLOUD_PROJECT'] = project\n",
+        "\n",
+        "save_model_dir_multiply_five = 'five_times_table_torch.pt'\n",
+        "save_model_dir_multiply_ten = 'ten_times_table_torch.pt'"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "b2b7cedc-79f5-4599-8178-e5da35dba032",
+      "metadata": {
+        "tags": [],
+        "id": "b2b7cedc-79f5-4599-8178-e5da35dba032"
+      },
+      "source": [
+        "## Create data and Pytorch models for RunInference transform"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "202e5a3e-4ccd-4ae3-9852-e47de0721839",
+      "metadata": {
+        "id": "202e5a3e-4ccd-4ae3-9852-e47de0721839"
+      },
+      "source": [
+        "### Linear regression model in Pytorch."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "68bf8bf0-f735-45ee-8ebb-a2d8bb8a6bc7",
+      "metadata": {
+        "id": "68bf8bf0-f735-45ee-8ebb-a2d8bb8a6bc7"
+      },
+      "outputs": [],
+      "source": [
+        "class LinearRegression(torch.nn.Module):\n",
+        "    def __init__(self, input_dim=1, output_dim=1):\n",
+        "        super().__init__()\n",
+        "        self.linear = torch.nn.Linear(input_dim, output_dim)  \n",
+        "    def forward(self, x):\n",
+        "        out = self.linear(x)\n",
+        "        return out"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "1918435c-0029-4eb6-8eee-bda5470eb2ff",
+      "metadata": {
+        "id": "1918435c-0029-4eb6-8eee-bda5470eb2ff"
+      },
+      "source": [
+        "### Prepare train and test data to train a 5 times model.\n",
+        "* `x` contains values in the range from 0 to 99.\n",
+        "* `y` is a list of 5 * `x`. \n",
+        "* `value_to_predict` includes values outside of the training data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "9302917f-6200-4af4-a410-4bd6f2a070b8",
+      "metadata": {
+        "id": "9302917f-6200-4af4-a410-4bd6f2a070b8"
+      },
+      "outputs": [],
+      "source": [
+        "x = numpy.arange(0, 100, dtype=numpy.float32).reshape(-1, 1)\n",
+        "y = (x * 5).reshape(-1, 1)\n",
+        "value_to_predict = numpy.array([20, 40, 60, 90], dtype=numpy.float32).reshape(-1, 1)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "9dc22aec-08c3-43ab-a5ce-451cb63c485a",
+      "metadata": {
+        "id": "9dc22aec-08c3-43ab-a5ce-451cb63c485a"
+      },
+      "source": [
+        "### Train the linear regression mode on 5 times data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "0a8b7924-ff06-4584-8f41-079268387a67",
+      "metadata": {
+        "id": "0a8b7924-ff06-4584-8f41-079268387a67"
+      },
+      "outputs": [],
+      "source": [
+        "five_times_model = LinearRegression()\n",
+        "optimizer = torch.optim.Adam(five_times_model.parameters())\n",
+        "loss_fn = torch.nn.L1Loss()\n",
+        "\n",
+        "\"\"\"\n",
+        "Train the five_times_model\n",
+        "\"\"\"\n",
+        "epochs = 10000\n",
+        "tensor_x = torch.from_numpy(x)\n",
+        "tensor_y = torch.from_numpy(y)\n",
+        "for epoch in range(epochs):\n",
+        "    y_pred = five_times_model(tensor_x)\n",
+        "    loss = loss_fn(y_pred, tensor_y)\n",
+        "    five_times_model.zero_grad()\n",
+        "    loss.backward()\n",
+        "    optimizer.step()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "bd106b29-6187-42c1-9743-1666c147b5e3",
+      "metadata": {
+        "id": "bd106b29-6187-42c1-9743-1666c147b5e3"
+      },
+      "source": [
+        "Save the model using `torch.save()` and verify if the saved model file exists."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "882bbada-4f6d-4370-a047-c5961e564ee8",
+      "metadata": {
+        "id": "882bbada-4f6d-4370-a047-c5961e564ee8",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "3002ed41-dbd5-4a87-d2d1-d1c7908be2f2"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "True\n"
+          ]
+        }
+      ],
+      "source": [
+        "torch.save(five_times_model.state_dict(), save_model_dir_multiply_five)\n",
+        "print(os.path.exists(save_model_dir_multiply_five)) # verify if the model is saved"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "fa84cfca-83c6-4a91-aea1-3dd034c42ae0",
+      "metadata": {
+        "id": "fa84cfca-83c6-4a91-aea1-3dd034c42ae0"
+      },
+      "source": [
+        "### Prepare train and test data to train a 10 times model.\n",
+        "* `x` contains values in the range from 0 to 99.\n",
+        "* `y` is a list of 10 * `x`. "
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "27e0d1f6-2c3e-4418-8fb0-b5b89ffa66d5",
+      "metadata": {
+        "id": "27e0d1f6-2c3e-4418-8fb0-b5b89ffa66d5"
+      },
+      "outputs": [],
+      "source": [
+        "x = numpy.arange(0, 100, dtype=numpy.float32).reshape(-1, 1)\n",
+        "y = (x * 10).reshape(-1, 1)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "24d946dc-4fe0-4030-8f6a-aa8d27fd353d",
+      "metadata": {
+        "id": "24d946dc-4fe0-4030-8f6a-aa8d27fd353d"
+      },
+      "source": [
+        "### Train the linear regression model on 10 times data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "2b352313-b791-48fd-9b9d-b54233176416",
+      "metadata": {
+        "id": "2b352313-b791-48fd-9b9d-b54233176416"
+      },
+      "outputs": [],
+      "source": [
+        "ten_times_model = LinearRegression()\n",
+        "optimizer = torch.optim.Adam(ten_times_model.parameters())\n",
+        "loss_fn = torch.nn.L1Loss()\n",
+        "\n",
+        "epochs = 10000\n",
+        "tensor_x = torch.from_numpy(x)\n",
+        "tensor_y = torch.from_numpy(y)\n",
+        "for epoch in range(epochs):\n",
+        "    y_pred = ten_times_model(tensor_x)\n",
+        "    loss = loss_fn(y_pred, tensor_y)\n",
+        "    ten_times_model.zero_grad()\n",
+        "    loss.backward()\n",
+        "    optimizer.step()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "6f959e3b-230b-45e2-9df3-dd1f11acacd7",
+      "metadata": {
+        "id": "6f959e3b-230b-45e2-9df3-dd1f11acacd7"
+      },
+      "source": [
+        "Save the model using `torch.save()`"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "42b2ca0f-5d44-4d15-a313-f3d56ae7f675",
+      "metadata": {
+        "id": "42b2ca0f-5d44-4d15-a313-f3d56ae7f675",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "ed9f51c1-8dfe-44bc-c861-28d660ad3799"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "True\n"
+          ]
+        }
+      ],
+      "source": [
+        "torch.save(ten_times_model.state_dict(), save_model_dir_multiply_ten)\n",
+        "print(os.path.exists(save_model_dir_multiply_ten)) # verify if the model is saved"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "2e20efc4-13e8-46e2-9848-c0347deaa5af",
+      "metadata": {
+        "id": "2e20efc4-13e8-46e2-9848-c0347deaa5af"
+      },
+      "source": [
+        "# Pattern 1: RunInference for predictions."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "1099fe94-d4cf-422e-a0d3-0cfba8af64d5",
+      "metadata": {
+        "id": "1099fe94-d4cf-422e-a0d3-0cfba8af64d5"
+      },
+      "source": [
+        "### Step 1 - Use RunInference within the pipeline.\n",
+        "\n",
+        "1. Create pytorch model handler object by passing required arguments such as `state_dict_path`, `model_class`, `model_params` to the `PytorchModelHandlerTensor` class.\n",
+        "2. Pass the `PytorchModelHandlerTensor` object to the RunInference transform to peform prediction on unkeyed data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "e488a821-3b70-4284-96f3-ddee4dcb9d71",
+      "metadata": {
+        "id": "e488a821-3b70-4284-96f3-ddee4dcb9d71",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "6f4e4136-aa6c-4fd4-8be6-2ed8d7ca4545"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "PredictionResult(example=tensor([20.]), inference=tensor([99.9943], grad_fn=<UnbindBackward0>))\n",
+            "PredictionResult(example=tensor([40.]), inference=tensor([199.9889], grad_fn=<UnbindBackward0>))\n",
+            "PredictionResult(example=tensor([60.]), inference=tensor([299.9835], grad_fn=<UnbindBackward0>))\n",
+            "PredictionResult(example=tensor([90.]), inference=tensor([449.9753], grad_fn=<UnbindBackward0>))\n"
+          ]
+        }
+      ],
+      "source": [
+        "torch_five_times_model_handler = PytorchModelHandlerTensor(\n",
+        "    state_dict_path=save_model_dir_multiply_five,\n",
+        "    model_class=LinearRegression,\n",
+        "    model_params={'input_dim': 1,\n",
+        "                  'output_dim': 1}\n",
+        "                  )\n",
+        "pipeline = beam.Pipeline()\n",
+        "\n",
+        "with pipeline as p:\n",
+        "      (\n",
+        "      p \n",
+        "      | \"ReadInputData\" >> beam.Create(value_to_predict)\n",
+        "      | \"ConvertNumpyToTensor\" >> beam.Map(torch.Tensor)\n",
+        "      | \"RunInferenceTorch\" >> RunInference(torch_five_times_model_handler)\n",
+        "      | beam.Map(print)\n",
+        "      )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "9d95e69b-203f-4abb-9abb-360bdf4d769a",
+      "metadata": {
+        "id": "9d95e69b-203f-4abb-9abb-360bdf4d769a"
+      },
+      "source": [
+        "# Pattern 2: Post-process RunInference results.\n",
+        "Add a `PredictionProcessor` to the pipeline after `RunInference`. `PredictionProcessor` processes the output of the `RunInference` transform."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "96f38a5a-4db0-4c39-8ce7-80d9f9911b48",
+      "metadata": {
+        "id": "96f38a5a-4db0-4c39-8ce7-80d9f9911b48",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "1bfa4cc6-ef01-4020-c739-df1efdc632c4"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "input is 20.0 output is 99.99430084228516\n",
+            "input is 40.0 output is 199.98887634277344\n",
+            "input is 60.0 output is 299.98345947265625\n",
+            "input is 90.0 output is 449.9753112792969\n"
+          ]
+        }
+      ],
+      "source": [
+        "class PredictionProcessor(beam.DoFn):\n",
+        "  \"\"\"\n",
+        "  A processor to format the output of the RunInference transform.\n",
+        "  \"\"\"\n",
+        "  def process(\n",
+        "      self,\n",
+        "      element: PredictionResult):\n",
+        "    input_value = element.example\n",
+        "    output_value = element.inference\n",
+        "    yield (f\"input is {input_value.item()} output is {output_value.item()}\")\n",
+        "\n",
+        "pipeline = beam.Pipeline()\n",
+        "\n",
+        "with pipeline as p:\n",
+        "    (\n",
+        "    p\n",
+        "    | \"ReadInputData\" >> beam.Create(value_to_predict)\n",
+        "    | \"ConvertNumpyToTensor\" >> beam.Map(torch.Tensor)\n",
+        "    | \"RunInferenceTorch\" >> RunInference(torch_five_times_model_handler)\n",
+        "    | \"PostProcessPredictions\" >> beam.ParDo(PredictionProcessor())\n",
+        "    | beam.Map(print)\n",
+        "    )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "2be80463-cf79-481c-9d6a-81e500f1707b",
+      "metadata": {
+        "id": "2be80463-cf79-481c-9d6a-81e500f1707b"
+      },
+      "source": [
+        "# Pattern 3: Attach a key"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "f22da313-5bf8-4334-865b-bbfafc374e63",
+      "metadata": {
+        "id": "f22da313-5bf8-4334-865b-bbfafc374e63"
+      },
+      "source": [
+        "## Step 1 - Create a source with attached key.\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "746b67a7-3562-467f-bea3-d8cd18c14927",
+      "metadata": {
+        "id": "746b67a7-3562-467f-bea3-d8cd18c14927"
+      },
+      "source": [
+        "## Step 2 - Modify model handler and post processor.\n",
+        "* Modify the pipeline to read from sources like CSV files and BigQuery.\n",
+        "\n",
+        "In this step we:\n",
+        "\n",
+        "* Wrap the `PytorchModelHandlerTensor` object around `KeyedModelHandler` to handle keyed data.\n",
+        "* Add a map transform, which converts a table row into `Tuple[str, float]`.\n",
+        "* Add a map transform which converts `Tuple[str, float]` from  to `Tuple[str, torch.Tensor]`.\n",
+        "* Modify the post inference processor to output results along with the key."
+      ]
+    },
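+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A minimal sketch of the wrapping step, assuming the `torch_five_times_model_handler` defined in Pattern 1:\n",
+        "\n",
+        "```python\n",
+        "keyed_torch_five_times_model_handler = KeyedModelHandler(torch_five_times_model_handler)\n",
+        "```"
+      ]
+    },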
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "90b263fc-97a5-43dc-8874-083d7e65e96d",
+      "metadata": {
+        "id": "90b263fc-97a5-43dc-8874-083d7e65e96d"
+      },
+      "outputs": [],
+      "source": [
+        "class PredictionWithKeyProcessor(beam.DoFn):\n",
+        "    def __init__(self):\n",
+        "        beam.DoFn.__init__(self)\n",
+        "\n",
+        "    def process(\n",
+        "          self,\n",
+        "          element: Tuple[str, PredictionResult]):\n",
+        "        key = element[0]\n",
+        "        input_value = element[1].example\n",
+        "        output_value = element[1].inference\n",
+        "        yield (f\"key: {key}, input: {input_value.item()} output: {output_value.item()}\" )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "c9b0fb49-d605-4f26-931a-57f42b0ad253",
+      "metadata": {
+        "id": "c9b0fb49-d605-4f26-931a-57f42b0ad253"
+      },
+      "source": [
+        "#### Use BigQuery as the source."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "45ce4330-7d33-4c53-8033-f4fa02383894",
+      "metadata": {
+        "id": "45ce4330-7d33-4c53-8033-f4fa02383894"
+      },
+      "source": [
+        "Install Google Cloud BigQuery API using `pip`."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "4eb859dd-ba54-45a1-916b-5bbe4dc3f16e",
+      "metadata": {
+        "id": "4eb859dd-ba54-45a1-916b-5bbe4dc3f16e",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "c01594f4-443e-434a-b61a-a38beb00f1a9"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
+            "pandas-gbq 0.13.3 requires google-cloud-bigquery[bqstorage,pandas]<2.0.0dev,>=1.11.1, but you have google-cloud-bigquery 3.3.0 which is incompatible.\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "%pip install --upgrade google-cloud-bigquery --quiet"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "6e869347-dd49-40be-b1e5-749699dc0d83",
+      "metadata": {
+        "id": "6e869347-dd49-40be-b1e5-749699dc0d83"
+      },
+      "source": [
+        "Create a table in the BigQuery using the snippet below, which has two columns: One holds the key and the second holds the test value. To use BiqQuery, a Google Cloud account with the BigQuery API enabled is required."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "!gcloud config set project $project"
+      ],
+      "metadata": {
+        "id": "7mgnryX-Zlfs",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "ebd3e9e3-9c30-4027-f571-5cd3c1951e18"
+      },
+      "id": "7mgnryX-Zlfs",
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Updated property [core/project].\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "a6a984cd-2e92-4c44-821b-9bf1dd52fb7d",
+      "metadata": {
+        "id": "a6a984cd-2e92-4c44-821b-9bf1dd52fb7d",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "8e60b448-1384-4290-c164-cb43d876c350"
+      },
+      "outputs": [
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "<google.cloud.bigquery.table._EmptyRowIterator at 0x7f47d0556a50>"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 86
+        }
+      ],
+      "source": [
+        "from google.cloud import bigquery\n",
+        "\n",
+        "client = bigquery.Client(project=project)\n",
+        "\n",
+        "# Make sure the dataset_id is unique in your project.\n",
+        "dataset_id = '{project}.maths'.format(project=project)\n",
+        "dataset = bigquery.Dataset(dataset_id)\n",
+        "\n",
+        "# Modify the location based on your project configuration.\n",
+        "dataset.location = 'US'\n",
+        "dataset = client.create_dataset(dataset, exists_ok=True)\n",
+        "\n",
+        "# Table name in the BigQuery dataset.\n",
+        "table_name = 'maths_problems_1'\n",
+        "\n",
+        "query = \"\"\"\n",
+        "    CREATE OR REPLACE TABLE\n",
+        "      {project}.maths.{table} ( key STRING OPTIONS(description=\"A unique key for the maths problem\"),\n",
+        "    value FLOAT64 OPTIONS(description=\"Our maths problem\" ) );\n",
+        "    INSERT INTO maths.{table}\n",
+        "    VALUES\n",
+        "      (\"first_question\", 105.00),\n",
+        "      (\"second_question\", 108.00),\n",
+        "      (\"third_question\", 1000.00),\n",
+        "      (\"fourth_question\", 1013.00)\n",
+        "\"\"\".format(project=project, table=table_name)\n",
+        "\n",
+        "create_job = client.query(query)\n",
+        "create_job.result()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "479c9319-3295-4288-971c-dd0f0adfdd1e",
+      "metadata": {
+        "id": "479c9319-3295-4288-971c-dd0f0adfdd1e"
+      },
+      "source": [
+        "Use `BigQuery` as the source in the pipeline to read keyed data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "34331897-23f5-4850-8974-67e522e956dc",
+      "metadata": {
+        "id": "34331897-23f5-4850-8974-67e522e956dc",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "23092c12-3370-414c-ba67-37be569cd21c"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "key: first_question, input: 105.0 output: 524.9712524414062\n",
+            "key: second_question, input: 108.0 output: 539.970458984375\n",
+            "key: third_question, input: 1000.0 output: 4999.72802734375\n",
+            "key: fourth_question, input: 1013.0 output: 5064.724609375\n"
+          ]
+        }
+      ],
+      "source": [
+        "pipeline_options = PipelineOptions().from_dictionary({'temp_location':f'{bucket}/tmp',\n",
+        "                                                      })\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "keyed_torch_five_times_model_handler = KeyedModelHandler(torch_five_times_model_handler)\n",
+        "\n",
+        "table_name = 'maths_problems_1'\n",
+        "table_spec = f'{project}:maths.{table_name}'\n",
+        "\n",
+        "with pipeline as p:\n",
+        "      (\n",
+        "      p\n",
+        "      | \"ReadFromBQ\" >> beam.io.ReadFromBigQuery(table=table_spec) \n",
+        "      | \"PreprocessData\" >> beam.Map(lambda x: (x['key'], x['value']))\n",
+        "      | \"ConvertNumpyToTensor\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "      | \"RunInferenceTorch\" >> RunInference(keyed_torch_five_times_model_handler)\n",
+        "      | \"PostProcessPredictions\" >> beam.ParDo(PredictionWithKeyProcessor())\n",
+        "      | beam.Map(print)\n",
+        "      )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "53ee7f24-5625-475a-b8cc-9c031591f304",
+      "metadata": {
+        "id": "53ee7f24-5625-475a-b8cc-9c031591f304"
+      },
+      "source": [
+        "### Using CSV file as the source."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "06bc4396-ee2e-4228-8548-f953b5020c4e",
+      "metadata": {
+        "id": "06bc4396-ee2e-4228-8548-f953b5020c4e"
+      },
+      "source": [
+        "Create a CSV file with two columns: one named `key` that holds the keys, and a second named `value` that holds the test values."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# creates a csv file with the below values.\n",
+        "csv_values = [(\"first_question\", 105.00),\n",
+        "      (\"second_question\", 108.00),\n",
+        "      (\"third_question\", 1000.00),\n",
+        "      (\"fourth_question\", 1013.00)]\n",
+        "input_csv_file = \"./maths_problem.csv\"\n",
+        "\n",
+        "with open(input_csv_file, 'w') as f:\n",
+        "  writer = csv.writer(f)\n",
+        "  writer.writerow(['key', 'value'])\n",
+        "  for row in csv_values:\n",
+        "    writer.writerow(row)\n",
+        "\n",
+        "assert os.path.exists(input_csv_file) == True"
+      ],
+      "metadata": {
+        "id": "exAZjP7cYAFv"
+      },
+      "id": "exAZjP7cYAFv",
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "9a054c2d-4d84-4b37-b067-1dda5347e776",
+      "metadata": {
+        "id": "9a054c2d-4d84-4b37-b067-1dda5347e776",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "000c72cf-6a7f-45d8-9dec-dc9db6ce0662"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "key: first_question, input: 105.0 output: 524.9712524414062\n",
+            "key: second_question, input: 108.0 output: 539.970458984375\n",
+            "key: third_question, input: 1000.0 output: 4999.72802734375\n",
+            "key: fourth_question, input: 1013.0 output: 5064.724609375\n"
+          ]
+        }
+      ],
+      "source": [
+        "pipeline_options = PipelineOptions().from_dictionary({'temp_location':f'{bucket}/tmp',\n",
+        "                                                      })\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "keyed_torch_five_times_model_handler = KeyedModelHandler(torch_five_times_model_handler)\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  df = p | beam.dataframe.io.read_csv(input_csv_file)\n",
+        "  pc = to_pcollection(df)\n",
+        "  (pc\n",
+        "    | \"ConvertNumpyToTensor\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "    | \"RunInferenceTorch\" >> RunInference(keyed_torch_five_times_model_handler)\n",
+        "    | \"PostProcessPredictions\" >> beam.ParDo(PredictionWithKeyProcessor())\n",
+        "    | beam.Map(print)\n",
+        "    )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "742abfbb-545e-435b-8833-2410ce29d22c",
+      "metadata": {
+        "id": "742abfbb-545e-435b-8833-2410ce29d22c"
+      },
+      "source": [
+        "# Pattern 4: Inference with multiple models in the same pipeline.\n",
+        "\n",
+        "## Inference with multiple models in parallel. "
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "570b2f27-3beb-4295-b926-595592289c06",
+      "metadata": {
+        "id": "570b2f27-3beb-4295-b926-595592289c06"
+      },
+      "source": [
+        "Create a torch model handler for the 10 times model using `PytorchModelHandlerTensor`."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "73607c45-afe1-4990-9a55-e687ed40302e",
+      "metadata": {
+        "id": "73607c45-afe1-4990-9a55-e687ed40302e"
+      },
+      "outputs": [],
+      "source": [
+        "torch_ten_times_model_handler = PytorchModelHandlerTensor(state_dict_path=save_model_dir_multiply_ten,\n",
+        "                                        model_class=LinearRegression,\n",
+        "                                        model_params={'input_dim': 1,\n",
+        "                                                      'output_dim': 1}\n",
+        "                                        )\n",
+        "keyed_torch_ten_times_model_handler = KeyedModelHandler(torch_ten_times_model_handler)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "70ebed52-4ead-4cae-ac96-8cf206012ce1",
+      "metadata": {
+        "id": "70ebed52-4ead-4cae-ac96-8cf206012ce1"
+      },
+      "source": [
+        "In this, the same data is run through two different models: the one that we’ve been using to multiply by 5 \n",
+        "and a new model, which will learn to multiply by 10."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "629d070e-9902-42c9-a1e7-56c3d1864f13",
+      "metadata": {
+        "id": "629d070e-9902-42c9-a1e7-56c3d1864f13",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "f798bbc7-3f45-496f-b029-3cff5599bfaa"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "key: first_question * 5, input: 105.0 output: 1046.1859130859375\n",
+            "key: second_question * 5, input: 108.0 output: 1075.8590087890625\n",
+            "key: third_question * 5, input: 1000.0 output: 9898.654296875\n",
+            "key: fourth_question * 5, input: 1013.0 output: 10027.2373046875\n",
+            "key: first_question * 10, input: 105.0 output: 1046.1859130859375\n",
+            "key: second_question * 10, input: 108.0 output: 1075.8590087890625\n",
+            "key: third_question * 10, input: 1000.0 output: 9898.654296875\n",
+            "key: fourth_question * 10, input: 1013.0 output: 10027.2373046875\n"
+          ]
+        }
+      ],
+      "source": [
+        "pipeline_options = PipelineOptions().from_dictionary(\n",
+        "                                      {'temp_location':f'{bucket}/tmp'})\n",
+        "\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "read_from_bq = beam.io.ReadFromBigQuery(table=table_spec)\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  multiply_five = (\n",
+        "      p \n",
+        "      |  read_from_bq\n",
+        "      | \"CreateMultiplyFiveTuple\" >> beam.Map(lambda x: ('{} {}'.format(x['key'], '* 5'), x['value']))\n",
+        "      | \"ConvertNumpyToTensorFiveTuple\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "      | \"RunInferenceTorchFiveTuple\" >> RunInference(keyed_torch_ten_times_model_handler)\n",
+        "  )\n",
+        "  multiply_ten = (\n",
+        "      p \n",
+        "      | read_from_bq \n",
+        "      | \"CreateMultiplyTenTuple\" >> beam.Map(lambda x: ('{} {}'.format(x['key'], '* 10'), x['value']))\n",
+        "      | \"ConvertNumpyToTensorTenTuple\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "      | \"RunInferenceTorchTenTuple\" >> RunInference(keyed_torch_ten_times_model_handler)\n",
+        "  )\n",
+        "\n",
+        "  inference_result = ((multiply_five, multiply_ten) | beam.Flatten() \n",
+        "                                 | beam.ParDo(PredictionWithKeyProcessor()))\n",
+        "  inference_result | beam.Map(print)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "e71e6706-5d8d-4322-9def-ac7fb20d4a50",
+      "metadata": {
+        "id": "e71e6706-5d8d-4322-9def-ac7fb20d4a50"
+      },
+      "source": [
+        "## Inference with multiple models in sequence \n",
+        "\n",
+        "In a sequential pattern, data is sent to one or more models in sequence, \n",
+        "with the output from each model chaining to the next model.\n",
+        "\n",
+        "1. Read the data from BigQuery.\n",
+        "2. Map the data.\n",
+        "3. RunInference with multiply by 5 model.\n",
+        "4. Process the results.\n",
+        "5. RunInference with multiply by 10 model.\n",
+        "6. Process the results.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "8db9d649-5549-4b58-a9ad-7b8592c2bcbf",
+      "metadata": {
+        "id": "8db9d649-5549-4b58-a9ad-7b8592c2bcbf",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "4f600937-4cb4-42dd-aa50-fa15538cc964"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "key: original input is `first_question tensor([105.])`, input: 524.9712524414062 output: 5200.13232421875\n",
+            "key: original input is `second_question tensor([108.])`, input: 539.970458984375 output: 5348.490234375\n",
+            "key: original input is `third_question tensor([1000.])`, input: 4999.72802734375 output: 49460.0703125\n",
+            "key: original input is `fourth_question tensor([1013.])`, input: 5064.724609375 output: 50102.953125\n"
+          ]
+        }
+      ],
+      "source": [
+        "def process_interim_inference(element):\n",
+        "    key, prediction_result = element\n",
+        "    input_value = prediction_result.example\n",
+        "    inference = prediction_result.inference\n",
+        "    formatted_input_value = 'original input is `{} {}`'.format(key, input_value)\n",
+        "    return formatted_input_value, inference\n",
+        "\n",
+        "\n",
+        "pipeline_options = PipelineOptions().from_dictionary(\n",
+        "                                      {'temp_location':f'{bucket}/tmp'})\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  multiply_five = (\n",
+        "      p \n",
+        "      | beam.io.ReadFromBigQuery(table=table_spec) \n",
+        "      | \"CreateMultiplyFiveTuple\" >> beam.Map(lambda x: (x['key'], x['value']))\n",
+        "      | \"ConvertNumpyToTensorFiveTuple\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "      | \"RunInferenceTorchFiveTuple\" >> RunInference(keyed_torch_five_times_model_handler)\n",
+        "  )\n",
+        "\n",
+        "  inference_result = (\n",
+        "    multiply_five \n",
+        "      | \"ExtractResult\" >> beam.Map(process_interim_inference) \n",
+        "      | \"RunInferenceTorchTenTuple\" >> RunInference(keyed_torch_ten_times_model_handler)\n",
+        "      | beam.ParDo(PredictionWithKeyProcessor())\n",
+        "    )\n",
+        "  inference_result | beam.Map(print)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "32c9ba40-9396-48f4-9e7f-a2acced98bb2",
+      "metadata": {
+        "id": "32c9ba40-9396-48f4-9e7f-a2acced98bb2"
+      },
+      "source": [
+        "# Sklearn implementation of RunInference API.\n",
+        "\n",
+        "Here, we showcase the Sklearn implementation of the RunInference API with the unkeyed data and the keyed data.\n",
+        "\n",
+        "Sklearn is a build-dependency of Apache Beam. If a different version of sklearn needs to be installed, use `%pip install scikit-learn==<version>`"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "d6142b75-eef1-4e52-9fa4-fe02fe916b26",
+      "metadata": {
+        "id": "d6142b75-eef1-4e52-9fa4-fe02fe916b26"
+      },
+      "outputs": [],
+      "source": [
+        "import pickle\n",
+        "from sklearn import linear_model\n",
+        "\n",
+        "import numpy as np\n",
+        "from apache_beam.ml.inference.sklearn_inference import ModelFileType\n",
+        "from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "6695cd22-e0bf-438f-8223-4a93392e6616",
+      "metadata": {
+        "id": "6695cd22-e0bf-438f-8223-4a93392e6616"
+      },
+      "source": [
+        "## Create the data and the Sklearn model.\n",
+        "In this cell, we perform:\n",
+        "1. Create the data to train the Sklearn linear regression model.\n",
+        "2. Train the linear regression model.\n",
+        "3. Save the Sklearn model using `pickle`."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "c57843e8-f696-4196-ad39-827e34849976",
+      "metadata": {
+        "id": "c57843e8-f696-4196-ad39-827e34849976"
+      },
+      "outputs": [],
+      "source": [
+        "# Input data to train the sklearn model.\n",
+        "x = numpy.arange(0, 100, dtype=numpy.float32).reshape(-1, 1)\n",
+        "y = (x * 5).reshape(-1, 1)\n",
+        "\n",
+        "regression = linear_model.LinearRegression()\n",
+        "regression.fit(x,y)\n",
+        "\n",
+        "sklearn_model_filename = 'sklearn_5x_model.pkl'\n",
+        "with open(sklearn_model_filename, 'wb') as f:\n",
+        "    pickle.dump(regression, f)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "69008a3d-3d15-4643-828c-b0419b347d01",
+      "metadata": {
+        "id": "69008a3d-3d15-4643-828c-b0419b347d01"
+      },
+      "source": [
+        "### Scikit-learn RunInference pipeline.\n",
+        "\n",
+        "1. Define the Sklearn model handler that accepts array_like object as input.\n",
+        "2. Read the data from BigQuery.\n",
+        "3. Use the Sklearn trained model and the Sklearn RunInference transform on unkeyed data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "50a648a3-794a-4286-ab2b-fc0458db04ca",
+      "metadata": {
+        "id": "50a648a3-794a-4286-ab2b-fc0458db04ca",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "b305d977-6549-4c01-a402-c4e14f3f2b04"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "PredictionResult(example=[105.0], inference=array([525.]))\n",
+            "PredictionResult(example=[108.0], inference=array([540.]))\n",
+            "PredictionResult(example=[1000.0], inference=array([5000.]))\n",
+            "PredictionResult(example=[1013.0], inference=array([5065.]))\n"
+          ]
+        }
+      ],
+      "source": [
+        "# SklearnModelHandlerNumpy accepts only unkeyed examples.\n",
+        "sklearn_model_handler = SklearnModelHandlerNumpy(model_uri=sklearn_model_filename,\n",
+        "                                                 model_file_type=ModelFileType.PICKLE) # Use ModelFileType.JOBLIB if the model is seriazlized using joblib.\n",
+        "\n",
+        "\n",
+        "pipeline_options = PipelineOptions().from_dictionary(\n",
+        "                                      {'temp_location':f'{bucket}/tmp'})\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  (\n",
+        "      p \n",
+        "      | \"ReadFromBQ\" >> beam.io.ReadFromBigQuery(table=table_spec)\n",
+        "      | \"ExtractInputs\" >> beam.Map(lambda x: [x['value']]) \n",
+        "      | \"RunInferenceSklearn\" >> RunInference(model_handler=sklearn_model_handler)\n",
+        "      | beam.Map(print)\n",
+        "  )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "33e901d6-ed06-4268-8a5f-685d31b5558f",
+      "metadata": {
+        "id": "33e901d6-ed06-4268-8a5f-685d31b5558f"
+      },
+      "source": [
+        "### Sklearn RunInference on keyed inputs.\n",
+        "1. Wrap the `SklearnModelHandlerNumpy` object around `KeyedModelHandler` to handle keyed data.\n",
+        "2. Read the data from BigQuery.\n",
+        "3. Use the Sklearn trained model and the Sklearn RunInference transform on a keyed data."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "c212916d-b517-4589-ad15-a3a1df926fb3",
+      "metadata": {
+        "id": "c212916d-b517-4589-ad15-a3a1df926fb3",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "1c3ccf35-3cd7-401e-de23-c0e22b5f6ebd"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "('first_question', PredictionResult(example=[105.0], inference=array([525.])))\n",
+            "('second_question', PredictionResult(example=[108.0], inference=array([540.])))\n",
+            "('third_question', PredictionResult(example=[1000.0], inference=array([5000.])))\n",
+            "('fourth_question', PredictionResult(example=[1013.0], inference=array([5065.])))\n"
+          ]
+        }
+      ],
+      "source": [
+        "sklearn_model_handler = SklearnModelHandlerNumpy(model_uri=sklearn_model_filename,\n",
+        "                                                 model_file_type=ModelFileType.PICKLE) # Use ModelFileType.JOBLIB if the model is serialized using joblib.\n",
+        "\n",
+        "keyed_sklearn_model_handler = KeyedModelHandler(sklearn_model_handler)\n",
+        "\n",
+        "pipeline_options = PipelineOptions().from_dictionary(\n",
+        "                                      {'temp_location':f'{bucket}/tmp'})\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  (\n",
+        "  p \n",
+        "  | \"ReadFromBQ\" >> beam.io.ReadFromBigQuery(table=table_spec)\n",
+        "  | \"ExtractInputs\" >> beam.Map(lambda x: (x['key'], [x['value']])) \n",
+        "  | \"RunInferenceSklearn\" >> RunInference(model_handler=keyed_sklearn_model_handler)\n",
+        "  | beam.Map(print)\n",
+        "  )"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "f1481883-423b-4da0-8ae0-1a602b1807c6",
+      "metadata": {
+        "id": "f1481883-423b-4da0-8ae0-1a602b1807c6"
+      },
+      "source": [
+        "# Cross framework transforms in a single pipeline\n",
+        "\n",
+        "In this pipeline, RunInference transforms of different frameworks are used in a single pipeline sequentially. \n",
+        "\n",
+        "In the below cells, we perform the following actions:\n",
+        "1. Create `KeyedModelHandler` for Sklearn and Pytorch. \n",
+        "2. Run inference on Sklearn and perform intermediate processing using `process_interim_inference`.\n",
+        "3. Take the intermediate result from Sklearn RunInference transform and run that through Pytorch RunInference transform."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "a45d496c-4d7b-4173-b27b-253c5767bb8d",
+      "metadata": {
+        "id": "a45d496c-4d7b-4173-b27b-253c5767bb8d"
+      },
+      "outputs": [],
+      "source": [
+        "def process_interim_inference(element: Tuple[str, PredictionResult]):\n",
+        "    \"\"\"\n",
+        "    Returns the key and the prediction to the next RunInference transform.\n",
+        "    \"\"\"\n",
+        "    key, prediction_result = element\n",
+        "    prediction = prediction_result.inference\n",
+        "    return key, prediction\n",
+        "\n",
+        "class PredictionProcessor(beam.DoFn):\n",
+        "    def process(self, element: Tuple[str, PredictionResult]):\n",
+        "        key, prediction_result = element\n",
+        "        input_from_upstream = prediction_result.example\n",
+        "        prediction = prediction_result.inference\n",
+        "        yield (key, prediction.item())"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "ada71e7d-cf29-4441-a921-310c05fa8576",
+      "metadata": {
+        "id": "ada71e7d-cf29-4441-a921-310c05fa8576",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "78eb9a0d-ace2-4c02-8970-13488dc2767c"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "('first_question', 2624.857421875)\n",
+            "('second_question', 2699.853271484375)\n",
+            "('third_question', 24998.642578125)\n",
+            "('fourth_question', 25323.625)\n"
+          ]
+        }
+      ],
+      "source": [
+        "pipeline_options = PipelineOptions().from_dictionary(\n",
+        "                                      {'temp_location':f'{bucket}/tmp'})\n",
+        "pipeline = beam.Pipeline(options=pipeline_options)\n",
+        "\n",
+        "read_from_bq = beam.io.ReadFromBigQuery(table=table_spec)\n",
+        "keyed_inputs = \"ExtractKeyedInputs\" >> beam.Map(lambda x: (x['key'], [x['value']]))\n",
+        "\n",
+        "keyed_sklearn_model_handler = KeyedModelHandler(SklearnModelHandlerNumpy(\n",
+        "    model_uri=sklearn_model_filename,\n",
+        "    model_file_type=ModelFileType.PICKLE))\n",
+        "\n",
+        "keyed_torch_model_handler = KeyedModelHandler(PytorchModelHandlerTensor(\n",
+        "    state_dict_path=save_model_dir_multiply_ten,\n",
+        "    model_class=LinearRegression,\n",
+        "    model_params={'input_dim': 1,\n",
+        "                  'output_dim': 1}))\n",
+        "\n",
+        "with pipeline as p:\n",
+        "  sklearn_inference_result = (\n",
+        "      p\n",
+        "      | read_from_bq\n",
+        "      | keyed_inputs\n",
+        "      | \"RunInferenceSklearn\" >> RunInference(model_handler=keyed_sklearn_model_handler)\n",
+        "      | \"ExtractOutputs\" >> beam.Map(process_interim_inference)\n",
+        "  )\n",
+        "\n",
+        "  torch_inference_result = (\n",
+        "      sklearn_inference_result\n",
+        "      | \"ConvertNumpyToTensorFiveTuple\" >> beam.Map(lambda x: (x[0], torch.Tensor([x[1]])))\n",
+        "      | \"RunInferenceTorchFiveTuple\" >> RunInference(keyed_torch_five_times_model_handler)\n",
+        "      | \"ProcessPredictions\" >> beam.ParDo(PredictionProcessor())\n",
+        "      | beam.Map(print)\n",
+        "  )\n"
+      ]
+    }
+  ]
+}
diff --git a/gradle.properties b/gradle.properties
index 0dfe74b..856d471 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -29,8 +29,8 @@
 # buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy.
 # To build a custom Beam version make sure you change it in both places, see
 # https://github.com/apache/beam/issues/21302.
-version=2.41.0-SNAPSHOT
-sdk_version=2.41.0.dev
+version=2.42.0-SNAPSHOT
+sdk_version=2.42.0.dev
 
 javaVersion=1.8
 
diff --git a/runners/core-construction-java/src/test/java/org/apache/beam/runners/core/construction/ValidateRunnerXlangTest.java b/runners/core-construction-java/src/test/java/org/apache/beam/runners/core/construction/ValidateRunnerXlangTest.java
index 8a13df9..1ecd904 100644
--- a/runners/core-construction-java/src/test/java/org/apache/beam/runners/core/construction/ValidateRunnerXlangTest.java
+++ b/runners/core-construction-java/src/test/java/org/apache/beam/runners/core/construction/ValidateRunnerXlangTest.java
@@ -34,13 +34,13 @@
 import org.apache.beam.sdk.transforms.Create;
 import org.apache.beam.sdk.transforms.MapElements;
 import org.apache.beam.sdk.transforms.join.KeyedPCollectionTuple;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.PCollectionList;
 import org.apache.beam.sdk.values.PCollectionTuple;
 import org.apache.beam.sdk.values.Row;
 import org.apache.beam.sdk.values.TypeDescriptors;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -93,7 +93,7 @@
               .withFieldValue("data", data)
               .build();
 
-      ByteString.Output outputStream = ByteString.newOutput();
+      ByteStringOutputStream outputStream = new ByteStringOutputStream();
       try {
         RowCoder.of(configRow.getSchema()).encode(configRow, outputStream);
       } catch (IOException e) {
diff --git a/runners/core-java/src/main/java/org/apache/beam/runners/core/InMemoryTimerInternals.java b/runners/core-java/src/main/java/org/apache/beam/runners/core/InMemoryTimerInternals.java
index 5d1a9be..038d315 100644
--- a/runners/core-java/src/main/java/org/apache/beam/runners/core/InMemoryTimerInternals.java
+++ b/runners/core-java/src/main/java/org/apache/beam/runners/core/InMemoryTimerInternals.java
@@ -28,6 +28,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.util.WindowTracing;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.MoreObjects;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.HashBasedTable;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Table;
 import org.checkerframework.checker.nullness.qual.Nullable;
@@ -155,10 +156,18 @@
   @Override
   public void deleteTimer(
       StateNamespace namespace, String timerId, String timerFamilyId, TimeDomain timeDomain) {
-    throw new UnsupportedOperationException("Canceling a timer by ID is not yet supported.");
+    TimerData removedTimer = existingTimers.remove(namespace, timerId + '+' + timerFamilyId);
+    if (removedTimer != null) {
+      Preconditions.checkState(
+          removedTimer.getDomain().equals(timeDomain),
+          "%s doesn't match time domain %s of timer",
+          timeDomain,
+          removedTimer.getDomain());
+      timersForDomain(timeDomain).remove(removedTimer);
+    }
   }
 
-  /** @deprecated use {@link #deleteTimer(StateNamespace, String, TimeDomain)}. */
+  /** @deprecated use {@link #deleteTimer(StateNamespace, String, String, TimeDomain)}. */
   @Deprecated
   @Override
   public void deleteTimer(StateNamespace namespace, String timerId, String timerFamilyId) {
@@ -168,11 +177,12 @@
     }
   }
 
-  /** @deprecated use {@link #deleteTimer(StateNamespace, String, TimeDomain)}. */
+  /** @deprecated use {@link #deleteTimer(StateNamespace, String, String, TimeDomain)}. */
   @Deprecated
   @Override
   public void deleteTimer(TimerData timer) {
-    deleteTimer(timer.getNamespace(), timer.getTimerId(), timer.getTimerFamilyId());
+    deleteTimer(
+        timer.getNamespace(), timer.getTimerId(), timer.getTimerFamilyId(), timer.getDomain());
   }
 
   @Override
diff --git a/runners/core-java/src/main/java/org/apache/beam/runners/core/metrics/MonitoringInfoEncodings.java b/runners/core-java/src/main/java/org/apache/beam/runners/core/metrics/MonitoringInfoEncodings.java
index cfa082c..3d8cf2d 100644
--- a/runners/core-java/src/main/java/org/apache/beam/runners/core/metrics/MonitoringInfoEncodings.java
+++ b/runners/core-java/src/main/java/org/apache/beam/runners/core/metrics/MonitoringInfoEncodings.java
@@ -22,6 +22,7 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.DoubleCoder;
 import org.apache.beam.sdk.coders.VarLongCoder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.joda.time.Instant;
 
@@ -32,7 +33,7 @@
 
   /** Encodes to {@link MonitoringInfoConstants.TypeUrns#DISTRIBUTION_INT64_TYPE}. */
   public static ByteString encodeInt64Distribution(DistributionData data) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       VARINT_CODER.encode(data.count(), output);
       VARINT_CODER.encode(data.sum(), output);
@@ -62,7 +63,7 @@
   // TODO(BEAM-4374): Implement decodeDoubleDistribution(...)
   public static ByteString encodeDoubleDistribution(
       long count, double sum, double min, double max) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       VARINT_CODER.encode(count, output);
       DOUBLE_CODER.encode(sum, output);
@@ -76,7 +77,7 @@
 
   /** Encodes to {@link MonitoringInfoConstants.TypeUrns#LATEST_INT64_TYPE}. */
   public static ByteString encodeInt64Gauge(GaugeData data) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       VARINT_CODER.encode(data.timestamp().getMillis(), output);
       VARINT_CODER.encode(data.value(), output);
@@ -99,7 +100,7 @@
 
   /** Encodes to {@link MonitoringInfoConstants.TypeUrns#SUM_INT64_TYPE}. */
   public static ByteString encodeInt64Counter(long value) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       VARINT_CODER.encode(value, output);
     } catch (IOException e) {
@@ -119,7 +120,7 @@
 
   /** Encodes to {@link MonitoringInfoConstants.TypeUrns#SUM_DOUBLE_TYPE}. */
   public static ByteString encodeDoubleCounter(double value) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       DOUBLE_CODER.encode(value, output);
     } catch (IOException e) {
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/PubsubSink.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/PubsubSink.java
index 6bd58fd..d8c670b 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/PubsubSink.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/PubsubSink.java
@@ -32,6 +32,7 @@
 import org.apache.beam.sdk.io.gcp.pubsub.PubsubMessage;
 import org.apache.beam.sdk.options.PipelineOptions;
 import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.SerializableUtils;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.util.WindowedValue.WindowedValueCoder;
@@ -160,11 +161,11 @@
         if (formatted.getAttributeMap() != null) {
           pubsubMessageBuilder.putAllAttributes(formatted.getAttributeMap());
         }
-        ByteString.Output output = ByteString.newOutput();
+        ByteStringOutputStream output = new ByteStringOutputStream();
         pubsubMessageBuilder.build().writeTo(output);
         byteString = output.toByteString();
       } else {
-        ByteString.Output stream = ByteString.newOutput();
+        ByteStringOutputStream stream = new ByteStringOutputStream();
         coder.encode(data.getValue(), stream, Coder.Context.OUTER);
         byteString = stream.toByteString();
       }
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StateFetcher.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StateFetcher.java
index 883822f..82ca67c 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StateFetcher.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StateFetcher.java
@@ -35,10 +35,10 @@
 import org.apache.beam.sdk.transforms.Materializations.MultimapView;
 import org.apache.beam.sdk.transforms.ViewFn;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.TupleTag;
 import org.apache.beam.sdk.values.WindowingStrategy;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Optional;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Supplier;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.Cache;
@@ -124,7 +124,7 @@
 
           Coder<SideWindowT> windowCoder = sideWindowStrategy.getWindowFn().windowCoder();
 
-          ByteString.Output windowStream = ByteString.newOutput();
+          ByteStringOutputStream windowStream = new ByteStringOutputStream();
           windowCoder.encode(sideWindow, windowStream, Coder.Context.OUTER);
 
           @SuppressWarnings("unchecked")
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingModeExecutionContext.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingModeExecutionContext.java
index da18f86..601efcd 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingModeExecutionContext.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingModeExecutionContext.java
@@ -54,6 +54,7 @@
 import org.apache.beam.sdk.metrics.MetricsContainer;
 import org.apache.beam.sdk.state.TimeDomain;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.TupleTag;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
@@ -429,7 +430,7 @@
           ((UnboundedSource<?, UnboundedSource.CheckpointMark>) activeReader.getCurrentSource())
               .getCheckpointMarkCoder();
       if (checkpointCoder != null) {
-        ByteString.Output stream = ByteString.newOutput();
+        ByteStringOutputStream stream = new ByteStringOutputStream();
         try {
           checkpointCoder.encode(checkpointMark, stream, Coder.Context.OUTER);
         } catch (IOException e) {
@@ -738,10 +739,10 @@
         throw new IllegalStateException("writePCollectionViewData must follow a Combine.globally");
       }
 
-      ByteString.Output dataStream = ByteString.newOutput();
+      ByteStringOutputStream dataStream = new ByteStringOutputStream();
       dataCoder.encode(data, dataStream, Coder.Context.OUTER);
 
-      ByteString.Output windowStream = ByteString.newOutput();
+      ByteStringOutputStream windowStream = new ByteStringOutputStream();
       windowCoder.encode(window, windowStream, Coder.Context.OUTER);
 
       ensureStateful("Tried to write view data");
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingSideInputFetcher.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingSideInputFetcher.java
index c8c7e04..12d98a8 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingSideInputFetcher.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/StreamingSideInputFetcher.java
@@ -45,10 +45,10 @@
 import org.apache.beam.sdk.state.WatermarkHoldState;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.WindowFn;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.WindowingStrategy;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.Parser;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
@@ -305,7 +305,7 @@
     SideWindowT sideInputWindow =
         (SideWindowT) view.getWindowMappingFn().getSideInputWindow(mainWindow);
 
-    ByteString.Output windowStream = ByteString.newOutput();
+    ByteStringOutputStream windowStream = new ByteStringOutputStream();
     try {
       sideInputWindowCoder.encode(sideInputWindow, windowStream, Coder.Context.OUTER);
     } catch (IOException e) {
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillSink.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillSink.java
index 6696de6..a755f31 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillSink.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillSink.java
@@ -34,6 +34,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo.PaneInfoCoder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder;
 import org.apache.beam.sdk.values.KV;
@@ -69,7 +70,7 @@
       Collection<? extends BoundedWindow> windows,
       PaneInfo pane)
       throws IOException {
-    ByteString.Output stream = ByteString.newOutput();
+    ByteStringOutputStream stream = new ByteStringOutputStream();
     PaneInfoCoder.INSTANCE.encode(pane, stream);
     windowsCoder.encode(windows, stream, Coder.Context.OUTER);
     return stream.toByteString();
@@ -135,7 +136,7 @@
     }
 
     private <EncodeT> ByteString encode(Coder<EncodeT> coder, EncodeT object) throws IOException {
-      ByteString.Output stream = ByteString.newOutput();
+      ByteStringOutputStream stream = new ByteStringOutputStream();
       coder.encode(object, stream, Coder.Context.OUTER);
       return stream.toByteString();
     }
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternals.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternals.java
index 5093b21..7eca9d3 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternals.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternals.java
@@ -80,6 +80,7 @@
 import org.apache.beam.sdk.transforms.Combine.CombineFn;
 import org.apache.beam.sdk.transforms.CombineWithContext.CombineFnWithContext;
 import org.apache.beam.sdk.transforms.windowing.TimestampCombiner;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CombineFnUtil;
 import org.apache.beam.sdk.util.Weighted;
 import org.apache.beam.sdk.values.TimestampedValue;
@@ -351,7 +352,7 @@
     try {
       // Use ByteString.Output rather than concatenation and String.format. We build these keys
       // a lot, and this leads to better performance results. See associated benchmarks.
-      ByteString.Output stream = ByteString.newOutput();
+      ByteStringOutputStream stream = new ByteStringOutputStream();
       OutputStreamWriter writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8);
 
       // stringKey starts and ends with a slash.  We separate it from the
@@ -522,7 +523,7 @@
 
       ByteString encoded = null;
       if (cachedSize == -1 || modified) {
-        ByteString.Output stream = ByteString.newOutput();
+        ByteStringOutputStream stream = new ByteStringOutputStream();
         if (value != null) {
           coder.encode(value, stream, Coder.Context.OUTER);
         }
@@ -1047,7 +1048,7 @@
               pendingAdds,
               (elem, id) -> {
                 try {
-                  ByteString.Output elementStream = ByteString.newOutput();
+                  ByteStringOutputStream elementStream = new ByteStringOutputStream();
                   elemCoder.encode(elem.getValue(), elementStream, Context.OUTER);
                   insertBuilder.addEntries(
                       SortedListEntry.newBuilder()
@@ -1249,7 +1250,7 @@
     }
 
     private ByteString protoKeyFromUserKey(K key) throws IOException {
-      ByteString.Output keyStream = ByteString.newOutput();
+      ByteStringOutputStream keyStream = new ByteStringOutputStream();
       stateKeyPrefix.writeTo(keyStream);
       keyCoder.encode(key, keyStream, Context.OUTER);
       return keyStream.toByteString();
@@ -1275,7 +1276,7 @@
 
       for (K key : localAdditions) {
         ByteString keyBytes = protoKeyFromUserKey(key);
-        ByteString.Output valueStream = ByteString.newOutput();
+        ByteStringOutputStream valueStream = new ByteStringOutputStream();
         valueCoder.encode(cachedValues.get(key), valueStream, Context.OUTER);
         ByteString valueBytes = valueStream.toByteString();
 
@@ -1290,7 +1291,7 @@
       localAdditions.clear();
 
       for (K key : localRemovals) {
-        ByteString.Output keyStream = ByteString.newOutput();
+        ByteStringOutputStream keyStream = new ByteStringOutputStream();
         stateKeyPrefix.writeTo(keyStream);
         keyCoder.encode(key, keyStream, Context.OUTER);
         ByteString keyBytes = keyStream.toByteString();
@@ -1304,7 +1305,7 @@
 
         V cachedValue = cachedValues.remove(key);
         if (cachedValue != null) {
-          ByteString.Output valueStream = ByteString.newOutput();
+          ByteStringOutputStream valueStream = new ByteStringOutputStream();
           valueCoder.encode(cachedValues.get(key), valueStream, Context.OUTER);
         }
       }
@@ -1555,7 +1556,7 @@
 
     private Future<V> getFutureForKey(K key) {
       try {
-        ByteString.Output keyStream = ByteString.newOutput();
+        ByteStringOutputStream keyStream = new ByteStringOutputStream();
         stateKeyPrefix.writeTo(keyStream);
         keyCoder.encode(key, keyStream, Context.OUTER);
         return reader.valueFuture(keyStream.toByteString(), stateFamily, valueCoder);
@@ -1703,7 +1704,7 @@
           bagUpdatesBuilder = commitBuilder.addBagUpdatesBuilder();
         }
         for (T value : localAdditions) {
-          ByteString.Output stream = ByteString.newOutput();
+          ByteStringOutputStream stream = new ByteStringOutputStream();
           // Encode the value
           elemCoder.encode(value, stream, Coder.Context.OUTER);
           ByteString encoded = stream.toByteString();
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateReader.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateReader.java
index b6c2dfa..60f8517 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateReader.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillStateReader.java
@@ -453,30 +453,40 @@
   public void startBatchAndBlock() {
     // First, drain work out of the pending lookups into a set. These will be the items we fetch.
     HashSet<StateTag<?>> toFetch = Sets.newHashSet();
-    while (!pendingLookups.isEmpty()) {
-      StateTag<?> stateTag = pendingLookups.poll();
-      if (stateTag == null) {
-        break;
+    try {
+      while (!pendingLookups.isEmpty()) {
+        StateTag<?> stateTag = pendingLookups.poll();
+        if (stateTag == null) {
+          break;
+        }
+
+        if (!toFetch.add(stateTag)) {
+          throw new IllegalStateException("Duplicate tags being fetched.");
+        }
       }
 
-      if (!toFetch.add(stateTag)) {
-        throw new IllegalStateException("Duplicate tags being fetched.");
+      // If we failed to drain anything, some other thread pulled it off the queue. We have no work
+      // to do.
+      if (toFetch.isEmpty()) {
+        return;
       }
-    }
 
-    // If we failed to drain anything, some other thread pulled it off the queue. We have no work
-    // to do.
-    if (toFetch.isEmpty()) {
-      return;
-    }
+      Windmill.KeyedGetDataRequest request = createRequest(toFetch);
+      Windmill.KeyedGetDataResponse response = server.getStateData(computation, request);
+      if (response == null) {
+        throw new RuntimeException("Windmill unexpectedly returned null for request " + request);
+      }
 
-    Windmill.KeyedGetDataRequest request = createRequest(toFetch);
-    Windmill.KeyedGetDataResponse response = server.getStateData(computation, request);
-    if (response == null) {
-      throw new RuntimeException("Windmill unexpectedly returned null for request " + request);
+      // Removes tags from toFetch as they are processed.
+      consumeResponse(response, toFetch);
+    } catch (Exception e) {
+      // Set up all the remaining futures for this key to throw an exception. This ensures that if
+      // the exception is caught that all futures have been completed and do not block.
+      for (StateTag<?> stateTag : toFetch) {
+        waiting.get(stateTag).future.setException(e);
+      }
+      throw e;
     }
-
-    consumeResponse(response, toFetch);
   }
 
   public long getBytesRead() {
@@ -576,15 +586,8 @@
 
   private void consumeResponse(Windmill.KeyedGetDataResponse response, Set<StateTag<?>> toFetch) {
     bytesRead += response.getSerializedSize();
-
     if (response.getFailed()) {
-      // Set up all the futures for this key to throw an exception:
-      KeyTokenInvalidException keyTokenInvalidException =
-          new KeyTokenInvalidException(key.toStringUtf8());
-      for (StateTag<?> stateTag : toFetch) {
-        waiting.get(stateTag).future.setException(keyTokenInvalidException);
-      }
-      return;
+      throw new KeyTokenInvalidException(key.toStringUtf8());
     }
 
     if (!key.equals(response.getKey())) {
@@ -773,11 +776,15 @@
         getWaiting(stateTag, shouldRemove);
     SettableFuture<ValuesAndContPosition<T, Long>> future =
         coderAndFuture.getNonDoneFuture(stateTag);
-    Coder<T> coder = coderAndFuture.<T>getAndClearCoder();
-    List<T> values = this.bagPageValues(bag, coder);
-    future.set(
-        new ValuesAndContPosition<>(
-            values, bag.hasContinuationPosition() ? bag.getContinuationPosition() : null));
+    try {
+      Coder<T> coder = coderAndFuture.<T>getAndClearCoder();
+      List<T> values = this.bagPageValues(bag, coder);
+      future.set(
+          new ValuesAndContPosition<>(
+              values, bag.hasContinuationPosition() ? bag.getContinuationPosition() : null));
+    } catch (Exception e) {
+      future.setException(new RuntimeException("Error parsing bag response", e));
+    }
   }
 
   private void consumeWatermark(Windmill.WatermarkHold watermarkHold, StateTag<Long> stateTag) {
@@ -813,7 +820,7 @@
         T value = coder.decode(inputStream, Coder.Context.OUTER);
         future.set(value);
       } catch (IOException e) {
-        throw new IllegalStateException("Unable to decode value using " + coder, e);
+        future.setException(new IllegalStateException("Unable to decode value using " + coder, e));
       }
     } else {
       future.set(null);
@@ -840,14 +847,18 @@
     SettableFuture<ValuesAndContPosition<Map.Entry<ByteString, V>, ByteString>> future =
         coderAndFuture.getNonDoneFuture(stateTag);
     Coder<V> valueCoder = coderAndFuture.getAndClearCoder();
-    List<Map.Entry<ByteString, V>> values =
-        this.tagPrefixPageTagValues(tagValuePrefixResponse, valueCoder);
-    future.set(
-        new ValuesAndContPosition<>(
-            values,
-            tagValuePrefixResponse.hasContinuationPosition()
-                ? tagValuePrefixResponse.getContinuationPosition()
-                : null));
+    try {
+      List<Map.Entry<ByteString, V>> values =
+          this.tagPrefixPageTagValues(tagValuePrefixResponse, valueCoder);
+      future.set(
+          new ValuesAndContPosition<>(
+              values,
+              tagValuePrefixResponse.hasContinuationPosition()
+                  ? tagValuePrefixResponse.getContinuationPosition()
+                  : null));
+    } catch (Exception e) {
+      future.setException(new RuntimeException("Error parsing tag value prefix", e));
+    }
   }
 
   private <T> void consumeSortedList(
@@ -870,13 +881,17 @@
     SettableFuture<ValuesAndContPosition<TimestampedValue<T>, ByteString>> future =
         coderAndFuture.getNonDoneFuture(stateTag);
     Coder<T> coder = coderAndFuture.getAndClearCoder();
-    List<TimestampedValue<T>> values = this.sortedListPageValues(sortedListFetchResponse, coder);
-    future.set(
-        new ValuesAndContPosition<>(
-            values,
-            sortedListFetchResponse.hasContinuationPosition()
-                ? sortedListFetchResponse.getContinuationPosition()
-                : null));
+    try {
+      List<TimestampedValue<T>> values = this.sortedListPageValues(sortedListFetchResponse, coder);
+      future.set(
+          new ValuesAndContPosition<>(
+              values,
+              sortedListFetchResponse.hasContinuationPosition()
+                  ? sortedListFetchResponse.getContinuationPosition()
+                  : null));
+    } catch (Exception e) {
+      future.setException(new RuntimeException("Error parsing ordered list", e));
+    }
   }
   /**
    * An iterable over elements backed by paginated GetData requests to Windmill. The iterable may be
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtils.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtils.java
index 8552c27..9732826 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtils.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtils.java
@@ -45,6 +45,9 @@
     // Windmill should never send us an unknown timestamp.
     Preconditions.checkArgument(timestampUs != Long.MIN_VALUE);
     Instant result = new Instant(divideAndRoundDown(timestampUs, 1000));
+    if (result.isBefore(BoundedWindow.TIMESTAMP_MIN_VALUE)) {
+      return BoundedWindow.TIMESTAMP_MIN_VALUE;
+    }
     if (result.isAfter(BoundedWindow.TIMESTAMP_MAX_VALUE)) {
       // End of time.
       return BoundedWindow.TIMESTAMP_MAX_VALUE;
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/fn/control/RegisterAndProcessBundleOperation.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/fn/control/RegisterAndProcessBundleOperation.java
index 7be2820..580450a 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/fn/control/RegisterAndProcessBundleOperation.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/fn/control/RegisterAndProcessBundleOperation.java
@@ -71,6 +71,7 @@
 import org.apache.beam.sdk.transforms.Materializations;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.MoreFutures;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
@@ -668,7 +669,7 @@
   }
 
   static ByteString encodeAndConcat(Iterable<Object> values, Coder valueCoder) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     if (values != null) {
       for (Object value : values) {
         int size = out.size();
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/CreateExecutableStageNodeFunction.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/CreateExecutableStageNodeFunction.java
index ee79ee3..0e9b86b 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/CreateExecutableStageNodeFunction.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/CreateExecutableStageNodeFunction.java
@@ -79,6 +79,7 @@
 import org.apache.beam.sdk.transforms.windowing.FixedWindows;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
 import org.apache.beam.sdk.transforms.windowing.IntervalWindow.IntervalWindowCoder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder;
 import org.apache.beam.sdk.util.WindowedValue.WindowedValueCoder;
 import org.apache.beam.sdk.values.PCollectionView;
@@ -216,7 +217,7 @@
 
       String coderId = "generatedCoder" + idGenerator.getId();
       String windowingStrategyId;
-      try (ByteString.Output output = ByteString.newOutput()) {
+      try (ByteStringOutputStream output = new ByteStringOutputStream()) {
         try {
           Coder<?> javaCoder =
               CloudObjects.coderFromCloudObject(CloudObject.fromSpec(instructionOutput.getCodec()));
diff --git a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/RegisterNodeFunction.java b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/RegisterNodeFunction.java
index befaccb..927bea9 100644
--- a/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/RegisterNodeFunction.java
+++ b/runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/graph/RegisterNodeFunction.java
@@ -72,6 +72,7 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.fn.IdGenerator;
 import org.apache.beam.sdk.transforms.Materializations;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollectionView;
@@ -234,7 +235,7 @@
 
       String coderId = "generatedCoder" + idGenerator.getId();
       instructionOutputNodeToCoderIdBuilder.put(node, coderId);
-      try (ByteString.Output output = ByteString.newOutput()) {
+      try (ByteStringOutputStream output = new ByteStringOutputStream()) {
         try {
           Coder<?> javaCoder =
               CloudObjects.coderFromCloudObject(CloudObject.fromSpec(instructionOutput.getCodec()));
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StateFetcherTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StateFetcherTest.java
index f16ff49..e87ec6a 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StateFetcherTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StateFetcherTest.java
@@ -42,6 +42,7 @@
 import org.apache.beam.sdk.transforms.Sum;
 import org.apache.beam.sdk.transforms.View;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Supplier;
@@ -73,7 +74,7 @@
   public void testFetchGlobalDataBasic() throws Exception {
     StateFetcher fetcher = new StateFetcher(server);
 
-    ByteString.Output stream = ByteString.newOutput();
+    ByteStringOutputStream stream = new ByteStringOutputStream();
     ListCoder.of(StringUtf8Coder.of()).encode(Arrays.asList("data"), stream, Coder.Context.OUTER);
     ByteString encodedIterable = stream.toByteString();
 
@@ -126,7 +127,7 @@
   public void testFetchGlobalDataNull() throws Exception {
     StateFetcher fetcher = new StateFetcher(server);
 
-    ByteString.Output stream = ByteString.newOutput();
+    ByteStringOutputStream stream = new ByteStringOutputStream();
     ListCoder.of(VoidCoder.of()).encode(Arrays.asList((Void) null), stream, Coder.Context.OUTER);
     ByteString encodedIterable = stream.toByteString();
 
@@ -179,10 +180,9 @@
   public void testFetchGlobalDataCacheOverflow() throws Exception {
     Coder<List<String>> coder = ListCoder.of(StringUtf8Coder.of());
 
-    ByteString.Output stream = ByteString.newOutput();
+    ByteStringOutputStream stream = new ByteStringOutputStream();
     coder.encode(Arrays.asList("data1"), stream, Coder.Context.OUTER);
-    ByteString encodedIterable1 = stream.toByteString();
-    stream = ByteString.newOutput();
+    ByteString encodedIterable1 = stream.toByteStringAndReset();
     coder.encode(Arrays.asList("data2"), stream, Coder.Context.OUTER);
     ByteString encodedIterable2 = stream.toByteString();
 
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingDataflowWorkerTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingDataflowWorkerTest.java
index 338a1d7..d52680f 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingDataflowWorkerTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingDataflowWorkerTest.java
@@ -131,6 +131,7 @@
 import org.apache.beam.sdk.transforms.windowing.Repeatedly;
 import org.apache.beam.sdk.transforms.windowing.Sessions;
 import org.apache.beam.sdk.transforms.windowing.TimestampCombiner;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CoderUtils;
 import org.apache.beam.sdk.util.DoFnInfo;
 import org.apache.beam.sdk.util.SerializableUtils;
@@ -144,7 +145,6 @@
 import org.apache.beam.sdk.values.WindowingStrategy;
 import org.apache.beam.sdk.values.WindowingStrategy.AccumulationMode;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString.Output;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.TextFormat;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Optional;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.CacheStats;
@@ -661,7 +661,7 @@
 
   private ByteString addPaneTag(PaneInfo pane, byte[] windowBytes)
       throws CoderException, IOException {
-    Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     PaneInfo.PaneInfoCoder.INSTANCE.encode(pane, output, Context.OUTER);
     output.write(windowBytes);
     return output.toByteString();
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowFnsTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowFnsTest.java
index 0a3a997..7b8e899 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowFnsTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowFnsTest.java
@@ -71,6 +71,7 @@
 import org.apache.beam.sdk.transforms.windowing.SlidingWindows;
 import org.apache.beam.sdk.transforms.windowing.TimestampCombiner;
 import org.apache.beam.sdk.util.AppliedCombineFn;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.TupleTag;
@@ -169,7 +170,7 @@
     Coder<Collection<? extends BoundedWindow>> windowsCoder =
         (Coder) CollectionCoder.of(windowCoder);
 
-    ByteString.Output dataOutput = ByteString.newOutput();
+    ByteStringOutputStream dataOutput = new ByteStringOutputStream();
     valueCoder.encode(value, dataOutput, Context.OUTER);
     messageBundle
         .addMessagesBuilder()
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowsReshuffleDoFnTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowsReshuffleDoFnTest.java
index 1b9f96e..757e49d 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowsReshuffleDoFnTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/StreamingGroupAlsoByWindowsReshuffleDoFnTest.java
@@ -43,6 +43,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.IntervalWindow;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.TupleTag;
@@ -109,7 +110,7 @@
     Coder<Collection<? extends BoundedWindow>> windowsCoder =
         (Coder) CollectionCoder.of(windowCoder);
 
-    ByteString.Output dataOutput = ByteString.newOutput();
+    ByteStringOutputStream dataOutput = new ByteStringOutputStream();
     valueCoder.encode(value, dataOutput, Context.OUTER);
     messageBundle
         .addMessagesBuilder()
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternalsTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternalsTest.java
index 2abc4f0..157347c 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternalsTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateInternalsTest.java
@@ -67,6 +67,7 @@
 import org.apache.beam.sdk.state.WatermarkHoldState;
 import org.apache.beam.sdk.transforms.Sum;
 import org.apache.beam.sdk.transforms.windowing.TimestampCombiner;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CoderUtils;
 import org.apache.beam.sdk.values.TimestampedValue;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
@@ -198,7 +199,7 @@
 
   private <K> ByteString protoKeyFromUserKey(@Nullable K tag, Coder<K> keyCoder)
       throws IOException {
-    ByteString.Output keyStream = ByteString.newOutput();
+    ByteStringOutputStream keyStream = new ByteStringOutputStream();
     key(NAMESPACE, "map").writeTo(keyStream);
     if (tag != null) {
       keyCoder.encode(tag, keyStream, Context.OUTER);
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateReaderTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateReaderTest.java
index edcb191..94554c1 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateReaderTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillStateReaderTest.java
@@ -33,11 +33,12 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.VarIntCoder;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.TimestampedValue;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString.Output;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Charsets;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Range;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.io.BaseEncoding;
 import org.hamcrest.Matchers;
 import org.joda.time.Instant;
 import org.junit.Before;
@@ -93,7 +94,7 @@
   }
 
   private ByteString intData(int value) throws IOException {
-    Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     INT_CODER.encode(value, output, Coder.Context.OUTER);
     return output.toByteString();
   }
@@ -763,7 +764,7 @@
 
   @Test
   public void testKeyTokenInvalid() throws Exception {
-    // Reads two bags and verifies that we batch them up correctly.
+    // Reads two states and verifies that we batch them up correctly.
     Future<Instant> watermarkFuture = underTest.watermarkFuture(STATE_KEY_2, STATE_FAMILY);
     Future<Iterable<Integer>> bagFuture = underTest.bagFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
 
@@ -792,6 +793,164 @@
     }
   }
 
+  @Test
+  public void testBatchingReadException() throws Exception {
+    // Reads two states and verifies that we batch them up correctly and propagate the read
+    // exception to both, not just the issuing future.
+    Future<Instant> watermarkFuture = underTest.watermarkFuture(STATE_KEY_2, STATE_FAMILY);
+    Future<Iterable<Integer>> bagFuture = underTest.bagFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
+
+    Mockito.verifyNoMoreInteractions(mockWindmill);
+
+    RuntimeException expectedException = new RuntimeException("expected exception");
+
+    Mockito.when(
+            mockWindmill.getStateData(
+                Mockito.eq(COMPUTATION), Mockito.isA(Windmill.KeyedGetDataRequest.class)))
+        .thenThrow(expectedException);
+
+    try {
+      watermarkFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("expected exception"));
+    }
+
+    try {
+      bagFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("expected exception"));
+    }
+  }
+
+  @Test
+  public void testBatchingCoderExceptions() throws Exception {
+    // Reads a batch of states with coder errors and verifies they only affect the
+    // relevant futures.
+    Future<Integer> valueFuture = underTest.valueFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
+    Future<Iterable<Integer>> bagFuture = underTest.bagFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
+    Future<Iterable<Map.Entry<ByteString, Integer>>> valuePrefixFuture =
+        underTest.valuePrefixFuture(STATE_KEY_PREFIX, STATE_FAMILY, INT_CODER);
+    long beginning = SortedListRange.getDefaultInstance().getStart();
+    long end = SortedListRange.getDefaultInstance().getLimit();
+    Future<Iterable<TimestampedValue<Integer>>> orderedListFuture =
+        underTest.orderedListFuture(
+            Range.closedOpen(beginning, end), STATE_KEY_1, STATE_FAMILY, INT_CODER);
+    // This should be the final part of the response, and we should process it successfully.
+    Future<Instant> watermarkFuture = underTest.watermarkFuture(STATE_KEY_1, STATE_FAMILY);
+
+    Mockito.verifyNoMoreInteractions(mockWindmill);
+
+    ByteString invalidByteString = ByteString.copyFrom(BaseEncoding.base16().decode("FFFF"));
+    Windmill.Value invalidValue = intValue(0).toBuilder().setData(invalidByteString).build();
+
+    Windmill.KeyedGetDataRequest.Builder expectedRequest =
+        Windmill.KeyedGetDataRequest.newBuilder()
+            .setKey(DATA_KEY)
+            .setShardingKey(SHARDING_KEY)
+            .setWorkToken(WORK_TOKEN)
+            .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
+            .addValuesToFetch(
+                Windmill.TagValue.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .build())
+            .addBagsToFetch(
+                Windmill.TagBag.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_BAG_BYTES))
+            .addTagValuePrefixesToFetch(
+                Windmill.TagValuePrefixRequest.newBuilder()
+                    .setTagPrefix(STATE_KEY_PREFIX)
+                    .setStateFamily(STATE_FAMILY)
+                    .setFetchMaxBytes(WindmillStateReader.MAX_TAG_VALUE_PREFIX_BYTES))
+            .addSortedListsToFetch(
+                Windmill.TagSortedListFetchRequest.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
+                    .setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES))
+            .addWatermarkHoldsToFetch(
+                Windmill.WatermarkHold.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY));
+
+    Windmill.KeyedGetDataResponse.Builder response =
+        Windmill.KeyedGetDataResponse.newBuilder()
+            .setKey(DATA_KEY)
+            .addValues(
+                Windmill.TagValue.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .setValue(invalidValue))
+            .addBags(
+                Windmill.TagBag.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .addValues(invalidByteString))
+            .addTagValuePrefixes(
+                Windmill.TagValuePrefixResponse.newBuilder()
+                    .setTagPrefix(STATE_KEY_PREFIX)
+                    .setStateFamily(STATE_FAMILY)
+                    .addTagValues(
+                        Windmill.TagValue.newBuilder()
+                            .setTag(STATE_KEY_1)
+                            .setStateFamily(STATE_FAMILY)
+                            .setValue(invalidValue)))
+            .addTagSortedLists(
+                Windmill.TagSortedListFetchResponse.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .addEntries(
+                        SortedListEntry.newBuilder()
+                            .setValue(invalidByteString)
+                            .setSortKey(5000)
+                            .setId(5))
+                    .addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end)))
+            .addWatermarkHolds(
+                Windmill.WatermarkHold.newBuilder()
+                    .setTag(STATE_KEY_1)
+                    .setStateFamily(STATE_FAMILY)
+                    .addTimestamps(5000000)
+                    .addTimestamps(6000000));
+
+    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
+        .thenReturn(response.build());
+    Mockito.verifyNoMoreInteractions(mockWindmill);
+
+    try {
+      valueFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("Unable to decode value"));
+    }
+
+    try {
+      bagFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("Error parsing bag"));
+    }
+
+    try {
+      orderedListFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("Error parsing ordered list"));
+    }
+
+    try {
+      valuePrefixFuture.get();
+      fail("Expected RuntimeException");
+    } catch (Exception e) {
+      assertThat(e.toString(), Matchers.containsString("Error parsing tag value prefix"));
+    }
+
+    assertThat(watermarkFuture.get(), Matchers.equalTo(new Instant(5000)));
+  }
+
   /**
    * Tests that multiple reads for the same tag in the same batch are cached. We can't compare the
    * futures since we've wrapped the delegate around them, so we just verify there is only one
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtilsTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtilsTest.java
index 5f910c3..84e76d2 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtilsTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimeUtilsTest.java
@@ -23,6 +23,7 @@
 import static org.junit.Assert.assertEquals;
 
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.joda.time.Duration;
 import org.joda.time.Instant;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -56,6 +57,15 @@
     assertEquals(new Instant(-17), windmillToHarnessTimestamp(-16987));
     assertEquals(new Instant(-17), windmillToHarnessTimestamp(-17000));
     assertEquals(new Instant(-18), windmillToHarnessTimestamp(-17001));
+    assertEquals(BoundedWindow.TIMESTAMP_MIN_VALUE, windmillToHarnessTimestamp(Long.MIN_VALUE + 1));
+    assertEquals(BoundedWindow.TIMESTAMP_MIN_VALUE, windmillToHarnessTimestamp(Long.MIN_VALUE + 2));
+    // Long.MIN_VALUE = -9223372036854775808, need to add 1808 microseconds to get to next
+    // millisecond returned by Beam.
+    assertEquals(
+        BoundedWindow.TIMESTAMP_MIN_VALUE.plus(Duration.millis(1)),
+        windmillToHarnessTimestamp(Long.MIN_VALUE + 1808));
+    assertEquals(
+        BoundedWindow.TIMESTAMP_MIN_VALUE, windmillToHarnessTimestamp(Long.MIN_VALUE + 1807));
   }
 
   @Test
diff --git a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimerInternalsTest.java b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimerInternalsTest.java
index 2d222b5..8632034 100644
--- a/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimerInternalsTest.java
+++ b/runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/WindmillTimerInternalsTest.java
@@ -88,12 +88,22 @@
                       TimerData.of(
                           namespace, timestamp, timestamp.minus(Duration.millis(1)), timeDomain));
               for (TimerData timer : anonymousTimers) {
-                assertThat(
+                Instant expectedTimestamp =
+                    timer.getOutputTimestamp().isBefore(BoundedWindow.TIMESTAMP_MIN_VALUE)
+                        ? BoundedWindow.TIMESTAMP_MIN_VALUE
+                        : timer.getOutputTimestamp();
+                TimerData computed =
                     WindmillTimerInternals.windmillTimerToTimerData(
                         prefix,
                         WindmillTimerInternals.timerDataToWindmillTimer(stateFamily, prefix, timer),
-                        coder),
-                    equalTo(timer));
+                        coder);
+                // The function itself bounds the output, so we don't expect the original input as
+                // the output; we expect it to be bounded.
+                TimerData expected =
+                    TimerData.of(
+                        timer.getNamespace(), timestamp, expectedTimestamp, timer.getDomain());
+
+                assertThat(computed, equalTo(expected));
               }
 
               for (String timerId : TEST_TIMER_IDS) {
@@ -117,13 +127,26 @@
                             timeDomain));
 
                 for (TimerData timer : timers) {
+                  Instant expectedTimestamp =
+                      timer.getOutputTimestamp().isBefore(BoundedWindow.TIMESTAMP_MIN_VALUE)
+                          ? BoundedWindow.TIMESTAMP_MIN_VALUE
+                          : timer.getOutputTimestamp();
+
+                  TimerData expected =
+                      TimerData.of(
+                          timer.getTimerId(),
+                          timer.getTimerFamilyId(),
+                          timer.getNamespace(),
+                          timer.getTimestamp(),
+                          expectedTimestamp,
+                          timer.getDomain());
                   assertThat(
                       WindmillTimerInternals.windmillTimerToTimerData(
                           prefix,
                           WindmillTimerInternals.timerDataToWindmillTimer(
                               stateFamily, prefix, timer),
                           coder),
-                      equalTo(timer));
+                      equalTo(expected));
                 }
               }
             }
diff --git a/runners/java-fn-execution/src/test/java/org/apache/beam/runners/fnexecution/control/RemoteExecutionTest.java b/runners/java-fn-execution/src/test/java/org/apache/beam/runners/fnexecution/control/RemoteExecutionTest.java
index 7e28a79..8f5dbfa 100644
--- a/runners/java-fn-execution/src/test/java/org/apache/beam/runners/fnexecution/control/RemoteExecutionTest.java
+++ b/runners/java-fn-execution/src/test/java/org/apache/beam/runners/fnexecution/control/RemoteExecutionTest.java
@@ -130,6 +130,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CoderUtils;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.KV;
@@ -819,7 +820,7 @@
   }
 
   private static ByteString encode(String value) throws Exception {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     StringUtf8Coder.of().encode(value, output);
     return output.toByteString();
   }
@@ -1524,7 +1525,7 @@
 
     // 3 Requests expected: state read, state2 read, and state2 clear
     assertEquals(3, stateRequestHandler.getRequestCount());
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     StringUtf8Coder.of().encode("X", out);
 
     assertEquals(
diff --git a/runners/samza/build.gradle b/runners/samza/build.gradle
index 528b482..6f7165f 100644
--- a/runners/samza/build.gradle
+++ b/runners/samza/build.gradle
@@ -54,6 +54,7 @@
   implementation library.java.joda_time
   implementation library.java.args4j
   implementation library.java.commons_io
+  implementation library.java.commons_collections
   runtimeOnly "org.rocksdb:rocksdbjni:6.15.2"
   runtimeOnly "org.scala-lang:scala-library:2.11.8"
   implementation "org.apache.samza:samza-api:$samza_version"
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/metrics/SamzaMetricsContainer.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/metrics/SamzaMetricsContainer.java
index 7415735..1679b74 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/metrics/SamzaMetricsContainer.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/metrics/SamzaMetricsContainer.java
@@ -75,6 +75,15 @@
     // TODO(https://github.com/apache/beam/issues/21043): add distribution metrics to Samza
   }
 
+  public void updateExecutableStageBundleMetric(String metricName, long time) {
+    @SuppressWarnings("unchecked")
+    Gauge<Long> gauge = (Gauge<Long>) getSamzaMetricFor(metricName);
+    if (gauge == null) {
+      gauge = metricsRegistry.newGauge(BEAM_METRICS_GROUP, metricName, 0L);
+    }
+    gauge.set(time);
+  }
+
   private class CounterUpdater implements Consumer<MetricResult<Long>> {
     @Override
     public void accept(MetricResult<Long> metricResult) {
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/DoFnOp.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/DoFnOp.java
index f9aaea9..714693c 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/DoFnOp.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/DoFnOp.java
@@ -45,6 +45,7 @@
 import org.apache.beam.runners.fnexecution.provisioning.JobInfo;
 import org.apache.beam.runners.samza.SamzaExecutionContext;
 import org.apache.beam.runners.samza.SamzaPipelineOptions;
+import org.apache.beam.runners.samza.util.DoFnUtils;
 import org.apache.beam.runners.samza.util.FutureUtils;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.transforms.DoFn;
@@ -224,6 +225,7 @@
       this.fnRunner =
           SamzaDoFnRunners.createPortable(
               transformId,
+              DoFnUtils.toStepName(executableStage),
               bundleStateId,
               windowedValueCoder,
               executableStage,
@@ -234,6 +236,7 @@
               samzaPipelineOptions,
               outputManagerFactory.create(emitter, outputFutureCollector),
               stageBundleFactory,
+              samzaExecutionContext,
               mainOutputTag,
               idToTupleTagMap,
               context,
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
index 3a6ca18..dc9f330 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
@@ -22,6 +22,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.beam.model.pipeline.v1.RunnerApi;
 import org.apache.beam.runners.core.DoFnRunner;
 import org.apache.beam.runners.core.DoFnRunners;
@@ -184,6 +185,7 @@
   @SuppressWarnings("unchecked")
   public static <InT, FnOutT> DoFnRunner<InT, FnOutT> createPortable(
       String transformId,
+      String stepName,
       String bundleStateId,
       Coder<WindowedValue<InT>> windowedValueCoder,
       ExecutableStage executableStage,
@@ -194,6 +196,7 @@
       SamzaPipelineOptions pipelineOptions,
       DoFnRunners.OutputManager outputManager,
       StageBundleFactory stageBundleFactory,
+      SamzaExecutionContext samzaExecutionContext,
       TupleTag<FnOutT> mainOutputTag,
       Map<String, TupleTag<?>> idToTupleTagMap,
       Context context,
@@ -219,6 +222,7 @@
         (SamzaExecutionContext) context.getApplicationContainerContext();
     final DoFnRunner<InT, FnOutT> underlyingRunner =
         new SdkHarnessDoFnRunner<>(
+            stepName,
             timerInternalsFactory,
             WindowUtils.getWindowStrategy(
                 executableStage.getInputPCollection().getId(), executableStage.getComponents()),
@@ -226,7 +230,8 @@
             stageBundleFactory,
             idToTupleTagMap,
             bundledEventsBag,
-            stateRequestHandler);
+            stateRequestHandler,
+            samzaExecutionContext);
     return pipelineOptions.getEnableMetrics()
         ? DoFnRunnerWithMetrics.wrap(
             underlyingRunner, executionContext.getMetricsContainer(), transformFullName)
@@ -234,6 +239,9 @@
   }
 
   private static class SdkHarnessDoFnRunner<InT, FnOutT> implements DoFnRunner<InT, FnOutT> {
+
+    private static final int DEFAULT_METRIC_SAMPLE_RATE = 100;
+
     private final SamzaTimerInternalsFactory timerInternalsFactory;
     private final WindowingStrategy windowingStrategy;
     private final DoFnRunners.OutputManager outputManager;
@@ -243,16 +251,21 @@
     private final BagState<WindowedValue<InT>> bundledEventsBag;
     private RemoteBundle remoteBundle;
     private FnDataReceiver<WindowedValue<?>> inputReceiver;
-    private StateRequestHandler stateRequestHandler;
+    private final StateRequestHandler stateRequestHandler;
+    private final SamzaExecutionContext samzaExecutionContext;
+    private long startBundleTime;
+    private final String metricName;
 
     private SdkHarnessDoFnRunner(
+        String stepName,
         SamzaTimerInternalsFactory<?> timerInternalsFactory,
         WindowingStrategy windowingStrategy,
         DoFnRunners.OutputManager outputManager,
         StageBundleFactory stageBundleFactory,
         Map<String, TupleTag<?>> idToTupleTagMap,
         BagState<WindowedValue<InT>> bundledEventsBag,
-        StateRequestHandler stateRequestHandler) {
+        StateRequestHandler stateRequestHandler,
+        SamzaExecutionContext samzaExecutionContext) {
       this.timerInternalsFactory = timerInternalsFactory;
       this.windowingStrategy = windowingStrategy;
       this.outputManager = outputManager;
@@ -260,6 +273,8 @@
       this.idToTupleTagMap = idToTupleTagMap;
       this.bundledEventsBag = bundledEventsBag;
       this.stateRequestHandler = stateRequestHandler;
+      this.samzaExecutionContext = samzaExecutionContext;
+      this.metricName = "ExecutableStage-" + stepName + "-process-ns";
     }
 
     @SuppressWarnings("unchecked")
@@ -298,6 +313,8 @@
                 stateRequestHandler,
                 BundleProgressHandler.ignored());
 
+        startBundleTime = getStartBundleTime();
+
         inputReceiver = Iterables.getOnlyElement(remoteBundle.getInputReceivers().values());
         bundledEventsBag
             .read()
@@ -314,6 +331,20 @@
       }
     }
 
+    @SuppressWarnings({
+      "RandomModInteger" // https://errorprone.info/bugpattern/RandomModInteger
+    })
+    private long getStartBundleTime() {
+      /*
+       * Use a random number for sampling purposes instead of counting, as
+       * SdkHarnessDoFnRunner is stateless and counters won't persist
+       * between invocations of DoFn(s).
+       */
+      return ThreadLocalRandom.current().nextInt() % DEFAULT_METRIC_SAMPLE_RATE == 0
+          ? System.nanoTime()
+          : 0;
+    }
+
     @Override
     public void processElement(WindowedValue<InT> elem) {
       try {
@@ -333,6 +364,25 @@
       }
     }
 
+    private void emitMetrics() {
+      if (startBundleTime <= 0) {
+        return;
+      }
+
+      final long count = Iterables.size(bundledEventsBag.read());
+
+      if (count <= 0) {
+        return;
+      }
+
+      final long finishBundleTime = System.nanoTime();
+      final long averageProcessTime = (finishBundleTime - startBundleTime) / count;
+
+      samzaExecutionContext
+          .getMetricsContainer()
+          .updateExecutableStageBundleMetric(metricName, averageProcessTime);
+    }
+
     @Override
     public <KeyT> void onTimer(
         String timerId,
@@ -369,6 +419,7 @@
         // RemoteBundle close blocks until all results are received
         remoteBundle.close();
         emitResults();
+        emitMetrics();
         bundledEventsBag.clear();
       } catch (Exception e) {
         throw new RuntimeException("Failed to finish remote bundle", e);
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/util/DoFnUtils.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/util/DoFnUtils.java
new file mode 100644
index 0000000..c090111
--- /dev/null
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/util/DoFnUtils.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.samza.util;
+
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.beam.runners.core.construction.graph.ExecutableStage;
+import org.apache.beam.runners.core.construction.graph.PipelineNode;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Splitter;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
+import org.apache.commons.collections.CollectionUtils;
+
+/** Utils for {@link org.apache.beam.runners.samza.runtime.DoFnOp}. */
+public class DoFnUtils {
+
+  public static String toStepName(ExecutableStage executableStage) {
+    /*
+     * Look for the first/input ParDo/DoFn in this executable stage by
+     * matching ParDo/DoFn's input PCollection with executable stage's
+     * input PCollection
+     */
+    Set<PipelineNode.PTransformNode> inputs =
+        executableStage.getTransforms().stream()
+            .filter(
+                transform ->
+                    transform
+                        .getTransform()
+                        .getInputsMap()
+                        .containsValue(executableStage.getInputPCollection().getId()))
+            .collect(Collectors.toSet());
+
+    Set<String> outputIds =
+        executableStage.getOutputPCollections().stream()
+            .map(PipelineNode.PCollectionNode::getId)
+            .collect(Collectors.toSet());
+
+    /*
+     * Look for the last/output ParDo/DoFn in this executable stage by
+     * matching ParDo/DoFn's output PCollection(s) with executable stage's
+     * output PCollection(s)
+     */
+    Set<PipelineNode.PTransformNode> outputs =
+        executableStage.getTransforms().stream()
+            .filter(
+                transform ->
+                    CollectionUtils.containsAny(
+                        transform.getTransform().getOutputsMap().values(), outputIds))
+            .collect(Collectors.toSet());
+
+    return String.format("[%s-%s]", toStepName(inputs), toStepName(outputs));
+  }
+
+  private static String toStepName(Set<PipelineNode.PTransformNode> nodes) {
+    // TODO: format name when there are multiple input/output PTransform(s) in the ExecutableStage
+    return nodes.isEmpty()
+        ? ""
+        : Iterables.get(
+            Splitter.on('/').split(nodes.iterator().next().getTransform().getUniqueName()), 0);
+  }
+}
diff --git a/runners/samza/src/test/java/org/apache/beam/runners/samza/util/DoFnUtilsTest.java b/runners/samza/src/test/java/org/apache/beam/runners/samza/util/DoFnUtilsTest.java
new file mode 100644
index 0000000..1e31616
--- /dev/null
+++ b/runners/samza/src/test/java/org/apache/beam/runners/samza/util/DoFnUtilsTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.samza.util;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.Serializable;
+import java.util.Objects;
+import org.apache.beam.runners.core.construction.PipelineTranslation;
+import org.apache.beam.runners.core.construction.graph.ExecutableStage;
+import org.apache.beam.runners.core.construction.graph.GreedyPipelineFuser;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.Filter;
+import org.apache.beam.sdk.transforms.GroupByKey;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.sdk.values.PDone;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
+import org.junit.Test;
+
+public class DoFnUtilsTest implements Serializable {
+  private final Pipeline pipeline = Pipeline.create();
+
+  @Test
+  public void testExecutableStageWithoutOutput() {
+    pipeline.apply(Create.of(KV.of(1L, "1")));
+
+    assertEquals("[Create.Values-]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline)));
+  }
+
+  @Test
+  public void testExecutableStageWithCustomizedName() {
+    pipeline.apply("MyCreateOf", Create.of(KV.of(1L, "1")));
+    assertEquals("[MyCreateOf-]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline)));
+  }
+
+  @Test
+  public void testExecutableStageWithOutput() {
+    pipeline
+        .apply("MyCreateOf", Create.of(KV.of(1L, "1")))
+        .apply("MyFilterBy", Filter.by(Objects::nonNull))
+        .apply(GroupByKey.create());
+
+    assertEquals("[MyCreateOf-MyFilterBy]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline)));
+  }
+
+  @Test
+  public void testExecutableStageWithPDone() {
+    pipeline
+        .apply("MyCreateOf", Create.of("1"))
+        .apply(
+            "PDoneTransform",
+            new PTransform<PCollection<String>, PDone>() {
+              @Override
+              public PDone expand(PCollection<String> input) {
+                return PDone.in(pipeline);
+              }
+            });
+
+    assertEquals("[MyCreateOf-]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline)));
+  }
+
+  private static ExecutableStage getOnlyExecutableStage(Pipeline p) {
+    return Iterables.getOnlyElement(
+        GreedyPipelineFuser.fuse(PipelineTranslation.toProto(p)).getFusedStages());
+  }
+}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
similarity index 85%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
index bc585d8..3371a40 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
@@ -19,6 +19,7 @@
 
 import org.apache.beam.runners.spark.SparkCommonPipelineOptions;
 import org.apache.beam.sdk.options.Default;
+import org.apache.beam.sdk.options.Description;
 import org.apache.beam.sdk.options.PipelineOptions;
 
 /**
@@ -32,4 +33,10 @@
   boolean getTestMode();
 
   void setTestMode(boolean testMode);
+
+  @Description("Enable if the runner should use the currently active Spark session.")
+  @Default.Boolean(false)
+  boolean getUseActiveSparkSession();
+
+  void setUseActiveSparkSession(boolean value);
 }
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
similarity index 82%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
index 663c87a..1392ae8 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
@@ -20,7 +20,6 @@
 import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
 
 import java.io.IOException;
-import java.util.Objects;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -31,8 +30,6 @@
 import org.apache.beam.sdk.metrics.MetricResults;
 import org.apache.beam.sdk.util.UserCodeException;
 import org.apache.spark.SparkException;
-import org.apache.spark.sql.SparkSession;
-import org.apache.spark.sql.streaming.StreamingQuery;
 import org.joda.time.Duration;
 
 /** Represents a Spark pipeline execution result. */
@@ -43,19 +40,16 @@
 public class SparkStructuredStreamingPipelineResult implements PipelineResult {
 
   final Future pipelineExecution;
-  final SparkSession sparkSession;
+  final Runnable onTerminalState;
+
   PipelineResult.State state;
 
-  boolean isStreaming;
-
   SparkStructuredStreamingPipelineResult(
-      final Future<?> pipelineExecution, final SparkSession sparkSession) {
+      final Future<?> pipelineExecution, final Runnable onTerminalState) {
     this.pipelineExecution = pipelineExecution;
-    this.sparkSession = sparkSession;
+    this.onTerminalState = onTerminalState;
     // pipelineExecution is expected to have started executing eagerly.
     this.state = State.RUNNING;
-    // TODO: Implement results on a streaming pipeline. Currently does not stream.
-    this.isStreaming = false;
   }
 
   private static RuntimeException runtimeExceptionFrom(final Throwable e) {
@@ -79,29 +73,10 @@
     return runtimeExceptionFrom(e);
   }
 
-  protected void stop() {
-    try {
-      // TODO: await any outstanding queries on the session if this is streaming.
-      if (isStreaming) {
-        for (StreamingQuery query : sparkSession.streams().active()) {
-          query.stop();
-        }
-      }
-    } catch (Exception e) {
-      throw beamExceptionFrom(e);
-    } finally {
-      sparkSession.stop();
-      if (Objects.equals(state, State.RUNNING)) {
-        this.state = State.STOPPED;
-      }
-    }
-  }
-
   private State awaitTermination(Duration duration)
       throws TimeoutException, ExecutionException, InterruptedException {
     pipelineExecution.get(duration.getMillis(), TimeUnit.MILLISECONDS);
     // Throws an exception if the job is not finished successfully in the given time.
-    // TODO: all streaming functionality
     return PipelineResult.State.DONE;
   }
 
@@ -149,7 +124,11 @@
     State oldState = this.state;
     this.state = newState;
     if (!oldState.isTerminal() && newState.isTerminal()) {
-      stop();
+      try {
+        onTerminalState.run();
+      } catch (Exception e) {
+        throw beamExceptionFrom(e);
+      }
     }
   }
 }
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
similarity index 95%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
copy to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
index d66e0c7..84c1bb3 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
@@ -131,6 +131,10 @@
             + " https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html\n"
             + " It is still experimental, its coverage of the Beam model is partial. ***");
 
+    LOG.warn(
+        "Support for Spark 2 is deprecated, this runner will be removed in a few releases.\n"
+            + "Spark 2 is reaching its EOL, consider migrating to Spark 3.");
+
     // clear state of Aggregators, Metrics and Watermarks if exists.
     AggregatorsAccumulator.clear();
     MetricsAccumulator.clear();
@@ -146,10 +150,12 @@
             });
     executorService.shutdown();
 
-    // TODO: Streaming.
+    Runnable onTerminalState =
+        options.getUseActiveSparkSession()
+            ? () -> {}
+            : () -> translationContext.getSparkSession().stop();
     SparkStructuredStreamingPipelineResult result =
-        new SparkStructuredStreamingPipelineResult(
-            submissionFuture, translationContext.getSparkSession());
+        new SparkStructuredStreamingPipelineResult(submissionFuture, onTerminalState);
 
     if (options.getEnableSparkMetricSinks()) {
       registerMetricsSource(options.getAppName());
@@ -162,7 +168,6 @@
 
     if (options.getTestMode()) {
       result.waitUntilFinish();
-      result.stop();
     }
 
     return result;
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
new file mode 100644
index 0000000..74bea7f
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.beam.runners.spark.structuredstreaming.aggregators.NamedAggregators;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** An adapter between the {@link NamedAggregators} and the Dropwizard {@link Metric} interface. */
+public class AggregatorMetric extends BeamMetricSet {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AggregatorMetric.class);
+
+  private final NamedAggregators namedAggregators;
+
+  private AggregatorMetric(NamedAggregators namedAggregators) {
+    this.namedAggregators = namedAggregators;
+  }
+
+  public static AggregatorMetric of(NamedAggregators namedAggregators) {
+    return new AggregatorMetric(namedAggregators);
+  }
+
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    for (Map.Entry<String, ?> entry : namedAggregators.renderAll().entrySet()) {
+      String name = prefix + "." + entry.getKey();
+      Object rawValue = entry.getValue();
+      if (rawValue != null) {
+        try {
+          Gauge<Double> gauge = staticGauge(rawValue);
+          if (filter.matches(name, gauge)) {
+            metrics.put(name, gauge);
+          }
+        } catch (NumberFormatException e) {
+          LOG.warn(
+              "Metric `{}` of type {} can't be reported, conversion to double failed.",
+              name,
+              rawValue.getClass().getSimpleName(),
+              e);
+        }
+      }
+    }
+    return metrics;
+  }
+
+  // Metric type is assumed to be compatible with Double
+  protected Gauge<Double> staticGauge(Object rawValue) throws NumberFormatException {
+    return rawValue instanceof Number
+        ? super.staticGauge((Number) rawValue)
+        : super.staticGauge(Double.parseDouble(rawValue.toString()));
+  }
+}
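
For reference, a minimal standalone sketch of the conversion pattern AggregatorMetric applies above, written against the plain Dropwizard API only. The aggregator names, values, and the class name AggregatorGaugeSketch are illustrative, not taken from the Beam codebase:

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricFilter;
import java.util.HashMap;
import java.util.Map;

public class AggregatorGaugeSketch {
  public static void main(String[] args) {
    // Illustrative aggregator values, as they might come out of NamedAggregators.renderAll().
    Map<String, Object> renderedAggregators = new HashMap<>();
    renderedAggregators.put("elementsProcessed", 42L);
    renderedAggregators.put("avgLatencyMs", "12.5"); // non-Number values are parsed as doubles

    Map<String, Gauge<Double>> gauges = new HashMap<>();
    for (Map.Entry<String, Object> entry : renderedAggregators.entrySet()) {
      String name = "myPipeline." + entry.getKey();
      double value =
          entry.getValue() instanceof Number
              ? ((Number) entry.getValue()).doubleValue()
              : Double.parseDouble(entry.getValue().toString());
      Gauge<Double> gauge = () -> value;
      // Only keep gauges that pass the (here: match-all) metric filter.
      if (MetricFilter.ALL.matches(name, gauge)) {
        gauges.put(name, gauge);
      }
    }
    gauges.forEach((n, g) -> System.out.println(n + " = " + g.getValue()));
  }
}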
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java
new file mode 100644
index 0000000..7095036
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricFilter;
+import java.util.Map;
+import org.apache.beam.runners.spark.metrics.WithMetricsSupport;
+
+/**
+ * {@link BeamMetricSet} is a {@link Gauge} that returns a map of multiple metrics which get
+ * flattened in {@link WithMetricsSupport#getGauges()} for usage in {@link
+ * org.apache.spark.metrics.sink.Sink Spark metric sinks}.
+ *
+ * <p>Note: Recent versions of Dropwizard {@link com.codahale.metrics.MetricRegistry MetricRegistry}
+ * do not allow registering arbitrary implementations of {@link com.codahale.metrics.Metric Metrics}
+ * and require usage of {@link Gauge} here.
+ */
+// TODO: turn into MetricRegistry https://github.com/apache/beam/issues/22384
+abstract class BeamMetricSet implements Gauge<Map<String, Gauge<Double>>> {
+
+  @Override
+  public final Map<String, Gauge<Double>> getValue() {
+    return getValue("", MetricFilter.ALL);
+  }
+
+  protected abstract Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter);
+
+  protected Gauge<Double> staticGauge(Number number) {
+    return new ConstantGauge(number.doubleValue());
+  }
+
+  private static class ConstantGauge implements Gauge<Double> {
+    private final double value;
+
+    ConstantGauge(double value) {
+      this.value = value;
+    }
+
+    @Override
+    public Double getValue() {
+      return value;
+    }
+  }
+}
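
A brief sketch of the registration constraint mentioned in the javadoc above: newer Dropwizard registries are documented here as rejecting arbitrary Metric subtypes, so the metric set is modeled as a Gauge and registered like any other gauge. The metric name below is made up:

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

public class GaugeRegistrationSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    // A Gauge is always an accepted metric type, unlike a custom Metric implementation.
    registry.register("beam.samples", (Gauge<Double>) () -> 42.0);
    Object value = registry.getGauges().get("beam.samples").getValue();
    System.out.println("beam.samples = " + value); // 42.0
  }
}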
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
new file mode 100644
index 0000000..0cecae4
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
+import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates.not;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import org.apache.beam.sdk.metrics.DistributionResult;
+import org.apache.beam.sdk.metrics.GaugeResult;
+import org.apache.beam.sdk.metrics.MetricKey;
+import org.apache.beam.sdk.metrics.MetricName;
+import org.apache.beam.sdk.metrics.MetricQueryResults;
+import org.apache.beam.sdk.metrics.MetricResult;
+import org.apache.beam.sdk.metrics.MetricResults;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Streams;
+
+/**
+ * An adapter between the {@link SparkMetricsContainerStepMap} and the Dropwizard {@link Metric}
+ * interface.
+ */
+class SparkBeamMetric extends BeamMetricSet {
+
+  private static final String ILLEGAL_CHARACTERS = "[^A-Za-z0-9-]";
+
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    MetricResults metricResults =
+        asAttemptedOnlyMetricResults(MetricsAccumulator.getInstance().value());
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    MetricQueryResults allMetrics = metricResults.allMetrics();
+    for (MetricResult<Long> metricResult : allMetrics.getCounters()) {
+      putFiltered(metrics, filter, renderName(prefix, metricResult), metricResult.getAttempted());
+    }
+    for (MetricResult<DistributionResult> metricResult : allMetrics.getDistributions()) {
+      DistributionResult result = metricResult.getAttempted();
+      String baseName = renderName(prefix, metricResult);
+      putFiltered(metrics, filter, baseName + ".count", result.getCount());
+      putFiltered(metrics, filter, baseName + ".sum", result.getSum());
+      putFiltered(metrics, filter, baseName + ".min", result.getMin());
+      putFiltered(metrics, filter, baseName + ".max", result.getMax());
+      putFiltered(metrics, filter, baseName + ".mean", result.getMean());
+    }
+    for (MetricResult<GaugeResult> metricResult : allMetrics.getGauges()) {
+      putFiltered(
+          metrics,
+          filter,
+          renderName(prefix, metricResult),
+          metricResult.getAttempted().getValue());
+    }
+    return metrics;
+  }
+
+  @VisibleForTesting
+  @SuppressWarnings("nullness") // ok to have nullable elements on stream
+  static String renderName(String prefix, MetricResult<?> metricResult) {
+    MetricKey key = metricResult.getKey();
+    MetricName name = key.metricName();
+    String step = key.stepName();
+    return Streams.concat(
+            Stream.of(prefix), // prefix is not cleaned, should it be?
+            Stream.of(stripSuffix(normalizePart(step))),
+            Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart))
+        .filter(not(Strings::isNullOrEmpty))
+        .collect(Collectors.joining("."));
+  }
+
+  private static @Nullable String normalizePart(@Nullable String str) {
+    return str != null ? str.replaceAll(ILLEGAL_CHARACTERS, "_") : null;
+  }
+
+  private static @Nullable String stripSuffix(@Nullable String str) {
+    return str != null && str.endsWith("_") ? str.substring(0, str.length() - 1) : str;
+  }
+
+  private void putFiltered(
+      Map<String, Gauge<Double>> metrics, MetricFilter filter, String name, Number value) {
+    Gauge<Double> metric = staticGauge(value);
+    if (filter.matches(name, metric)) {
+      metrics.put(name, metric);
+    }
+  }
+}
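
To make the renderName behaviour above concrete, here is a rough standalone sketch of the same sanitisation steps (normalise illegal characters, strip the step's trailing underscore, join with dots). The prefix, step, namespace and metric names are invented for illustration:

import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MetricNameRenderingSketch {
  public static void main(String[] args) {
    String prefix = "myApp"; // the prefix itself is not cleaned, matching the code above
    String step = "Map/ParDo(Anonymous)_"; // step name with illegal characters and trailing '_'
    String namespace = "org.example.MyDoFn";
    String name = "elements.read";

    String rendered =
        Stream.of(prefix, stripSuffix(normalize(step)), normalize(namespace), normalize(name))
            .filter(s -> s != null && !s.isEmpty())
            .collect(Collectors.joining("."));

    // Prints: myApp.Map_ParDo_Anonymous_.org_example_MyDoFn.elements_read
    System.out.println(rendered);
  }

  private static String normalize(String s) {
    return s.replaceAll("[^A-Za-z0-9-]", "_");
  }

  private static String stripSuffix(String s) {
    return s.endsWith("_") ? s.substring(0, s.length() - 1) : s;
  }
}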
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
new file mode 100644
index 0000000..d48a229
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import java.util.Map;
+import java.util.SortedMap;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSortedMap;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Ordering;
+
+/**
+ * A decorator-like {@link MetricRegistry} that supports {@link BeamMetricSet}s as {@link Gauge
+ * Gauges}.
+ *
+ * <p>{@link MetricRegistry} is not an interface, so this is not a by-the-book decorator. That said,
+ * it delegates all metric-related getters to the "decorated" instance.
+ */
+@SuppressWarnings({"rawtypes"}) // required by interface
+public class WithMetricsSupport extends MetricRegistry {
+
+  private final MetricRegistry internalMetricRegistry;
+
+  private WithMetricsSupport(final MetricRegistry internalMetricRegistry) {
+    this.internalMetricRegistry = internalMetricRegistry;
+  }
+
+  public static WithMetricsSupport forRegistry(final MetricRegistry metricRegistry) {
+    return new WithMetricsSupport(metricRegistry);
+  }
+
+  @Override
+  public SortedMap<String, Timer> getTimers(final MetricFilter filter) {
+    return internalMetricRegistry.getTimers(filter);
+  }
+
+  @Override
+  public SortedMap<String, Meter> getMeters(final MetricFilter filter) {
+    return internalMetricRegistry.getMeters(filter);
+  }
+
+  @Override
+  public SortedMap<String, Histogram> getHistograms(final MetricFilter filter) {
+    return internalMetricRegistry.getHistograms(filter);
+  }
+
+  @Override
+  public SortedMap<String, Counter> getCounters(final MetricFilter filter) {
+    return internalMetricRegistry.getCounters(filter);
+  }
+
+  @Override
+  public SortedMap<String, Gauge> getGauges(final MetricFilter filter) {
+    ImmutableSortedMap.Builder<String, Gauge> builder =
+        new ImmutableSortedMap.Builder<>(Ordering.from(String.CASE_INSENSITIVE_ORDER));
+
+    Map<String, Gauge> gauges =
+        internalMetricRegistry.getGauges(
+            (n, m) -> filter.matches(n, m) || m instanceof BeamMetricSet);
+
+    for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
+      Gauge gauge = entry.getValue();
+      if (gauge instanceof BeamMetricSet) {
+        builder.putAll(((BeamMetricSet) gauge).getValue(entry.getKey(), filter));
+      } else {
+        builder.put(entry.getKey(), gauge);
+      }
+    }
+    return builder.build();
+  }
+}
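
A small usage sketch of the decorated registry: for plain gauges it behaves like the wrapped registry, while any BeamMetricSet registered by the runner would additionally be flattened by getGauges into individual Gauge<Double> entries keyed by their rendered names. In practice Spark builds and queries this registry internally when instantiating metric sinks; the snippet below only illustrates the contract:

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;

public class WithMetricsSupportSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    registry.register("plain.gauge", (Gauge<Double>) () -> 1.0);

    // The decorator delegates to the wrapped registry; BeamMetricSet gauges (registered by the
    // runner, not shown here) would be expanded into one entry per rendered metric name.
    MetricRegistry decorated = WithMetricsSupport.forRegistry(registry);
    decorated
        .getGauges(MetricFilter.ALL)
        .forEach((name, gauge) -> System.out.println(name + " = " + gauge.getValue()));
  }
}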
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
new file mode 100644
index 0000000..c8f9139
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
+
+import com.codahale.metrics.MetricRegistry;
+import java.util.Properties;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
+import org.apache.spark.metrics.sink.Sink;
+
+/**
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to a CSV file.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.csv.class"="org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleCsvSink"
+ * "spark.metrics.conf.*.sink.csv.directory"="<output_directory>"
+ * "spark.metrics.conf.*.sink.csv.period"=10
+ * "spark.metrics.conf.*.sink.csv.unit"=seconds
+ * }</pre>
+ */
+public class CodahaleCsvSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.CsvSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
+  public CodahaleCsvSink(
+      final Properties properties,
+      final MetricRegistry metricRegistry,
+      final SecurityManager securityMgr) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public CodahaleCsvSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
+  }
+}
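
The javadoc above shows the metrics.properties-style configuration; as a hedged convenience sketch, the same settings can presumably also be supplied programmatically through SparkConf. The output directory below is a placeholder:

import org.apache.spark.SparkConf;

public class CsvSinkConfSketch {
  public static void main(String[] args) {
    // Equivalent of the configuration shown in the CodahaleCsvSink javadoc.
    SparkConf conf =
        new SparkConf()
            .set(
                "spark.metrics.conf.*.sink.csv.class",
                "org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleCsvSink")
            .set("spark.metrics.conf.*.sink.csv.directory", "/tmp/beam-metrics") // placeholder path
            .set("spark.metrics.conf.*.sink.csv.period", "10")
            .set("spark.metrics.conf.*.sink.csv.unit", "seconds");
    System.out.println(conf.toDebugString());
  }
}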
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
new file mode 100644
index 0000000..5640c96
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
+
+import com.codahale.metrics.MetricRegistry;
+import java.util.Properties;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
+import org.apache.spark.metrics.sink.Sink;
+
+/**
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to Graphite.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.graphite.class"="org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleGraphiteSink"
+ * "spark.metrics.conf.*.sink.graphite.host"="<graphite_hostname>"
+ * "spark.metrics.conf.*.sink.graphite.port"=<graphite_listening_port>
+ * "spark.metrics.conf.*.sink.graphite.period"=10
+ * "spark.metrics.conf.*.sink.graphite.unit"=seconds
+ * "spark.metrics.conf.*.sink.graphite.prefix"="<optional_prefix>"
+ * "spark.metrics.conf.*.sink.graphite.regex"="<optional_regex_to_send_matching_metrics>"
+ * }</pre>
+ */
+public class CodahaleGraphiteSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.GraphiteSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
+  public CodahaleGraphiteSink(
+      final Properties properties,
+      final MetricRegistry metricRegistry,
+      final org.apache.spark.SecurityManager securityMgr) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public CodahaleGraphiteSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
+  }
+}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
similarity index 88%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
index 766065f..aed287b 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
@@ -38,7 +38,6 @@
 import org.apache.beam.sdk.values.PValue;
 import org.apache.beam.sdk.values.TupleTag;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
-import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.ForeachFunction;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.ForeachWriter;
@@ -75,29 +74,7 @@
   private final Map<PCollectionView<?>, Dataset<?>> broadcastDataSets;
 
   public AbstractTranslationContext(SparkStructuredStreamingPipelineOptions options) {
-    SparkConf sparkConf = new SparkConf();
-    sparkConf.setMaster(options.getSparkMaster());
-    sparkConf.setAppName(options.getAppName());
-    if (options.getFilesToStage() != null && !options.getFilesToStage().isEmpty()) {
-      sparkConf.setJars(options.getFilesToStage().toArray(new String[0]));
-    }
-
-    // By default, Spark defines 200 as a number of sql partitions. This seems too much for local
-    // mode, so try to align with value of "sparkMaster" option in this case.
-    // We should not overwrite this value (or any user-defined spark configuration value) if the
-    // user has already configured it.
-    String sparkMaster = options.getSparkMaster();
-    if (sparkMaster != null
-        && sparkMaster.startsWith("local[")
-        && System.getProperty("spark.sql.shuffle.partitions") == null) {
-      int numPartitions =
-          Integer.parseInt(sparkMaster.substring("local[".length(), sparkMaster.length() - 1));
-      if (numPartitions > 0) {
-        sparkConf.set("spark.sql.shuffle.partitions", String.valueOf(numPartitions));
-      }
-    }
-
-    this.sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
+    this.sparkSession = SparkSessionFactory.getOrCreateSession(options);
     this.serializablePipelineOptions = new SerializablePipelineOptions(options);
     this.datasets = new HashMap<>();
     this.leaves = new HashSet<>();
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java
new file mode 100644
index 0000000..d8430f5
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation;
+
+import java.util.List;
+import javax.annotation.Nullable;
+import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
+import org.apache.spark.SparkConf;
+import org.apache.spark.sql.SparkSession;
+
+public class SparkSessionFactory {
+
+  /**
+   * Gets the active {@link SparkSession} or creates a new one using {@link
+   * SparkStructuredStreamingPipelineOptions}.
+   */
+  public static SparkSession getOrCreateSession(SparkStructuredStreamingPipelineOptions options) {
+    if (options.getUseActiveSparkSession()) {
+      return SparkSession.active();
+    }
+    return sessionBuilder(options.getSparkMaster(), options.getAppName(), options.getFilesToStage())
+        .getOrCreate();
+  }
+
+  /** Creates a Spark session builder with some optimizations for local mode, e.g. in tests. */
+  public static SparkSession.Builder sessionBuilder(String master) {
+    return sessionBuilder(master, null, null);
+  }
+
+  private static SparkSession.Builder sessionBuilder(
+      String master, @Nullable String appName, @Nullable List<String> jars) {
+    SparkConf sparkConf = new SparkConf();
+    sparkConf.setMaster(master);
+    if (appName != null) {
+      sparkConf.setAppName(appName);
+    }
+    if (jars != null && !jars.isEmpty()) {
+      sparkConf.setJars(jars.toArray(new String[0]));
+    }
+
+    // By default, Spark uses 200 as the number of SQL shuffle partitions. This is too much for
+    // local mode, so align it with the parallelism of the "sparkMaster" option in that case.
+    // This value (like any other user-defined Spark configuration value) must not be overwritten
+    // if the user has already configured it.
+    if (master != null
+        && master.startsWith("local[")
+        && System.getProperty("spark.sql.shuffle.partitions") == null) {
+      int numPartitions =
+          Integer.parseInt(master.substring("local[".length(), master.length() - 1));
+      if (numPartitions > 0) {
+        sparkConf.set("spark.sql.shuffle.partitions", String.valueOf(numPartitions));
+      }
+    }
+    return SparkSession.builder().config(sparkConf);
+  }
+}
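
A short usage sketch of the factory for local mode, assuming the spark.sql.shuffle.partitions system property is not set: for master local[4] the session should come up with 4 shuffle partitions. The app name is arbitrary and only added because a Spark context requires one:

import org.apache.beam.runners.spark.structuredstreaming.translation.SparkSessionFactory;
import org.apache.spark.sql.SparkSession;

public class LocalSessionSketch {
  public static void main(String[] args) {
    SparkSession session =
        SparkSessionFactory.sessionBuilder("local[4]")
            .appName("shuffle-partitions-sketch") // arbitrary name for this sketch
            .getOrCreate();
    // Expected to print 4, since the builder aligned the partitions with "local[4]".
    System.out.println(session.conf().get("spark.sql.shuffle.partitions"));
    session.stop();
  }
}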
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
index 325d150..54b400f 100644
--- a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
@@ -17,38 +17,35 @@
  */
 package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
 
-import static org.apache.spark.sql.types.DataTypes.BinaryType;
-
-import java.util.Collections;
-import java.util.List;
-import org.apache.beam.sdk.coders.Coder;
 import org.apache.spark.sql.Encoder;
-import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal;
 import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
-import org.apache.spark.sql.catalyst.expressions.BoundReference;
-import org.apache.spark.sql.catalyst.expressions.Cast;
 import org.apache.spark.sql.catalyst.expressions.Expression;
-import org.apache.spark.sql.types.ObjectType;
-import scala.collection.JavaConversions;
-import scala.reflect.ClassTag;
+import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke;
+import org.apache.spark.sql.types.DataType;
+import scala.collection.immutable.List;
+import scala.collection.immutable.Nil$;
+import scala.collection.mutable.WrappedArray;
 import scala.reflect.ClassTag$;
 
 public class EncoderFactory {
 
-  public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
-    Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
-    ClassTag<T> classTag = ClassTag$.MODULE$.apply(clazz);
-    List<Expression> serializers =
-        Collections.singletonList(
-            new EncoderHelpers.EncodeUsingBeamCoder<>(
-                new BoundReference(0, new ObjectType(clazz), true), coder));
-
+  static <T> Encoder<T> create(
+      Expression serializer, Expression deserializer, Class<? super T> clazz) {
+    // TODO Isolate usage of Scala APIs in utility https://github.com/apache/beam/issues/22382
+    List<Expression> serializers = Nil$.MODULE$.$colon$colon(serializer);
     return new ExpressionEncoder<>(
         SchemaHelpers.binarySchema(),
         false,
-        JavaConversions.collectionAsScalaIterable(serializers).toSeq(),
-        new EncoderHelpers.DecodeUsingBeamCoder<>(
-            new Cast(new GetColumnByOrdinal(0, BinaryType), BinaryType), classTag, coder),
-        classTag);
+        serializers,
+        deserializer,
+        ClassTag$.MODULE$.apply(clazz));
+  }
+
+  /**
+   * Invokes the static method {@code fun} on class {@code cls}, immediately propagating
+   * {@code null} if any input argument is {@code null}.
+   */
+  static Expression invokeIfNotNull(Class<?> cls, String fun, DataType type, Expression... args) {
+    return new StaticInvoke(cls, type, fun, new WrappedArray.ofRef<>(args), true, true);
   }
 }
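
The Nil$.$colon$colon call above builds a single-element immutable Scala list from Java without going through converters (hence the TODO about isolating Scala API usage). A tiny illustrative sketch of the same idiom:

import scala.collection.immutable.List;
import scala.collection.immutable.Nil$;

public class ScalaListSketch {
  public static void main(String[] args) {
    // Prepending to Nil ("::" in Scala) yields an immutable single-element list.
    List<String> singleton = Nil$.MODULE$.$colon$colon("only-element");
    System.out.println(singleton); // List(only-element)
  }
}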
diff --git a/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
new file mode 100644
index 0000000..68738cf
--- /dev/null
+++ b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
+
+import static org.apache.spark.sql.types.DataTypes.BinaryType;
+
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.spark.sql.Encoder;
+import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal;
+import org.apache.spark.sql.catalyst.expressions.BoundReference;
+import org.apache.spark.sql.catalyst.expressions.Expression;
+import org.apache.spark.sql.catalyst.expressions.Literal;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.ObjectType;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class EncoderHelpers {
+  private static final DataType OBJECT_TYPE = new ObjectType(Object.class);
+
+  /**
+   * Wraps a Beam {@link Coder} in a Spark {@link Encoder} using Catalyst expression encoders
+   * (which rely on Java code generation).
+   */
+  public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
+    Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
+    // Class T could be private, therefore use OBJECT_TYPE to not risk an IllegalAccessError
+    return EncoderFactory.create(
+        beamSerializer(rootRef(OBJECT_TYPE, true), coder),
+        beamDeserializer(rootCol(BinaryType), coder),
+        clazz);
+  }
+
+  /** Catalyst Expression that serializes elements using Beam {@link Coder}. */
+  private static <T> Expression beamSerializer(Expression obj, Coder<T> coder) {
+    Expression[] args = {obj, lit(coder, Coder.class)};
+    return EncoderFactory.invokeIfNotNull(CoderHelpers.class, "toByteArray", BinaryType, args);
+  }
+
+  /** Catalyst Expression that deserializes elements using Beam {@link Coder}. */
+  private static <T> Expression beamDeserializer(Expression bytes, Coder<T> coder) {
+    Expression[] args = {bytes, lit(coder, Coder.class)};
+    return EncoderFactory.invokeIfNotNull(CoderHelpers.class, "fromByteArray", OBJECT_TYPE, args);
+  }
+
+  private static Expression rootRef(DataType dt, boolean nullable) {
+    return new BoundReference(0, dt, nullable);
+  }
+
+  private static Expression rootCol(DataType dt) {
+    return new GetColumnByOrdinal(0, dt);
+  }
+
+  private static <T extends @NonNull Object> Literal lit(T obj, Class<? extends T> cls) {
+    return Literal.fromObject(obj, new ObjectType(cls));
+  }
+}
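
A rough usage sketch of fromBeamCoder, assuming a local SparkSession: the resulting Encoder serialises dataset elements to the binary schema via the Beam coder. The element values and app name are illustrative:

import java.util.Arrays;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.SparkSession;

public class BeamCoderEncoderSketch {
  public static void main(String[] args) {
    SparkSession session =
        SparkSession.builder().master("local[1]").appName("encoder-sketch").getOrCreate();
    // Elements are encoded/decoded through the Beam StringUtf8Coder rather than Spark's own codecs.
    Encoder<String> encoder = EncoderHelpers.fromBeamCoder(StringUtf8Coder.of());
    Dataset<String> dataset = session.createDataset(Arrays.asList("beam", "spark"), encoder);
    System.out.println(dataset.count()); // 2
    session.stop();
  }
}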
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java b/runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
similarity index 100%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
rename to runners/spark/2/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
similarity index 75%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
index f68df83..33eef26 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
+++ b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
@@ -23,6 +23,9 @@
 import java.util.Arrays;
 import java.util.Map;
 import javax.annotation.Nullable;
+import org.apache.beam.runners.spark.structuredstreaming.translation.SparkSessionFactory;
+import org.apache.beam.sdk.options.PipelineOptions;
+import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.values.KV;
 import org.apache.spark.sql.SparkSession;
 import org.junit.rules.ExternalResource;
@@ -34,13 +37,12 @@
   private transient @Nullable SparkSession session = null;
 
   public SparkSessionRule(String sparkMaster, Map<String, String> sparkConfig) {
-    builder = SparkSession.builder();
+    builder = SparkSessionFactory.sessionBuilder(sparkMaster);
     sparkConfig.forEach(builder::config);
-    builder.master(sparkMaster);
   }
 
   public SparkSessionRule(KV<String, String>... sparkConfig) {
-    this("local", sparkConfig);
+    this("local[2]", sparkConfig);
   }
 
   public SparkSessionRule(String sparkMaster, KV<String, String>... sparkConfig) {
@@ -54,6 +56,19 @@
     return session;
   }
 
+  public PipelineOptions createPipelineOptions() {
+    return configure(TestPipeline.testingPipelineOptions());
+  }
+
+  public PipelineOptions configure(PipelineOptions options) {
+    SparkStructuredStreamingPipelineOptions opts =
+        options.as(SparkStructuredStreamingPipelineOptions.class);
+    opts.setUseActiveSparkSession(true);
+    opts.setRunner(SparkStructuredStreamingRunner.class);
+    opts.setTestMode(true);
+    return opts;
+  }
+
   @Override
   public Statement apply(Statement base, Description description) {
     builder.appName(description.getDisplayName());
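For orientation, a minimal sketch of how a test is expected to combine the reworked rule with TestPipeline. SparkSessionRule, createPipelineOptions(), and the local[2] default come from the hunk above; the test class and its body are illustrative only.

```java
import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Create;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;

public class ExampleTranslationTest {

  // One Spark session shared by the whole test class (master defaults to local[2]).
  @ClassRule public static final SparkSessionRule SESSION = new SparkSessionRule();

  // createPipelineOptions() selects SparkStructuredStreamingRunner, enables test mode and
  // useActiveSparkSession, so the pipeline runs on the rule's session.
  @Rule
  public final TestPipeline pipeline = TestPipeline.fromOptions(SESSION.createPipelineOptions());

  @Test
  public void runsOnSharedSession() {
    pipeline.apply(Create.of(1, 2, 3));
    pipeline.run().waitUntilFinish();
  }
}
```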
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
similarity index 75%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
index 8649e91..f994f77 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
+++ b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
@@ -17,22 +17,22 @@
  */
 package org.apache.beam.runners.spark.structuredstreaming.aggregators.metrics.sink;
 
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
+import java.util.Collection;
 import java.util.Properties;
 import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.apache.spark.metrics.sink.Sink;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
 /** An in-memory {@link Sink} implementation for tests. */
-@RunWith(JUnit4.class)
 public class InMemoryMetrics implements Sink {
 
   private static WithMetricsSupport extendedMetricsRegistry;
   private static MetricRegistry internalMetricRegistry;
 
+  // Constructor for Spark 3.1.x and earlier
   @SuppressWarnings("UnusedParameters")
   public InMemoryMetrics(
       final Properties properties,
@@ -42,26 +42,24 @@
     internalMetricRegistry = metricRegistry;
   }
 
-  @SuppressWarnings("TypeParameterUnusedInFormals")
-  public static <T> T valueOf(final String name) {
-    final T retVal;
+  // Constructor for Spark >= 3.2
+  @SuppressWarnings("UnusedParameters")
+  public InMemoryMetrics(final Properties properties, final MetricRegistry metricRegistry) {
+    extendedMetricsRegistry = WithMetricsSupport.forRegistry(metricRegistry);
+    internalMetricRegistry = metricRegistry;
+  }
 
+  @SuppressWarnings({"TypeParameterUnusedInFormals", "rawtypes"})
+  public static <T> T valueOf(final String name) {
     // this might fail in case we have multiple aggregators with the same suffix after
     // the last dot, but it should be good enough for tests.
-    if (extendedMetricsRegistry != null
-        && extendedMetricsRegistry.getGauges().keySet().stream()
-            .anyMatch(Predicates.containsPattern(name + "$")::apply)) {
-      String key =
-          extendedMetricsRegistry.getGauges().keySet().stream()
-              .filter(Predicates.containsPattern(name + "$")::apply)
-              .findFirst()
-              .get();
-      retVal = (T) extendedMetricsRegistry.getGauges().get(key).getValue();
+    if (extendedMetricsRegistry != null) {
+      Collection<Gauge> matches =
+          extendedMetricsRegistry.getGauges((n, m) -> n.endsWith(name)).values();
+      return matches.isEmpty() ? null : (T) Iterables.getOnlyElement(matches).getValue();
     } else {
-      retVal = null;
+      return null;
     }
-
-    return retVal;
   }
 
   @SuppressWarnings("WeakerAccess")
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
similarity index 65%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
index 40b5036..2f02656 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
+++ b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
@@ -21,51 +21,39 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 
-import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
-import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingRunner;
+import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
 import org.apache.beam.runners.spark.structuredstreaming.examples.WordCount;
-import org.apache.beam.sdk.Pipeline;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
-import org.apache.beam.sdk.options.PipelineOptionsFactory;
 import org.apache.beam.sdk.testing.PAssert;
+import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.transforms.Create;
 import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSet;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
+import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExternalResource;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
-/**
- * TODO: add testInStreamingMode() once streaming support will be implemented.
- *
- * <p>A test that verifies Beam metrics are reported to Spark's metrics sink in both batch and
- * streaming modes.
- */
-@Ignore("Has been failing since at least c350188ef7a8704c7336f3c20a1ab2144abbcd4a")
-@RunWith(JUnit4.class)
+/** A test that verifies Beam metrics are reported to Spark's metrics sink in batch mode. */
 public class SparkMetricsSinkTest {
-  @Rule public ExternalResource inMemoryMetricsSink = new InMemoryMetricsSinkRule();
+
+  @ClassRule
+  public static final SparkSessionRule SESSION =
+      new SparkSessionRule(
+          KV.of("spark.metrics.conf.*.sink.memory.class", InMemoryMetrics.class.getName()));
+
+  @Rule public final ExternalResource inMemoryMetricsSink = new InMemoryMetricsSinkRule();
+
+  @Rule
+  public final TestPipeline pipeline = TestPipeline.fromOptions(SESSION.createPipelineOptions());
 
   private static final ImmutableList<String> WORDS =
       ImmutableList.of("hi there", "hi", "hi sue bob", "hi sue", "", "bob hi");
   private static final ImmutableSet<String> EXPECTED_COUNTS =
       ImmutableSet.of("hi: 5", "there: 1", "sue: 2", "bob: 2");
-  private static Pipeline pipeline;
-
-  @BeforeClass
-  public static void beforeClass() {
-    SparkStructuredStreamingPipelineOptions options =
-        PipelineOptionsFactory.create().as(SparkStructuredStreamingPipelineOptions.class);
-    options.setRunner(SparkStructuredStreamingRunner.class);
-    options.setTestMode(true);
-    pipeline = Pipeline.create(options);
-  }
 
   @Test
   public void testInBatchMode() throws Exception {
@@ -76,9 +64,10 @@
             .apply(Create.of(WORDS).withCoder(StringUtf8Coder.of()))
             .apply(new WordCount.CountWords())
             .apply(MapElements.via(new WordCount.FormatAsTextFn()));
+
     PAssert.that(output).containsInAnyOrder(EXPECTED_COUNTS);
     pipeline.run();
 
-    assertThat(InMemoryMetrics.<Double>valueOf("emptyLines"), is(1d));
+    assertThat(InMemoryMetrics.valueOf("emptyLines"), is(1d));
   }
 }
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
similarity index 71%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
index a698934..fd0aa35 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java
+++ b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
@@ -24,12 +24,9 @@
 import org.apache.beam.sdk.metrics.MetricName;
 import org.apache.beam.sdk.metrics.MetricResult;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
 /** Test BeamMetric. */
-@RunWith(JUnit4.class)
-public class BeamMetricTest {
+public class SparkBeamMetricTest {
   @Test
   public void testRenderName() {
     MetricResult<Object> metricResult =
@@ -38,10 +35,25 @@
                 "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
             123,
             456);
-    String renderedName = new SparkBeamMetric().renderName(metricResult);
+    String renderedName = SparkBeamMetric.renderName("", metricResult);
     assertThat(
         "Metric name was not rendered correctly",
         renderedName,
         equalTo("myStep_one_two_three.myNameSpace__.myName__"));
   }
+
+  @Test
+  public void testRenderNameWithPrefix() {
+    MetricResult<Object> metricResult =
+        MetricResult.create(
+            MetricKey.create(
+                "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
+            123,
+            456);
+    String renderedName = SparkBeamMetric.renderName("prefix", metricResult);
+    assertThat(
+        "Metric name was not rendered correctly",
+        renderedName,
+        equalTo("prefix.myStep_one_two_three.myNameSpace__.myName__"));
+  }
 }
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
diff --git a/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
new file mode 100644
index 0000000..c8a8fba
--- /dev/null
+++ b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
+
+import static java.util.Arrays.asList;
+import static org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.fromBeamCoder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.DelegateCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.coders.VarIntCoder;
+import org.apache.beam.sdk.values.TypeDescriptor;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoder;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Test of the wrapping of Beam Coders as Spark ExpressionEncoders. */
+@RunWith(JUnit4.class)
+public class EncoderHelpersTest {
+
+  @ClassRule public static SparkSessionRule sessionRule = new SparkSessionRule();
+
+  private <T> Dataset<T> createDataset(List<T> data, Encoder<T> encoder) {
+    Dataset<T> ds = sessionRule.getSession().createDataset(data, encoder);
+    ds.printSchema();
+    return ds;
+  }
+
+  @Test
+  public void beamCoderToSparkEncoderTest() {
+    List<Integer> data = Arrays.asList(1, 2, 3);
+    Dataset<Integer> dataset = createDataset(data, EncoderHelpers.fromBeamCoder(VarIntCoder.of()));
+    assertEquals(data, dataset.collectAsList());
+  }
+
+  @Test
+  public void testBeamEncoderOfPrivateType() {
+    // Verify concrete types are not used in coder generation.
+    // In case of private types this would cause an IllegalAccessError.
+    List<PrivateString> data = asList(new PrivateString("1"), new PrivateString("2"));
+    Dataset<PrivateString> dataset = createDataset(data, fromBeamCoder(PrivateString.CODER));
+    assertThat(dataset.collect(), equalTo(data.toArray()));
+  }
+
+  private static class PrivateString {
+    private static final Coder<PrivateString> CODER =
+        DelegateCoder.of(
+            StringUtf8Coder.of(),
+            str -> str.string,
+            PrivateString::new,
+            new TypeDescriptor<PrivateString>() {});
+
+    private final String string;
+
+    public PrivateString(String string) {
+      this.string = string;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      PrivateString that = (PrivateString) o;
+      return Objects.equals(string, that.string);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(string);
+    }
+  }
+}
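A hedged sketch of what this new test exercises: EncoderHelpers.fromBeamCoder wraps any Beam Coder as a Spark Encoder, so arbitrary Beam types can back a Dataset without a Spark bean encoder. The KV coder below is illustrative and assumes an active SparkSession (for instance via SparkSessionRule), with the snippet living inside a test method in the same package.

```java
// Illustrative: back a Dataset of Beam KVs with a Beam coder.
Encoder<KV<String, Integer>> encoder =
    EncoderHelpers.fromBeamCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()));
Dataset<KV<String, Integer>> ds =
    SparkSession.active().createDataset(Arrays.asList(KV.of("a", 1), KV.of("b", 2)), encoder);
ds.show();
```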
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java b/runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
similarity index 100%
rename from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
rename to runners/spark/2/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
diff --git a/runners/spark/3/build.gradle b/runners/spark/3/build.gradle
index 1641dc5..2bb5d67 100644
--- a/runners/spark/3/build.gradle
+++ b/runners/spark/3/build.gradle
@@ -28,3 +28,37 @@
 
 // Load the main build script which contains all build logic.
 apply from: "$basePath/spark_runner.gradle"
+
+
+def sparkVersions = [
+    "330": "3.3.0",
+    "321": "3.2.1"
+]
+
+sparkVersions.each { kv ->
+  configurations.create("sparkVersion$kv.key")
+  configurations."sparkVersion$kv.key" {
+    resolutionStrategy {
+      spark.components.each { component -> force "$component:$kv.value" }
+    }
+  }
+
+  dependencies {
+    spark.components.each { component -> "sparkVersion$kv.key" "$component:$kv.value" }
+  }
+
+  tasks.register("sparkVersion${kv.key}Test", Test) {
+    group = "Verification"
+    description = "Verifies code compatibility with Spark $kv.value"
+    classpath = configurations."sparkVersion$kv.key" + sourceSets.test.runtimeClasspath
+    systemProperties test.systemProperties
+
+    include "**/*.class"
+    maxParallelForks 4
+  }
+}
+
+tasks.register("sparkVersionsTest") {
+  group = "Verification"
+  dependsOn sparkVersions.collect{k,v -> "sparkVersion${k}Test"}
+}
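In effect this generates one verification task per pinned Spark release, sparkVersion330Test and sparkVersion321Test, each running the existing test sources against a classpath with the Spark components forced to that version, plus an aggregate sparkVersionsTest that depends on all of them (invoked e.g. as ./gradlew :runners:spark:3:sparkVersion330Test, assuming the usual :runners:spark:3 project path).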
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/Constants.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
similarity index 85%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
index bc585d8..3371a40 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineOptions.java
@@ -19,6 +19,7 @@
 
 import org.apache.beam.runners.spark.SparkCommonPipelineOptions;
 import org.apache.beam.sdk.options.Default;
+import org.apache.beam.sdk.options.Description;
 import org.apache.beam.sdk.options.PipelineOptions;
 
 /**
@@ -32,4 +33,10 @@
   boolean getTestMode();
 
   void setTestMode(boolean testMode);
+
+  @Description("Enable if the runner should use the currently active Spark session.")
+  @Default.Boolean(false)
+  boolean getUseActiveSparkSession();
+
+  void setUseActiveSparkSession(boolean value);
 }
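A hedged sketch of what the new flag is for: a host application that already created a SparkSession can ask the runner to reuse it instead of building its own; together with the onTerminalState change further below, the runner then leaves that session running after the pipeline finishes. Everything except the option, runner, and factory names shown in this diff is illustrative.

```java
// Illustrative: reuse an externally managed SparkSession for a Beam pipeline.
SparkSession session = SparkSession.builder().master("local[2]").getOrCreate();

SparkStructuredStreamingPipelineOptions opts =
    PipelineOptionsFactory.create().as(SparkStructuredStreamingPipelineOptions.class);
opts.setRunner(SparkStructuredStreamingRunner.class);
opts.setUseActiveSparkSession(true); // pick up the active session instead of creating one

Pipeline pipeline = Pipeline.create(opts);
// ... build the pipeline ...
pipeline.run().waitUntilFinish();

// The session is still usable here: the runner only stops sessions it created itself.
session.sql("SELECT 1").show();
```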
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
similarity index 82%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
index 663c87a..1392ae8 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
@@ -20,7 +20,6 @@
 import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
 
 import java.io.IOException;
-import java.util.Objects;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -31,8 +30,6 @@
 import org.apache.beam.sdk.metrics.MetricResults;
 import org.apache.beam.sdk.util.UserCodeException;
 import org.apache.spark.SparkException;
-import org.apache.spark.sql.SparkSession;
-import org.apache.spark.sql.streaming.StreamingQuery;
 import org.joda.time.Duration;
 
 /** Represents a Spark pipeline execution result. */
@@ -43,19 +40,16 @@
 public class SparkStructuredStreamingPipelineResult implements PipelineResult {
 
   final Future pipelineExecution;
-  final SparkSession sparkSession;
+  final Runnable onTerminalState;
+
   PipelineResult.State state;
 
-  boolean isStreaming;
-
   SparkStructuredStreamingPipelineResult(
-      final Future<?> pipelineExecution, final SparkSession sparkSession) {
+      final Future<?> pipelineExecution, final Runnable onTerminalState) {
     this.pipelineExecution = pipelineExecution;
-    this.sparkSession = sparkSession;
+    this.onTerminalState = onTerminalState;
     // pipelineExecution is expected to have started executing eagerly.
     this.state = State.RUNNING;
-    // TODO: Implement results on a streaming pipeline. Currently does not stream.
-    this.isStreaming = false;
   }
 
   private static RuntimeException runtimeExceptionFrom(final Throwable e) {
@@ -79,29 +73,10 @@
     return runtimeExceptionFrom(e);
   }
 
-  protected void stop() {
-    try {
-      // TODO: await any outstanding queries on the session if this is streaming.
-      if (isStreaming) {
-        for (StreamingQuery query : sparkSession.streams().active()) {
-          query.stop();
-        }
-      }
-    } catch (Exception e) {
-      throw beamExceptionFrom(e);
-    } finally {
-      sparkSession.stop();
-      if (Objects.equals(state, State.RUNNING)) {
-        this.state = State.STOPPED;
-      }
-    }
-  }
-
   private State awaitTermination(Duration duration)
       throws TimeoutException, ExecutionException, InterruptedException {
     pipelineExecution.get(duration.getMillis(), TimeUnit.MILLISECONDS);
     // Throws an exception if the job is not finished successfully in the given time.
-    // TODO: all streaming functionality
     return PipelineResult.State.DONE;
   }
 
@@ -149,7 +124,11 @@
     State oldState = this.state;
     this.state = newState;
     if (!oldState.isTerminal() && newState.isTerminal()) {
-      stop();
+      try {
+        onTerminalState.run();
+      } catch (Exception e) {
+        throw beamExceptionFrom(e);
+      }
     }
   }
 }
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
similarity index 97%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
rename to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
index d66e0c7..b1de9e9 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
@@ -146,10 +146,12 @@
             });
     executorService.shutdown();
 
-    // TODO: Streaming.
+    Runnable onTerminalState =
+        options.getUseActiveSparkSession()
+            ? () -> {}
+            : () -> translationContext.getSparkSession().stop();
     SparkStructuredStreamingPipelineResult result =
-        new SparkStructuredStreamingPipelineResult(
-            submissionFuture, translationContext.getSparkSession());
+        new SparkStructuredStreamingPipelineResult(submissionFuture, onTerminalState);
 
     if (options.getEnableSparkMetricSinks()) {
       registerMetricsSource(options.getAppName());
@@ -162,7 +164,6 @@
 
     if (options.getTestMode()) {
       result.waitUntilFinish();
-      result.stop();
     }
 
     return result;
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrar.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/AggregatorsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregators.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/NamedAggregatorsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/examples/WordCount.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
new file mode 100644
index 0000000..74bea7f
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.beam.runners.spark.structuredstreaming.aggregators.NamedAggregators;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** An adapter between the {@link NamedAggregators} and the Dropwizard {@link Metric} interface. */
+public class AggregatorMetric extends BeamMetricSet {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AggregatorMetric.class);
+
+  private final NamedAggregators namedAggregators;
+
+  private AggregatorMetric(NamedAggregators namedAggregators) {
+    this.namedAggregators = namedAggregators;
+  }
+
+  public static AggregatorMetric of(NamedAggregators namedAggregators) {
+    return new AggregatorMetric(namedAggregators);
+  }
+
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    for (Map.Entry<String, ?> entry : namedAggregators.renderAll().entrySet()) {
+      String name = prefix + "." + entry.getKey();
+      Object rawValue = entry.getValue();
+      if (rawValue != null) {
+        try {
+          Gauge<Double> gauge = staticGauge(rawValue);
+          if (filter.matches(name, gauge)) {
+            metrics.put(name, gauge);
+          }
+        } catch (NumberFormatException e) {
+          LOG.warn(
+              "Metric `{}` of type {} can't be reported, conversion to double failed.",
+              name,
+              rawValue.getClass().getSimpleName(),
+              e);
+        }
+      }
+    }
+    return metrics;
+  }
+
+  // Metric type is assumed to be compatible with Double
+  protected Gauge<Double> staticGauge(Object rawValue) throws NumberFormatException {
+    return rawValue instanceof Number
+        ? super.staticGauge((Number) rawValue)
+        : super.staticGauge(Double.parseDouble(rawValue.toString()));
+  }
+}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetricSource.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java
new file mode 100644
index 0000000..7095036
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricSet.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricFilter;
+import java.util.Map;
+import org.apache.beam.runners.spark.metrics.WithMetricsSupport;
+
+/**
+ * {@link BeamMetricSet} is a {@link Gauge} that returns a map of multiple metrics which get
+ * flattened in {@link WithMetricsSupport#getGauges()} for usage in {@link
+ * org.apache.spark.metrics.sink.Sink Spark metric sinks}.
+ *
+ * <p>Note: Recent versions of Dropwizard {@link com.codahale.metrics.MetricRegistry MetricRegistry}
+ * do not allow registering arbitrary implementations of {@link com.codahale.metrics.Metric Metrics}
+ * and require usage of {@link Gauge} here.
+ */
+// TODO: turn into MetricRegistry https://github.com/apache/beam/issues/22384
+abstract class BeamMetricSet implements Gauge<Map<String, Gauge<Double>>> {
+
+  @Override
+  public final Map<String, Gauge<Double>> getValue() {
+    return getValue("", MetricFilter.ALL);
+  }
+
+  protected abstract Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter);
+
+  protected Gauge<Double> staticGauge(Number number) {
+    return new ConstantGauge(number.doubleValue());
+  }
+
+  private static class ConstantGauge implements Gauge<Double> {
+    private final double value;
+
+    ConstantGauge(double value) {
+      this.value = value;
+    }
+
+    @Override
+    public Double getValue() {
+      return value;
+    }
+  }
+}
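To make the contract concrete, a minimal hypothetical subclass, assuming Dropwizard 4.x (where MetricFilter is a functional interface) and placed in the same package as the package-private BeamMetricSet; the class and metric names are invented for illustration.

```java
package org.apache.beam.runners.spark.structuredstreaming.metrics;

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricFilter;
import java.util.Collections;
import java.util.Map;

/** Hypothetical: exposes a single constant value as a prefixed, filterable gauge. */
class SingleValueMetricSet extends BeamMetricSet {

  private final double value;

  SingleValueMetricSet(double value) {
    this.value = value;
  }

  @Override
  protected Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
    String name = prefix + ".value"; // prefix is the registry key, see WithMetricsSupport below
    Gauge<Double> gauge = staticGauge(value); // wraps the number in a constant Gauge<Double>
    return filter.matches(name, gauge)
        ? Collections.singletonMap(name, gauge)
        : Collections.emptyMap();
  }
}
```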
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/CompositeSource.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsAccumulator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/MetricsContainerStepMapAccumulator.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
new file mode 100644
index 0000000..0cecae4
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
+import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates.not;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import org.apache.beam.sdk.metrics.DistributionResult;
+import org.apache.beam.sdk.metrics.GaugeResult;
+import org.apache.beam.sdk.metrics.MetricKey;
+import org.apache.beam.sdk.metrics.MetricName;
+import org.apache.beam.sdk.metrics.MetricQueryResults;
+import org.apache.beam.sdk.metrics.MetricResult;
+import org.apache.beam.sdk.metrics.MetricResults;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Streams;
+
+/**
+ * An adapter between the {@link SparkMetricsContainerStepMap} and the Dropwizard {@link Metric}
+ * interface.
+ */
+class SparkBeamMetric extends BeamMetricSet {
+
+  private static final String ILLEGAL_CHARACTERS = "[^A-Za-z0-9-]";
+
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    MetricResults metricResults =
+        asAttemptedOnlyMetricResults(MetricsAccumulator.getInstance().value());
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    MetricQueryResults allMetrics = metricResults.allMetrics();
+    for (MetricResult<Long> metricResult : allMetrics.getCounters()) {
+      putFiltered(metrics, filter, renderName(prefix, metricResult), metricResult.getAttempted());
+    }
+    for (MetricResult<DistributionResult> metricResult : allMetrics.getDistributions()) {
+      DistributionResult result = metricResult.getAttempted();
+      String baseName = renderName(prefix, metricResult);
+      putFiltered(metrics, filter, baseName + ".count", result.getCount());
+      putFiltered(metrics, filter, baseName + ".sum", result.getSum());
+      putFiltered(metrics, filter, baseName + ".min", result.getMin());
+      putFiltered(metrics, filter, baseName + ".max", result.getMax());
+      putFiltered(metrics, filter, baseName + ".mean", result.getMean());
+    }
+    for (MetricResult<GaugeResult> metricResult : allMetrics.getGauges()) {
+      putFiltered(
+          metrics,
+          filter,
+          renderName(prefix, metricResult),
+          metricResult.getAttempted().getValue());
+    }
+    return metrics;
+  }
+
+  @VisibleForTesting
+  @SuppressWarnings("nullness") // ok to have nullable elements on stream
+  static String renderName(String prefix, MetricResult<?> metricResult) {
+    MetricKey key = metricResult.getKey();
+    MetricName name = key.metricName();
+    String step = key.stepName();
+    return Streams.concat(
+            Stream.of(prefix), // prefix is not cleaned, should it be?
+            Stream.of(stripSuffix(normalizePart(step))),
+            Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart))
+        .filter(not(Strings::isNullOrEmpty))
+        .collect(Collectors.joining("."));
+  }
+
+  private static @Nullable String normalizePart(@Nullable String str) {
+    return str != null ? str.replaceAll(ILLEGAL_CHARACTERS, "_") : null;
+  }
+
+  private static @Nullable String stripSuffix(@Nullable String str) {
+    return str != null && str.endsWith("_") ? str.substring(0, str.length() - 1) : str;
+  }
+
+  private void putFiltered(
+      Map<String, Gauge<Double>> metrics, MetricFilter filter, String name, Number value) {
+    Gauge<Double> metric = staticGauge(value);
+    if (filter.matches(name, metric)) {
+      metrics.put(name, metric);
+    }
+  }
+}
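A worked example of renderName, taken from SparkBeamMetricTest earlier in this diff: illegal characters become underscores, empty parts are dropped, and a trailing underscore on the step is stripped.

```java
// Step "myStep.one.two(three)", namespace "myNameSpace//", name "myName()".
SparkBeamMetric.renderName("", result);
// -> "myStep_one_two_three.myNameSpace__.myName__"
SparkBeamMetric.renderName("prefix", result);
// -> "prefix.myStep_one_two_three.myNameSpace__.myName__"
```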
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricSource.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkMetricsContainerStepMap.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
new file mode 100644
index 0000000..d48a229
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import java.util.Map;
+import java.util.SortedMap;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSortedMap;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Ordering;
+
+/**
+ * A {@link MetricRegistry} decorator-like that supports {@link BeamMetricSet}s as {@link Gauge
+ * Gauges}.
+ *
+ * <p>{@link MetricRegistry} is not an interface, so this is not a by-the-book decorator. That said,
+ * it delegates all metric related getters to the "decorated" instance.
+ */
+@SuppressWarnings({"rawtypes"}) // required by interface
+public class WithMetricsSupport extends MetricRegistry {
+
+  private final MetricRegistry internalMetricRegistry;
+
+  private WithMetricsSupport(final MetricRegistry internalMetricRegistry) {
+    this.internalMetricRegistry = internalMetricRegistry;
+  }
+
+  public static WithMetricsSupport forRegistry(final MetricRegistry metricRegistry) {
+    return new WithMetricsSupport(metricRegistry);
+  }
+
+  @Override
+  public SortedMap<String, Timer> getTimers(final MetricFilter filter) {
+    return internalMetricRegistry.getTimers(filter);
+  }
+
+  @Override
+  public SortedMap<String, Meter> getMeters(final MetricFilter filter) {
+    return internalMetricRegistry.getMeters(filter);
+  }
+
+  @Override
+  public SortedMap<String, Histogram> getHistograms(final MetricFilter filter) {
+    return internalMetricRegistry.getHistograms(filter);
+  }
+
+  @Override
+  public SortedMap<String, Counter> getCounters(final MetricFilter filter) {
+    return internalMetricRegistry.getCounters(filter);
+  }
+
+  @Override
+  public SortedMap<String, Gauge> getGauges(final MetricFilter filter) {
+    ImmutableSortedMap.Builder<String, Gauge> builder =
+        new ImmutableSortedMap.Builder<>(Ordering.from(String.CASE_INSENSITIVE_ORDER));
+
+    Map<String, Gauge> gauges =
+        internalMetricRegistry.getGauges(
+            (n, m) -> filter.matches(n, m) || m instanceof BeamMetricSet);
+
+    for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
+      Gauge gauge = entry.getValue();
+      if (gauge instanceof BeamMetricSet) {
+        builder.putAll(((BeamMetricSet) gauge).getValue(entry.getKey(), filter));
+      } else {
+        builder.put(entry.getKey(), gauge);
+      }
+    }
+    return builder.build();
+  }
+}
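A hedged sketch of the flattening performed by getGauges(): a BeamMetricSet registered under some source name is expanded into one Gauge<Double> per Beam metric, each keyed with the registry name as its prefix, while ordinary gauges pass through unchanged. The registry key is illustrative; AggregatorMetric is used because, unlike SparkBeamMetric, it is public, and a no-arg NamedAggregators constructor is assumed.

```java
// Illustrative: what a Spark metrics sink sees after wrapping the registry.
MetricRegistry registry = new MetricRegistry();
registry.register("BeamApp.Aggregators", AggregatorMetric.of(new NamedAggregators()));

// getGauges() keeps ordinary gauges as-is and replaces each BeamMetricSet entry with its
// children, keyed as "<registry key>.<aggregator name>".
SortedMap<String, Gauge> gauges = WithMetricsSupport.forRegistry(registry).getGauges();
```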
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/package-info.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
new file mode 100644
index 0000000..c8f9139
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
+
+import com.codahale.metrics.MetricRegistry;
+import java.util.Properties;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
+import org.apache.spark.metrics.sink.Sink;
+
+/**
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to a CSV file.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.csv.class"="org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleCsvSink"
+ * "spark.metrics.conf.*.sink.csv.directory"="<output_directory>"
+ * "spark.metrics.conf.*.sink.csv.period"=10
+ * "spark.metrics.conf.*.sink.csv.unit"=seconds
+ * }</pre>
+ */
+public class CodahaleCsvSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.CsvSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
+  public CodahaleCsvSink(
+      final Properties properties,
+      final MetricRegistry metricRegistry,
+      final SecurityManager securityMgr) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public CodahaleCsvSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
+  }
+}
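As a hedged sketch, the same sink configuration from the Javadoc above can also be set programmatically on a SparkConf before the session is created; the output directory and app name here are illustrative placeholders.

    import org.apache.spark.SparkConf;
    import org.apache.spark.sql.SparkSession;

    public class CsvSinkConfigSketch {
      public static void main(String[] args) {
        // Equivalent of the metrics configuration shown in the Javadoc, applied before the
        // SparkSession (and therefore Spark's MetricsSystem) is created.
        SparkConf conf =
            new SparkConf()
                .setMaster("local[2]")
                .setAppName("csv-metrics-example")
                .set(
                    "spark.metrics.conf.*.sink.csv.class",
                    "org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleCsvSink")
                .set("spark.metrics.conf.*.sink.csv.directory", "/tmp/beam-metrics")
                .set("spark.metrics.conf.*.sink.csv.period", "10")
                .set("spark.metrics.conf.*.sink.csv.unit", "seconds");

        SparkSession session = SparkSession.builder().config(conf).getOrCreate();
        // ... run a pipeline against this session, then:
        session.stop();
      }
    }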
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
new file mode 100644
index 0000000..5640c96
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
+
+import com.codahale.metrics.MetricRegistry;
+import java.util.Properties;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
+import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
+import org.apache.spark.metrics.sink.Sink;
+
+/**
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to Graphite.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.graphite.class"="org.apache.beam.runners.spark.structuredstreaming.metrics.sink.CodahaleGraphiteSink"
+ * "spark.metrics.conf.*.sink.graphite.host"="<graphite_hostname>"
+ * "spark.metrics.conf.*.sink.graphite.port"=<graphite_listening_port>
+ * "spark.metrics.conf.*.sink.graphite.period"=10
+ * "spark.metrics.conf.*.sink.graphite.unit"=seconds
+ * "spark.metrics.conf.*.sink.graphite.prefix"="<optional_prefix>"
+ * "spark.metrics.conf.*.sink.graphite.regex"="<optional_regex_to_send_matching_metrics>"
+ * }</pre>
+ */
+public class CodahaleGraphiteSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.GraphiteSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
+  public CodahaleGraphiteSink(
+      final Properties properties,
+      final MetricRegistry metricRegistry,
+      final org.apache.spark.SecurityManager securityMgr) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public CodahaleGraphiteSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
+  }
+}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
similarity index 88%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
index 766065f..aed287b 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/AbstractTranslationContext.java
@@ -38,7 +38,6 @@
 import org.apache.beam.sdk.values.PValue;
 import org.apache.beam.sdk.values.TupleTag;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
-import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.ForeachFunction;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.ForeachWriter;
@@ -75,29 +74,7 @@
   private final Map<PCollectionView<?>, Dataset<?>> broadcastDataSets;
 
   public AbstractTranslationContext(SparkStructuredStreamingPipelineOptions options) {
-    SparkConf sparkConf = new SparkConf();
-    sparkConf.setMaster(options.getSparkMaster());
-    sparkConf.setAppName(options.getAppName());
-    if (options.getFilesToStage() != null && !options.getFilesToStage().isEmpty()) {
-      sparkConf.setJars(options.getFilesToStage().toArray(new String[0]));
-    }
-
-    // By default, Spark defines 200 as a number of sql partitions. This seems too much for local
-    // mode, so try to align with value of "sparkMaster" option in this case.
-    // We should not overwrite this value (or any user-defined spark configuration value) if the
-    // user has already configured it.
-    String sparkMaster = options.getSparkMaster();
-    if (sparkMaster != null
-        && sparkMaster.startsWith("local[")
-        && System.getProperty("spark.sql.shuffle.partitions") == null) {
-      int numPartitions =
-          Integer.parseInt(sparkMaster.substring("local[".length(), sparkMaster.length() - 1));
-      if (numPartitions > 0) {
-        sparkConf.set("spark.sql.shuffle.partitions", String.valueOf(numPartitions));
-      }
-    }
-
-    this.sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
+    this.sparkSession = SparkSessionFactory.getOrCreateSession(options);
     this.serializablePipelineOptions = new SerializablePipelineOptions(options);
     this.datasets = new HashMap<>();
     this.leaves = new HashSet<>();
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/PipelineTranslator.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java
new file mode 100644
index 0000000..d8430f5
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkSessionFactory.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation;
+
+import java.util.List;
+import javax.annotation.Nullable;
+import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
+import org.apache.spark.SparkConf;
+import org.apache.spark.sql.SparkSession;
+
+public class SparkSessionFactory {
+
+  /**
+   * Gets the active {@link SparkSession} or creates a new one using {@link
+   * SparkStructuredStreamingPipelineOptions}.
+   */
+  public static SparkSession getOrCreateSession(SparkStructuredStreamingPipelineOptions options) {
+    if (options.getUseActiveSparkSession()) {
+      return SparkSession.active();
+    }
+    return sessionBuilder(options.getSparkMaster(), options.getAppName(), options.getFilesToStage())
+        .getOrCreate();
+  }
+
+  /** Creates a Spark session builder with some optimizations for local mode, e.g. in tests. */
+  public static SparkSession.Builder sessionBuilder(String master) {
+    return sessionBuilder(master, null, null);
+  }
+
+  private static SparkSession.Builder sessionBuilder(
+      String master, @Nullable String appName, @Nullable List<String> jars) {
+    SparkConf sparkConf = new SparkConf();
+    sparkConf.setMaster(master);
+    if (appName != null) {
+      sparkConf.setAppName(appName);
+    }
+    if (jars != null && !jars.isEmpty()) {
+      sparkConf.setJars(jars.toArray(new String[0]));
+    }
+
+    // By default, Spark uses 200 SQL shuffle partitions, which is far too many for local
+    // mode, so align the partition count with the parallelism of the "sparkMaster" option instead.
+    // Never overwrite this value (or any other user-defined Spark configuration value) if the
+    // user has already configured it.
+    if (master != null
+        && master.startsWith("local[")
+        && System.getProperty("spark.sql.shuffle.partitions") == null) {
+      int numPartitions =
+          Integer.parseInt(master.substring("local[".length(), master.length() - 1));
+      if (numPartitions > 0) {
+        sparkConf.set("spark.sql.shuffle.partitions", String.valueOf(numPartitions));
+      }
+    }
+    return SparkSession.builder().config(sparkConf);
+  }
+}
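A small usage sketch of the builder and of the local-mode partition alignment above (the app name is an illustrative placeholder): for "local[4]" the parallelism "4" is parsed out of the master string and, unless already configured, used for spark.sql.shuffle.partitions instead of Spark's default of 200.

    import org.apache.beam.runners.spark.structuredstreaming.translation.SparkSessionFactory;
    import org.apache.spark.sql.SparkSession;

    public class SparkSessionFactorySketch {
      public static void main(String[] args) {
        SparkSession session =
            SparkSessionFactory.sessionBuilder("local[4]")
                .appName("session-factory-example")
                .getOrCreate();

        // Expected to print 4 when the property was not preconfigured via a system property.
        System.out.println(session.conf().get("spark.sql.shuffle.partitions"));
        session.stop();
      }
    }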
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/SparkTransformOverrides.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/TransformTranslator.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/AggregatorCombiner.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombinePerKeyTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CreatePCollectionViewTranslatorBatch.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DatasetSourceBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DatasetSourceBatch.java
index f2fd800..46bde96 100644
--- a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DatasetSourceBatch.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DatasetSourceBatch.java
@@ -34,8 +34,8 @@
 import org.apache.beam.sdk.io.BoundedSource;
 import org.apache.beam.sdk.options.PipelineOptions;
 import org.apache.beam.sdk.util.WindowedValue;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSet;
-import org.apache.parquet.Strings;
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.connector.catalog.SupportsRead;
 import org.apache.spark.sql.connector.catalog.Table;
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnFunction.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/DoFnRunnerWithMetrics.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ImpulseTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/PipelineTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ProcessContext.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReadSourceTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ReshuffleTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTranslatorBatch.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/GroupAlsoByWindowViaOutputBufferFn.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/NoOpStepContext.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/SparkSideInputReader.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/functions/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/CoderHelpers.java
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
index 39a7150..c7d69c0 100644
--- a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderFactory.java
@@ -17,33 +17,48 @@
  */
 package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
 
-import static org.apache.spark.sql.types.DataTypes.BinaryType;
-
-import org.apache.beam.sdk.coders.Coder;
-import org.apache.spark.sql.Encoder;
-import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal;
+import java.lang.reflect.Constructor;
 import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
-import org.apache.spark.sql.catalyst.expressions.BoundReference;
-import org.apache.spark.sql.catalyst.expressions.Cast;
 import org.apache.spark.sql.catalyst.expressions.Expression;
-import org.apache.spark.sql.types.ObjectType;
+import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke;
+import org.apache.spark.sql.types.DataType;
+import scala.collection.immutable.Nil$;
+import scala.collection.mutable.WrappedArray;
 import scala.reflect.ClassTag;
-import scala.reflect.ClassTag$;
 
 public class EncoderFactory {
+  // Constructor used to reflectively create StaticInvoke expressions; its arity differs across Spark 3 versions
+  private static final Constructor<StaticInvoke> STATIC_INVOKE_CONSTRUCTOR =
+      (Constructor<StaticInvoke>) StaticInvoke.class.getConstructors()[0];
 
-  public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
-    Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
-    ClassTag<T> classTag = ClassTag$.MODULE$.apply(clazz);
-    Expression serializer =
-        new EncoderHelpers.EncodeUsingBeamCoder<>(
-            new BoundReference(0, new ObjectType(clazz), true), coder);
-    Expression deserializer =
-        new EncoderHelpers.DecodeUsingBeamCoder<>(
-            new Cast(
-                new GetColumnByOrdinal(0, BinaryType), BinaryType, scala.Option.<String>empty()),
-            classTag,
-            coder);
-    return new ExpressionEncoder<>(serializer, deserializer, classTag);
+  static <T> ExpressionEncoder<T> create(
+      Expression serializer, Expression deserializer, Class<? super T> clazz) {
+    return new ExpressionEncoder<>(serializer, deserializer, ClassTag.apply(clazz));
+  }
+
+  /**
+   * Invokes method {@code fun} on class {@code cls}, immediately propagating {@code null} if any
+   * input arg is {@code null}.
+   *
+   * <p>To address breaking interface changes between various versions of Spark 3, these expressions
+   * are created reflectively. This is fine as it is only needed once, when creating the query plan.
+   */
+  static Expression invokeIfNotNull(Class<?> cls, String fun, DataType type, Expression... args) {
+    try {
+      switch (STATIC_INVOKE_CONSTRUCTOR.getParameterCount()) {
+        case 6:
+          // Spark 3.1.x
+          return STATIC_INVOKE_CONSTRUCTOR.newInstance(
+              cls, type, fun, new WrappedArray.ofRef<>(args), true, true);
+        case 8:
+          // Spark 3.2.x, 3.3.x
+          return STATIC_INVOKE_CONSTRUCTOR.newInstance(
+              cls, type, fun, new WrappedArray.ofRef<>(args), Nil$.MODULE$, true, true, true);
+        default:
+          throw new RuntimeException("Unsupported version of Spark");
+      }
+    } catch (IllegalArgumentException | ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
   }
 }
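A minimal sketch of the version check this dispatch relies on, assuming Spark is on the classpath: it inspects the arity of the first public StaticInvoke constructor in the same way the factory does.

    import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke;

    public class StaticInvokeAritySketch {
      public static void main(String[] args) {
        // Mirrors EncoderFactory: Spark 3.1.x exposes a 6-argument StaticInvoke
        // constructor, Spark 3.2.x/3.3.x an 8-argument one.
        int arity = StaticInvoke.class.getConstructors()[0].getParameterCount();
        if (arity == 6) {
          System.out.println("Spark 3.1.x StaticInvoke signature");
        } else if (arity == 8) {
          System.out.println("Spark 3.2.x / 3.3.x StaticInvoke signature");
        } else {
          System.out.println("Unsupported StaticInvoke arity: " + arity);
        }
      }
    }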
diff --git a/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
new file mode 100644
index 0000000..68738cf
--- /dev/null
+++ b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
+
+import static org.apache.spark.sql.types.DataTypes.BinaryType;
+
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.spark.sql.Encoder;
+import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal;
+import org.apache.spark.sql.catalyst.expressions.BoundReference;
+import org.apache.spark.sql.catalyst.expressions.Expression;
+import org.apache.spark.sql.catalyst.expressions.Literal;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.ObjectType;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class EncoderHelpers {
+  private static final DataType OBJECT_TYPE = new ObjectType(Object.class);
+
+  /**
+   * Wraps a Beam {@link Coder} into a Spark {@link Encoder} using Catalyst expression encoders
+   * (which use Java code generation).
+   */
+  public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
+    Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
+    // Class T could be private, therefore use OBJECT_TYPE to not risk an IllegalAccessError
+    return EncoderFactory.create(
+        beamSerializer(rootRef(OBJECT_TYPE, true), coder),
+        beamDeserializer(rootCol(BinaryType), coder),
+        clazz);
+  }
+
+  /** Catalyst Expression that serializes elements using Beam {@link Coder}. */
+  private static <T> Expression beamSerializer(Expression obj, Coder<T> coder) {
+    Expression[] args = {obj, lit(coder, Coder.class)};
+    return EncoderFactory.invokeIfNotNull(CoderHelpers.class, "toByteArray", BinaryType, args);
+  }
+
+  /** Catalyst Expression that deserializes elements using Beam {@link Coder}. */
+  private static <T> Expression beamDeserializer(Expression bytes, Coder<T> coder) {
+    Expression[] args = {bytes, lit(coder, Coder.class)};
+    return EncoderFactory.invokeIfNotNull(CoderHelpers.class, "fromByteArray", OBJECT_TYPE, args);
+  }
+
+  private static Expression rootRef(DataType dt, boolean nullable) {
+    return new BoundReference(0, dt, nullable);
+  }
+
+  private static Expression rootCol(DataType dt) {
+    return new GetColumnByOrdinal(0, dt);
+  }
+
+  private static <T extends @NonNull Object> Literal lit(T obj, Class<? extends T> cls) {
+    return Literal.fromObject(obj, new ObjectType(cls));
+  }
+}
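As a usage sketch, an encoder backed by a Beam coder can be used like any other Spark Encoder; the master and app name here are illustrative placeholders.

    import java.util.Arrays;
    import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers;
    import org.apache.beam.sdk.coders.StringUtf8Coder;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Encoder;
    import org.apache.spark.sql.SparkSession;

    public class EncoderHelpersSketch {
      public static void main(String[] args) {
        SparkSession session =
            SparkSession.builder().master("local[2]").appName("beam-encoder-example").getOrCreate();

        // Encoder whose (de)serialization is delegated to the Beam StringUtf8Coder through
        // the StaticInvoke expressions built in EncoderFactory.
        Encoder<String> encoder = EncoderHelpers.fromBeamCoder(StringUtf8Coder.of());

        Dataset<String> words = session.createDataset(Arrays.asList("hi", "there"), encoder);
        words.show();
        session.stop();
      }
    }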
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/KVHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/MultiOutputCoder.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/RowHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SchemaHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/SideInputBroadcast.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/WindowingHelpers.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/PipelineTranslatorStreaming.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/ReadSourceTranslatorStreaming.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/package-info.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/CachedSideInputReader.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/SideInputStorage.java
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java b/runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
similarity index 100%
copy from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
copy to runners/spark/3/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/utils/package-info.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
similarity index 75%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
index f68df83..33eef26 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
+++ b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkSessionRule.java
@@ -23,6 +23,9 @@
 import java.util.Arrays;
 import java.util.Map;
 import javax.annotation.Nullable;
+import org.apache.beam.runners.spark.structuredstreaming.translation.SparkSessionFactory;
+import org.apache.beam.sdk.options.PipelineOptions;
+import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.values.KV;
 import org.apache.spark.sql.SparkSession;
 import org.junit.rules.ExternalResource;
@@ -34,13 +37,12 @@
   private transient @Nullable SparkSession session = null;
 
   public SparkSessionRule(String sparkMaster, Map<String, String> sparkConfig) {
-    builder = SparkSession.builder();
+    builder = SparkSessionFactory.sessionBuilder(sparkMaster);
     sparkConfig.forEach(builder::config);
-    builder.master(sparkMaster);
   }
 
   public SparkSessionRule(KV<String, String>... sparkConfig) {
-    this("local", sparkConfig);
+    this("local[2]", sparkConfig);
   }
 
   public SparkSessionRule(String sparkMaster, KV<String, String>... sparkConfig) {
@@ -54,6 +56,19 @@
     return session;
   }
 
+  public PipelineOptions createPipelineOptions() {
+    return configure(TestPipeline.testingPipelineOptions());
+  }
+
+  public PipelineOptions configure(PipelineOptions options) {
+    SparkStructuredStreamingPipelineOptions opts =
+        options.as(SparkStructuredStreamingPipelineOptions.class);
+    opts.setUseActiveSparkSession(true);
+    opts.setRunner(SparkStructuredStreamingRunner.class);
+    opts.setTestMode(true);
+    return opts;
+  }
+
   @Override
   public Statement apply(Statement base, Description description) {
     builder.appName(description.getDisplayName());
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunnerRegistrarTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/StructuredStreamingPipelineStateTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
similarity index 75%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
index 8649e91..f994f77 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
+++ b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetrics.java
@@ -17,22 +17,22 @@
  */
 package org.apache.beam.runners.spark.structuredstreaming.aggregators.metrics.sink;
 
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
+import java.util.Collection;
 import java.util.Properties;
 import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.apache.spark.metrics.sink.Sink;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
 /** An in-memory {@link Sink} implementation for tests. */
-@RunWith(JUnit4.class)
 public class InMemoryMetrics implements Sink {
 
   private static WithMetricsSupport extendedMetricsRegistry;
   private static MetricRegistry internalMetricRegistry;
 
+  // Constructor for Spark 3.1
   @SuppressWarnings("UnusedParameters")
   public InMemoryMetrics(
       final Properties properties,
@@ -42,26 +42,24 @@
     internalMetricRegistry = metricRegistry;
   }
 
-  @SuppressWarnings("TypeParameterUnusedInFormals")
-  public static <T> T valueOf(final String name) {
-    final T retVal;
+  // Constructor for Spark >= 3.2
+  @SuppressWarnings("UnusedParameters")
+  public InMemoryMetrics(final Properties properties, final MetricRegistry metricRegistry) {
+    extendedMetricsRegistry = WithMetricsSupport.forRegistry(metricRegistry);
+    internalMetricRegistry = metricRegistry;
+  }
 
+  @SuppressWarnings({"TypeParameterUnusedInFormals", "rawtypes"})
+  public static <T> T valueOf(final String name) {
     // this might fail in case we have multiple aggregators with the same suffix after
     // the last dot, but it should be good enough for tests.
-    if (extendedMetricsRegistry != null
-        && extendedMetricsRegistry.getGauges().keySet().stream()
-            .anyMatch(Predicates.containsPattern(name + "$")::apply)) {
-      String key =
-          extendedMetricsRegistry.getGauges().keySet().stream()
-              .filter(Predicates.containsPattern(name + "$")::apply)
-              .findFirst()
-              .get();
-      retVal = (T) extendedMetricsRegistry.getGauges().get(key).getValue();
+    if (extendedMetricsRegistry != null) {
+      Collection<Gauge> matches =
+          extendedMetricsRegistry.getGauges((n, m) -> n.endsWith(name)).values();
+      return matches.isEmpty() ? null : (T) Iterables.getOnlyElement(matches).getValue();
     } else {
-      retVal = null;
+      return null;
     }
-
-    return retVal;
   }
 
   @SuppressWarnings("WeakerAccess")
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/InMemoryMetricsSinkRule.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
similarity index 65%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
index 40b5036..2f02656 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
+++ b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/aggregators/metrics/sink/SparkMetricsSinkTest.java
@@ -21,51 +21,39 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 
-import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
-import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingRunner;
+import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
 import org.apache.beam.runners.spark.structuredstreaming.examples.WordCount;
-import org.apache.beam.sdk.Pipeline;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
-import org.apache.beam.sdk.options.PipelineOptionsFactory;
 import org.apache.beam.sdk.testing.PAssert;
+import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.transforms.Create;
 import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSet;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
+import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExternalResource;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
-/**
- * TODO: add testInStreamingMode() once streaming support will be implemented.
- *
- * <p>A test that verifies Beam metrics are reported to Spark's metrics sink in both batch and
- * streaming modes.
- */
-@Ignore("Has been failing since at least c350188ef7a8704c7336f3c20a1ab2144abbcd4a")
-@RunWith(JUnit4.class)
+/** A test that verifies Beam metrics are reported to Spark's metrics sink in batch mode. */
 public class SparkMetricsSinkTest {
-  @Rule public ExternalResource inMemoryMetricsSink = new InMemoryMetricsSinkRule();
+
+  @ClassRule
+  public static final SparkSessionRule SESSION =
+      new SparkSessionRule(
+          KV.of("spark.metrics.conf.*.sink.memory.class", InMemoryMetrics.class.getName()));
+
+  @Rule public final ExternalResource inMemoryMetricsSink = new InMemoryMetricsSinkRule();
+
+  @Rule
+  public final TestPipeline pipeline = TestPipeline.fromOptions(SESSION.createPipelineOptions());
 
   private static final ImmutableList<String> WORDS =
       ImmutableList.of("hi there", "hi", "hi sue bob", "hi sue", "", "bob hi");
   private static final ImmutableSet<String> EXPECTED_COUNTS =
       ImmutableSet.of("hi: 5", "there: 1", "sue: 2", "bob: 2");
-  private static Pipeline pipeline;
-
-  @BeforeClass
-  public static void beforeClass() {
-    SparkStructuredStreamingPipelineOptions options =
-        PipelineOptionsFactory.create().as(SparkStructuredStreamingPipelineOptions.class);
-    options.setRunner(SparkStructuredStreamingRunner.class);
-    options.setTestMode(true);
-    pipeline = Pipeline.create(options);
-  }
 
   @Test
   public void testInBatchMode() throws Exception {
@@ -76,9 +64,10 @@
             .apply(Create.of(WORDS).withCoder(StringUtf8Coder.of()))
             .apply(new WordCount.CountWords())
             .apply(MapElements.via(new WordCount.FormatAsTextFn()));
+
     PAssert.that(output).containsInAnyOrder(EXPECTED_COUNTS);
     pipeline.run();
 
-    assertThat(InMemoryMetrics.<Double>valueOf("emptyLines"), is(1d));
+    assertThat(InMemoryMetrics.valueOf("emptyLines"), is(1d));
   }
 }
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
similarity index 71%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
index a698934..fd0aa35 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/BeamMetricTest.java
+++ b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetricTest.java
@@ -24,12 +24,9 @@
 import org.apache.beam.sdk.metrics.MetricName;
 import org.apache.beam.sdk.metrics.MetricResult;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
 /** Test BeamMetric. */
-@RunWith(JUnit4.class)
-public class BeamMetricTest {
+public class SparkBeamMetricTest {
   @Test
   public void testRenderName() {
     MetricResult<Object> metricResult =
@@ -38,10 +35,25 @@
                 "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
             123,
             456);
-    String renderedName = new SparkBeamMetric().renderName(metricResult);
+    String renderedName = SparkBeamMetric.renderName("", metricResult);
     assertThat(
         "Metric name was not rendered correctly",
         renderedName,
         equalTo("myStep_one_two_three.myNameSpace__.myName__"));
   }
+
+  @Test
+  public void testRenderNameWithPrefix() {
+    MetricResult<Object> metricResult =
+        MetricResult.create(
+            MetricKey.create(
+                "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
+            123,
+            456);
+    String renderedName = SparkBeamMetric.renderName("prefix", metricResult);
+    assertThat(
+        "Metric name was not rendered correctly",
+        renderedName,
+        equalTo("prefix.myStep_one_two_three.myNameSpace__.myName__"));
+  }
 }
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/CombineTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ComplexSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/FlattenTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/ParDoTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/SimpleSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/WindowAssignTest.java
diff --git a/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
new file mode 100644
index 0000000..c8a8fba
--- /dev/null
+++ b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
+
+import static java.util.Arrays.asList;
+import static org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.fromBeamCoder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.DelegateCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.coders.VarIntCoder;
+import org.apache.beam.sdk.values.TypeDescriptor;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoder;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Test of the wrapping of Beam Coders as Spark ExpressionEncoders. */
+@RunWith(JUnit4.class)
+public class EncoderHelpersTest {
+
+  @ClassRule public static SparkSessionRule sessionRule = new SparkSessionRule();
+
+  private <T> Dataset<T> createDataset(List<T> data, Encoder<T> encoder) {
+    Dataset<T> ds = sessionRule.getSession().createDataset(data, encoder);
+    ds.printSchema();
+    return ds;
+  }
+
+  @Test
+  public void beamCoderToSparkEncoderTest() {
+    List<Integer> data = Arrays.asList(1, 2, 3);
+    Dataset<Integer> dataset = createDataset(data, EncoderHelpers.fromBeamCoder(VarIntCoder.of()));
+    assertEquals(data, dataset.collectAsList());
+  }
+
+  @Test
+  public void testBeamEncoderOfPrivateType() {
+    // Verify concrete types are not used in coder generation.
+    // In case of private types this would cause an IllegalAccessError.
+    List<PrivateString> data = asList(new PrivateString("1"), new PrivateString("2"));
+    Dataset<PrivateString> dataset = createDataset(data, fromBeamCoder(PrivateString.CODER));
+    assertThat(dataset.collect(), equalTo(data.toArray()));
+  }
+
+  private static class PrivateString {
+    private static final Coder<PrivateString> CODER =
+        DelegateCoder.of(
+            StringUtf8Coder.of(),
+            str -> str.string,
+            PrivateString::new,
+            new TypeDescriptor<PrivateString>() {});
+
+    private final String string;
+
+    public PrivateString(String string) {
+      this.string = string;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      PrivateString that = (PrivateString) o;
+      return Objects.equals(string, that.string);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(string);
+    }
+  }
+}
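
```java
// A minimal usage sketch of the encoder bridge exercised by the test above. It assumes a local
// SparkSession and a KvCoder-based element type; neither appears in the patch, and the class name
// below is illustrative only.
import java.util.Arrays;
import java.util.List;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers;
import org.apache.beam.sdk.coders.KvCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarIntCoder;
import org.apache.beam.sdk.values.KV;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.SparkSession;

public class BeamCoderEncoderExample {
  public static void main(String[] args) {
    // Local session for illustration; the test above uses SparkSessionRule instead.
    SparkSession spark =
        SparkSession.builder().master("local[2]").appName("beam-coder-encoder").getOrCreate();

    // Wrap a composite Beam coder as a Spark Encoder, analogous to fromBeamCoder(VarIntCoder.of()).
    Encoder<KV<String, Integer>> encoder =
        EncoderHelpers.fromBeamCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()));

    List<KV<String, Integer>> data = Arrays.asList(KV.of("a", 1), KV.of("b", 2));
    Dataset<KV<String, Integer>> dataset = spark.createDataset(data, encoder);

    // Elements are serialized and deserialized through the Beam coder.
    dataset.collectAsList().forEach(kv -> System.out.println(kv.getKey() + " -> " + kv.getValue()));
    spark.stop();
  }
}
```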
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/streaming/SimpleSourceTest.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
similarity index 100%
copy from runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
copy to runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
diff --git a/runners/spark/spark_runner.gradle b/runners/spark/spark_runner.gradle
index 2fac13b..78b4cdb 100644
--- a/runners/spark/spark_runner.gradle
+++ b/runners/spark/spark_runner.gradle
@@ -17,7 +17,6 @@
  */
 
 import groovy.json.JsonOutput
-import java.util.stream.Collectors
 
 apply plugin: 'org.apache.beam.module'
 applyJavaNature(
@@ -123,6 +122,19 @@
   if(project.hasProperty("rerun-tests")) { 	outputs.upToDateWhen {false} }
 }
 
+class SparkComponents {
+  List<String> components
+}
+
+extensions.create('spark', SparkComponents)
+spark.components = [
+    "org.apache.spark:spark-core_$spark_scala_version",
+    "org.apache.spark:spark-network-common_$spark_scala_version",
+    "org.apache.spark:spark-sql_$spark_scala_version",
+    "org.apache.spark:spark-streaming_$spark_scala_version",
+    "org.apache.spark:spark-catalyst_$spark_scala_version"
+]
+
 dependencies {
   implementation project(path: ":model:pipeline", configuration: "shadow")
   implementation project(path: ":sdks:java:core", configuration: "shadow")
@@ -140,13 +152,11 @@
   implementation project(":sdks:java:fn-execution")
   implementation library.java.vendored_grpc_1_43_2
   implementation library.java.vendored_guava_26_0_jre
-  implementation "com.codahale.metrics:metrics-core:3.0.1"
-  provided "org.apache.spark:spark-core_$spark_scala_version:$spark_version"
-  provided "org.apache.spark:spark-network-common_$spark_scala_version:$spark_version"
+  implementation "io.dropwizard.metrics:metrics-core:3.1.5" // version used by Spark 2.4
+  spark.components.each { component ->
+    provided "$component:$spark_version"
+  }
   permitUnusedDeclared "org.apache.spark:spark-network-common_$spark_scala_version:$spark_version"
-  provided "org.apache.spark:spark-sql_$spark_scala_version:$spark_version"
-  provided "org.apache.spark:spark-streaming_$spark_scala_version:$spark_version"
-  provided "org.apache.spark:spark-catalyst_$spark_scala_version:$spark_version"
   if (project.property("spark_scala_version").equals("2.11")) {
     compileOnly "org.scala-lang:scala-library:2.11.12"
     runtimeOnly library.java.jackson_module_scala_2_11
@@ -154,19 +164,15 @@
     compileOnly "org.scala-lang:scala-library:2.12.15"
     runtimeOnly library.java.jackson_module_scala_2_12
   }
-  if (project.property("spark_version").equals("3.1.2")) {
-    compileOnly "org.apache.parquet:parquet-common:1.10.1"
-  }
   // Force paranamer 2.8 to avoid issues when using Scala 2.12
   runtimeOnly "com.thoughtworks.paranamer:paranamer:2.8"
   provided library.java.hadoop_common
   provided library.java.commons_io
   provided library.java.hamcrest
   provided "com.esotericsoftware:kryo-shaded:4.0.2"
-  testImplementation "org.apache.spark:spark-core_$spark_scala_version:$spark_version"
-  testImplementation "org.apache.spark:spark-network-common_$spark_scala_version:$spark_version"
-  testImplementation "org.apache.spark:spark-sql_$spark_scala_version:$spark_version"
-  testImplementation "org.apache.spark:spark-streaming_$spark_scala_version:$spark_version"
+  spark.components.each { component ->
+    testImplementation "$component:$spark_version"
+  }
   testImplementation project(":sdks:java:io:kafka")
   testImplementation project(path: ":sdks:java:core", configuration: "shadowTest")
   // SparkStateInternalsTest extends abstract StateInternalsTest
@@ -194,21 +200,15 @@
 def gcpProject = project.findProperty('gcpProject') ?: 'apache-beam-testing'
 def tempLocation = project.findProperty('tempLocation') ?: 'gs://temp-storage-for-end-to-end-tests'
 
-configurations.testRuntimeClasspath {
-  // Testing the Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath
+configurations.all {
+  // Prevent StackOverflowError if slf4j-jdk14 is on the classpath
   exclude group: "org.slf4j", module: "slf4j-jdk14"
+  // Avoid any transitive usage of the old codahale group to make dependency resolution deterministic
+  exclude group: "com.codahale.metrics", module: "metrics-core"
 }
 
-configurations.validatesRunner {
-  // Testing the Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath
-  exclude group: "org.slf4j", module: "slf4j-jdk14"
-}
-
-
 hadoopVersions.each { kv ->
   configurations."hadoopVersion$kv.key" {
-    // Testing the Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath
-    exclude group: "org.slf4j", module: "slf4j-jdk14"
     resolutionStrategy {
       force "org.apache.hadoop:hadoop-common:$kv.value"
     }
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/AggregatorMetric.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/AggregatorMetric.java
index 18a3785..41db37c 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/AggregatorMetric.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/AggregatorMetric.java
@@ -17,23 +17,58 @@
  */
 package org.apache.beam.runners.spark.metrics;
 
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import java.util.HashMap;
+import java.util.Map;
 import org.apache.beam.runners.spark.aggregators.NamedAggregators;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-/** An adapter between the {@link NamedAggregators} and Codahale's {@link Metric} interface. */
-public class AggregatorMetric implements Metric {
+/** An adapter between the {@link NamedAggregators} and the Dropwizard {@link Metric} interface. */
+public class AggregatorMetric extends BeamMetricSet {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AggregatorMetric.class);
 
   private final NamedAggregators namedAggregators;
 
-  private AggregatorMetric(final NamedAggregators namedAggregators) {
+  private AggregatorMetric(NamedAggregators namedAggregators) {
     this.namedAggregators = namedAggregators;
   }
 
-  public static AggregatorMetric of(final NamedAggregators namedAggregators) {
+  public static AggregatorMetric of(NamedAggregators namedAggregators) {
     return new AggregatorMetric(namedAggregators);
   }
 
-  NamedAggregators getNamedAggregators() {
-    return namedAggregators;
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    for (Map.Entry<String, ?> entry : namedAggregators.renderAll().entrySet()) {
+      String name = prefix + "." + entry.getKey();
+      Object rawValue = entry.getValue();
+      if (rawValue != null) {
+        try {
+          Gauge<Double> gauge = staticGauge(rawValue);
+          if (filter.matches(name, gauge)) {
+            metrics.put(name, gauge);
+          }
+        } catch (NumberFormatException e) {
+          LOG.warn(
+              "Metric `{}` of type {} can't be reported, conversion to double failed.",
+              name,
+              rawValue.getClass().getSimpleName(),
+              e);
+        }
+      }
+    }
+    return metrics;
+  }
+
+  // The value is expected to be a Number, or a String parseable as a double; anything else throws NumberFormatException
+  protected Gauge<Double> staticGauge(Object rawValue) throws NumberFormatException {
+    return rawValue instanceof Number
+        ? super.staticGauge((Number) rawValue)
+        : super.staticGauge(Double.parseDouble(rawValue.toString()));
   }
 }
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/BeamMetricSet.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/BeamMetricSet.java
new file mode 100644
index 0000000..2e2970f
--- /dev/null
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/BeamMetricSet.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.spark.metrics;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricFilter;
+import java.util.Map;
+
+/**
+ * {@link BeamMetricSet} is a {@link Gauge} that returns a map of multiple metrics which get
+ * flattened in {@link WithMetricsSupport#getGauges()} for usage in {@link
+ * org.apache.spark.metrics.sink.Sink Spark metric sinks}.
+ *
+ * <p>Note: Recent versions of Dropwizard {@link com.codahale.metrics.MetricRegistry MetricRegistry}
+ * do not allow registering arbitrary implementations of {@link com.codahale.metrics.Metric Metrics}
+ * and require usage of {@link Gauge} here.
+ */
+// TODO: turn into MetricRegistry https://github.com/apache/beam/issues/22384
+abstract class BeamMetricSet implements Gauge<Map<String, Gauge<Double>>> {
+
+  @Override
+  public final Map<String, Gauge<Double>> getValue() {
+    return getValue("", MetricFilter.ALL);
+  }
+
+  protected abstract Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter);
+
+  protected Gauge<Double> staticGauge(Number number) {
+    return new ConstantGauge(number.doubleValue());
+  }
+
+  private static class ConstantGauge implements Gauge<Double> {
+    private final double value;
+
+    ConstantGauge(double value) {
+      this.value = value;
+    }
+
+    @Override
+    public Double getValue() {
+      return value;
+    }
+  }
+}
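
```java
// A minimal sketch of a BeamMetricSet subclass, to make the new contract concrete. ExampleMetricSet
// is hypothetical (not part of the patch) and sits in the same package because the base class and
// staticGauge are package-private/protected. It exposes two constant child gauges, which
// WithMetricsSupport#getGauges() later flattens under the name the set was registered with.
package org.apache.beam.runners.spark.metrics;

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricFilter;
import java.util.HashMap;
import java.util.Map;

/** Hypothetical metric set exposing two constant gauges (illustration only). */
class ExampleMetricSet extends BeamMetricSet {

  @Override
  protected Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
    Map<String, Gauge<Double>> gauges = new HashMap<>();
    putIfMatching(gauges, filter, prefix + ".elementsProcessed", 42);
    putIfMatching(gauges, filter, prefix + ".bytesWritten", 1024);
    return gauges;
  }

  private void putIfMatching(
      Map<String, Gauge<Double>> gauges, MetricFilter filter, String name, Number value) {
    Gauge<Double> gauge = staticGauge(value); // constant Double gauge provided by the base class
    if (filter.matches(name, gauge)) {
      gauges.put(name, gauge);
    }
  }
}
```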
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/SparkBeamMetric.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/SparkBeamMetric.java
index 298db0f..1eb8349 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/SparkBeamMetric.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/SparkBeamMetric.java
@@ -17,13 +17,17 @@
  */
 package org.apache.beam.runners.spark.metrics;
 
-import static java.util.stream.Collectors.toList;
 import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
+import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates.not;
 
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Metric;
-import java.util.ArrayList;
+import com.codahale.metrics.MetricFilter;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
 import org.apache.beam.runners.core.metrics.MetricsContainerStepMap;
 import org.apache.beam.sdk.metrics.DistributionResult;
 import org.apache.beam.sdk.metrics.GaugeResult;
@@ -33,61 +37,72 @@
 import org.apache.beam.sdk.metrics.MetricResult;
 import org.apache.beam.sdk.metrics.MetricResults;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Streams;
 
 /**
- * An adapter between the {@link MetricsContainerStepMap} and Codahale's {@link Metric} interface.
+ * An adapter between the {@link MetricsContainerStepMap} and the Dropwizard {@link Metric}
+ * interface.
  */
-public class SparkBeamMetric implements Metric {
+class SparkBeamMetric extends BeamMetricSet {
+
   private static final String ILLEGAL_CHARACTERS = "[^A-Za-z0-9-]";
 
-  static Map<String, ?> renderAll(MetricResults metricResults) {
-    Map<String, Object> metrics = new HashMap<>();
-    MetricQueryResults metricQueryResults = metricResults.allMetrics();
-    for (MetricResult<Long> metricResult : metricQueryResults.getCounters()) {
-      metrics.put(renderName(metricResult), metricResult.getAttempted());
+  @Override
+  public Map<String, Gauge<Double>> getValue(String prefix, MetricFilter filter) {
+    MetricResults metricResults =
+        asAttemptedOnlyMetricResults(MetricsAccumulator.getInstance().value());
+    Map<String, Gauge<Double>> metrics = new HashMap<>();
+    MetricQueryResults allMetrics = metricResults.allMetrics();
+    for (MetricResult<Long> metricResult : allMetrics.getCounters()) {
+      putFiltered(metrics, filter, renderName(prefix, metricResult), metricResult.getAttempted());
     }
-    for (MetricResult<DistributionResult> metricResult : metricQueryResults.getDistributions()) {
+    for (MetricResult<DistributionResult> metricResult : allMetrics.getDistributions()) {
       DistributionResult result = metricResult.getAttempted();
-      metrics.put(renderName(metricResult) + ".count", result.getCount());
-      metrics.put(renderName(metricResult) + ".sum", result.getSum());
-      metrics.put(renderName(metricResult) + ".min", result.getMin());
-      metrics.put(renderName(metricResult) + ".max", result.getMax());
-      metrics.put(renderName(metricResult) + ".mean", result.getMean());
+      String baseName = renderName(prefix, metricResult);
+      putFiltered(metrics, filter, baseName + ".count", result.getCount());
+      putFiltered(metrics, filter, baseName + ".sum", result.getSum());
+      putFiltered(metrics, filter, baseName + ".min", result.getMin());
+      putFiltered(metrics, filter, baseName + ".max", result.getMax());
+      putFiltered(metrics, filter, baseName + ".mean", result.getMean());
     }
-    for (MetricResult<GaugeResult> metricResult : metricQueryResults.getGauges()) {
-      metrics.put(renderName(metricResult), metricResult.getAttempted().getValue());
+    for (MetricResult<GaugeResult> metricResult : allMetrics.getGauges()) {
+      putFiltered(
+          metrics,
+          filter,
+          renderName(prefix, metricResult),
+          metricResult.getAttempted().getValue());
     }
     return metrics;
   }
 
-  Map<String, ?> renderAll() {
-    MetricResults metricResults =
-        asAttemptedOnlyMetricResults(MetricsAccumulator.getInstance().value());
-    return renderAll(metricResults);
-  }
-
   @VisibleForTesting
-  static String renderName(MetricResult<?> metricResult) {
+  @SuppressWarnings("nullness") // ok to have nullable elements on stream
+  static String renderName(String prefix, MetricResult<?> metricResult) {
     MetricKey key = metricResult.getKey();
     MetricName name = key.metricName();
     String step = key.stepName();
+    return Streams.concat(
+            Stream.of(prefix),
+            Stream.of(stripSuffix(normalizePart(step))),
+            Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart))
+        .filter(not(Strings::isNullOrEmpty))
+        .collect(Collectors.joining("."));
+  }
 
-    ArrayList<String> pieces = new ArrayList<>();
+  private static @Nullable String normalizePart(@Nullable String str) {
+    return str != null ? str.replaceAll(ILLEGAL_CHARACTERS, "_") : null;
+  }
 
-    if (step != null) {
-      step = step.replaceAll(ILLEGAL_CHARACTERS, "_");
-      if (step.endsWith("_")) {
-        step = step.substring(0, step.length() - 1);
-      }
-      pieces.add(step);
+  private static @Nullable String stripSuffix(@Nullable String str) {
+    return str != null && str.endsWith("_") ? str.substring(0, str.length() - 1) : str;
+  }
+
+  private void putFiltered(
+      Map<String, Gauge<Double>> metrics, MetricFilter filter, String name, Number value) {
+    Gauge<Double> metric = staticGauge(value);
+    if (filter.matches(name, metric)) {
+      metrics.put(name, metric);
     }
-
-    pieces.addAll(
-        ImmutableList.of(name.getNamespace(), name.getName()).stream()
-            .map(str -> str.replaceAll(ILLEGAL_CHARACTERS, "_"))
-            .collect(toList()));
-
-    return String.join(".", pieces);
   }
 }
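
```java
// To make the renderName contract above concrete, a small hedged example; RenderNameExample is
// hypothetical and sits in the same package since SparkBeamMetric is package-private. Characters
// outside [A-Za-z0-9-] become '_', a trailing '_' is stripped from the step, and empty segments
// such as a blank prefix are dropped before joining with '.'.
package org.apache.beam.runners.spark.metrics;

import org.apache.beam.sdk.metrics.MetricKey;
import org.apache.beam.sdk.metrics.MetricName;
import org.apache.beam.sdk.metrics.MetricResult;

public class RenderNameExample {
  public static void main(String[] args) {
    MetricResult<Long> result =
        MetricResult.create(
            MetricKey.create("MyStep.map(out)", MetricName.named("my.namespace", "elements")),
            10L,
            12L);

    // With a prefix, e.g. the name the metric source was registered under:
    System.out.println(SparkBeamMetric.renderName("beam", result));
    // beam.MyStep_map_out.my_namespace.elements

    // With an empty prefix the leading segment is simply dropped:
    System.out.println(SparkBeamMetric.renderName("", result));
    // MyStep_map_out.my_namespace.elements
  }
}
```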
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/WithMetricsSupport.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/WithMetricsSupport.java
index 1d551f0..a0fc714 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/WithMetricsSupport.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/WithMetricsSupport.java
@@ -21,24 +21,13 @@
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Meter;
-import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.SortedMap;
-import org.apache.beam.runners.spark.aggregators.NamedAggregators;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Function;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Optional;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicate;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.FluentIterable;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSortedMap;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Ordering;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A {@link MetricRegistry} decorator-like that supports {@link AggregatorMetric} and {@link
@@ -47,15 +36,9 @@
  * <p>{@link MetricRegistry} is not an interface, so this is not a by-the-book decorator. That said,
  * it delegates all metric related getters to the "decorated" instance.
  */
-@SuppressWarnings({
-  "rawtypes", // TODO(https://github.com/apache/beam/issues/20447)
-  "keyfor",
-  "nullness"
-}) // TODO(https://github.com/apache/beam/issues/20497)
+@SuppressWarnings({"rawtypes"}) // required by interface
 public class WithMetricsSupport extends MetricRegistry {
 
-  private static final Logger LOG = LoggerFactory.getLogger(WithMetricsSupport.class);
-
   private final MetricRegistry internalMetricRegistry;
 
   private WithMetricsSupport(final MetricRegistry internalMetricRegistry) {
@@ -88,95 +71,21 @@
 
   @Override
   public SortedMap<String, Gauge> getGauges(final MetricFilter filter) {
-    return new ImmutableSortedMap.Builder<String, Gauge>(
-            Ordering.from(String.CASE_INSENSITIVE_ORDER))
-        .putAll(internalMetricRegistry.getGauges(filter))
-        .putAll(extractGauges(internalMetricRegistry, filter))
-        .build();
-  }
+    ImmutableSortedMap.Builder<String, Gauge> builder =
+        new ImmutableSortedMap.Builder<>(Ordering.from(String.CASE_INSENSITIVE_ORDER));
 
-  private Map<String, Gauge> extractGauges(
-      final MetricRegistry metricRegistry, final MetricFilter filter) {
-    Map<String, Gauge> gauges = new HashMap<>();
+    Map<String, Gauge> gauges =
+        internalMetricRegistry.getGauges(
+            (n, m) -> filter.matches(n, m) || m instanceof BeamMetricSet);
 
-    // find the AggregatorMetric metrics from within all currently registered metrics
-    final Optional<Map<String, Gauge>> aggregatorMetrics =
-        FluentIterable.from(metricRegistry.getMetrics().entrySet())
-            .firstMatch(isAggregatorMetric())
-            .transform(aggregatorMetricToGauges());
-
-    // find the SparkBeamMetric metrics from within all currently registered metrics
-    final Optional<Map<String, Gauge>> beamMetrics =
-        FluentIterable.from(metricRegistry.getMetrics().entrySet())
-            .firstMatch(isSparkBeamMetric())
-            .transform(beamMetricToGauges());
-
-    if (aggregatorMetrics.isPresent()) {
-      gauges.putAll(Maps.filterEntries(aggregatorMetrics.get(), matches(filter)));
-    }
-
-    if (beamMetrics.isPresent()) {
-      gauges.putAll(Maps.filterEntries(beamMetrics.get(), matches(filter)));
-    }
-
-    return gauges;
-  }
-
-  private Function<Map.Entry<String, Metric>, Map<String, Gauge>> aggregatorMetricToGauges() {
-    return entry -> {
-      final NamedAggregators agg = ((AggregatorMetric) entry.getValue()).getNamedAggregators();
-      final String parentName = entry.getKey();
-      final Map<String, Gauge> gaugeMap = Maps.transformEntries(agg.renderAll(), toGauge());
-      final Map<String, Gauge> fullNameGaugeMap = Maps.newLinkedHashMap();
-      for (Map.Entry<String, Gauge> gaugeEntry : gaugeMap.entrySet()) {
-        fullNameGaugeMap.put(parentName + "." + gaugeEntry.getKey(), gaugeEntry.getValue());
+    for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
+      Gauge gauge = entry.getValue();
+      if (gauge instanceof BeamMetricSet) {
+        builder.putAll(((BeamMetricSet) gauge).getValue(entry.getKey(), filter));
+      } else {
+        builder.put(entry.getKey(), gauge);
       }
-      return Maps.filterValues(fullNameGaugeMap, Predicates.notNull());
-    };
-  }
-
-  private Function<Map.Entry<String, Metric>, Map<String, Gauge>> beamMetricToGauges() {
-    return entry -> {
-      final Map<String, ?> metrics = ((SparkBeamMetric) entry.getValue()).renderAll();
-      final String parentName = entry.getKey();
-      final Map<String, Gauge> gaugeMap = Maps.transformEntries(metrics, toGauge());
-      final Map<String, Gauge> fullNameGaugeMap = Maps.newLinkedHashMap();
-      for (Map.Entry<String, Gauge> gaugeEntry : gaugeMap.entrySet()) {
-        fullNameGaugeMap.put(parentName + "." + gaugeEntry.getKey(), gaugeEntry.getValue());
-      }
-      return Maps.filterValues(fullNameGaugeMap, Predicates.notNull());
-    };
-  }
-
-  private Maps.EntryTransformer<String, Object, Gauge> toGauge() {
-    return (name, rawValue) ->
-        () -> {
-          // at the moment the metric's type is assumed to be
-          // compatible with Double. While far from perfect, it seems reasonable at
-          // this point in time
-          try {
-            return Double.parseDouble(rawValue.toString());
-          } catch (final Exception e) {
-            LOG.warn(
-                "Failed reporting metric with name [{}], of type [{}], since it could not be"
-                    + " converted to double",
-                name,
-                rawValue.getClass().getSimpleName(),
-                e);
-            return null;
-          }
-        };
-  }
-
-  private Predicate<Map.Entry<String, Gauge>> matches(final MetricFilter filter) {
-    return entry -> filter.matches(entry.getKey(), entry.getValue());
-  }
-
-  private Predicate<Map.Entry<String, Metric>> isAggregatorMetric() {
-    return metricEntry -> (metricEntry.getValue() instanceof AggregatorMetric);
-  }
-
-  private Predicate<Map.Entry<String, Metric>> isSparkBeamMetric() {
-    return metricEntry -> (metricEntry.getValue() instanceof SparkBeamMetric);
+    }
+    return builder.build();
   }
 }
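
```java
// A short sketch of how the simplified getGauges behaves, reusing the hypothetical ExampleMetricSet
// from the earlier sketch: plain Dropwizard gauges pass through unchanged, while any BeamMetricSet
// is expanded into its child gauges keyed by "<registered name>.<child name>".
package org.apache.beam.runners.spark.metrics;

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import java.util.SortedMap;

public class WithMetricsSupportExample {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    // A plain gauge is returned as-is.
    registry.register("jvm.uptimeSeconds", (Gauge<Long>) () -> 123L);
    // A BeamMetricSet is flattened into its child gauges.
    registry.register("beam", new ExampleMetricSet());

    SortedMap<String, Gauge> gauges =
        WithMetricsSupport.forRegistry(registry).getGauges(MetricFilter.ALL);
    gauges.forEach((name, gauge) -> System.out.println(name + " = " + gauge.getValue()));
    // Expected keys (illustrative): beam.bytesWritten, beam.elementsProcessed, jvm.uptimeSeconds
  }
}
```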
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/CsvSink.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/CsvSink.java
index d87cbd2..d880cd3 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/CsvSink.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/CsvSink.java
@@ -18,22 +18,69 @@
 package org.apache.beam.runners.spark.metrics.sink;
 
 import com.codahale.metrics.MetricRegistry;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Properties;
 import org.apache.beam.runners.spark.metrics.AggregatorMetric;
 import org.apache.beam.runners.spark.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
 import org.apache.spark.metrics.sink.Sink;
 
 /**
- * A Spark {@link Sink} that is tailored to report {@link AggregatorMetric} metrics to a CSV file.
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to a CSV file.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.csv.class"="org.apache.beam.runners.spark.metrics.sink.CsvSink"
+ * "spark.metrics.conf.*.sink.csv.directory"="<output_directory>"
+ * "spark.metrics.conf.*.sink.csv.period"=10
+ * "spark.metrics.conf.*.sink.csv.unit"=seconds
+ * }</pre>
  */
-// Intentionally overriding parent name because inheritors should replace the parent.
-@SuppressFBWarnings("NM_SAME_SIMPLE_NAME_AS_SUPERCLASS")
-public class CsvSink extends org.apache.spark.metrics.sink.CsvSink {
+public class CsvSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.CsvSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
   public CsvSink(
       final Properties properties,
       final MetricRegistry metricRegistry,
       final org.apache.spark.SecurityManager securityMgr) {
-    super(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public CsvSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.CsvSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
   }
 }
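
```java
// The javadoc above configures the sink through Spark's metrics properties; the same settings can
// be applied programmatically on a SparkConf, as in this sketch. The output directory is an
// arbitrary assumption; any writable path works.
import org.apache.spark.SparkConf;

public class CsvSinkConfigExample {
  /** Applies the CSV sink settings from the javadoc above to a SparkConf. */
  public static SparkConf withBeamCsvSink(SparkConf conf) {
    return conf
        .set("spark.metrics.conf.*.sink.csv.class",
            "org.apache.beam.runners.spark.metrics.sink.CsvSink")
        .set("spark.metrics.conf.*.sink.csv.directory", "/tmp/beam-metrics") // assumed path
        .set("spark.metrics.conf.*.sink.csv.period", "10")
        .set("spark.metrics.conf.*.sink.csv.unit", "seconds");
  }
}
```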
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/GraphiteSink.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/GraphiteSink.java
index eca1b2b..0b21554 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/GraphiteSink.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/metrics/sink/GraphiteSink.java
@@ -18,20 +18,72 @@
 package org.apache.beam.runners.spark.metrics.sink;
 
 import com.codahale.metrics.MetricRegistry;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Properties;
 import org.apache.beam.runners.spark.metrics.AggregatorMetric;
 import org.apache.beam.runners.spark.metrics.WithMetricsSupport;
+import org.apache.spark.SecurityManager;
 import org.apache.spark.metrics.sink.Sink;
 
-/** A Spark {@link Sink} that is tailored to report {@link AggregatorMetric} metrics to Graphite. */
-// Intentionally overriding parent name because inheritors should replace the parent.
-@SuppressFBWarnings("NM_SAME_SIMPLE_NAME_AS_SUPERCLASS")
-public class GraphiteSink extends org.apache.spark.metrics.sink.GraphiteSink {
+/**
+ * A {@link Sink} for <a href="https://spark.apache.org/docs/latest/monitoring.html#metrics">Spark's
+ * metric system</a> that is tailored to report {@link AggregatorMetric}s to Graphite.
+ *
+ * <p>The sink is configured using Spark configuration parameters, for example:
+ *
+ * <pre>{@code
+ * "spark.metrics.conf.*.sink.graphite.class"="org.apache.beam.runners.spark.metrics.sink.GraphiteSink"
+ * "spark.metrics.conf.*.sink.graphite.host"="<graphite_hostname>"
+ * "spark.metrics.conf.*.sink.graphite.port"=<graphite_listening_port>
+ * "spark.metrics.conf.*.sink.graphite.period"=10
+ * "spark.metrics.conf.*.sink.graphite.unit"=seconds
+ * "spark.metrics.conf.*.sink.graphite.prefix"="<optional_prefix>"
+ * "spark.metrics.conf.*.sink.graphite.regex"="<optional_regex_to_send_matching_metrics>"
+ * }</pre>
+ */
+public class GraphiteSink implements Sink {
+
+  // Initialized reflectively as done by Spark's MetricsSystem
+  private final org.apache.spark.metrics.sink.GraphiteSink delegate;
+
+  /** Constructor for Spark 3.1.x and earlier. */
   public GraphiteSink(
       final Properties properties,
       final MetricRegistry metricRegistry,
-      final org.apache.spark.SecurityManager securityMgr) {
-    super(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+      final SecurityManager securityMgr) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class, SecurityManager.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /** Constructor for Spark 3.2.x and later. */
+  public GraphiteSink(final Properties properties, final MetricRegistry metricRegistry) {
+    try {
+      delegate =
+          org.apache.spark.metrics.sink.GraphiteSink.class
+              .getConstructor(Properties.class, MetricRegistry.class)
+              .newInstance(properties, WithMetricsSupport.forRegistry(metricRegistry));
+    } catch (ReflectiveOperationException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void stop() {
+    delegate.stop();
+  }
+
+  @Override
+  public void report() {
+    delegate.report();
   }
 }
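
```java
// Likewise for Graphite: a sketch applying the settings listed in the javadoc above
// programmatically; the host, port, and prefix values are placeholders.
import org.apache.spark.SparkConf;

public class GraphiteSinkConfigExample {
  /** Applies the Graphite sink settings from the javadoc above to a SparkConf. */
  public static SparkConf withBeamGraphiteSink(SparkConf conf) {
    return conf
        .set("spark.metrics.conf.*.sink.graphite.class",
            "org.apache.beam.runners.spark.metrics.sink.GraphiteSink")
        .set("spark.metrics.conf.*.sink.graphite.host", "graphite.example.com") // placeholder
        .set("spark.metrics.conf.*.sink.graphite.port", "2003")                 // placeholder
        .set("spark.metrics.conf.*.sink.graphite.period", "10")
        .set("spark.metrics.conf.*.sink.graphite.unit", "seconds")
        .set("spark.metrics.conf.*.sink.graphite.prefix", "beam");              // optional
  }
}
```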
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
deleted file mode 100644
index 55590a6..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/AggregatorMetric.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.metrics;
-
-import com.codahale.metrics.Metric;
-import org.apache.beam.runners.spark.structuredstreaming.aggregators.NamedAggregators;
-
-/** An adapter between the {@link NamedAggregators} and Codahale's {@link Metric} interface. */
-public class AggregatorMetric implements Metric {
-
-  private final NamedAggregators namedAggregators;
-
-  private AggregatorMetric(final NamedAggregators namedAggregators) {
-    this.namedAggregators = namedAggregators;
-  }
-
-  public static AggregatorMetric of(final NamedAggregators namedAggregators) {
-    return new AggregatorMetric(namedAggregators);
-  }
-
-  NamedAggregators getNamedAggregators() {
-    return namedAggregators;
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
deleted file mode 100644
index de146c6..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/SparkBeamMetric.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.metrics;
-
-import static java.util.stream.Collectors.toList;
-import static org.apache.beam.runners.core.metrics.MetricsContainerStepMap.asAttemptedOnlyMetricResults;
-
-import com.codahale.metrics.Metric;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.beam.runners.core.metrics.MetricsContainerStepMap;
-import org.apache.beam.sdk.metrics.DistributionResult;
-import org.apache.beam.sdk.metrics.GaugeResult;
-import org.apache.beam.sdk.metrics.MetricKey;
-import org.apache.beam.sdk.metrics.MetricName;
-import org.apache.beam.sdk.metrics.MetricQueryResults;
-import org.apache.beam.sdk.metrics.MetricResult;
-import org.apache.beam.sdk.metrics.MetricResults;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
-
-/**
- * An adapter between the {@link MetricsContainerStepMap} and Codahale's {@link Metric} interface.
- */
-class SparkBeamMetric implements Metric {
-  private static final String ILLEGAL_CHARACTERS = "[^A-Za-z0-9-]";
-
-  Map<String, ?> renderAll() {
-    Map<String, Object> metrics = new HashMap<>();
-    MetricResults metricResults =
-        asAttemptedOnlyMetricResults(MetricsAccumulator.getInstance().value());
-    MetricQueryResults metricQueryResults = metricResults.allMetrics();
-    for (MetricResult<Long> metricResult : metricQueryResults.getCounters()) {
-      metrics.put(renderName(metricResult), metricResult.getAttempted());
-    }
-    for (MetricResult<DistributionResult> metricResult : metricQueryResults.getDistributions()) {
-      DistributionResult result = metricResult.getAttempted();
-      metrics.put(renderName(metricResult) + ".count", result.getCount());
-      metrics.put(renderName(metricResult) + ".sum", result.getSum());
-      metrics.put(renderName(metricResult) + ".min", result.getMin());
-      metrics.put(renderName(metricResult) + ".max", result.getMax());
-      metrics.put(renderName(metricResult) + ".mean", result.getMean());
-    }
-    for (MetricResult<GaugeResult> metricResult : metricQueryResults.getGauges()) {
-      metrics.put(renderName(metricResult), metricResult.getAttempted().getValue());
-    }
-    return metrics;
-  }
-
-  @VisibleForTesting
-  String renderName(MetricResult<?> metricResult) {
-    MetricKey key = metricResult.getKey();
-    MetricName name = key.metricName();
-    String step = key.stepName();
-
-    ArrayList<String> pieces = new ArrayList<>();
-
-    if (step != null) {
-      step = step.replaceAll(ILLEGAL_CHARACTERS, "_");
-      if (step.endsWith("_")) {
-        step = step.substring(0, step.length() - 1);
-      }
-      pieces.add(step);
-    }
-
-    pieces.addAll(
-        ImmutableList.of(name.getNamespace(), name.getName()).stream()
-            .map(str -> str.replaceAll(ILLEGAL_CHARACTERS, "_"))
-            .collect(toList()));
-
-    return String.join(".", pieces);
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
deleted file mode 100644
index c1c7b29..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/WithMetricsSupport.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.metrics;
-
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Gauge;
-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.Metric;
-import com.codahale.metrics.MetricFilter;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.SortedMap;
-import org.apache.beam.runners.spark.structuredstreaming.aggregators.NamedAggregators;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Function;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Optional;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicate;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.FluentIterable;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSortedMap;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Ordering;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A {@link MetricRegistry} decorator-like that supports {@link AggregatorMetric} and {@link
- * SparkBeamMetric} as {@link Gauge Gauges}.
- *
- * <p>{@link MetricRegistry} is not an interface, so this is not a by-the-book decorator. That said,
- * it delegates all metric related getters to the "decorated" instance.
- */
-@SuppressWarnings({
-  "rawtypes", // TODO(https://github.com/apache/beam/issues/20447)
-  "keyfor",
-  "nullness"
-}) // TODO(https://github.com/apache/beam/issues/20497)
-public class WithMetricsSupport extends MetricRegistry {
-
-  private static final Logger LOG = LoggerFactory.getLogger(WithMetricsSupport.class);
-
-  private final MetricRegistry internalMetricRegistry;
-
-  private WithMetricsSupport(final MetricRegistry internalMetricRegistry) {
-    this.internalMetricRegistry = internalMetricRegistry;
-  }
-
-  public static WithMetricsSupport forRegistry(final MetricRegistry metricRegistry) {
-    return new WithMetricsSupport(metricRegistry);
-  }
-
-  @Override
-  public SortedMap<String, Timer> getTimers(final MetricFilter filter) {
-    return internalMetricRegistry.getTimers(filter);
-  }
-
-  @Override
-  public SortedMap<String, Meter> getMeters(final MetricFilter filter) {
-    return internalMetricRegistry.getMeters(filter);
-  }
-
-  @Override
-  public SortedMap<String, Histogram> getHistograms(final MetricFilter filter) {
-    return internalMetricRegistry.getHistograms(filter);
-  }
-
-  @Override
-  public SortedMap<String, Counter> getCounters(final MetricFilter filter) {
-    return internalMetricRegistry.getCounters(filter);
-  }
-
-  @Override
-  public SortedMap<String, Gauge> getGauges(final MetricFilter filter) {
-    return new ImmutableSortedMap.Builder<String, Gauge>(
-            Ordering.from(String.CASE_INSENSITIVE_ORDER))
-        .putAll(internalMetricRegistry.getGauges(filter))
-        .putAll(extractGauges(internalMetricRegistry, filter))
-        .build();
-  }
-
-  private Map<String, Gauge> extractGauges(
-      final MetricRegistry metricRegistry, final MetricFilter filter) {
-    Map<String, Gauge> gauges = new HashMap<>();
-
-    // find the AggregatorMetric metrics from within all currently registered metrics
-    final Optional<Map<String, Gauge>> aggregatorMetrics =
-        FluentIterable.from(metricRegistry.getMetrics().entrySet())
-            .firstMatch(isAggregatorMetric())
-            .transform(aggregatorMetricToGauges());
-
-    // find the SparkBeamMetric metrics from within all currently registered metrics
-    final Optional<Map<String, Gauge>> beamMetrics =
-        FluentIterable.from(metricRegistry.getMetrics().entrySet())
-            .firstMatch(isSparkBeamMetric())
-            .transform(beamMetricToGauges());
-
-    if (aggregatorMetrics.isPresent()) {
-      gauges.putAll(Maps.filterEntries(aggregatorMetrics.get(), matches(filter)));
-    }
-
-    if (beamMetrics.isPresent()) {
-      gauges.putAll(Maps.filterEntries(beamMetrics.get(), matches(filter)));
-    }
-
-    return gauges;
-  }
-
-  private Function<Map.Entry<String, Metric>, Map<String, Gauge>> aggregatorMetricToGauges() {
-    return entry -> {
-      final NamedAggregators agg = ((AggregatorMetric) entry.getValue()).getNamedAggregators();
-      final String parentName = entry.getKey();
-      final Map<String, Gauge> gaugeMap = Maps.transformEntries(agg.renderAll(), toGauge());
-      final Map<String, Gauge> fullNameGaugeMap = Maps.newLinkedHashMap();
-      for (Map.Entry<String, Gauge> gaugeEntry : gaugeMap.entrySet()) {
-        fullNameGaugeMap.put(parentName + "." + gaugeEntry.getKey(), gaugeEntry.getValue());
-      }
-      return Maps.filterValues(fullNameGaugeMap, Predicates.notNull());
-    };
-  }
-
-  private Function<Map.Entry<String, Metric>, Map<String, Gauge>> beamMetricToGauges() {
-    return entry -> {
-      final Map<String, ?> metrics = ((SparkBeamMetric) entry.getValue()).renderAll();
-      final String parentName = entry.getKey();
-      final Map<String, Gauge> gaugeMap = Maps.transformEntries(metrics, toGauge());
-      final Map<String, Gauge> fullNameGaugeMap = Maps.newLinkedHashMap();
-      for (Map.Entry<String, Gauge> gaugeEntry : gaugeMap.entrySet()) {
-        fullNameGaugeMap.put(parentName + "." + gaugeEntry.getKey(), gaugeEntry.getValue());
-      }
-      return Maps.filterValues(fullNameGaugeMap, Predicates.notNull());
-    };
-  }
-
-  private Maps.EntryTransformer<String, Object, Gauge> toGauge() {
-    return (name, rawValue) ->
-        () -> {
-          // at the moment the metric's type is assumed to be
-          // compatible with Double. While far from perfect, it seems reasonable at
-          // this point in time
-          try {
-            return Double.parseDouble(rawValue.toString());
-          } catch (final Exception e) {
-            LOG.warn(
-                "Failed reporting metric with name [{}], of type [{}], since it could not be"
-                    + " converted to double",
-                name,
-                rawValue.getClass().getSimpleName(),
-                e);
-            return null;
-          }
-        };
-  }
-
-  private Predicate<Map.Entry<String, Gauge>> matches(final MetricFilter filter) {
-    return entry -> filter.matches(entry.getKey(), entry.getValue());
-  }
-
-  private Predicate<Map.Entry<String, Metric>> isAggregatorMetric() {
-    return metricEntry -> (metricEntry.getValue() instanceof AggregatorMetric);
-  }
-
-  private Predicate<Map.Entry<String, Metric>> isSparkBeamMetric() {
-    return metricEntry -> (metricEntry.getValue() instanceof SparkBeamMetric);
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
deleted file mode 100644
index 7c1f2096..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleCsvSink.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
-
-import com.codahale.metrics.MetricRegistry;
-import java.util.Properties;
-import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
-import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
-import org.apache.spark.metrics.sink.Sink;
-
-/**
- * A Spark {@link Sink} that is tailored to report {@link AggregatorMetric} metrics to a CSV file.
- */
-public class CodahaleCsvSink extends org.apache.spark.metrics.sink.CsvSink {
-  public CodahaleCsvSink(
-      final Properties properties,
-      final MetricRegistry metricRegistry,
-      final org.apache.spark.SecurityManager securityMgr) {
-    super(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
deleted file mode 100644
index 1dc4644..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/metrics/sink/CodahaleGraphiteSink.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.metrics.sink;
-
-import com.codahale.metrics.MetricRegistry;
-import java.util.Properties;
-import org.apache.beam.runners.spark.structuredstreaming.metrics.AggregatorMetric;
-import org.apache.beam.runners.spark.structuredstreaming.metrics.WithMetricsSupport;
-import org.apache.spark.metrics.sink.Sink;
-
-/** A Spark {@link Sink} that is tailored to report {@link AggregatorMetric} metrics to Graphite. */
-public class CodahaleGraphiteSink extends org.apache.spark.metrics.sink.GraphiteSink {
-  public CodahaleGraphiteSink(
-      final Properties properties,
-      final MetricRegistry metricRegistry,
-      final org.apache.spark.SecurityManager securityMgr) {
-    super(properties, WithMetricsSupport.forRegistry(metricRegistry), securityMgr);
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
deleted file mode 100644
index 7b2f109..0000000
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpers.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
-
-import static org.apache.spark.sql.types.DataTypes.BinaryType;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import org.apache.beam.sdk.coders.Coder;
-import org.apache.spark.sql.Encoder;
-import org.apache.spark.sql.Encoders;
-import org.apache.spark.sql.catalyst.expressions.Expression;
-import org.apache.spark.sql.catalyst.expressions.NonSQLExpression;
-import org.apache.spark.sql.catalyst.expressions.UnaryExpression;
-import org.apache.spark.sql.catalyst.expressions.codegen.Block;
-import org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator;
-import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext;
-import org.apache.spark.sql.catalyst.expressions.codegen.ExprCode;
-import org.apache.spark.sql.types.DataType;
-import org.apache.spark.sql.types.ObjectType;
-import org.checkerframework.checker.nullness.qual.Nullable;
-import scala.StringContext;
-import scala.collection.JavaConversions;
-import scala.reflect.ClassTag;
-
-/** {@link Encoders} utility class. */
-@SuppressWarnings({
-  "nullness" // TODO(https://github.com/apache/beam/issues/20497)
-})
-public class EncoderHelpers {
-  /**
-   * Wrap a Beam coder into a Spark Encoder using Catalyst Expression Encoders (which uses java code
-   * generation).
-   */
-  public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
-    return EncoderFactory.fromBeamCoder(coder);
-  }
-
-  /**
-   * Catalyst Expression that serializes elements using Beam {@link Coder}.
-   *
-   * @param <T>: Type of elements ot be serialized.
-   */
-  public static class EncodeUsingBeamCoder<T> extends UnaryExpression
-      implements NonSQLExpression, Serializable {
-
-    private final Expression child;
-    private final Coder<T> coder;
-
-    public EncodeUsingBeamCoder(Expression child, Coder<T> coder) {
-      this.child = child;
-      this.coder = coder;
-    }
-
-    @Override
-    public Expression child() {
-      return child;
-    }
-
-    @Override
-    public ExprCode doGenCode(CodegenContext ctx, ExprCode ev) {
-      String accessCode = ctx.addReferenceObj("coder", coder, coder.getClass().getName());
-      ExprCode input = child.genCode(ctx);
-      String javaType = CodeGenerator.javaType(dataType());
-
-      List<String> parts = new ArrayList<>();
-      List<Object> args = new ArrayList<>();
-      /*
-        CODE GENERATED
-        final ${javaType} ${ev.value} = org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.EncodeUsingBeamCoder.encode(${input.isNull()}, ${input.value}, ${coder});
-      */
-      parts.add("final ");
-      args.add(javaType);
-      parts.add(" ");
-      args.add(ev.value());
-      parts.add(
-          " = org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.EncodeUsingBeamCoder.encode(");
-      args.add(input.isNull());
-      parts.add(", ");
-      args.add(input.value());
-      parts.add(", ");
-      args.add(accessCode);
-      parts.add(");");
-
-      StringContext sc =
-          new StringContext(JavaConversions.collectionAsScalaIterable(parts).toSeq());
-      Block code =
-          new Block.BlockHelper(sc).code(JavaConversions.collectionAsScalaIterable(args).toSeq());
-
-      return ev.copy(input.code().$plus(code), input.isNull(), ev.value());
-    }
-
-    @Override
-    public DataType dataType() {
-      return BinaryType;
-    }
-
-    @Override
-    public Object productElement(int n) {
-      switch (n) {
-        case 0:
-          return child;
-        case 1:
-          return coder;
-        default:
-          throw new ArrayIndexOutOfBoundsException("productElement out of bounds");
-      }
-    }
-
-    @Override
-    public int productArity() {
-      return 2;
-    }
-
-    @Override
-    public boolean canEqual(Object that) {
-      return (that instanceof EncodeUsingBeamCoder);
-    }
-
-    @Override
-    public boolean equals(@Nullable Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-      EncodeUsingBeamCoder<?> that = (EncodeUsingBeamCoder<?>) o;
-      return child.equals(that.child) && coder.equals(that.coder);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(super.hashCode(), child, coder);
-    }
-
-    /**
-     * Convert value to byte array (invoked by generated code in {@link #doGenCode(CodegenContext,
-     * ExprCode)}).
-     */
-    public static <T> byte[] encode(boolean isNull, @Nullable T value, Coder<T> coder) {
-      return isNull ? null : CoderHelpers.toByteArray(value, coder);
-    }
-  }
-
-  /**
-   * Catalyst Expression that deserializes elements using Beam {@link Coder}.
-   *
-   * @param <T>: Type of elements ot be serialized.
-   */
-  public static class DecodeUsingBeamCoder<T> extends UnaryExpression
-      implements NonSQLExpression, Serializable {
-
-    private final Expression child;
-    private final ClassTag<T> classTag;
-    private final Coder<T> coder;
-
-    public DecodeUsingBeamCoder(Expression child, ClassTag<T> classTag, Coder<T> coder) {
-      this.child = child;
-      this.classTag = classTag;
-      this.coder = coder;
-    }
-
-    @Override
-    public Expression child() {
-      return child;
-    }
-
-    @Override
-    public ExprCode doGenCode(CodegenContext ctx, ExprCode ev) {
-      String accessCode = ctx.addReferenceObj("coder", coder, coder.getClass().getName());
-      ExprCode input = child.genCode(ctx);
-      String javaType = CodeGenerator.javaType(dataType());
-
-      List<String> parts = new ArrayList<>();
-      List<Object> args = new ArrayList<>();
-      /*
-        CODE GENERATED:
-        final ${javaType} ${ev.value} = (${javaType}) org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.DecodeUsingBeamCoder.decode(${input.value}, ${coder});
-      */
-      parts.add("final ");
-      args.add(javaType);
-      parts.add(" ");
-      args.add(ev.value());
-      parts.add(" = (");
-      args.add(javaType);
-      parts.add(
-          ") org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.DecodeUsingBeamCoder.decode(");
-      args.add(input.isNull());
-      parts.add(", ");
-      args.add(input.value());
-      parts.add(", ");
-      args.add(accessCode);
-      parts.add(");");
-
-      StringContext sc =
-          new StringContext(JavaConversions.collectionAsScalaIterable(parts).toSeq());
-      Block code =
-          new Block.BlockHelper(sc).code(JavaConversions.collectionAsScalaIterable(args).toSeq());
-      return ev.copy(input.code().$plus(code), input.isNull(), ev.value());
-    }
-
-    @Override
-    public DataType dataType() {
-      return new ObjectType(classTag.runtimeClass());
-    }
-
-    @Override
-    public Object productElement(int n) {
-      switch (n) {
-        case 0:
-          return child;
-        case 1:
-          return classTag;
-        case 2:
-          return coder;
-        default:
-          throw new ArrayIndexOutOfBoundsException("productElement out of bounds");
-      }
-    }
-
-    @Override
-    public int productArity() {
-      return 3;
-    }
-
-    @Override
-    public boolean canEqual(Object that) {
-      return (that instanceof DecodeUsingBeamCoder);
-    }
-
-    @Override
-    public boolean equals(@Nullable Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-      DecodeUsingBeamCoder<?> that = (DecodeUsingBeamCoder<?>) o;
-      return child.equals(that.child) && classTag.equals(that.classTag) && coder.equals(that.coder);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(super.hashCode(), child, classTag, coder);
-    }
-
-    /**
-     * Convert value from byte array (invoked by generated code in {@link #doGenCode(CodegenContext,
-     * ExprCode)}).
-     */
-    public static <T> T decode(boolean isNull, byte @Nullable [] serialized, Coder<T> coder) {
-      return isNull ? null : CoderHelpers.fromByteArray(serialized, coder);
-    }
-  }
-}
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
index ecb2f7e..33dd4b0 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
@@ -363,10 +363,19 @@
           ParDo.MultiOutput<InputT, OutputT> transform, EvaluationContext context) {
         String stepName = context.getCurrentTransform().getFullName();
         DoFn<InputT, OutputT> doFn = transform.getFn();
+        DoFnSignature signature = DoFnSignatures.signatureForDoFn(doFn);
+
         checkState(
-            !DoFnSignatures.signatureForDoFn(doFn).processElement().isSplittable(),
+            !signature.processElement().isSplittable(),
             "Not expected to directly translate splittable DoFn, should have been overridden: %s",
             doFn);
+
+        // https://github.com/apache/beam/issues/22524
+        checkState(
+            signature.onWindowExpiration() == null,
+            "onWindowExpiration is not supported: %s",
+            doFn);
+
         JavaRDD<WindowedValue<InputT>> inRDD =
             ((BoundedDataset<InputT>) context.borrowDataset(transform)).getRDD();
         WindowingStrategy<?, ?> windowingStrategy =
@@ -376,7 +385,6 @@
         Map<TupleTag<?>, Coder<?>> outputCoders = context.getOutputCoders();
         JavaPairRDD<TupleTag<?>, WindowedValue<?>> all;
 
-        DoFnSignature signature = DoFnSignatures.getSignature(transform.getFn().getClass());
         boolean stateful =
             signature.stateDeclarations().size() > 0 || signature.timerDeclarations().size() > 0;
 
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/InMemoryMetrics.java b/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/InMemoryMetrics.java
index a4b3e54..b69275b 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/InMemoryMetrics.java
+++ b/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/InMemoryMetrics.java
@@ -17,11 +17,13 @@
  */
 package org.apache.beam.runners.spark.aggregators.metrics.sink;
 
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
+import java.util.Collection;
 import java.util.Properties;
 import org.apache.beam.runners.spark.metrics.WithMetricsSupport;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Predicates;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.apache.spark.metrics.sink.Sink;
 
 /** An in-memory {@link Sink} implementation for tests. */
@@ -30,6 +32,7 @@
   private static WithMetricsSupport extendedMetricsRegistry;
   private static MetricRegistry internalMetricRegistry;
 
+  // Constructor for Spark 3.1
   @SuppressWarnings("UnusedParameters")
   public InMemoryMetrics(
       final Properties properties,
@@ -39,26 +42,24 @@
     internalMetricRegistry = metricRegistry;
   }
 
-  @SuppressWarnings("TypeParameterUnusedInFormals")
-  public static <T> T valueOf(final String name) {
-    final T retVal;
+  // Constructor for Spark >= 3.2
+  @SuppressWarnings("UnusedParameters")
+  public InMemoryMetrics(final Properties properties, final MetricRegistry metricRegistry) {
+    extendedMetricsRegistry = WithMetricsSupport.forRegistry(metricRegistry);
+    internalMetricRegistry = metricRegistry;
+  }
 
+  @SuppressWarnings({"TypeParameterUnusedInFormals", "rawtypes"})
+  public static <T> T valueOf(final String name) {
     // this might fail in case we have multiple aggregators with the same suffix after
     // the last dot, but it should be good enough for tests.
-    if (extendedMetricsRegistry != null
-        && extendedMetricsRegistry.getGauges().keySet().stream()
-            .anyMatch(Predicates.containsPattern(name + "$")::apply)) {
-      String key =
-          extendedMetricsRegistry.getGauges().keySet().stream()
-              .filter(Predicates.containsPattern(name + "$")::apply)
-              .findFirst()
-              .get();
-      retVal = (T) extendedMetricsRegistry.getGauges().get(key).getValue();
+    if (extendedMetricsRegistry != null) {
+      Collection<Gauge> matches =
+          extendedMetricsRegistry.getGauges((n, m) -> n.endsWith(name)).values();
+      return matches.isEmpty() ? null : (T) Iterables.getOnlyElement(matches).getValue();
     } else {
-      retVal = null;
+      return null;
     }
-
-    return retVal;
   }
 
   @SuppressWarnings("WeakerAccess")
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/SparkMetricsSinkTest.java b/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/SparkMetricsSinkTest.java
index 0d067b5..edf164b 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/SparkMetricsSinkTest.java
+++ b/runners/spark/src/test/java/org/apache/beam/runners/spark/aggregators/metrics/sink/SparkMetricsSinkTest.java
@@ -34,6 +34,7 @@
 import org.apache.beam.sdk.transforms.MapElements;
 import org.apache.beam.sdk.transforms.windowing.FixedWindows;
 import org.apache.beam.sdk.transforms.windowing.Window;
+import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.TimestampedValue;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
@@ -51,7 +52,10 @@
  * streaming modes.
  */
 public class SparkMetricsSinkTest {
-  @ClassRule public static SparkContextRule contextRule = new SparkContextRule();
+  @ClassRule
+  public static SparkContextRule contextRule =
+      new SparkContextRule(
+          KV.of("spark.metrics.conf.*.sink.memory.class", InMemoryMetrics.class.getName()));
 
   @Rule public ExternalResource inMemoryMetricsSink = new InMemoryMetricsSinkRule();
 
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/metrics/SparkBeamMetricTest.java b/runners/spark/src/test/java/org/apache/beam/runners/spark/metrics/SparkBeamMetricTest.java
index 1851e1d..df96a65 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/metrics/SparkBeamMetricTest.java
+++ b/runners/spark/src/test/java/org/apache/beam/runners/spark/metrics/SparkBeamMetricTest.java
@@ -27,6 +27,7 @@
 
 /** Test SparkBeamMetric. */
 public class SparkBeamMetricTest {
+
   @Test
   public void testRenderName() {
     MetricResult<Object> metricResult =
@@ -35,10 +36,25 @@
                 "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
             123,
             456);
-    String renderedName = SparkBeamMetric.renderName(metricResult);
+    String renderedName = SparkBeamMetric.renderName("", metricResult);
     assertThat(
         "Metric name was not rendered correctly",
         renderedName,
         equalTo("myStep_one_two_three.myNameSpace__.myName__"));
   }
+
+  @Test
+  public void testRenderNameWithPrefix() {
+    MetricResult<Object> metricResult =
+        MetricResult.create(
+            MetricKey.create(
+                "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
+            123,
+            456);
+    String renderedName = SparkBeamMetric.renderName("prefix", metricResult);
+    assertThat(
+        "Metric name was not rendered correctly",
+        renderedName,
+        equalTo("prefix.myStep_one_two_three.myNameSpace__.myName__"));
+  }
 }
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java b/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
deleted file mode 100644
index 3151a5f..0000000
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/structuredstreaming/translation/helpers/EncoderHelpersTest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.translation.helpers;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-import java.util.List;
-import org.apache.beam.runners.spark.structuredstreaming.SparkSessionRule;
-import org.apache.beam.sdk.coders.VarIntCoder;
-import org.apache.spark.sql.Dataset;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-/** Test of the wrapping of Beam Coders as Spark ExpressionEncoders. */
-@RunWith(JUnit4.class)
-public class EncoderHelpersTest {
-
-  @ClassRule public static SparkSessionRule sessionRule = new SparkSessionRule();
-
-  @Test
-  public void beamCoderToSparkEncoderTest() {
-    List<Integer> data = Arrays.asList(1, 2, 3);
-    Dataset<Integer> dataset =
-        sessionRule
-            .getSession()
-            .createDataset(data, EncoderHelpers.fromBeamCoder(VarIntCoder.of()));
-    assertEquals(data, dataset.collectAsList());
-  }
-}
diff --git a/runners/spark/src/test/resources/metrics.properties b/runners/spark/src/test/resources/metrics.properties
deleted file mode 100644
index 78705c2..0000000
--- a/runners/spark/src/test/resources/metrics.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# The "org.apache.beam.runners.spark.metrics.sink.XSink"
-# (a.k.a Beam.XSink) is only configured for the driver, the executors are set with a Spark native
-# implementation "org.apache.spark.metrics.sink.XSink" (a.k.a Spark.XSink).
-# This is due to sink class loading behavior, which is different on the driver and executors nodes.
-# Since Beam aggregators and metrics are reported via Spark accumulators and thus make their way to
-# the # driver, we only need the "Beam.XSink" on the driver side. Executor nodes can keep
-# reporting Spark native metrics using the traditional Spark.XSink.
-#
-# The current sink configuration pattern is therefore:
-#
-# driver.**.class   = Beam.XSink
-# executor.**.class = Spark.XSink
-
-
-# ************* A metrics sink for tests *************
-*.sink.memory.class=org.apache.beam.runners.spark.aggregators.metrics.sink.InMemoryMetrics
-# ************* End of InMemoryMetrics sink configuration section *************
-
-
-# ************* A sample configuration for outputting metrics to Graphite *************
-
-#driver.sink.graphite.class=org.apache.beam.runners.spark.metrics.sink.GraphiteSink
-#driver.sink.graphite.host=YOUR_HOST
-#driver.sink.graphite.port=2003
-#driver.sink.graphite.prefix=spark
-#driver.sink.graphite.period=1
-#driver.sink.graphite.unit=SECONDS
-
-#executor.sink.graphite.class=org.apache.spark.metrics.sink.GraphiteSink
-#executor.sink.graphite.host=YOUR_HOST
-#executor.sink.graphite.port=2003
-#executor.sink.graphite.prefix=spark
-#executor.sink.graphite.period=1
-#executor.sink.graphite.unit=SECONDS
-
-# ************* End of Graphite sik configuration section *************
-
-
-# ************* A sample configuration for outputting metrics to a CSV file. *************
-
-#driver.sink.csv.class=org.apache.beam.runners.spark.metrics.sink.CsvSink
-#driver.sink.csv.directory=/tmp/spark-metrics
-#driver.sink.csv.period=1
-#driver.sink.graphite.unit=SECONDS
-
-#executor.sink.csv.class=org.apache.spark.metrics.sink.CsvSink
-#executor.sink.csv.directory=/tmp/spark-metrics
-#executor.sink.csv.period=1
-#executor.sink.graphite.unit=SECONDS
-
-# ************* End of CSV sink configuration section *************
diff --git a/sdks/go.mod b/sdks/go.mod
index 614f462..8b552aa 100644
--- a/sdks/go.mod
+++ b/sdks/go.mod
@@ -23,10 +23,10 @@
 go 1.18
 
 require (
-	cloud.google.com/go/bigquery v1.35.0
+	cloud.google.com/go/bigquery v1.36.0
 	cloud.google.com/go/datastore v1.8.0
-	cloud.google.com/go/pubsub v1.23.1
-	cloud.google.com/go/storage v1.23.0
+	cloud.google.com/go/pubsub v1.24.0
+	cloud.google.com/go/storage v1.24.0
 	github.com/docker/go-connections v0.4.0
 	github.com/go-sql-driver/mysql v1.6.0
 	github.com/golang/protobuf v1.5.2 // TODO(danoliveira): Fully replace this with google.golang.org/protobuf
@@ -44,10 +44,10 @@
 	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
 	golang.org/x/sys v0.0.0-20220624220833-87e55d714810
 	golang.org/x/text v0.3.7
-	google.golang.org/api v0.87.0
-	google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
+	google.golang.org/api v0.90.0
+	google.golang.org/genproto v0.0.0-20220714211235-042d03aeabc9
 	google.golang.org/grpc v1.48.0
-	google.golang.org/protobuf v1.28.0
+	google.golang.org/protobuf v1.28.1
 	gopkg.in/retry.v1 v1.0.3
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -72,7 +72,6 @@
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
 	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
-	github.com/googleapis/go-type-adapters v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/klauspost/compress v1.13.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/sdks/go.sum b/sdks/go.sum
index 17825e6..f1191b6 100644
--- a/sdks/go.sum
+++ b/sdks/go.sum
@@ -27,7 +27,6 @@
 cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
 cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
 cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0=
@@ -38,8 +37,8 @@
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigquery v1.35.0 h1:VTPcaApKiOXMqMEqKKJvOr+wi15egvreNLeFQX1GWEo=
-cloud.google.com/go/bigquery v1.35.0/go.mod h1:lfJA66SOzluyfw7evgXMvt6UTjIDGrcYHlv1Ja7sgzE=
+cloud.google.com/go/bigquery v1.36.0 h1:sTAW05tQycLEDbxod+zgH8LTKDkPbbb30NROx2I9XVs=
+cloud.google.com/go/bigquery v1.36.0/go.mod h1:oEa/Pzgr6NNExtYYs26JiwMmllr1sYu1wPIJdxFX+fg=
 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
 cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
 cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
@@ -48,30 +47,27 @@
 cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
 cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
 cloud.google.com/go/datacatalog v1.3.0 h1:3llKXv7cC1acsWjvWmG0NQQkYVSVgunMSfVk7h6zz8Q=
-cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/datastore v1.8.0 h1:2qo2G7hABSeqswa+5Ga3+QB8/ZwKOJmDsCISM9scmsU=
 cloud.google.com/go/datastore v1.8.0/go.mod h1:q1CpHVByTlXppdqTcu4LIhCsTn3fhtZ5R7+TajciO+M=
-cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
 cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
 cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
 cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo=
-cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.23.1 h1:eVtkabVa+1M5ai67fGU+idws0hVb/KEPXiDmSS17+qc=
-cloud.google.com/go/pubsub v1.23.1/go.mod h1:ttM6nEGYK/2CnB36ndNySU3ZxPwpBk8cXM6+iOlxH9U=
+cloud.google.com/go/pubsub v1.24.0 h1:aCS6wSMzrc602OeXUMA66KGlyXxpdkHdwN+FSBv/sUg=
+cloud.google.com/go/pubsub v1.24.0/go.mod h1:rWv09Te1SsRpRGPiWOMDKraMQTJyJps4MkUCoMGUgqw=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
-cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY=
-cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.24.0 h1:a4N0gIkx83uoVFGz8B2eAV3OhN90QoWF5OZWLKl39ig=
+cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -497,7 +493,6 @@
 github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -926,7 +921,6 @@
 golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1064,7 +1058,6 @@
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0=
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1085,7 +1078,6 @@
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1192,10 +1184,8 @@
 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
-google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.87.0 h1:pUQVF/F+X7Tl1lo4LJoJf5BOpjtmINU80p9XpYTU2p4=
-google.golang.org/api v0.87.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.90.0 h1:WMnUWAvihIClUYFNeFA69VTuR3duKS3IalMGDQcLvq8=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1288,8 +1278,8 @@
 google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU=
-google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220714211235-042d03aeabc9 h1:zfXhTgBfGlIh3jMXN06W8qbhFGsh6MJNJiYEuhTddOI=
+google.golang.org/genproto v0.0.0-20220714211235-042d03aeabc9/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1341,8 +1331,9 @@
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/sdks/go/examples/fhirio/import/import.go b/sdks/go/examples/fhirio/import/import.go
new file mode 100644
index 0000000..ceb13eb
--- /dev/null
+++ b/sdks/go/examples/fhirio/import/import.go
@@ -0,0 +1,85 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// import is a pipeline example using the fhirio connector to bulk import FHIR
+// resources from GCS into a given FHIR store.
+//
+// Pre-requisites:
+// 1. NDJSON-encoded FHIR resources stored in GCS.
+// 2. Dataflow Runner enabled: https://cloud.google.com/dataflow/docs/quickstarts.
+// 3. A Google Cloud FHIR store.
+//
+// Running this pipeline requires providing a fully qualified GCS address
+// (potentially containing wildcards) where your FHIR resources are stored and
+// a path to the FHIR store the resources should be written to, in addition
+// to the usual flags for the Dataflow runner.
+//
+// An example command for executing this pipeline on GCP is as follows:
+//   export PROJECT="$(gcloud config get-value project)"
+//   export TEMP_LOCATION="gs://MY-BUCKET/temp"
+//   export STAGING_LOCATION="gs://MY-BUCKET/staging"
+//   export REGION="us-central1"
+//   export SOURCE_GCS_LOCATION="gs://MY_BUCKET/path/to/resources/**"
+//   export FHIR_STORE_PATH="MY_FHIR_STORE_PATH"
+//   cd ./sdks/go
+//   go run ./examples/fhirio/import/import.go \
+//     --runner=dataflow \
+//     --temp_location=$TEMP_LOCATION \
+//     --staging_location=$STAGING_LOCATION \
+//     --project=$PROJECT \
+//     --region=$REGION \
+//     --worker_harness_container_image=apache/beam_go_sdk:latest \
+//     --sourceGcsLocation=$SOURCE_GCS_LOCATION \
+//     --fhirStore=$FHIR_STORE_PATH
+package main
+
+import (
+	"context"
+	"flag"
+
+	"github.com/apache/beam/sdks/v2/go/pkg/beam"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/fhirio"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/textio"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
+)
+
+var (
+	// Required flag with the source directory for GCS files to read, including
+	// wildcards. Directory should contain the resources files in NDJSON format.
+	// wildcards. Directory should contain the resource files in NDJSON format.
+
+	// Required flag with target FHIR store to write data to, must be of the full format:
+	// "projects/project_id/locations/location/datasets/DATASET_ID/fhirStores/FHIR_STORE_ID"
+	fhirStore = flag.String("fhirStore", "", "The target FHIR Store to write data to, must be of the full format.")
+)
+
+func main() {
+	flag.Parse()
+	beam.Init()
+
+	p, s := beam.NewPipelineWithRoot()
+
+	// Read resources from GCS.
+	resourcesInGcs := textio.Read(s, *sourceGcsLocation)
+
+	// Import the read resources to the provided FHIR store.
+	fhirio.Import(s, *fhirStore, "", "", fhirio.ContentStructureResource, resourcesInGcs)
+
+	ctx := context.Background()
+	if err := beamx.Run(ctx, p); err != nil {
+		log.Fatalf(ctx, "Failed to execute job: %v", err)
+	}
+}
diff --git a/sdks/go/examples/fhirio/read_write_pubsub/read_write_pubsub.go b/sdks/go/examples/fhirio/read_write_pubsub/read_write_pubsub.go
new file mode 100644
index 0000000..fcf1108
--- /dev/null
+++ b/sdks/go/examples/fhirio/read_write_pubsub/read_write_pubsub.go
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// read_write_pubsub is a pipeline example using the fhirio connector to read
+// FHIR resources from GCS, write them to a GCP FHIR store, and, if a PubSub
+// topic is provided, read the written resources from the FHIR store and log them
+// based on the PubSub notifications about store updates.
+//
+// Pre-requisites:
+// 1. NDJSON-encoded FHIR resources stored in GCS.
+// 2. Dataflow Runner enabled: https://cloud.google.com/dataflow/docs/quickstarts.
+// 3. A Google Cloud FHIR store. Optionally, PubSub notifications set up on the store.
+// (see: https://cloud.google.com/healthcare-api/docs/concepts/pubsub).
+//
+// Running this pipeline requires providing a fully qualified GCS address
+// (potentially containing wildcards) where your FHIR resources are stored, a
+// path to the FHIR store the resources should be written to, and,
+// optionally, the PubSub topic name your FHIR store is sending notifications to,
+// in addition to the usual flags for the Dataflow runner.
+//
+// An example command for executing this pipeline on GCP is as follows:
+//   export PROJECT="$(gcloud config get-value project)"
+//   export TEMP_LOCATION="gs://MY-BUCKET/temp"
+//   export STAGING_LOCATION="gs://MY-BUCKET/staging"
+//   export REGION="us-central1"
+//   export SOURCE_GCS_LOCATION="gs://MY_BUCKET/path/to/resources/**"
+//   export FHIR_STORE_PATH="MY_FHIR_STORE_PATH"
+//   export PUBSUB_TOPIC="MY_FHIR_STORE_TOPIC"
+//   cd ./sdks/go
+//   go run ./examples/fhirio/read_write_pubsub/read_write_pubsub.go \
+//     --runner=dataflow \
+//     --temp_location=$TEMP_LOCATION \
+//     --staging_location=$STAGING_LOCATION \
+//     --project=$PROJECT \
+//     --region=$REGION \
+//     --worker_harness_container_image=apache/beam_go_sdk:latest \
+//     --sourceGcsLocation=$SOURCE_GCS_LOCATION \
+//     --fhirStore=$FHIR_STORE_PATH \
+//     --pubsubTopic=$PUBSUB_TOPIC
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/apache/beam/sdks/v2/go/pkg/beam"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/fhirio"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/pubsubio"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/textio"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/options/gcpopts"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/register"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
+)
+
+var (
+	// Required flag with the source directory for GCS files to read, including
+	// wildcards. Directory should contain the resources files in NDJSON format.
+	// wildcards. Directory should contain the resource files in NDJSON format.
+
+	// Required flag with target FHIR store to write data to, must be of the full format:
+	// "projects/project_id/locations/location/datasets/DATASET_ID/fhirStores/FHIR_STORE_ID"
+	fhirStore = flag.String("fhirStore", "", "The target FHIR Store to write data to, must be of the full format.")
+
+	// Optional flag with the pubsub topic of your FHIR store to read and log upon store updates.
+	pubsubTopic = flag.String("pubsubTopic", "", "The PubSub topic to listen to.")
+)
+
+func init() {
+	register.Function1x1[string, string](WrapInBundle)
+	register.DoFn2x0[context.Context, string](&LoggerFn{})
+}
+
+// WrapInBundle takes a FHIR resource string and wraps it as a Bundle resource.
+// Useful so we can publish the given resource through ExecuteBundles.
+func WrapInBundle(resource string) string {
+	var r struct {
+		ResourceType string `json:"resourceType"`
+	}
+	json.NewDecoder(strings.NewReader(resource)).Decode(&r)
+	return fmt.Sprintf(`{
+        "resourceType": "Bundle",
+        "type": "batch",
+        "entry": [
+        	{
+        		"request": {
+        			"method": "POST",
+        			"url": "%s"
+        		},
+        		"resource": %s
+        	}
+		]
+	}`, r.ResourceType, resource)
+}
+
+// LoggerFn is a helper DoFn to log elements received.
+type LoggerFn struct {
+	LogPrefix string
+}
+
+// ProcessElement logs each element it receives.
+func (fn *LoggerFn) ProcessElement(ctx context.Context, elm string) {
+	log.Infof(ctx, "%s: %v", fn.LogPrefix, elm)
+}
+
+// FinishBundle waits a bit so the job server finishes receiving logs.
+func (fn *LoggerFn) FinishBundle() {
+	time.Sleep(2 * time.Second)
+}
+
+func main() {
+	flag.Parse()
+	beam.Init()
+
+	p, s := beam.NewPipelineWithRoot()
+
+	// Read resources from GCS.
+	resourcesInGcs := textio.Read(s, *sourceGcsLocation)
+	resourceBundles := beam.ParDo(s, WrapInBundle, resourcesInGcs)
+
+	// Write resources to store.
+	_, failedWritesErrorMessage := fhirio.ExecuteBundles(s, *fhirStore, resourceBundles)
+	beam.ParDo0(s, &LoggerFn{"Failed Write"}, failedWritesErrorMessage)
+
+	if *pubsubTopic != "" {
+		// PubSub notifications will be emitted containing the path of the resource once
+		// it is written to the store. Simultaneously read notifications and resources
+		// from PubSub and the store, respectively.
+		resourceNotifications := pubsubio.Read(s, *gcpopts.Project, *pubsubTopic, nil)
+		resourcesInFhirStore, deadLetters := fhirio.Read(s, resourceNotifications)
+
+		// Log the read resources or read errors to the server.
+		beam.ParDo0(s, &LoggerFn{"Read Resource"}, resourcesInFhirStore)
+		beam.ParDo0(s, &LoggerFn{"Got Dead Letter"}, deadLetters)
+	}
+
+	ctx := context.Background()
+	if err := beamx.Run(ctx, p); err != nil {
+		log.Fatalf(ctx, "Failed to execute job: %v", err)
+	}
+}
diff --git a/sdks/go/examples/native_wordcap/wordcap.go b/sdks/go/examples/native_wordcap/wordcap.go
index 9d1aded..9fa2d0e 100644
--- a/sdks/go/examples/native_wordcap/wordcap.go
+++ b/sdks/go/examples/native_wordcap/wordcap.go
@@ -30,7 +30,6 @@
 
 	"github.com/apache/beam/sdks/v2/go/examples/native_wordcap/nativepubsubio"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/util/stringx"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/options/gcpopts"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/util/pubsubx"
@@ -69,7 +68,9 @@
 	s := p.Root()
 
 	col := nativepubsubio.Read(ctx, s, project, *input, sub.ID())
-	str := beam.ParDo(s, stringx.FromBytes, col)
+	str := beam.ParDo(s, func(b []byte) string {
+		return (string)(b)
+	}, col)
 	cap := beam.ParDo(s, strings.ToUpper, str)
 	debug.Print(s, cap)
 
diff --git a/sdks/go/examples/streaming_wordcap/wordcap.go b/sdks/go/examples/streaming_wordcap/wordcap.go
index f684922..7b470b6 100644
--- a/sdks/go/examples/streaming_wordcap/wordcap.go
+++ b/sdks/go/examples/streaming_wordcap/wordcap.go
@@ -29,7 +29,6 @@
 	"strings"
 
 	"github.com/apache/beam/sdks/v2/go/pkg/beam"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/util/stringx"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/pubsubio"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/options/gcpopts"
@@ -71,7 +70,9 @@
 	s := p.Root()
 
 	col := pubsubio.Read(s, project, *input, &pubsubio.ReadOptions{Subscription: sub.ID()})
-	str := beam.ParDo(s, stringx.FromBytes, col)
+	str := beam.ParDo(s, func(b []byte) string {
+		return (string)(b)
+	}, col)
 	cap := beam.ParDo(s, strings.ToUpper, str)
 	debug.Print(s, cap)
 
diff --git a/sdks/go/pkg/beam/core/core.go b/sdks/go/pkg/beam/core/core.go
index 0c57da5..1dc0fc9 100644
--- a/sdks/go/pkg/beam/core/core.go
+++ b/sdks/go/pkg/beam/core/core.go
@@ -27,5 +27,5 @@
 	// SdkName is the human readable name of the SDK for UserAgents.
 	SdkName = "Apache Beam SDK for Go"
 	// SdkVersion is the current version of the SDK.
-	SdkVersion = "2.41.0.dev"
+	SdkVersion = "2.42.0.dev"
 )
diff --git a/sdks/go/pkg/beam/core/funcx/sideinput.go b/sdks/go/pkg/beam/core/funcx/sideinput.go
index 168ff3c..ab289ca 100644
--- a/sdks/go/pkg/beam/core/funcx/sideinput.go
+++ b/sdks/go/pkg/beam/core/funcx/sideinput.go
@@ -27,6 +27,7 @@
 	errIllegalParametersInIter     = "All parameters in an iter must be universal type, container type, or concrete type"
 	errIllegalParametersInReIter   = "Output of a reiter must be valid iter type"
 	errIllegalParametersInMultiMap = "Output of a multimap must be valid iter type"
+	errIllegalEventTimeInIter      = "Iterators with timestamp values (<ET,V> and <ET, K, V>) are not valid, as side input time stamps are not preserved after windowing. See https://github.com/apache/beam/issues/22404 for more information."
 )
 
 // IsIter returns true iff the supplied type is a "single sweep functional iterator".
@@ -56,8 +57,10 @@
 //
 //     func (*int) bool                   returns {int}
 //     func (*string, *int) bool          returns {string, int}
-//     func (*typex.EventTime, *int) bool returns {typex.EventTime, int}
 //
+// EventTimes are not allowed in iterator types as per the Beam model
+// (see https://github.com/apache/beam/issues/22404 for more
+// information).
 func UnfoldIter(t reflect.Type) ([]reflect.Type, bool) {
 	types, ok, _ := unfoldIter(t)
 	return types, ok
@@ -78,8 +81,7 @@
 	var ret []reflect.Type
 	skip := 0
 	if t.In(0).Kind() == reflect.Ptr && t.In(0).Elem() == typex.EventTimeType {
-		ret = append(ret, typex.EventTimeType)
-		skip = 1
+		return nil, false, errors.New(errIllegalEventTimeInIter)
 	}
 	if t.NumIn()-skip > 2 || t.NumIn() == skip {
 		return nil, false, nil
diff --git a/sdks/go/pkg/beam/core/funcx/sideinput_test.go b/sdks/go/pkg/beam/core/funcx/sideinput_test.go
index 71b3c7b..f9048ee 100644
--- a/sdks/go/pkg/beam/core/funcx/sideinput_test.go
+++ b/sdks/go/pkg/beam/core/funcx/sideinput_test.go
@@ -31,14 +31,14 @@
 		{func() bool { return false }, false},                 // no value
 		{func(*int) int { return 0 }, false},                  // no bool return
 		{func(int) bool { return false }, false},              // no ptr value
-		{func(*typex.EventTime) bool { return false }, false}, // no values
+		{func(*typex.EventTime) bool { return false }, false}, // EventTimes disallowed
 		{func(*int) bool { return false }, true},
-		{func(*typex.EventTime, *int) bool { return false }, true},
+		{func(*typex.EventTime, *int) bool { return false }, false}, // EventTimes disallowed
 		{func(*int, *string) bool { return false }, true},
 		{func(*typex.Y, *typex.Z) bool { return false }, true},
-		{func(*typex.EventTime, *int, *string) bool { return false }, true},
+		{func(*typex.EventTime, *int, *string) bool { return false }, false},            // EventTimes disallowed
 		{func(*int, *typex.Y, *typex.Z) bool { return false }, false},                   // too many values
-		{func(*typex.EventTime, *int, *typex.Y, *typex.Z) bool { return false }, false}, // too many values
+		{func(*typex.EventTime, *int, *typex.Y, *typex.Z) bool { return false }, false}, // too many values, EventTimes disallowed
 	}
 
 	for _, test := range tests {
@@ -58,7 +58,7 @@
 		{func(*int) func(*int) bool { return nil }, false},                         // takes parameters
 		{func(*int) (func(*int) bool, func(*int) bool) { return nil, nil }, false}, // too many iterators
 		{func() func(*int) bool { return nil }, true},
-		{func() func(*typex.EventTime, *int, *string) bool { return nil }, true},
+		{func() func(*typex.EventTime, *int, *string) bool { return nil }, false}, // EventTimes disallowed
 	}
 
 	for _, test := range tests {
@@ -75,13 +75,13 @@
 		Exp bool
 	}{
 		{func(int) func(*int) bool { return nil }, true},
-		{func() func(*int) bool { return nil }, false},      // Doesn't take an input (is a ReIter)
-		{func(*int) bool { return false }, false},           // Doesn't return an iterator (is an iterator)
-		{func(int) int { return 0 }, false},                 // Doesn't return an iterator (returns a value)
-		{func(string) func(*int) int { return nil }, false}, // Returned iterator isn't a boolean return
-		{func(string) func(int) bool { return nil }, false}, // Returned iterator doesn't have a pointer receiver
-		{func(string) func(*typex.EventTime, *int) bool { return nil }, true},
-		{func(string) func(*typex.EventTime, *int) { return nil }, false}, // Returned iterator does not have a bool return
+		{func() func(*int) bool { return nil }, false},                         // Doesn't take an input (is a ReIter)
+		{func(*int) bool { return false }, false},                              // Doesn't return an iterator (is an iterator)
+		{func(int) int { return 0 }, false},                                    // Doesn't return an iterator (returns a value)
+		{func(string) func(*int) int { return nil }, false},                    // Returned iterator isn't a boolean return
+		{func(string) func(int) bool { return nil }, false},                    // Returned iterator doesn't have a pointer receiver
+		{func(string) func(*typex.EventTime, *int) bool { return nil }, false}, // EventTimes disallowed
+		{func(string) func(*typex.EventTime, *int) { return nil }, false},      // Returned iterator does not have a bool return, EventTimes disallowed
 	}
 	for _, test := range tests {
 		val := reflect.TypeOf(test.Fn)
diff --git a/sdks/go/pkg/beam/core/runtime/exec/input.go b/sdks/go/pkg/beam/core/runtime/exec/input.go
index ab17a83..b673854 100644
--- a/sdks/go/pkg/beam/core/runtime/exec/input.go
+++ b/sdks/go/pkg/beam/core/runtime/exec/input.go
@@ -169,8 +169,6 @@
 	for i, t := range v.types {
 		var v reflect.Value
 		switch {
-		case t == typex.EventTimeType:
-			v = reflect.ValueOf(elm.Timestamp)
 		case isKey:
 			v = reflect.ValueOf(Convert(elm.Elm, t))
 			isKey = false
diff --git a/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.go b/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.go
index d66f8ed..3a1ae8e 100644
--- a/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.go
+++ b/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.go
@@ -28,1017 +28,511 @@
 
 func init() {
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte) bool)(nil)).Elem(), iterMakerByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte) bool)(nil)).Elem(), iterMakerETByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *[]byte) bool)(nil)).Elem(), iterMakerByteSliceByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *[]byte) bool)(nil)).Elem(), iterMakerETByteSliceByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *bool) bool)(nil)).Elem(), iterMakerByteSliceBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *bool) bool)(nil)).Elem(), iterMakerETByteSliceBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *string) bool)(nil)).Elem(), iterMakerByteSliceString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *string) bool)(nil)).Elem(), iterMakerETByteSliceString)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *int) bool)(nil)).Elem(), iterMakerByteSliceInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *int) bool)(nil)).Elem(), iterMakerETByteSliceInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *int8) bool)(nil)).Elem(), iterMakerByteSliceInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *int8) bool)(nil)).Elem(), iterMakerETByteSliceInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *int16) bool)(nil)).Elem(), iterMakerByteSliceInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *int16) bool)(nil)).Elem(), iterMakerETByteSliceInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *int32) bool)(nil)).Elem(), iterMakerByteSliceInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *int32) bool)(nil)).Elem(), iterMakerETByteSliceInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *int64) bool)(nil)).Elem(), iterMakerByteSliceInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *int64) bool)(nil)).Elem(), iterMakerETByteSliceInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *uint) bool)(nil)).Elem(), iterMakerByteSliceUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *uint) bool)(nil)).Elem(), iterMakerETByteSliceUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *uint8) bool)(nil)).Elem(), iterMakerByteSliceUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *uint8) bool)(nil)).Elem(), iterMakerETByteSliceUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *uint16) bool)(nil)).Elem(), iterMakerByteSliceUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *uint16) bool)(nil)).Elem(), iterMakerETByteSliceUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *uint32) bool)(nil)).Elem(), iterMakerByteSliceUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *uint32) bool)(nil)).Elem(), iterMakerETByteSliceUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *uint64) bool)(nil)).Elem(), iterMakerByteSliceUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *uint64) bool)(nil)).Elem(), iterMakerETByteSliceUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *float32) bool)(nil)).Elem(), iterMakerByteSliceFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *float32) bool)(nil)).Elem(), iterMakerETByteSliceFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *float64) bool)(nil)).Elem(), iterMakerByteSliceFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *float64) bool)(nil)).Elem(), iterMakerETByteSliceFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.T) bool)(nil)).Elem(), iterMakerByteSliceTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.T) bool)(nil)).Elem(), iterMakerETByteSliceTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.U) bool)(nil)).Elem(), iterMakerByteSliceTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.U) bool)(nil)).Elem(), iterMakerETByteSliceTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.V) bool)(nil)).Elem(), iterMakerByteSliceTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.V) bool)(nil)).Elem(), iterMakerETByteSliceTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.W) bool)(nil)).Elem(), iterMakerByteSliceTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.W) bool)(nil)).Elem(), iterMakerETByteSliceTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.X) bool)(nil)).Elem(), iterMakerByteSliceTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.X) bool)(nil)).Elem(), iterMakerETByteSliceTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.Y) bool)(nil)).Elem(), iterMakerByteSliceTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.Y) bool)(nil)).Elem(), iterMakerETByteSliceTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*[]byte, *typex.Z) bool)(nil)).Elem(), iterMakerByteSliceTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *[]byte, *typex.Z) bool)(nil)).Elem(), iterMakerETByteSliceTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool) bool)(nil)).Elem(), iterMakerBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool) bool)(nil)).Elem(), iterMakerETBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *[]byte) bool)(nil)).Elem(), iterMakerBoolByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *[]byte) bool)(nil)).Elem(), iterMakerETBoolByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *bool) bool)(nil)).Elem(), iterMakerBoolBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *bool) bool)(nil)).Elem(), iterMakerETBoolBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *string) bool)(nil)).Elem(), iterMakerBoolString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *string) bool)(nil)).Elem(), iterMakerETBoolString)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *int) bool)(nil)).Elem(), iterMakerBoolInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *int) bool)(nil)).Elem(), iterMakerETBoolInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *int8) bool)(nil)).Elem(), iterMakerBoolInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *int8) bool)(nil)).Elem(), iterMakerETBoolInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *int16) bool)(nil)).Elem(), iterMakerBoolInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *int16) bool)(nil)).Elem(), iterMakerETBoolInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *int32) bool)(nil)).Elem(), iterMakerBoolInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *int32) bool)(nil)).Elem(), iterMakerETBoolInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *int64) bool)(nil)).Elem(), iterMakerBoolInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *int64) bool)(nil)).Elem(), iterMakerETBoolInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *uint) bool)(nil)).Elem(), iterMakerBoolUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *uint) bool)(nil)).Elem(), iterMakerETBoolUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *uint8) bool)(nil)).Elem(), iterMakerBoolUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *uint8) bool)(nil)).Elem(), iterMakerETBoolUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *uint16) bool)(nil)).Elem(), iterMakerBoolUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *uint16) bool)(nil)).Elem(), iterMakerETBoolUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *uint32) bool)(nil)).Elem(), iterMakerBoolUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *uint32) bool)(nil)).Elem(), iterMakerETBoolUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *uint64) bool)(nil)).Elem(), iterMakerBoolUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *uint64) bool)(nil)).Elem(), iterMakerETBoolUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *float32) bool)(nil)).Elem(), iterMakerBoolFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *float32) bool)(nil)).Elem(), iterMakerETBoolFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *float64) bool)(nil)).Elem(), iterMakerBoolFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *float64) bool)(nil)).Elem(), iterMakerETBoolFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.T) bool)(nil)).Elem(), iterMakerBoolTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.T) bool)(nil)).Elem(), iterMakerETBoolTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.U) bool)(nil)).Elem(), iterMakerBoolTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.U) bool)(nil)).Elem(), iterMakerETBoolTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.V) bool)(nil)).Elem(), iterMakerBoolTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.V) bool)(nil)).Elem(), iterMakerETBoolTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.W) bool)(nil)).Elem(), iterMakerBoolTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.W) bool)(nil)).Elem(), iterMakerETBoolTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.X) bool)(nil)).Elem(), iterMakerBoolTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.X) bool)(nil)).Elem(), iterMakerETBoolTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.Y) bool)(nil)).Elem(), iterMakerBoolTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.Y) bool)(nil)).Elem(), iterMakerETBoolTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*bool, *typex.Z) bool)(nil)).Elem(), iterMakerBoolTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *bool, *typex.Z) bool)(nil)).Elem(), iterMakerETBoolTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*string) bool)(nil)).Elem(), iterMakerString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string) bool)(nil)).Elem(), iterMakerETString)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *[]byte) bool)(nil)).Elem(), iterMakerStringByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *[]byte) bool)(nil)).Elem(), iterMakerETStringByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *bool) bool)(nil)).Elem(), iterMakerStringBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *bool) bool)(nil)).Elem(), iterMakerETStringBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *string) bool)(nil)).Elem(), iterMakerStringString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *string) bool)(nil)).Elem(), iterMakerETStringString)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *int) bool)(nil)).Elem(), iterMakerStringInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *int) bool)(nil)).Elem(), iterMakerETStringInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *int8) bool)(nil)).Elem(), iterMakerStringInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *int8) bool)(nil)).Elem(), iterMakerETStringInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *int16) bool)(nil)).Elem(), iterMakerStringInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *int16) bool)(nil)).Elem(), iterMakerETStringInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *int32) bool)(nil)).Elem(), iterMakerStringInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *int32) bool)(nil)).Elem(), iterMakerETStringInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *int64) bool)(nil)).Elem(), iterMakerStringInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *int64) bool)(nil)).Elem(), iterMakerETStringInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *uint) bool)(nil)).Elem(), iterMakerStringUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *uint) bool)(nil)).Elem(), iterMakerETStringUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *uint8) bool)(nil)).Elem(), iterMakerStringUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *uint8) bool)(nil)).Elem(), iterMakerETStringUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *uint16) bool)(nil)).Elem(), iterMakerStringUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *uint16) bool)(nil)).Elem(), iterMakerETStringUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *uint32) bool)(nil)).Elem(), iterMakerStringUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *uint32) bool)(nil)).Elem(), iterMakerETStringUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *uint64) bool)(nil)).Elem(), iterMakerStringUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *uint64) bool)(nil)).Elem(), iterMakerETStringUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *float32) bool)(nil)).Elem(), iterMakerStringFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *float32) bool)(nil)).Elem(), iterMakerETStringFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *float64) bool)(nil)).Elem(), iterMakerStringFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *float64) bool)(nil)).Elem(), iterMakerETStringFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.T) bool)(nil)).Elem(), iterMakerStringTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.T) bool)(nil)).Elem(), iterMakerETStringTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.U) bool)(nil)).Elem(), iterMakerStringTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.U) bool)(nil)).Elem(), iterMakerETStringTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.V) bool)(nil)).Elem(), iterMakerStringTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.V) bool)(nil)).Elem(), iterMakerETStringTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.W) bool)(nil)).Elem(), iterMakerStringTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.W) bool)(nil)).Elem(), iterMakerETStringTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.X) bool)(nil)).Elem(), iterMakerStringTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.X) bool)(nil)).Elem(), iterMakerETStringTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.Y) bool)(nil)).Elem(), iterMakerStringTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.Y) bool)(nil)).Elem(), iterMakerETStringTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*string, *typex.Z) bool)(nil)).Elem(), iterMakerStringTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *string, *typex.Z) bool)(nil)).Elem(), iterMakerETStringTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*int) bool)(nil)).Elem(), iterMakerInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int) bool)(nil)).Elem(), iterMakerETInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *[]byte) bool)(nil)).Elem(), iterMakerIntByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *[]byte) bool)(nil)).Elem(), iterMakerETIntByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *bool) bool)(nil)).Elem(), iterMakerIntBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *bool) bool)(nil)).Elem(), iterMakerETIntBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *string) bool)(nil)).Elem(), iterMakerIntString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *string) bool)(nil)).Elem(), iterMakerETIntString)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *int) bool)(nil)).Elem(), iterMakerIntInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *int) bool)(nil)).Elem(), iterMakerETIntInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *int8) bool)(nil)).Elem(), iterMakerIntInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *int8) bool)(nil)).Elem(), iterMakerETIntInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *int16) bool)(nil)).Elem(), iterMakerIntInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *int16) bool)(nil)).Elem(), iterMakerETIntInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *int32) bool)(nil)).Elem(), iterMakerIntInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *int32) bool)(nil)).Elem(), iterMakerETIntInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *int64) bool)(nil)).Elem(), iterMakerIntInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *int64) bool)(nil)).Elem(), iterMakerETIntInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *uint) bool)(nil)).Elem(), iterMakerIntUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *uint) bool)(nil)).Elem(), iterMakerETIntUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *uint8) bool)(nil)).Elem(), iterMakerIntUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *uint8) bool)(nil)).Elem(), iterMakerETIntUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *uint16) bool)(nil)).Elem(), iterMakerIntUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *uint16) bool)(nil)).Elem(), iterMakerETIntUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *uint32) bool)(nil)).Elem(), iterMakerIntUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *uint32) bool)(nil)).Elem(), iterMakerETIntUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *uint64) bool)(nil)).Elem(), iterMakerIntUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *uint64) bool)(nil)).Elem(), iterMakerETIntUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *float32) bool)(nil)).Elem(), iterMakerIntFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *float32) bool)(nil)).Elem(), iterMakerETIntFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *float64) bool)(nil)).Elem(), iterMakerIntFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *float64) bool)(nil)).Elem(), iterMakerETIntFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.T) bool)(nil)).Elem(), iterMakerIntTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.T) bool)(nil)).Elem(), iterMakerETIntTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.U) bool)(nil)).Elem(), iterMakerIntTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.U) bool)(nil)).Elem(), iterMakerETIntTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.V) bool)(nil)).Elem(), iterMakerIntTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.V) bool)(nil)).Elem(), iterMakerETIntTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.W) bool)(nil)).Elem(), iterMakerIntTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.W) bool)(nil)).Elem(), iterMakerETIntTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.X) bool)(nil)).Elem(), iterMakerIntTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.X) bool)(nil)).Elem(), iterMakerETIntTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.Y) bool)(nil)).Elem(), iterMakerIntTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.Y) bool)(nil)).Elem(), iterMakerETIntTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*int, *typex.Z) bool)(nil)).Elem(), iterMakerIntTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int, *typex.Z) bool)(nil)).Elem(), iterMakerETIntTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8) bool)(nil)).Elem(), iterMakerInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8) bool)(nil)).Elem(), iterMakerETInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *[]byte) bool)(nil)).Elem(), iterMakerInt8ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *[]byte) bool)(nil)).Elem(), iterMakerETInt8ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *bool) bool)(nil)).Elem(), iterMakerInt8Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *bool) bool)(nil)).Elem(), iterMakerETInt8Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *string) bool)(nil)).Elem(), iterMakerInt8String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *string) bool)(nil)).Elem(), iterMakerETInt8String)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *int) bool)(nil)).Elem(), iterMakerInt8Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *int) bool)(nil)).Elem(), iterMakerETInt8Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *int8) bool)(nil)).Elem(), iterMakerInt8Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *int8) bool)(nil)).Elem(), iterMakerETInt8Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *int16) bool)(nil)).Elem(), iterMakerInt8Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *int16) bool)(nil)).Elem(), iterMakerETInt8Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *int32) bool)(nil)).Elem(), iterMakerInt8Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *int32) bool)(nil)).Elem(), iterMakerETInt8Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *int64) bool)(nil)).Elem(), iterMakerInt8Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *int64) bool)(nil)).Elem(), iterMakerETInt8Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *uint) bool)(nil)).Elem(), iterMakerInt8Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *uint) bool)(nil)).Elem(), iterMakerETInt8Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *uint8) bool)(nil)).Elem(), iterMakerInt8Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *uint8) bool)(nil)).Elem(), iterMakerETInt8Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *uint16) bool)(nil)).Elem(), iterMakerInt8Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *uint16) bool)(nil)).Elem(), iterMakerETInt8Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *uint32) bool)(nil)).Elem(), iterMakerInt8Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *uint32) bool)(nil)).Elem(), iterMakerETInt8Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *uint64) bool)(nil)).Elem(), iterMakerInt8Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *uint64) bool)(nil)).Elem(), iterMakerETInt8Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *float32) bool)(nil)).Elem(), iterMakerInt8Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *float32) bool)(nil)).Elem(), iterMakerETInt8Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *float64) bool)(nil)).Elem(), iterMakerInt8Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *float64) bool)(nil)).Elem(), iterMakerETInt8Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.T) bool)(nil)).Elem(), iterMakerInt8Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.T) bool)(nil)).Elem(), iterMakerETInt8Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.U) bool)(nil)).Elem(), iterMakerInt8Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.U) bool)(nil)).Elem(), iterMakerETInt8Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.V) bool)(nil)).Elem(), iterMakerInt8Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.V) bool)(nil)).Elem(), iterMakerETInt8Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.W) bool)(nil)).Elem(), iterMakerInt8Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.W) bool)(nil)).Elem(), iterMakerETInt8Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.X) bool)(nil)).Elem(), iterMakerInt8Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.X) bool)(nil)).Elem(), iterMakerETInt8Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.Y) bool)(nil)).Elem(), iterMakerInt8Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.Y) bool)(nil)).Elem(), iterMakerETInt8Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*int8, *typex.Z) bool)(nil)).Elem(), iterMakerInt8Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int8, *typex.Z) bool)(nil)).Elem(), iterMakerETInt8Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16) bool)(nil)).Elem(), iterMakerInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16) bool)(nil)).Elem(), iterMakerETInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *[]byte) bool)(nil)).Elem(), iterMakerInt16ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *[]byte) bool)(nil)).Elem(), iterMakerETInt16ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *bool) bool)(nil)).Elem(), iterMakerInt16Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *bool) bool)(nil)).Elem(), iterMakerETInt16Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *string) bool)(nil)).Elem(), iterMakerInt16String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *string) bool)(nil)).Elem(), iterMakerETInt16String)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *int) bool)(nil)).Elem(), iterMakerInt16Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *int) bool)(nil)).Elem(), iterMakerETInt16Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *int8) bool)(nil)).Elem(), iterMakerInt16Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *int8) bool)(nil)).Elem(), iterMakerETInt16Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *int16) bool)(nil)).Elem(), iterMakerInt16Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *int16) bool)(nil)).Elem(), iterMakerETInt16Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *int32) bool)(nil)).Elem(), iterMakerInt16Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *int32) bool)(nil)).Elem(), iterMakerETInt16Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *int64) bool)(nil)).Elem(), iterMakerInt16Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *int64) bool)(nil)).Elem(), iterMakerETInt16Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *uint) bool)(nil)).Elem(), iterMakerInt16Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *uint) bool)(nil)).Elem(), iterMakerETInt16Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *uint8) bool)(nil)).Elem(), iterMakerInt16Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *uint8) bool)(nil)).Elem(), iterMakerETInt16Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *uint16) bool)(nil)).Elem(), iterMakerInt16Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *uint16) bool)(nil)).Elem(), iterMakerETInt16Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *uint32) bool)(nil)).Elem(), iterMakerInt16Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *uint32) bool)(nil)).Elem(), iterMakerETInt16Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *uint64) bool)(nil)).Elem(), iterMakerInt16Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *uint64) bool)(nil)).Elem(), iterMakerETInt16Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *float32) bool)(nil)).Elem(), iterMakerInt16Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *float32) bool)(nil)).Elem(), iterMakerETInt16Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *float64) bool)(nil)).Elem(), iterMakerInt16Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *float64) bool)(nil)).Elem(), iterMakerETInt16Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.T) bool)(nil)).Elem(), iterMakerInt16Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.T) bool)(nil)).Elem(), iterMakerETInt16Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.U) bool)(nil)).Elem(), iterMakerInt16Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.U) bool)(nil)).Elem(), iterMakerETInt16Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.V) bool)(nil)).Elem(), iterMakerInt16Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.V) bool)(nil)).Elem(), iterMakerETInt16Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.W) bool)(nil)).Elem(), iterMakerInt16Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.W) bool)(nil)).Elem(), iterMakerETInt16Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.X) bool)(nil)).Elem(), iterMakerInt16Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.X) bool)(nil)).Elem(), iterMakerETInt16Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.Y) bool)(nil)).Elem(), iterMakerInt16Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.Y) bool)(nil)).Elem(), iterMakerETInt16Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*int16, *typex.Z) bool)(nil)).Elem(), iterMakerInt16Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int16, *typex.Z) bool)(nil)).Elem(), iterMakerETInt16Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32) bool)(nil)).Elem(), iterMakerInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32) bool)(nil)).Elem(), iterMakerETInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *[]byte) bool)(nil)).Elem(), iterMakerInt32ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *[]byte) bool)(nil)).Elem(), iterMakerETInt32ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *bool) bool)(nil)).Elem(), iterMakerInt32Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *bool) bool)(nil)).Elem(), iterMakerETInt32Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *string) bool)(nil)).Elem(), iterMakerInt32String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *string) bool)(nil)).Elem(), iterMakerETInt32String)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *int) bool)(nil)).Elem(), iterMakerInt32Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *int) bool)(nil)).Elem(), iterMakerETInt32Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *int8) bool)(nil)).Elem(), iterMakerInt32Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *int8) bool)(nil)).Elem(), iterMakerETInt32Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *int16) bool)(nil)).Elem(), iterMakerInt32Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *int16) bool)(nil)).Elem(), iterMakerETInt32Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *int32) bool)(nil)).Elem(), iterMakerInt32Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *int32) bool)(nil)).Elem(), iterMakerETInt32Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *int64) bool)(nil)).Elem(), iterMakerInt32Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *int64) bool)(nil)).Elem(), iterMakerETInt32Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *uint) bool)(nil)).Elem(), iterMakerInt32Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *uint) bool)(nil)).Elem(), iterMakerETInt32Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *uint8) bool)(nil)).Elem(), iterMakerInt32Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *uint8) bool)(nil)).Elem(), iterMakerETInt32Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *uint16) bool)(nil)).Elem(), iterMakerInt32Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *uint16) bool)(nil)).Elem(), iterMakerETInt32Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *uint32) bool)(nil)).Elem(), iterMakerInt32Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *uint32) bool)(nil)).Elem(), iterMakerETInt32Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *uint64) bool)(nil)).Elem(), iterMakerInt32Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *uint64) bool)(nil)).Elem(), iterMakerETInt32Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *float32) bool)(nil)).Elem(), iterMakerInt32Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *float32) bool)(nil)).Elem(), iterMakerETInt32Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *float64) bool)(nil)).Elem(), iterMakerInt32Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *float64) bool)(nil)).Elem(), iterMakerETInt32Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.T) bool)(nil)).Elem(), iterMakerInt32Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.T) bool)(nil)).Elem(), iterMakerETInt32Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.U) bool)(nil)).Elem(), iterMakerInt32Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.U) bool)(nil)).Elem(), iterMakerETInt32Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.V) bool)(nil)).Elem(), iterMakerInt32Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.V) bool)(nil)).Elem(), iterMakerETInt32Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.W) bool)(nil)).Elem(), iterMakerInt32Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.W) bool)(nil)).Elem(), iterMakerETInt32Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.X) bool)(nil)).Elem(), iterMakerInt32Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.X) bool)(nil)).Elem(), iterMakerETInt32Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.Y) bool)(nil)).Elem(), iterMakerInt32Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.Y) bool)(nil)).Elem(), iterMakerETInt32Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*int32, *typex.Z) bool)(nil)).Elem(), iterMakerInt32Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int32, *typex.Z) bool)(nil)).Elem(), iterMakerETInt32Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64) bool)(nil)).Elem(), iterMakerInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64) bool)(nil)).Elem(), iterMakerETInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *[]byte) bool)(nil)).Elem(), iterMakerInt64ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *[]byte) bool)(nil)).Elem(), iterMakerETInt64ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *bool) bool)(nil)).Elem(), iterMakerInt64Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *bool) bool)(nil)).Elem(), iterMakerETInt64Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *string) bool)(nil)).Elem(), iterMakerInt64String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *string) bool)(nil)).Elem(), iterMakerETInt64String)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *int) bool)(nil)).Elem(), iterMakerInt64Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *int) bool)(nil)).Elem(), iterMakerETInt64Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *int8) bool)(nil)).Elem(), iterMakerInt64Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *int8) bool)(nil)).Elem(), iterMakerETInt64Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *int16) bool)(nil)).Elem(), iterMakerInt64Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *int16) bool)(nil)).Elem(), iterMakerETInt64Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *int32) bool)(nil)).Elem(), iterMakerInt64Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *int32) bool)(nil)).Elem(), iterMakerETInt64Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *int64) bool)(nil)).Elem(), iterMakerInt64Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *int64) bool)(nil)).Elem(), iterMakerETInt64Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *uint) bool)(nil)).Elem(), iterMakerInt64Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *uint) bool)(nil)).Elem(), iterMakerETInt64Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *uint8) bool)(nil)).Elem(), iterMakerInt64Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *uint8) bool)(nil)).Elem(), iterMakerETInt64Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *uint16) bool)(nil)).Elem(), iterMakerInt64Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *uint16) bool)(nil)).Elem(), iterMakerETInt64Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *uint32) bool)(nil)).Elem(), iterMakerInt64Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *uint32) bool)(nil)).Elem(), iterMakerETInt64Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *uint64) bool)(nil)).Elem(), iterMakerInt64Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *uint64) bool)(nil)).Elem(), iterMakerETInt64Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *float32) bool)(nil)).Elem(), iterMakerInt64Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *float32) bool)(nil)).Elem(), iterMakerETInt64Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *float64) bool)(nil)).Elem(), iterMakerInt64Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *float64) bool)(nil)).Elem(), iterMakerETInt64Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.T) bool)(nil)).Elem(), iterMakerInt64Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.T) bool)(nil)).Elem(), iterMakerETInt64Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.U) bool)(nil)).Elem(), iterMakerInt64Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.U) bool)(nil)).Elem(), iterMakerETInt64Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.V) bool)(nil)).Elem(), iterMakerInt64Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.V) bool)(nil)).Elem(), iterMakerETInt64Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.W) bool)(nil)).Elem(), iterMakerInt64Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.W) bool)(nil)).Elem(), iterMakerETInt64Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.X) bool)(nil)).Elem(), iterMakerInt64Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.X) bool)(nil)).Elem(), iterMakerETInt64Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.Y) bool)(nil)).Elem(), iterMakerInt64Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.Y) bool)(nil)).Elem(), iterMakerETInt64Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*int64, *typex.Z) bool)(nil)).Elem(), iterMakerInt64Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *int64, *typex.Z) bool)(nil)).Elem(), iterMakerETInt64Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint) bool)(nil)).Elem(), iterMakerUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint) bool)(nil)).Elem(), iterMakerETUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *[]byte) bool)(nil)).Elem(), iterMakerUintByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *[]byte) bool)(nil)).Elem(), iterMakerETUintByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *bool) bool)(nil)).Elem(), iterMakerUintBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *bool) bool)(nil)).Elem(), iterMakerETUintBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *string) bool)(nil)).Elem(), iterMakerUintString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *string) bool)(nil)).Elem(), iterMakerETUintString)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *int) bool)(nil)).Elem(), iterMakerUintInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *int) bool)(nil)).Elem(), iterMakerETUintInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *int8) bool)(nil)).Elem(), iterMakerUintInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *int8) bool)(nil)).Elem(), iterMakerETUintInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *int16) bool)(nil)).Elem(), iterMakerUintInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *int16) bool)(nil)).Elem(), iterMakerETUintInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *int32) bool)(nil)).Elem(), iterMakerUintInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *int32) bool)(nil)).Elem(), iterMakerETUintInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *int64) bool)(nil)).Elem(), iterMakerUintInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *int64) bool)(nil)).Elem(), iterMakerETUintInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *uint) bool)(nil)).Elem(), iterMakerUintUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *uint) bool)(nil)).Elem(), iterMakerETUintUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *uint8) bool)(nil)).Elem(), iterMakerUintUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *uint8) bool)(nil)).Elem(), iterMakerETUintUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *uint16) bool)(nil)).Elem(), iterMakerUintUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *uint16) bool)(nil)).Elem(), iterMakerETUintUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *uint32) bool)(nil)).Elem(), iterMakerUintUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *uint32) bool)(nil)).Elem(), iterMakerETUintUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *uint64) bool)(nil)).Elem(), iterMakerUintUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *uint64) bool)(nil)).Elem(), iterMakerETUintUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *float32) bool)(nil)).Elem(), iterMakerUintFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *float32) bool)(nil)).Elem(), iterMakerETUintFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *float64) bool)(nil)).Elem(), iterMakerUintFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *float64) bool)(nil)).Elem(), iterMakerETUintFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.T) bool)(nil)).Elem(), iterMakerUintTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.T) bool)(nil)).Elem(), iterMakerETUintTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.U) bool)(nil)).Elem(), iterMakerUintTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.U) bool)(nil)).Elem(), iterMakerETUintTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.V) bool)(nil)).Elem(), iterMakerUintTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.V) bool)(nil)).Elem(), iterMakerETUintTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.W) bool)(nil)).Elem(), iterMakerUintTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.W) bool)(nil)).Elem(), iterMakerETUintTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.X) bool)(nil)).Elem(), iterMakerUintTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.X) bool)(nil)).Elem(), iterMakerETUintTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.Y) bool)(nil)).Elem(), iterMakerUintTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.Y) bool)(nil)).Elem(), iterMakerETUintTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint, *typex.Z) bool)(nil)).Elem(), iterMakerUintTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint, *typex.Z) bool)(nil)).Elem(), iterMakerETUintTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8) bool)(nil)).Elem(), iterMakerUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8) bool)(nil)).Elem(), iterMakerETUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *[]byte) bool)(nil)).Elem(), iterMakerUint8ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *[]byte) bool)(nil)).Elem(), iterMakerETUint8ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *bool) bool)(nil)).Elem(), iterMakerUint8Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *bool) bool)(nil)).Elem(), iterMakerETUint8Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *string) bool)(nil)).Elem(), iterMakerUint8String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *string) bool)(nil)).Elem(), iterMakerETUint8String)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *int) bool)(nil)).Elem(), iterMakerUint8Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *int) bool)(nil)).Elem(), iterMakerETUint8Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *int8) bool)(nil)).Elem(), iterMakerUint8Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *int8) bool)(nil)).Elem(), iterMakerETUint8Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *int16) bool)(nil)).Elem(), iterMakerUint8Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *int16) bool)(nil)).Elem(), iterMakerETUint8Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *int32) bool)(nil)).Elem(), iterMakerUint8Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *int32) bool)(nil)).Elem(), iterMakerETUint8Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *int64) bool)(nil)).Elem(), iterMakerUint8Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *int64) bool)(nil)).Elem(), iterMakerETUint8Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *uint) bool)(nil)).Elem(), iterMakerUint8Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *uint) bool)(nil)).Elem(), iterMakerETUint8Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *uint8) bool)(nil)).Elem(), iterMakerUint8Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *uint8) bool)(nil)).Elem(), iterMakerETUint8Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *uint16) bool)(nil)).Elem(), iterMakerUint8Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *uint16) bool)(nil)).Elem(), iterMakerETUint8Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *uint32) bool)(nil)).Elem(), iterMakerUint8Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *uint32) bool)(nil)).Elem(), iterMakerETUint8Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *uint64) bool)(nil)).Elem(), iterMakerUint8Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *uint64) bool)(nil)).Elem(), iterMakerETUint8Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *float32) bool)(nil)).Elem(), iterMakerUint8Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *float32) bool)(nil)).Elem(), iterMakerETUint8Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *float64) bool)(nil)).Elem(), iterMakerUint8Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *float64) bool)(nil)).Elem(), iterMakerETUint8Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.T) bool)(nil)).Elem(), iterMakerUint8Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.T) bool)(nil)).Elem(), iterMakerETUint8Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.U) bool)(nil)).Elem(), iterMakerUint8Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.U) bool)(nil)).Elem(), iterMakerETUint8Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.V) bool)(nil)).Elem(), iterMakerUint8Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.V) bool)(nil)).Elem(), iterMakerETUint8Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.W) bool)(nil)).Elem(), iterMakerUint8Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.W) bool)(nil)).Elem(), iterMakerETUint8Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.X) bool)(nil)).Elem(), iterMakerUint8Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.X) bool)(nil)).Elem(), iterMakerETUint8Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.Y) bool)(nil)).Elem(), iterMakerUint8Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.Y) bool)(nil)).Elem(), iterMakerETUint8Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint8, *typex.Z) bool)(nil)).Elem(), iterMakerUint8Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint8, *typex.Z) bool)(nil)).Elem(), iterMakerETUint8Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16) bool)(nil)).Elem(), iterMakerUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16) bool)(nil)).Elem(), iterMakerETUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *[]byte) bool)(nil)).Elem(), iterMakerUint16ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *[]byte) bool)(nil)).Elem(), iterMakerETUint16ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *bool) bool)(nil)).Elem(), iterMakerUint16Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *bool) bool)(nil)).Elem(), iterMakerETUint16Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *string) bool)(nil)).Elem(), iterMakerUint16String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *string) bool)(nil)).Elem(), iterMakerETUint16String)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *int) bool)(nil)).Elem(), iterMakerUint16Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *int) bool)(nil)).Elem(), iterMakerETUint16Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *int8) bool)(nil)).Elem(), iterMakerUint16Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *int8) bool)(nil)).Elem(), iterMakerETUint16Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *int16) bool)(nil)).Elem(), iterMakerUint16Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *int16) bool)(nil)).Elem(), iterMakerETUint16Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *int32) bool)(nil)).Elem(), iterMakerUint16Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *int32) bool)(nil)).Elem(), iterMakerETUint16Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *int64) bool)(nil)).Elem(), iterMakerUint16Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *int64) bool)(nil)).Elem(), iterMakerETUint16Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *uint) bool)(nil)).Elem(), iterMakerUint16Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *uint) bool)(nil)).Elem(), iterMakerETUint16Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *uint8) bool)(nil)).Elem(), iterMakerUint16Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *uint8) bool)(nil)).Elem(), iterMakerETUint16Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *uint16) bool)(nil)).Elem(), iterMakerUint16Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *uint16) bool)(nil)).Elem(), iterMakerETUint16Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *uint32) bool)(nil)).Elem(), iterMakerUint16Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *uint32) bool)(nil)).Elem(), iterMakerETUint16Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *uint64) bool)(nil)).Elem(), iterMakerUint16Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *uint64) bool)(nil)).Elem(), iterMakerETUint16Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *float32) bool)(nil)).Elem(), iterMakerUint16Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *float32) bool)(nil)).Elem(), iterMakerETUint16Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *float64) bool)(nil)).Elem(), iterMakerUint16Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *float64) bool)(nil)).Elem(), iterMakerETUint16Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.T) bool)(nil)).Elem(), iterMakerUint16Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.T) bool)(nil)).Elem(), iterMakerETUint16Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.U) bool)(nil)).Elem(), iterMakerUint16Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.U) bool)(nil)).Elem(), iterMakerETUint16Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.V) bool)(nil)).Elem(), iterMakerUint16Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.V) bool)(nil)).Elem(), iterMakerETUint16Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.W) bool)(nil)).Elem(), iterMakerUint16Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.W) bool)(nil)).Elem(), iterMakerETUint16Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.X) bool)(nil)).Elem(), iterMakerUint16Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.X) bool)(nil)).Elem(), iterMakerETUint16Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.Y) bool)(nil)).Elem(), iterMakerUint16Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.Y) bool)(nil)).Elem(), iterMakerETUint16Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint16, *typex.Z) bool)(nil)).Elem(), iterMakerUint16Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint16, *typex.Z) bool)(nil)).Elem(), iterMakerETUint16Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32) bool)(nil)).Elem(), iterMakerUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32) bool)(nil)).Elem(), iterMakerETUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *[]byte) bool)(nil)).Elem(), iterMakerUint32ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *[]byte) bool)(nil)).Elem(), iterMakerETUint32ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *bool) bool)(nil)).Elem(), iterMakerUint32Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *bool) bool)(nil)).Elem(), iterMakerETUint32Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *string) bool)(nil)).Elem(), iterMakerUint32String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *string) bool)(nil)).Elem(), iterMakerETUint32String)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *int) bool)(nil)).Elem(), iterMakerUint32Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *int) bool)(nil)).Elem(), iterMakerETUint32Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *int8) bool)(nil)).Elem(), iterMakerUint32Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *int8) bool)(nil)).Elem(), iterMakerETUint32Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *int16) bool)(nil)).Elem(), iterMakerUint32Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *int16) bool)(nil)).Elem(), iterMakerETUint32Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *int32) bool)(nil)).Elem(), iterMakerUint32Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *int32) bool)(nil)).Elem(), iterMakerETUint32Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *int64) bool)(nil)).Elem(), iterMakerUint32Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *int64) bool)(nil)).Elem(), iterMakerETUint32Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *uint) bool)(nil)).Elem(), iterMakerUint32Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *uint) bool)(nil)).Elem(), iterMakerETUint32Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *uint8) bool)(nil)).Elem(), iterMakerUint32Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *uint8) bool)(nil)).Elem(), iterMakerETUint32Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *uint16) bool)(nil)).Elem(), iterMakerUint32Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *uint16) bool)(nil)).Elem(), iterMakerETUint32Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *uint32) bool)(nil)).Elem(), iterMakerUint32Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *uint32) bool)(nil)).Elem(), iterMakerETUint32Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *uint64) bool)(nil)).Elem(), iterMakerUint32Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *uint64) bool)(nil)).Elem(), iterMakerETUint32Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *float32) bool)(nil)).Elem(), iterMakerUint32Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *float32) bool)(nil)).Elem(), iterMakerETUint32Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *float64) bool)(nil)).Elem(), iterMakerUint32Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *float64) bool)(nil)).Elem(), iterMakerETUint32Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.T) bool)(nil)).Elem(), iterMakerUint32Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.T) bool)(nil)).Elem(), iterMakerETUint32Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.U) bool)(nil)).Elem(), iterMakerUint32Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.U) bool)(nil)).Elem(), iterMakerETUint32Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.V) bool)(nil)).Elem(), iterMakerUint32Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.V) bool)(nil)).Elem(), iterMakerETUint32Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.W) bool)(nil)).Elem(), iterMakerUint32Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.W) bool)(nil)).Elem(), iterMakerETUint32Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.X) bool)(nil)).Elem(), iterMakerUint32Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.X) bool)(nil)).Elem(), iterMakerETUint32Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.Y) bool)(nil)).Elem(), iterMakerUint32Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.Y) bool)(nil)).Elem(), iterMakerETUint32Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint32, *typex.Z) bool)(nil)).Elem(), iterMakerUint32Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint32, *typex.Z) bool)(nil)).Elem(), iterMakerETUint32Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64) bool)(nil)).Elem(), iterMakerUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64) bool)(nil)).Elem(), iterMakerETUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *[]byte) bool)(nil)).Elem(), iterMakerUint64ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *[]byte) bool)(nil)).Elem(), iterMakerETUint64ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *bool) bool)(nil)).Elem(), iterMakerUint64Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *bool) bool)(nil)).Elem(), iterMakerETUint64Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *string) bool)(nil)).Elem(), iterMakerUint64String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *string) bool)(nil)).Elem(), iterMakerETUint64String)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *int) bool)(nil)).Elem(), iterMakerUint64Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *int) bool)(nil)).Elem(), iterMakerETUint64Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *int8) bool)(nil)).Elem(), iterMakerUint64Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *int8) bool)(nil)).Elem(), iterMakerETUint64Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *int16) bool)(nil)).Elem(), iterMakerUint64Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *int16) bool)(nil)).Elem(), iterMakerETUint64Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *int32) bool)(nil)).Elem(), iterMakerUint64Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *int32) bool)(nil)).Elem(), iterMakerETUint64Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *int64) bool)(nil)).Elem(), iterMakerUint64Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *int64) bool)(nil)).Elem(), iterMakerETUint64Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *uint) bool)(nil)).Elem(), iterMakerUint64Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *uint) bool)(nil)).Elem(), iterMakerETUint64Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *uint8) bool)(nil)).Elem(), iterMakerUint64Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *uint8) bool)(nil)).Elem(), iterMakerETUint64Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *uint16) bool)(nil)).Elem(), iterMakerUint64Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *uint16) bool)(nil)).Elem(), iterMakerETUint64Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *uint32) bool)(nil)).Elem(), iterMakerUint64Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *uint32) bool)(nil)).Elem(), iterMakerETUint64Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *uint64) bool)(nil)).Elem(), iterMakerUint64Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *uint64) bool)(nil)).Elem(), iterMakerETUint64Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *float32) bool)(nil)).Elem(), iterMakerUint64Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *float32) bool)(nil)).Elem(), iterMakerETUint64Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *float64) bool)(nil)).Elem(), iterMakerUint64Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *float64) bool)(nil)).Elem(), iterMakerETUint64Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.T) bool)(nil)).Elem(), iterMakerUint64Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.T) bool)(nil)).Elem(), iterMakerETUint64Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.U) bool)(nil)).Elem(), iterMakerUint64Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.U) bool)(nil)).Elem(), iterMakerETUint64Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.V) bool)(nil)).Elem(), iterMakerUint64Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.V) bool)(nil)).Elem(), iterMakerETUint64Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.W) bool)(nil)).Elem(), iterMakerUint64Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.W) bool)(nil)).Elem(), iterMakerETUint64Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.X) bool)(nil)).Elem(), iterMakerUint64Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.X) bool)(nil)).Elem(), iterMakerETUint64Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.Y) bool)(nil)).Elem(), iterMakerUint64Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.Y) bool)(nil)).Elem(), iterMakerETUint64Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*uint64, *typex.Z) bool)(nil)).Elem(), iterMakerUint64Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *uint64, *typex.Z) bool)(nil)).Elem(), iterMakerETUint64Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32) bool)(nil)).Elem(), iterMakerFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32) bool)(nil)).Elem(), iterMakerETFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *[]byte) bool)(nil)).Elem(), iterMakerFloat32ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *[]byte) bool)(nil)).Elem(), iterMakerETFloat32ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *bool) bool)(nil)).Elem(), iterMakerFloat32Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *bool) bool)(nil)).Elem(), iterMakerETFloat32Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *string) bool)(nil)).Elem(), iterMakerFloat32String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *string) bool)(nil)).Elem(), iterMakerETFloat32String)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *int) bool)(nil)).Elem(), iterMakerFloat32Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *int) bool)(nil)).Elem(), iterMakerETFloat32Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *int8) bool)(nil)).Elem(), iterMakerFloat32Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *int8) bool)(nil)).Elem(), iterMakerETFloat32Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *int16) bool)(nil)).Elem(), iterMakerFloat32Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *int16) bool)(nil)).Elem(), iterMakerETFloat32Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *int32) bool)(nil)).Elem(), iterMakerFloat32Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *int32) bool)(nil)).Elem(), iterMakerETFloat32Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *int64) bool)(nil)).Elem(), iterMakerFloat32Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *int64) bool)(nil)).Elem(), iterMakerETFloat32Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *uint) bool)(nil)).Elem(), iterMakerFloat32Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *uint) bool)(nil)).Elem(), iterMakerETFloat32Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *uint8) bool)(nil)).Elem(), iterMakerFloat32Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *uint8) bool)(nil)).Elem(), iterMakerETFloat32Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *uint16) bool)(nil)).Elem(), iterMakerFloat32Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *uint16) bool)(nil)).Elem(), iterMakerETFloat32Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *uint32) bool)(nil)).Elem(), iterMakerFloat32Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *uint32) bool)(nil)).Elem(), iterMakerETFloat32Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *uint64) bool)(nil)).Elem(), iterMakerFloat32Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *uint64) bool)(nil)).Elem(), iterMakerETFloat32Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *float32) bool)(nil)).Elem(), iterMakerFloat32Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *float32) bool)(nil)).Elem(), iterMakerETFloat32Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *float64) bool)(nil)).Elem(), iterMakerFloat32Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *float64) bool)(nil)).Elem(), iterMakerETFloat32Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.T) bool)(nil)).Elem(), iterMakerFloat32Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.T) bool)(nil)).Elem(), iterMakerETFloat32Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.U) bool)(nil)).Elem(), iterMakerFloat32Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.U) bool)(nil)).Elem(), iterMakerETFloat32Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.V) bool)(nil)).Elem(), iterMakerFloat32Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.V) bool)(nil)).Elem(), iterMakerETFloat32Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.W) bool)(nil)).Elem(), iterMakerFloat32Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.W) bool)(nil)).Elem(), iterMakerETFloat32Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.X) bool)(nil)).Elem(), iterMakerFloat32Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.X) bool)(nil)).Elem(), iterMakerETFloat32Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.Y) bool)(nil)).Elem(), iterMakerFloat32Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.Y) bool)(nil)).Elem(), iterMakerETFloat32Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*float32, *typex.Z) bool)(nil)).Elem(), iterMakerFloat32Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float32, *typex.Z) bool)(nil)).Elem(), iterMakerETFloat32Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64) bool)(nil)).Elem(), iterMakerFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64) bool)(nil)).Elem(), iterMakerETFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *[]byte) bool)(nil)).Elem(), iterMakerFloat64ByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *[]byte) bool)(nil)).Elem(), iterMakerETFloat64ByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *bool) bool)(nil)).Elem(), iterMakerFloat64Bool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *bool) bool)(nil)).Elem(), iterMakerETFloat64Bool)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *string) bool)(nil)).Elem(), iterMakerFloat64String)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *string) bool)(nil)).Elem(), iterMakerETFloat64String)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *int) bool)(nil)).Elem(), iterMakerFloat64Int)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *int) bool)(nil)).Elem(), iterMakerETFloat64Int)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *int8) bool)(nil)).Elem(), iterMakerFloat64Int8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *int8) bool)(nil)).Elem(), iterMakerETFloat64Int8)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *int16) bool)(nil)).Elem(), iterMakerFloat64Int16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *int16) bool)(nil)).Elem(), iterMakerETFloat64Int16)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *int32) bool)(nil)).Elem(), iterMakerFloat64Int32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *int32) bool)(nil)).Elem(), iterMakerETFloat64Int32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *int64) bool)(nil)).Elem(), iterMakerFloat64Int64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *int64) bool)(nil)).Elem(), iterMakerETFloat64Int64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *uint) bool)(nil)).Elem(), iterMakerFloat64Uint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *uint) bool)(nil)).Elem(), iterMakerETFloat64Uint)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *uint8) bool)(nil)).Elem(), iterMakerFloat64Uint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *uint8) bool)(nil)).Elem(), iterMakerETFloat64Uint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *uint16) bool)(nil)).Elem(), iterMakerFloat64Uint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *uint16) bool)(nil)).Elem(), iterMakerETFloat64Uint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *uint32) bool)(nil)).Elem(), iterMakerFloat64Uint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *uint32) bool)(nil)).Elem(), iterMakerETFloat64Uint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *uint64) bool)(nil)).Elem(), iterMakerFloat64Uint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *uint64) bool)(nil)).Elem(), iterMakerETFloat64Uint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *float32) bool)(nil)).Elem(), iterMakerFloat64Float32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *float32) bool)(nil)).Elem(), iterMakerETFloat64Float32)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *float64) bool)(nil)).Elem(), iterMakerFloat64Float64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *float64) bool)(nil)).Elem(), iterMakerETFloat64Float64)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.T) bool)(nil)).Elem(), iterMakerFloat64Typex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.T) bool)(nil)).Elem(), iterMakerETFloat64Typex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.U) bool)(nil)).Elem(), iterMakerFloat64Typex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.U) bool)(nil)).Elem(), iterMakerETFloat64Typex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.V) bool)(nil)).Elem(), iterMakerFloat64Typex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.V) bool)(nil)).Elem(), iterMakerETFloat64Typex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.W) bool)(nil)).Elem(), iterMakerFloat64Typex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.W) bool)(nil)).Elem(), iterMakerETFloat64Typex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.X) bool)(nil)).Elem(), iterMakerFloat64Typex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.X) bool)(nil)).Elem(), iterMakerETFloat64Typex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.Y) bool)(nil)).Elem(), iterMakerFloat64Typex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.Y) bool)(nil)).Elem(), iterMakerETFloat64Typex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*float64, *typex.Z) bool)(nil)).Elem(), iterMakerFloat64Typex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *float64, *typex.Z) bool)(nil)).Elem(), iterMakerETFloat64Typex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T) bool)(nil)).Elem(), iterMakerTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *[]byte) bool)(nil)).Elem(), iterMakerTypex_TByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_TByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *bool) bool)(nil)).Elem(), iterMakerTypex_TBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *bool) bool)(nil)).Elem(), iterMakerETTypex_TBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *string) bool)(nil)).Elem(), iterMakerTypex_TString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *string) bool)(nil)).Elem(), iterMakerETTypex_TString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *int) bool)(nil)).Elem(), iterMakerTypex_TInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *int) bool)(nil)).Elem(), iterMakerETTypex_TInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *int8) bool)(nil)).Elem(), iterMakerTypex_TInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *int8) bool)(nil)).Elem(), iterMakerETTypex_TInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *int16) bool)(nil)).Elem(), iterMakerTypex_TInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *int16) bool)(nil)).Elem(), iterMakerETTypex_TInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *int32) bool)(nil)).Elem(), iterMakerTypex_TInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *int32) bool)(nil)).Elem(), iterMakerETTypex_TInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *int64) bool)(nil)).Elem(), iterMakerTypex_TInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *int64) bool)(nil)).Elem(), iterMakerETTypex_TInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *uint) bool)(nil)).Elem(), iterMakerTypex_TUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *uint) bool)(nil)).Elem(), iterMakerETTypex_TUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *uint8) bool)(nil)).Elem(), iterMakerTypex_TUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *uint8) bool)(nil)).Elem(), iterMakerETTypex_TUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *uint16) bool)(nil)).Elem(), iterMakerTypex_TUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *uint16) bool)(nil)).Elem(), iterMakerETTypex_TUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *uint32) bool)(nil)).Elem(), iterMakerTypex_TUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *uint32) bool)(nil)).Elem(), iterMakerETTypex_TUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *uint64) bool)(nil)).Elem(), iterMakerTypex_TUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *uint64) bool)(nil)).Elem(), iterMakerETTypex_TUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *float32) bool)(nil)).Elem(), iterMakerTypex_TFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *float32) bool)(nil)).Elem(), iterMakerETTypex_TFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *float64) bool)(nil)).Elem(), iterMakerTypex_TFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *float64) bool)(nil)).Elem(), iterMakerETTypex_TFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.T) bool)(nil)).Elem(), iterMakerTypex_TTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_TTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.U) bool)(nil)).Elem(), iterMakerTypex_TTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_TTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.V) bool)(nil)).Elem(), iterMakerTypex_TTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_TTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.W) bool)(nil)).Elem(), iterMakerTypex_TTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_TTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.X) bool)(nil)).Elem(), iterMakerTypex_TTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_TTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_TTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_TTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.T, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_TTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.T, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_TTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U) bool)(nil)).Elem(), iterMakerTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *[]byte) bool)(nil)).Elem(), iterMakerTypex_UByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_UByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *bool) bool)(nil)).Elem(), iterMakerTypex_UBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *bool) bool)(nil)).Elem(), iterMakerETTypex_UBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *string) bool)(nil)).Elem(), iterMakerTypex_UString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *string) bool)(nil)).Elem(), iterMakerETTypex_UString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *int) bool)(nil)).Elem(), iterMakerTypex_UInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *int) bool)(nil)).Elem(), iterMakerETTypex_UInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *int8) bool)(nil)).Elem(), iterMakerTypex_UInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *int8) bool)(nil)).Elem(), iterMakerETTypex_UInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *int16) bool)(nil)).Elem(), iterMakerTypex_UInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *int16) bool)(nil)).Elem(), iterMakerETTypex_UInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *int32) bool)(nil)).Elem(), iterMakerTypex_UInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *int32) bool)(nil)).Elem(), iterMakerETTypex_UInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *int64) bool)(nil)).Elem(), iterMakerTypex_UInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *int64) bool)(nil)).Elem(), iterMakerETTypex_UInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *uint) bool)(nil)).Elem(), iterMakerTypex_UUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *uint) bool)(nil)).Elem(), iterMakerETTypex_UUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *uint8) bool)(nil)).Elem(), iterMakerTypex_UUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *uint8) bool)(nil)).Elem(), iterMakerETTypex_UUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *uint16) bool)(nil)).Elem(), iterMakerTypex_UUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *uint16) bool)(nil)).Elem(), iterMakerETTypex_UUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *uint32) bool)(nil)).Elem(), iterMakerTypex_UUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *uint32) bool)(nil)).Elem(), iterMakerETTypex_UUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *uint64) bool)(nil)).Elem(), iterMakerTypex_UUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *uint64) bool)(nil)).Elem(), iterMakerETTypex_UUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *float32) bool)(nil)).Elem(), iterMakerTypex_UFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *float32) bool)(nil)).Elem(), iterMakerETTypex_UFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *float64) bool)(nil)).Elem(), iterMakerTypex_UFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *float64) bool)(nil)).Elem(), iterMakerETTypex_UFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.T) bool)(nil)).Elem(), iterMakerTypex_UTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_UTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.U) bool)(nil)).Elem(), iterMakerTypex_UTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_UTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.V) bool)(nil)).Elem(), iterMakerTypex_UTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_UTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.W) bool)(nil)).Elem(), iterMakerTypex_UTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_UTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.X) bool)(nil)).Elem(), iterMakerTypex_UTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_UTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_UTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_UTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.U, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_UTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.U, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_UTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V) bool)(nil)).Elem(), iterMakerTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *[]byte) bool)(nil)).Elem(), iterMakerTypex_VByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_VByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *bool) bool)(nil)).Elem(), iterMakerTypex_VBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *bool) bool)(nil)).Elem(), iterMakerETTypex_VBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *string) bool)(nil)).Elem(), iterMakerTypex_VString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *string) bool)(nil)).Elem(), iterMakerETTypex_VString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *int) bool)(nil)).Elem(), iterMakerTypex_VInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *int) bool)(nil)).Elem(), iterMakerETTypex_VInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *int8) bool)(nil)).Elem(), iterMakerTypex_VInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *int8) bool)(nil)).Elem(), iterMakerETTypex_VInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *int16) bool)(nil)).Elem(), iterMakerTypex_VInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *int16) bool)(nil)).Elem(), iterMakerETTypex_VInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *int32) bool)(nil)).Elem(), iterMakerTypex_VInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *int32) bool)(nil)).Elem(), iterMakerETTypex_VInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *int64) bool)(nil)).Elem(), iterMakerTypex_VInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *int64) bool)(nil)).Elem(), iterMakerETTypex_VInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *uint) bool)(nil)).Elem(), iterMakerTypex_VUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *uint) bool)(nil)).Elem(), iterMakerETTypex_VUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *uint8) bool)(nil)).Elem(), iterMakerTypex_VUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *uint8) bool)(nil)).Elem(), iterMakerETTypex_VUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *uint16) bool)(nil)).Elem(), iterMakerTypex_VUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *uint16) bool)(nil)).Elem(), iterMakerETTypex_VUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *uint32) bool)(nil)).Elem(), iterMakerTypex_VUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *uint32) bool)(nil)).Elem(), iterMakerETTypex_VUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *uint64) bool)(nil)).Elem(), iterMakerTypex_VUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *uint64) bool)(nil)).Elem(), iterMakerETTypex_VUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *float32) bool)(nil)).Elem(), iterMakerTypex_VFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *float32) bool)(nil)).Elem(), iterMakerETTypex_VFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *float64) bool)(nil)).Elem(), iterMakerTypex_VFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *float64) bool)(nil)).Elem(), iterMakerETTypex_VFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.T) bool)(nil)).Elem(), iterMakerTypex_VTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_VTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.U) bool)(nil)).Elem(), iterMakerTypex_VTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_VTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.V) bool)(nil)).Elem(), iterMakerTypex_VTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_VTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.W) bool)(nil)).Elem(), iterMakerTypex_VTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_VTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.X) bool)(nil)).Elem(), iterMakerTypex_VTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_VTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_VTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_VTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.V, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_VTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.V, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_VTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W) bool)(nil)).Elem(), iterMakerTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *[]byte) bool)(nil)).Elem(), iterMakerTypex_WByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_WByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *bool) bool)(nil)).Elem(), iterMakerTypex_WBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *bool) bool)(nil)).Elem(), iterMakerETTypex_WBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *string) bool)(nil)).Elem(), iterMakerTypex_WString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *string) bool)(nil)).Elem(), iterMakerETTypex_WString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *int) bool)(nil)).Elem(), iterMakerTypex_WInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *int) bool)(nil)).Elem(), iterMakerETTypex_WInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *int8) bool)(nil)).Elem(), iterMakerTypex_WInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *int8) bool)(nil)).Elem(), iterMakerETTypex_WInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *int16) bool)(nil)).Elem(), iterMakerTypex_WInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *int16) bool)(nil)).Elem(), iterMakerETTypex_WInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *int32) bool)(nil)).Elem(), iterMakerTypex_WInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *int32) bool)(nil)).Elem(), iterMakerETTypex_WInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *int64) bool)(nil)).Elem(), iterMakerTypex_WInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *int64) bool)(nil)).Elem(), iterMakerETTypex_WInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *uint) bool)(nil)).Elem(), iterMakerTypex_WUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *uint) bool)(nil)).Elem(), iterMakerETTypex_WUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *uint8) bool)(nil)).Elem(), iterMakerTypex_WUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *uint8) bool)(nil)).Elem(), iterMakerETTypex_WUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *uint16) bool)(nil)).Elem(), iterMakerTypex_WUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *uint16) bool)(nil)).Elem(), iterMakerETTypex_WUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *uint32) bool)(nil)).Elem(), iterMakerTypex_WUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *uint32) bool)(nil)).Elem(), iterMakerETTypex_WUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *uint64) bool)(nil)).Elem(), iterMakerTypex_WUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *uint64) bool)(nil)).Elem(), iterMakerETTypex_WUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *float32) bool)(nil)).Elem(), iterMakerTypex_WFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *float32) bool)(nil)).Elem(), iterMakerETTypex_WFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *float64) bool)(nil)).Elem(), iterMakerTypex_WFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *float64) bool)(nil)).Elem(), iterMakerETTypex_WFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.T) bool)(nil)).Elem(), iterMakerTypex_WTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_WTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.U) bool)(nil)).Elem(), iterMakerTypex_WTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_WTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.V) bool)(nil)).Elem(), iterMakerTypex_WTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_WTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.W) bool)(nil)).Elem(), iterMakerTypex_WTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_WTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.X) bool)(nil)).Elem(), iterMakerTypex_WTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_WTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_WTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_WTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.W, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_WTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.W, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_WTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X) bool)(nil)).Elem(), iterMakerTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *[]byte) bool)(nil)).Elem(), iterMakerTypex_XByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_XByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *bool) bool)(nil)).Elem(), iterMakerTypex_XBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *bool) bool)(nil)).Elem(), iterMakerETTypex_XBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *string) bool)(nil)).Elem(), iterMakerTypex_XString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *string) bool)(nil)).Elem(), iterMakerETTypex_XString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *int) bool)(nil)).Elem(), iterMakerTypex_XInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *int) bool)(nil)).Elem(), iterMakerETTypex_XInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *int8) bool)(nil)).Elem(), iterMakerTypex_XInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *int8) bool)(nil)).Elem(), iterMakerETTypex_XInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *int16) bool)(nil)).Elem(), iterMakerTypex_XInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *int16) bool)(nil)).Elem(), iterMakerETTypex_XInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *int32) bool)(nil)).Elem(), iterMakerTypex_XInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *int32) bool)(nil)).Elem(), iterMakerETTypex_XInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *int64) bool)(nil)).Elem(), iterMakerTypex_XInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *int64) bool)(nil)).Elem(), iterMakerETTypex_XInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *uint) bool)(nil)).Elem(), iterMakerTypex_XUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *uint) bool)(nil)).Elem(), iterMakerETTypex_XUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *uint8) bool)(nil)).Elem(), iterMakerTypex_XUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *uint8) bool)(nil)).Elem(), iterMakerETTypex_XUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *uint16) bool)(nil)).Elem(), iterMakerTypex_XUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *uint16) bool)(nil)).Elem(), iterMakerETTypex_XUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *uint32) bool)(nil)).Elem(), iterMakerTypex_XUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *uint32) bool)(nil)).Elem(), iterMakerETTypex_XUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *uint64) bool)(nil)).Elem(), iterMakerTypex_XUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *uint64) bool)(nil)).Elem(), iterMakerETTypex_XUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *float32) bool)(nil)).Elem(), iterMakerTypex_XFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *float32) bool)(nil)).Elem(), iterMakerETTypex_XFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *float64) bool)(nil)).Elem(), iterMakerTypex_XFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *float64) bool)(nil)).Elem(), iterMakerETTypex_XFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.T) bool)(nil)).Elem(), iterMakerTypex_XTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_XTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.U) bool)(nil)).Elem(), iterMakerTypex_XTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_XTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.V) bool)(nil)).Elem(), iterMakerTypex_XTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_XTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.W) bool)(nil)).Elem(), iterMakerTypex_XTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_XTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.X) bool)(nil)).Elem(), iterMakerTypex_XTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_XTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_XTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_XTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.X, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_XTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.X, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_XTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y) bool)(nil)).Elem(), iterMakerTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *[]byte) bool)(nil)).Elem(), iterMakerTypex_YByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_YByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *bool) bool)(nil)).Elem(), iterMakerTypex_YBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *bool) bool)(nil)).Elem(), iterMakerETTypex_YBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *string) bool)(nil)).Elem(), iterMakerTypex_YString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *string) bool)(nil)).Elem(), iterMakerETTypex_YString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *int) bool)(nil)).Elem(), iterMakerTypex_YInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *int) bool)(nil)).Elem(), iterMakerETTypex_YInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *int8) bool)(nil)).Elem(), iterMakerTypex_YInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *int8) bool)(nil)).Elem(), iterMakerETTypex_YInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *int16) bool)(nil)).Elem(), iterMakerTypex_YInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *int16) bool)(nil)).Elem(), iterMakerETTypex_YInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *int32) bool)(nil)).Elem(), iterMakerTypex_YInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *int32) bool)(nil)).Elem(), iterMakerETTypex_YInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *int64) bool)(nil)).Elem(), iterMakerTypex_YInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *int64) bool)(nil)).Elem(), iterMakerETTypex_YInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *uint) bool)(nil)).Elem(), iterMakerTypex_YUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *uint) bool)(nil)).Elem(), iterMakerETTypex_YUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *uint8) bool)(nil)).Elem(), iterMakerTypex_YUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *uint8) bool)(nil)).Elem(), iterMakerETTypex_YUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *uint16) bool)(nil)).Elem(), iterMakerTypex_YUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *uint16) bool)(nil)).Elem(), iterMakerETTypex_YUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *uint32) bool)(nil)).Elem(), iterMakerTypex_YUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *uint32) bool)(nil)).Elem(), iterMakerETTypex_YUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *uint64) bool)(nil)).Elem(), iterMakerTypex_YUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *uint64) bool)(nil)).Elem(), iterMakerETTypex_YUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *float32) bool)(nil)).Elem(), iterMakerTypex_YFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *float32) bool)(nil)).Elem(), iterMakerETTypex_YFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *float64) bool)(nil)).Elem(), iterMakerTypex_YFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *float64) bool)(nil)).Elem(), iterMakerETTypex_YFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.T) bool)(nil)).Elem(), iterMakerTypex_YTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_YTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.U) bool)(nil)).Elem(), iterMakerTypex_YTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_YTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.V) bool)(nil)).Elem(), iterMakerTypex_YTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_YTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.W) bool)(nil)).Elem(), iterMakerTypex_YTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_YTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.X) bool)(nil)).Elem(), iterMakerTypex_YTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_YTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_YTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_YTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Y, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_YTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Y, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_YTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z) bool)(nil)).Elem(), iterMakerTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_Z)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *[]byte) bool)(nil)).Elem(), iterMakerTypex_ZByteSlice)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *[]byte) bool)(nil)).Elem(), iterMakerETTypex_ZByteSlice)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *bool) bool)(nil)).Elem(), iterMakerTypex_ZBool)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *bool) bool)(nil)).Elem(), iterMakerETTypex_ZBool)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *string) bool)(nil)).Elem(), iterMakerTypex_ZString)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *string) bool)(nil)).Elem(), iterMakerETTypex_ZString)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *int) bool)(nil)).Elem(), iterMakerTypex_ZInt)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *int) bool)(nil)).Elem(), iterMakerETTypex_ZInt)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *int8) bool)(nil)).Elem(), iterMakerTypex_ZInt8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *int8) bool)(nil)).Elem(), iterMakerETTypex_ZInt8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *int16) bool)(nil)).Elem(), iterMakerTypex_ZInt16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *int16) bool)(nil)).Elem(), iterMakerETTypex_ZInt16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *int32) bool)(nil)).Elem(), iterMakerTypex_ZInt32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *int32) bool)(nil)).Elem(), iterMakerETTypex_ZInt32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *int64) bool)(nil)).Elem(), iterMakerTypex_ZInt64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *int64) bool)(nil)).Elem(), iterMakerETTypex_ZInt64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *uint) bool)(nil)).Elem(), iterMakerTypex_ZUint)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *uint) bool)(nil)).Elem(), iterMakerETTypex_ZUint)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *uint8) bool)(nil)).Elem(), iterMakerTypex_ZUint8)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *uint8) bool)(nil)).Elem(), iterMakerETTypex_ZUint8)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *uint16) bool)(nil)).Elem(), iterMakerTypex_ZUint16)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *uint16) bool)(nil)).Elem(), iterMakerETTypex_ZUint16)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *uint32) bool)(nil)).Elem(), iterMakerTypex_ZUint32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *uint32) bool)(nil)).Elem(), iterMakerETTypex_ZUint32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *uint64) bool)(nil)).Elem(), iterMakerTypex_ZUint64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *uint64) bool)(nil)).Elem(), iterMakerETTypex_ZUint64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *float32) bool)(nil)).Elem(), iterMakerTypex_ZFloat32)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *float32) bool)(nil)).Elem(), iterMakerETTypex_ZFloat32)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *float64) bool)(nil)).Elem(), iterMakerTypex_ZFloat64)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *float64) bool)(nil)).Elem(), iterMakerETTypex_ZFloat64)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.T) bool)(nil)).Elem(), iterMakerTypex_ZTypex_T)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.T) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_T)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.U) bool)(nil)).Elem(), iterMakerTypex_ZTypex_U)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.U) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_U)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.V) bool)(nil)).Elem(), iterMakerTypex_ZTypex_V)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.V) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_V)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.W) bool)(nil)).Elem(), iterMakerTypex_ZTypex_W)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.W) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_W)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.X) bool)(nil)).Elem(), iterMakerTypex_ZTypex_X)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.X) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_X)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.Y) bool)(nil)).Elem(), iterMakerTypex_ZTypex_Y)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.Y) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_Y)
 	exec.RegisterInput(reflect.TypeOf((*func(*typex.Z, *typex.Z) bool)(nil)).Elem(), iterMakerTypex_ZTypex_Z)
-	exec.RegisterInput(reflect.TypeOf((*func(*typex.EventTime, *typex.Z, *typex.Z) bool)(nil)).Elem(), iterMakerETTypex_ZTypex_Z)
 }
 
 type iterNative struct {
@@ -1088,26 +582,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSlice(et *typex.EventTime, val *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.([]byte)
-	return true
-}
-
-func iterMakerETByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSlice
-	return ret
-}
-
 func (v *iterNative) readByteSliceByteSlice(key *[]byte, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1127,27 +601,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceByteSlice(et *typex.EventTime, key *[]byte, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETByteSliceByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceByteSlice
-	return ret
-}
-
 func (v *iterNative) readByteSliceBool(key *[]byte, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1167,27 +620,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceBool(et *typex.EventTime, key *[]byte, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETByteSliceBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceBool
-	return ret
-}
-
 func (v *iterNative) readByteSliceString(key *[]byte, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1207,27 +639,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceString(et *typex.EventTime, key *[]byte, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETByteSliceString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceString
-	return ret
-}
-
 func (v *iterNative) readByteSliceInt(key *[]byte, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1247,27 +658,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceInt(et *typex.EventTime, key *[]byte, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETByteSliceInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceInt
-	return ret
-}
-
 func (v *iterNative) readByteSliceInt8(key *[]byte, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1287,27 +677,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceInt8(et *typex.EventTime, key *[]byte, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETByteSliceInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceInt8
-	return ret
-}
-
 func (v *iterNative) readByteSliceInt16(key *[]byte, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1327,27 +696,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceInt16(et *typex.EventTime, key *[]byte, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETByteSliceInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceInt16
-	return ret
-}
-
 func (v *iterNative) readByteSliceInt32(key *[]byte, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1367,27 +715,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceInt32(et *typex.EventTime, key *[]byte, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETByteSliceInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceInt32
-	return ret
-}
-
 func (v *iterNative) readByteSliceInt64(key *[]byte, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1407,27 +734,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceInt64(et *typex.EventTime, key *[]byte, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETByteSliceInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceInt64
-	return ret
-}
-
 func (v *iterNative) readByteSliceUint(key *[]byte, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1447,27 +753,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceUint(et *typex.EventTime, key *[]byte, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETByteSliceUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceUint
-	return ret
-}
-
 func (v *iterNative) readByteSliceUint8(key *[]byte, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1487,27 +772,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceUint8(et *typex.EventTime, key *[]byte, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETByteSliceUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceUint8
-	return ret
-}
-
 func (v *iterNative) readByteSliceUint16(key *[]byte, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1527,27 +791,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceUint16(et *typex.EventTime, key *[]byte, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETByteSliceUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceUint16
-	return ret
-}
-
 func (v *iterNative) readByteSliceUint32(key *[]byte, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1567,27 +810,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceUint32(et *typex.EventTime, key *[]byte, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETByteSliceUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceUint32
-	return ret
-}
-
 func (v *iterNative) readByteSliceUint64(key *[]byte, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1607,27 +829,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceUint64(et *typex.EventTime, key *[]byte, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETByteSliceUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceUint64
-	return ret
-}
-
 func (v *iterNative) readByteSliceFloat32(key *[]byte, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1647,27 +848,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceFloat32(et *typex.EventTime, key *[]byte, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETByteSliceFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceFloat32
-	return ret
-}
-
 func (v *iterNative) readByteSliceFloat64(key *[]byte, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1687,27 +867,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceFloat64(et *typex.EventTime, key *[]byte, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETByteSliceFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceFloat64
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_T(key *[]byte, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1727,27 +886,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_T(et *typex.EventTime, key *[]byte, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETByteSliceTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_T
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_U(key *[]byte, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1767,27 +905,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_U(et *typex.EventTime, key *[]byte, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETByteSliceTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_U
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_V(key *[]byte, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1807,27 +924,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_V(et *typex.EventTime, key *[]byte, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETByteSliceTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_V
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_W(key *[]byte, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1847,27 +943,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_W(et *typex.EventTime, key *[]byte, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETByteSliceTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_W
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_X(key *[]byte, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1887,27 +962,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_X(et *typex.EventTime, key *[]byte, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETByteSliceTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_X
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_Y(key *[]byte, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1927,27 +981,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_Y(et *typex.EventTime, key *[]byte, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETByteSliceTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_Y
-	return ret
-}
-
 func (v *iterNative) readByteSliceTypex_Z(key *[]byte, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -1967,27 +1000,6 @@
 	return ret
 }
 
-func (v *iterNative) readETByteSliceTypex_Z(et *typex.EventTime, key *[]byte, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.([]byte)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETByteSliceTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETByteSliceTypex_Z
-	return ret
-}
-
 func (v *iterNative) readBool(val *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2006,26 +1018,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBool(et *typex.EventTime, val *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(bool)
-	return true
-}
-
-func iterMakerETBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBool
-	return ret
-}
-
 func (v *iterNative) readBoolByteSlice(key *bool, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2045,27 +1037,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolByteSlice(et *typex.EventTime, key *bool, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETBoolByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolByteSlice
-	return ret
-}
-
 func (v *iterNative) readBoolBool(key *bool, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2085,27 +1056,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolBool(et *typex.EventTime, key *bool, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETBoolBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolBool
-	return ret
-}
-
 func (v *iterNative) readBoolString(key *bool, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2125,27 +1075,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolString(et *typex.EventTime, key *bool, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETBoolString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolString
-	return ret
-}
-
 func (v *iterNative) readBoolInt(key *bool, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2165,27 +1094,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolInt(et *typex.EventTime, key *bool, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETBoolInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolInt
-	return ret
-}
-
 func (v *iterNative) readBoolInt8(key *bool, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2205,27 +1113,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolInt8(et *typex.EventTime, key *bool, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETBoolInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolInt8
-	return ret
-}
-
 func (v *iterNative) readBoolInt16(key *bool, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2245,27 +1132,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolInt16(et *typex.EventTime, key *bool, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETBoolInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolInt16
-	return ret
-}
-
 func (v *iterNative) readBoolInt32(key *bool, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2285,27 +1151,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolInt32(et *typex.EventTime, key *bool, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETBoolInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolInt32
-	return ret
-}
-
 func (v *iterNative) readBoolInt64(key *bool, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2325,27 +1170,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolInt64(et *typex.EventTime, key *bool, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETBoolInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolInt64
-	return ret
-}
-
 func (v *iterNative) readBoolUint(key *bool, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2365,27 +1189,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolUint(et *typex.EventTime, key *bool, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETBoolUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolUint
-	return ret
-}
-
 func (v *iterNative) readBoolUint8(key *bool, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2405,27 +1208,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolUint8(et *typex.EventTime, key *bool, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETBoolUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolUint8
-	return ret
-}
-
 func (v *iterNative) readBoolUint16(key *bool, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2445,27 +1227,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolUint16(et *typex.EventTime, key *bool, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETBoolUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolUint16
-	return ret
-}
-
 func (v *iterNative) readBoolUint32(key *bool, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2485,27 +1246,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolUint32(et *typex.EventTime, key *bool, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETBoolUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolUint32
-	return ret
-}
-
 func (v *iterNative) readBoolUint64(key *bool, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2525,27 +1265,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolUint64(et *typex.EventTime, key *bool, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETBoolUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolUint64
-	return ret
-}
-
 func (v *iterNative) readBoolFloat32(key *bool, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2565,27 +1284,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolFloat32(et *typex.EventTime, key *bool, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETBoolFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolFloat32
-	return ret
-}
-
 func (v *iterNative) readBoolFloat64(key *bool, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2605,27 +1303,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolFloat64(et *typex.EventTime, key *bool, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETBoolFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolFloat64
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_T(key *bool, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2645,27 +1322,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_T(et *typex.EventTime, key *bool, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETBoolTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_T
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_U(key *bool, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2685,27 +1341,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_U(et *typex.EventTime, key *bool, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETBoolTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_U
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_V(key *bool, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2725,27 +1360,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_V(et *typex.EventTime, key *bool, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETBoolTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_V
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_W(key *bool, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2765,27 +1379,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_W(et *typex.EventTime, key *bool, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETBoolTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_W
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_X(key *bool, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2805,27 +1398,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_X(et *typex.EventTime, key *bool, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETBoolTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_X
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_Y(key *bool, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2845,27 +1417,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_Y(et *typex.EventTime, key *bool, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETBoolTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_Y
-	return ret
-}
-
 func (v *iterNative) readBoolTypex_Z(key *bool, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2885,27 +1436,6 @@
 	return ret
 }
 
-func (v *iterNative) readETBoolTypex_Z(et *typex.EventTime, key *bool, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(bool)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETBoolTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETBoolTypex_Z
-	return ret
-}
-
 func (v *iterNative) readString(val *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2924,26 +1454,6 @@
 	return ret
 }
 
-func (v *iterNative) readETString(et *typex.EventTime, val *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(string)
-	return true
-}
-
-func iterMakerETString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETString
-	return ret
-}
-
 func (v *iterNative) readStringByteSlice(key *string, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -2963,27 +1473,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringByteSlice(et *typex.EventTime, key *string, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETStringByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringByteSlice
-	return ret
-}
-
 func (v *iterNative) readStringBool(key *string, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3003,27 +1492,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringBool(et *typex.EventTime, key *string, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETStringBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringBool
-	return ret
-}
-
 func (v *iterNative) readStringString(key *string, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3043,27 +1511,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringString(et *typex.EventTime, key *string, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETStringString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringString
-	return ret
-}
-
 func (v *iterNative) readStringInt(key *string, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3083,27 +1530,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringInt(et *typex.EventTime, key *string, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETStringInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringInt
-	return ret
-}
-
 func (v *iterNative) readStringInt8(key *string, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3123,27 +1549,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringInt8(et *typex.EventTime, key *string, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETStringInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringInt8
-	return ret
-}
-
 func (v *iterNative) readStringInt16(key *string, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3163,27 +1568,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringInt16(et *typex.EventTime, key *string, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETStringInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringInt16
-	return ret
-}
-
 func (v *iterNative) readStringInt32(key *string, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3203,27 +1587,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringInt32(et *typex.EventTime, key *string, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETStringInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringInt32
-	return ret
-}
-
 func (v *iterNative) readStringInt64(key *string, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3243,27 +1606,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringInt64(et *typex.EventTime, key *string, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETStringInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringInt64
-	return ret
-}
-
 func (v *iterNative) readStringUint(key *string, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3283,27 +1625,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringUint(et *typex.EventTime, key *string, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETStringUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringUint
-	return ret
-}
-
 func (v *iterNative) readStringUint8(key *string, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3323,27 +1644,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringUint8(et *typex.EventTime, key *string, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETStringUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringUint8
-	return ret
-}
-
 func (v *iterNative) readStringUint16(key *string, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3363,27 +1663,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringUint16(et *typex.EventTime, key *string, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETStringUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringUint16
-	return ret
-}
-
 func (v *iterNative) readStringUint32(key *string, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3403,27 +1682,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringUint32(et *typex.EventTime, key *string, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETStringUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringUint32
-	return ret
-}
-
 func (v *iterNative) readStringUint64(key *string, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3443,27 +1701,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringUint64(et *typex.EventTime, key *string, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETStringUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringUint64
-	return ret
-}
-
 func (v *iterNative) readStringFloat32(key *string, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3483,27 +1720,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringFloat32(et *typex.EventTime, key *string, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETStringFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringFloat32
-	return ret
-}
-
 func (v *iterNative) readStringFloat64(key *string, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3523,27 +1739,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringFloat64(et *typex.EventTime, key *string, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETStringFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringFloat64
-	return ret
-}
-
 func (v *iterNative) readStringTypex_T(key *string, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3563,27 +1758,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_T(et *typex.EventTime, key *string, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETStringTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_T
-	return ret
-}
-
 func (v *iterNative) readStringTypex_U(key *string, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3603,27 +1777,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_U(et *typex.EventTime, key *string, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETStringTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_U
-	return ret
-}
-
 func (v *iterNative) readStringTypex_V(key *string, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3643,27 +1796,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_V(et *typex.EventTime, key *string, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETStringTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_V
-	return ret
-}
-
 func (v *iterNative) readStringTypex_W(key *string, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3683,27 +1815,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_W(et *typex.EventTime, key *string, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETStringTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_W
-	return ret
-}
-
 func (v *iterNative) readStringTypex_X(key *string, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3723,27 +1834,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_X(et *typex.EventTime, key *string, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETStringTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_X
-	return ret
-}
-
 func (v *iterNative) readStringTypex_Y(key *string, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3763,27 +1853,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_Y(et *typex.EventTime, key *string, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETStringTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_Y
-	return ret
-}
-
 func (v *iterNative) readStringTypex_Z(key *string, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3803,27 +1872,6 @@
 	return ret
 }
 
-func (v *iterNative) readETStringTypex_Z(et *typex.EventTime, key *string, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(string)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETStringTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETStringTypex_Z
-	return ret
-}
-
 func (v *iterNative) readInt(val *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3842,26 +1890,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt(et *typex.EventTime, val *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(int)
-	return true
-}
-
-func iterMakerETInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt
-	return ret
-}
-
 func (v *iterNative) readIntByteSlice(key *int, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3881,27 +1909,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntByteSlice(et *typex.EventTime, key *int, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETIntByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntByteSlice
-	return ret
-}
-
 func (v *iterNative) readIntBool(key *int, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3921,27 +1928,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntBool(et *typex.EventTime, key *int, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETIntBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntBool
-	return ret
-}
-
 func (v *iterNative) readIntString(key *int, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -3961,27 +1947,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntString(et *typex.EventTime, key *int, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETIntString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntString
-	return ret
-}
-
 func (v *iterNative) readIntInt(key *int, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4001,27 +1966,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntInt(et *typex.EventTime, key *int, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETIntInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntInt
-	return ret
-}
-
 func (v *iterNative) readIntInt8(key *int, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4041,27 +1985,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntInt8(et *typex.EventTime, key *int, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETIntInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntInt8
-	return ret
-}
-
 func (v *iterNative) readIntInt16(key *int, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4081,27 +2004,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntInt16(et *typex.EventTime, key *int, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETIntInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntInt16
-	return ret
-}
-
 func (v *iterNative) readIntInt32(key *int, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4121,27 +2023,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntInt32(et *typex.EventTime, key *int, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETIntInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntInt32
-	return ret
-}
-
 func (v *iterNative) readIntInt64(key *int, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4161,27 +2042,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntInt64(et *typex.EventTime, key *int, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETIntInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntInt64
-	return ret
-}
-
 func (v *iterNative) readIntUint(key *int, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4201,27 +2061,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntUint(et *typex.EventTime, key *int, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETIntUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntUint
-	return ret
-}
-
 func (v *iterNative) readIntUint8(key *int, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4241,27 +2080,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntUint8(et *typex.EventTime, key *int, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETIntUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntUint8
-	return ret
-}
-
 func (v *iterNative) readIntUint16(key *int, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4281,27 +2099,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntUint16(et *typex.EventTime, key *int, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETIntUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntUint16
-	return ret
-}
-
 func (v *iterNative) readIntUint32(key *int, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4321,27 +2118,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntUint32(et *typex.EventTime, key *int, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETIntUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntUint32
-	return ret
-}
-
 func (v *iterNative) readIntUint64(key *int, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4361,27 +2137,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntUint64(et *typex.EventTime, key *int, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETIntUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntUint64
-	return ret
-}
-
 func (v *iterNative) readIntFloat32(key *int, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4401,27 +2156,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntFloat32(et *typex.EventTime, key *int, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETIntFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntFloat32
-	return ret
-}
-
 func (v *iterNative) readIntFloat64(key *int, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4441,27 +2175,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntFloat64(et *typex.EventTime, key *int, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETIntFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntFloat64
-	return ret
-}
-
 func (v *iterNative) readIntTypex_T(key *int, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4481,27 +2194,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_T(et *typex.EventTime, key *int, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETIntTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_T
-	return ret
-}
-
 func (v *iterNative) readIntTypex_U(key *int, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4521,27 +2213,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_U(et *typex.EventTime, key *int, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETIntTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_U
-	return ret
-}
-
 func (v *iterNative) readIntTypex_V(key *int, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4561,27 +2232,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_V(et *typex.EventTime, key *int, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETIntTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_V
-	return ret
-}
-
 func (v *iterNative) readIntTypex_W(key *int, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4601,27 +2251,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_W(et *typex.EventTime, key *int, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETIntTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_W
-	return ret
-}
-
 func (v *iterNative) readIntTypex_X(key *int, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4641,27 +2270,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_X(et *typex.EventTime, key *int, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETIntTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_X
-	return ret
-}
-
 func (v *iterNative) readIntTypex_Y(key *int, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4681,27 +2289,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_Y(et *typex.EventTime, key *int, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETIntTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_Y
-	return ret
-}
-
 func (v *iterNative) readIntTypex_Z(key *int, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4721,27 +2308,6 @@
 	return ret
 }
 
-func (v *iterNative) readETIntTypex_Z(et *typex.EventTime, key *int, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETIntTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETIntTypex_Z
-	return ret
-}
-
 func (v *iterNative) readInt8(val *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4760,26 +2326,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8(et *typex.EventTime, val *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(int8)
-	return true
-}
-
-func iterMakerETInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8
-	return ret
-}
-
 func (v *iterNative) readInt8ByteSlice(key *int8, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4799,27 +2345,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8ByteSlice(et *typex.EventTime, key *int8, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETInt8ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8ByteSlice
-	return ret
-}
-
 func (v *iterNative) readInt8Bool(key *int8, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4839,27 +2364,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Bool(et *typex.EventTime, key *int8, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETInt8Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Bool
-	return ret
-}
-
 func (v *iterNative) readInt8String(key *int8, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4879,27 +2383,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8String(et *typex.EventTime, key *int8, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETInt8String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8String
-	return ret
-}
-
 func (v *iterNative) readInt8Int(key *int8, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4919,27 +2402,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Int(et *typex.EventTime, key *int8, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETInt8Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Int
-	return ret
-}
-
 func (v *iterNative) readInt8Int8(key *int8, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4959,27 +2421,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Int8(et *typex.EventTime, key *int8, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETInt8Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Int8
-	return ret
-}
-
 func (v *iterNative) readInt8Int16(key *int8, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -4999,27 +2440,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Int16(et *typex.EventTime, key *int8, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETInt8Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Int16
-	return ret
-}
-
 func (v *iterNative) readInt8Int32(key *int8, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5039,27 +2459,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Int32(et *typex.EventTime, key *int8, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETInt8Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Int32
-	return ret
-}
-
 func (v *iterNative) readInt8Int64(key *int8, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5079,27 +2478,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Int64(et *typex.EventTime, key *int8, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETInt8Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Int64
-	return ret
-}
-
 func (v *iterNative) readInt8Uint(key *int8, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5119,27 +2497,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Uint(et *typex.EventTime, key *int8, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETInt8Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Uint
-	return ret
-}
-
 func (v *iterNative) readInt8Uint8(key *int8, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5159,27 +2516,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Uint8(et *typex.EventTime, key *int8, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETInt8Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Uint8
-	return ret
-}
-
 func (v *iterNative) readInt8Uint16(key *int8, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5199,27 +2535,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Uint16(et *typex.EventTime, key *int8, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETInt8Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Uint16
-	return ret
-}
-
 func (v *iterNative) readInt8Uint32(key *int8, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5239,27 +2554,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Uint32(et *typex.EventTime, key *int8, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETInt8Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Uint32
-	return ret
-}
-
 func (v *iterNative) readInt8Uint64(key *int8, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5279,27 +2573,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Uint64(et *typex.EventTime, key *int8, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETInt8Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Uint64
-	return ret
-}
-
 func (v *iterNative) readInt8Float32(key *int8, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5319,27 +2592,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Float32(et *typex.EventTime, key *int8, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETInt8Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Float32
-	return ret
-}
-
 func (v *iterNative) readInt8Float64(key *int8, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5359,27 +2611,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Float64(et *typex.EventTime, key *int8, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETInt8Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Float64
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_T(key *int8, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5399,27 +2630,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_T(et *typex.EventTime, key *int8, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETInt8Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_T
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_U(key *int8, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5439,27 +2649,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_U(et *typex.EventTime, key *int8, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETInt8Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_U
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_V(key *int8, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5479,27 +2668,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_V(et *typex.EventTime, key *int8, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETInt8Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_V
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_W(key *int8, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5519,27 +2687,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_W(et *typex.EventTime, key *int8, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETInt8Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_W
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_X(key *int8, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5559,27 +2706,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_X(et *typex.EventTime, key *int8, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETInt8Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_X
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_Y(key *int8, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5599,27 +2725,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_Y(et *typex.EventTime, key *int8, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETInt8Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_Y
-	return ret
-}
-
 func (v *iterNative) readInt8Typex_Z(key *int8, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5639,27 +2744,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt8Typex_Z(et *typex.EventTime, key *int8, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int8)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETInt8Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt8Typex_Z
-	return ret
-}
-
 func (v *iterNative) readInt16(val *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5678,26 +2762,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16(et *typex.EventTime, val *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(int16)
-	return true
-}
-
-func iterMakerETInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16
-	return ret
-}
-
 func (v *iterNative) readInt16ByteSlice(key *int16, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5717,27 +2781,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16ByteSlice(et *typex.EventTime, key *int16, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETInt16ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16ByteSlice
-	return ret
-}
-
 func (v *iterNative) readInt16Bool(key *int16, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5757,27 +2800,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Bool(et *typex.EventTime, key *int16, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETInt16Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Bool
-	return ret
-}
-
 func (v *iterNative) readInt16String(key *int16, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5797,27 +2819,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16String(et *typex.EventTime, key *int16, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETInt16String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16String
-	return ret
-}
-
 func (v *iterNative) readInt16Int(key *int16, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5837,27 +2838,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Int(et *typex.EventTime, key *int16, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETInt16Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Int
-	return ret
-}
-
 func (v *iterNative) readInt16Int8(key *int16, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5877,27 +2857,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Int8(et *typex.EventTime, key *int16, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETInt16Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Int8
-	return ret
-}
-
 func (v *iterNative) readInt16Int16(key *int16, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5917,27 +2876,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Int16(et *typex.EventTime, key *int16, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETInt16Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Int16
-	return ret
-}
-
 func (v *iterNative) readInt16Int32(key *int16, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5957,27 +2895,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Int32(et *typex.EventTime, key *int16, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETInt16Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Int32
-	return ret
-}
-
 func (v *iterNative) readInt16Int64(key *int16, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -5997,27 +2914,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Int64(et *typex.EventTime, key *int16, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETInt16Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Int64
-	return ret
-}
-
 func (v *iterNative) readInt16Uint(key *int16, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6037,27 +2933,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Uint(et *typex.EventTime, key *int16, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETInt16Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Uint
-	return ret
-}
-
 func (v *iterNative) readInt16Uint8(key *int16, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6077,27 +2952,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Uint8(et *typex.EventTime, key *int16, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETInt16Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Uint8
-	return ret
-}
-
 func (v *iterNative) readInt16Uint16(key *int16, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6117,27 +2971,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Uint16(et *typex.EventTime, key *int16, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETInt16Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Uint16
-	return ret
-}
-
 func (v *iterNative) readInt16Uint32(key *int16, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6157,27 +2990,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Uint32(et *typex.EventTime, key *int16, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETInt16Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Uint32
-	return ret
-}
-
 func (v *iterNative) readInt16Uint64(key *int16, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6197,27 +3009,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Uint64(et *typex.EventTime, key *int16, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETInt16Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Uint64
-	return ret
-}
-
 func (v *iterNative) readInt16Float32(key *int16, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6237,27 +3028,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Float32(et *typex.EventTime, key *int16, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETInt16Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Float32
-	return ret
-}
-
 func (v *iterNative) readInt16Float64(key *int16, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6277,27 +3047,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Float64(et *typex.EventTime, key *int16, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETInt16Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Float64
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_T(key *int16, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6317,27 +3066,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_T(et *typex.EventTime, key *int16, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETInt16Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_T
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_U(key *int16, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6357,27 +3085,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_U(et *typex.EventTime, key *int16, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETInt16Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_U
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_V(key *int16, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6397,27 +3104,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_V(et *typex.EventTime, key *int16, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETInt16Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_V
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_W(key *int16, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6437,27 +3123,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_W(et *typex.EventTime, key *int16, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETInt16Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_W
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_X(key *int16, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6477,27 +3142,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_X(et *typex.EventTime, key *int16, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETInt16Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_X
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_Y(key *int16, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6517,27 +3161,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_Y(et *typex.EventTime, key *int16, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETInt16Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_Y
-	return ret
-}
-
 func (v *iterNative) readInt16Typex_Z(key *int16, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6557,27 +3180,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt16Typex_Z(et *typex.EventTime, key *int16, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int16)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETInt16Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt16Typex_Z
-	return ret
-}
-
 func (v *iterNative) readInt32(val *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6596,26 +3198,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32(et *typex.EventTime, val *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(int32)
-	return true
-}
-
-func iterMakerETInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32
-	return ret
-}
-
 func (v *iterNative) readInt32ByteSlice(key *int32, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6635,27 +3217,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32ByteSlice(et *typex.EventTime, key *int32, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETInt32ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32ByteSlice
-	return ret
-}
-
 func (v *iterNative) readInt32Bool(key *int32, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6675,27 +3236,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Bool(et *typex.EventTime, key *int32, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETInt32Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Bool
-	return ret
-}
-
 func (v *iterNative) readInt32String(key *int32, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6715,27 +3255,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32String(et *typex.EventTime, key *int32, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETInt32String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32String
-	return ret
-}
-
 func (v *iterNative) readInt32Int(key *int32, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6755,27 +3274,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Int(et *typex.EventTime, key *int32, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETInt32Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Int
-	return ret
-}
-
 func (v *iterNative) readInt32Int8(key *int32, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6795,27 +3293,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Int8(et *typex.EventTime, key *int32, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETInt32Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Int8
-	return ret
-}
-
 func (v *iterNative) readInt32Int16(key *int32, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6835,27 +3312,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Int16(et *typex.EventTime, key *int32, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETInt32Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Int16
-	return ret
-}
-
 func (v *iterNative) readInt32Int32(key *int32, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6875,27 +3331,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Int32(et *typex.EventTime, key *int32, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETInt32Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Int32
-	return ret
-}
-
 func (v *iterNative) readInt32Int64(key *int32, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6915,27 +3350,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Int64(et *typex.EventTime, key *int32, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETInt32Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Int64
-	return ret
-}
-
 func (v *iterNative) readInt32Uint(key *int32, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6955,27 +3369,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Uint(et *typex.EventTime, key *int32, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETInt32Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Uint
-	return ret
-}
-
 func (v *iterNative) readInt32Uint8(key *int32, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -6995,27 +3388,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Uint8(et *typex.EventTime, key *int32, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETInt32Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Uint8
-	return ret
-}
-
 func (v *iterNative) readInt32Uint16(key *int32, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7035,27 +3407,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Uint16(et *typex.EventTime, key *int32, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETInt32Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Uint16
-	return ret
-}
-
 func (v *iterNative) readInt32Uint32(key *int32, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7075,27 +3426,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Uint32(et *typex.EventTime, key *int32, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETInt32Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Uint32
-	return ret
-}
-
 func (v *iterNative) readInt32Uint64(key *int32, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7115,27 +3445,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Uint64(et *typex.EventTime, key *int32, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETInt32Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Uint64
-	return ret
-}
-
 func (v *iterNative) readInt32Float32(key *int32, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7155,27 +3464,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Float32(et *typex.EventTime, key *int32, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETInt32Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Float32
-	return ret
-}
-
 func (v *iterNative) readInt32Float64(key *int32, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7195,27 +3483,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Float64(et *typex.EventTime, key *int32, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETInt32Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Float64
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_T(key *int32, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7235,27 +3502,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_T(et *typex.EventTime, key *int32, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETInt32Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_T
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_U(key *int32, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7275,27 +3521,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_U(et *typex.EventTime, key *int32, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETInt32Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_U
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_V(key *int32, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7315,27 +3540,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_V(et *typex.EventTime, key *int32, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETInt32Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_V
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_W(key *int32, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7355,27 +3559,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_W(et *typex.EventTime, key *int32, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETInt32Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_W
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_X(key *int32, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7395,27 +3578,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_X(et *typex.EventTime, key *int32, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETInt32Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_X
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_Y(key *int32, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7435,27 +3597,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_Y(et *typex.EventTime, key *int32, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETInt32Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_Y
-	return ret
-}
-
 func (v *iterNative) readInt32Typex_Z(key *int32, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7475,27 +3616,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt32Typex_Z(et *typex.EventTime, key *int32, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int32)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETInt32Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt32Typex_Z
-	return ret
-}
-
 func (v *iterNative) readInt64(val *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7514,26 +3634,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64(et *typex.EventTime, val *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(int64)
-	return true
-}
-
-func iterMakerETInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64
-	return ret
-}
-
 func (v *iterNative) readInt64ByteSlice(key *int64, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7553,27 +3653,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64ByteSlice(et *typex.EventTime, key *int64, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETInt64ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64ByteSlice
-	return ret
-}
-
 func (v *iterNative) readInt64Bool(key *int64, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7593,27 +3672,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Bool(et *typex.EventTime, key *int64, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETInt64Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Bool
-	return ret
-}
-
 func (v *iterNative) readInt64String(key *int64, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7633,27 +3691,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64String(et *typex.EventTime, key *int64, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETInt64String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64String
-	return ret
-}
-
 func (v *iterNative) readInt64Int(key *int64, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7673,27 +3710,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Int(et *typex.EventTime, key *int64, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETInt64Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Int
-	return ret
-}
-
 func (v *iterNative) readInt64Int8(key *int64, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7713,27 +3729,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Int8(et *typex.EventTime, key *int64, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETInt64Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Int8
-	return ret
-}
-
 func (v *iterNative) readInt64Int16(key *int64, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7753,27 +3748,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Int16(et *typex.EventTime, key *int64, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETInt64Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Int16
-	return ret
-}
-
 func (v *iterNative) readInt64Int32(key *int64, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7793,27 +3767,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Int32(et *typex.EventTime, key *int64, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETInt64Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Int32
-	return ret
-}
-
 func (v *iterNative) readInt64Int64(key *int64, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7833,27 +3786,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Int64(et *typex.EventTime, key *int64, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETInt64Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Int64
-	return ret
-}
-
 func (v *iterNative) readInt64Uint(key *int64, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7873,27 +3805,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Uint(et *typex.EventTime, key *int64, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETInt64Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Uint
-	return ret
-}
-
 func (v *iterNative) readInt64Uint8(key *int64, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7913,27 +3824,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Uint8(et *typex.EventTime, key *int64, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETInt64Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Uint8
-	return ret
-}
-
 func (v *iterNative) readInt64Uint16(key *int64, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7953,27 +3843,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Uint16(et *typex.EventTime, key *int64, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETInt64Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Uint16
-	return ret
-}
-
 func (v *iterNative) readInt64Uint32(key *int64, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -7993,27 +3862,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Uint32(et *typex.EventTime, key *int64, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETInt64Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Uint32
-	return ret
-}
-
 func (v *iterNative) readInt64Uint64(key *int64, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8033,27 +3881,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Uint64(et *typex.EventTime, key *int64, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETInt64Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Uint64
-	return ret
-}
-
 func (v *iterNative) readInt64Float32(key *int64, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8073,27 +3900,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Float32(et *typex.EventTime, key *int64, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETInt64Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Float32
-	return ret
-}
-
 func (v *iterNative) readInt64Float64(key *int64, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8113,27 +3919,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Float64(et *typex.EventTime, key *int64, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETInt64Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Float64
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_T(key *int64, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8153,27 +3938,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_T(et *typex.EventTime, key *int64, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETInt64Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_T
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_U(key *int64, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8193,27 +3957,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_U(et *typex.EventTime, key *int64, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETInt64Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_U
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_V(key *int64, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8233,27 +3976,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_V(et *typex.EventTime, key *int64, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETInt64Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_V
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_W(key *int64, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8273,27 +3995,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_W(et *typex.EventTime, key *int64, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETInt64Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_W
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_X(key *int64, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8313,27 +4014,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_X(et *typex.EventTime, key *int64, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETInt64Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_X
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_Y(key *int64, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8353,27 +4033,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_Y(et *typex.EventTime, key *int64, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETInt64Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_Y
-	return ret
-}
-
 func (v *iterNative) readInt64Typex_Z(key *int64, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8393,27 +4052,6 @@
 	return ret
 }
 
-func (v *iterNative) readETInt64Typex_Z(et *typex.EventTime, key *int64, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(int64)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETInt64Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETInt64Typex_Z
-	return ret
-}
-
 func (v *iterNative) readUint(val *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8432,26 +4070,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint(et *typex.EventTime, val *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(uint)
-	return true
-}
-
-func iterMakerETUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint
-	return ret
-}
-
 func (v *iterNative) readUintByteSlice(key *uint, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8471,27 +4089,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintByteSlice(et *typex.EventTime, key *uint, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETUintByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintByteSlice
-	return ret
-}
-
 func (v *iterNative) readUintBool(key *uint, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8511,27 +4108,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintBool(et *typex.EventTime, key *uint, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETUintBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintBool
-	return ret
-}
-
 func (v *iterNative) readUintString(key *uint, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8551,27 +4127,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintString(et *typex.EventTime, key *uint, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETUintString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintString
-	return ret
-}
-
 func (v *iterNative) readUintInt(key *uint, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8591,27 +4146,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintInt(et *typex.EventTime, key *uint, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETUintInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintInt
-	return ret
-}
-
 func (v *iterNative) readUintInt8(key *uint, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8631,27 +4165,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintInt8(et *typex.EventTime, key *uint, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETUintInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintInt8
-	return ret
-}
-
 func (v *iterNative) readUintInt16(key *uint, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8671,27 +4184,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintInt16(et *typex.EventTime, key *uint, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETUintInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintInt16
-	return ret
-}
-
 func (v *iterNative) readUintInt32(key *uint, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8711,27 +4203,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintInt32(et *typex.EventTime, key *uint, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETUintInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintInt32
-	return ret
-}
-
 func (v *iterNative) readUintInt64(key *uint, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8751,27 +4222,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintInt64(et *typex.EventTime, key *uint, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETUintInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintInt64
-	return ret
-}
-
 func (v *iterNative) readUintUint(key *uint, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8791,27 +4241,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintUint(et *typex.EventTime, key *uint, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETUintUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintUint
-	return ret
-}
-
 func (v *iterNative) readUintUint8(key *uint, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8831,27 +4260,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintUint8(et *typex.EventTime, key *uint, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETUintUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintUint8
-	return ret
-}
-
 func (v *iterNative) readUintUint16(key *uint, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8871,27 +4279,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintUint16(et *typex.EventTime, key *uint, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETUintUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintUint16
-	return ret
-}
-
 func (v *iterNative) readUintUint32(key *uint, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8911,27 +4298,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintUint32(et *typex.EventTime, key *uint, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETUintUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintUint32
-	return ret
-}
-
 func (v *iterNative) readUintUint64(key *uint, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8951,27 +4317,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintUint64(et *typex.EventTime, key *uint, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETUintUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintUint64
-	return ret
-}
-
 func (v *iterNative) readUintFloat32(key *uint, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -8991,27 +4336,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintFloat32(et *typex.EventTime, key *uint, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETUintFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintFloat32
-	return ret
-}
-
 func (v *iterNative) readUintFloat64(key *uint, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9031,27 +4355,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintFloat64(et *typex.EventTime, key *uint, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETUintFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintFloat64
-	return ret
-}
-
 func (v *iterNative) readUintTypex_T(key *uint, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9071,27 +4374,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_T(et *typex.EventTime, key *uint, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETUintTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_T
-	return ret
-}
-
 func (v *iterNative) readUintTypex_U(key *uint, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9111,27 +4393,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_U(et *typex.EventTime, key *uint, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETUintTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_U
-	return ret
-}
-
 func (v *iterNative) readUintTypex_V(key *uint, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9151,27 +4412,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_V(et *typex.EventTime, key *uint, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETUintTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_V
-	return ret
-}
-
 func (v *iterNative) readUintTypex_W(key *uint, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9191,27 +4431,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_W(et *typex.EventTime, key *uint, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETUintTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_W
-	return ret
-}
-
 func (v *iterNative) readUintTypex_X(key *uint, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9231,27 +4450,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_X(et *typex.EventTime, key *uint, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETUintTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_X
-	return ret
-}
-
 func (v *iterNative) readUintTypex_Y(key *uint, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9271,27 +4469,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_Y(et *typex.EventTime, key *uint, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETUintTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_Y
-	return ret
-}
-
 func (v *iterNative) readUintTypex_Z(key *uint, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9311,27 +4488,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUintTypex_Z(et *typex.EventTime, key *uint, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETUintTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUintTypex_Z
-	return ret
-}
-
 func (v *iterNative) readUint8(val *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9350,26 +4506,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8(et *typex.EventTime, val *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(uint8)
-	return true
-}
-
-func iterMakerETUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8
-	return ret
-}
-
 func (v *iterNative) readUint8ByteSlice(key *uint8, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9389,27 +4525,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8ByteSlice(et *typex.EventTime, key *uint8, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETUint8ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8ByteSlice
-	return ret
-}
-
 func (v *iterNative) readUint8Bool(key *uint8, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9429,27 +4544,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Bool(et *typex.EventTime, key *uint8, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETUint8Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Bool
-	return ret
-}
-
 func (v *iterNative) readUint8String(key *uint8, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9469,27 +4563,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8String(et *typex.EventTime, key *uint8, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETUint8String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8String
-	return ret
-}
-
 func (v *iterNative) readUint8Int(key *uint8, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9509,27 +4582,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Int(et *typex.EventTime, key *uint8, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETUint8Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Int
-	return ret
-}
-
 func (v *iterNative) readUint8Int8(key *uint8, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9549,27 +4601,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Int8(et *typex.EventTime, key *uint8, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETUint8Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Int8
-	return ret
-}
-
 func (v *iterNative) readUint8Int16(key *uint8, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9589,27 +4620,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Int16(et *typex.EventTime, key *uint8, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETUint8Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Int16
-	return ret
-}
-
 func (v *iterNative) readUint8Int32(key *uint8, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9629,27 +4639,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Int32(et *typex.EventTime, key *uint8, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETUint8Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Int32
-	return ret
-}
-
 func (v *iterNative) readUint8Int64(key *uint8, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9669,27 +4658,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Int64(et *typex.EventTime, key *uint8, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETUint8Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Int64
-	return ret
-}
-
 func (v *iterNative) readUint8Uint(key *uint8, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9709,27 +4677,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Uint(et *typex.EventTime, key *uint8, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETUint8Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Uint
-	return ret
-}
-
 func (v *iterNative) readUint8Uint8(key *uint8, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9749,27 +4696,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Uint8(et *typex.EventTime, key *uint8, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETUint8Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Uint8
-	return ret
-}
-
 func (v *iterNative) readUint8Uint16(key *uint8, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9789,27 +4715,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Uint16(et *typex.EventTime, key *uint8, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETUint8Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Uint16
-	return ret
-}
-
 func (v *iterNative) readUint8Uint32(key *uint8, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9829,27 +4734,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Uint32(et *typex.EventTime, key *uint8, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETUint8Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Uint32
-	return ret
-}
-
 func (v *iterNative) readUint8Uint64(key *uint8, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9869,27 +4753,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Uint64(et *typex.EventTime, key *uint8, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETUint8Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Uint64
-	return ret
-}
-
 func (v *iterNative) readUint8Float32(key *uint8, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9909,27 +4772,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Float32(et *typex.EventTime, key *uint8, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETUint8Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Float32
-	return ret
-}
-
 func (v *iterNative) readUint8Float64(key *uint8, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9949,27 +4791,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Float64(et *typex.EventTime, key *uint8, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETUint8Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Float64
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_T(key *uint8, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -9989,27 +4810,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_T(et *typex.EventTime, key *uint8, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETUint8Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_T
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_U(key *uint8, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10029,27 +4829,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_U(et *typex.EventTime, key *uint8, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETUint8Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_U
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_V(key *uint8, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10069,27 +4848,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_V(et *typex.EventTime, key *uint8, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETUint8Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_V
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_W(key *uint8, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10109,27 +4867,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_W(et *typex.EventTime, key *uint8, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETUint8Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_W
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_X(key *uint8, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10149,27 +4886,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_X(et *typex.EventTime, key *uint8, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETUint8Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_X
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_Y(key *uint8, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10189,27 +4905,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_Y(et *typex.EventTime, key *uint8, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETUint8Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_Y
-	return ret
-}
-
 func (v *iterNative) readUint8Typex_Z(key *uint8, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10229,27 +4924,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint8Typex_Z(et *typex.EventTime, key *uint8, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint8)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETUint8Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint8Typex_Z
-	return ret
-}
-
 func (v *iterNative) readUint16(val *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10268,26 +4942,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16(et *typex.EventTime, val *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(uint16)
-	return true
-}
-
-func iterMakerETUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16
-	return ret
-}
-
 func (v *iterNative) readUint16ByteSlice(key *uint16, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10307,27 +4961,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16ByteSlice(et *typex.EventTime, key *uint16, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETUint16ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16ByteSlice
-	return ret
-}
-
 func (v *iterNative) readUint16Bool(key *uint16, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10347,27 +4980,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Bool(et *typex.EventTime, key *uint16, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETUint16Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Bool
-	return ret
-}
-
 func (v *iterNative) readUint16String(key *uint16, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10387,27 +4999,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16String(et *typex.EventTime, key *uint16, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETUint16String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16String
-	return ret
-}
-
 func (v *iterNative) readUint16Int(key *uint16, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10427,27 +5018,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Int(et *typex.EventTime, key *uint16, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETUint16Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Int
-	return ret
-}
-
 func (v *iterNative) readUint16Int8(key *uint16, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10467,27 +5037,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Int8(et *typex.EventTime, key *uint16, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETUint16Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Int8
-	return ret
-}
-
 func (v *iterNative) readUint16Int16(key *uint16, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10507,27 +5056,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Int16(et *typex.EventTime, key *uint16, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETUint16Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Int16
-	return ret
-}
-
 func (v *iterNative) readUint16Int32(key *uint16, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10547,27 +5075,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Int32(et *typex.EventTime, key *uint16, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETUint16Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Int32
-	return ret
-}
-
 func (v *iterNative) readUint16Int64(key *uint16, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10587,27 +5094,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Int64(et *typex.EventTime, key *uint16, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETUint16Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Int64
-	return ret
-}
-
 func (v *iterNative) readUint16Uint(key *uint16, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10627,27 +5113,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Uint(et *typex.EventTime, key *uint16, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETUint16Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Uint
-	return ret
-}
-
 func (v *iterNative) readUint16Uint8(key *uint16, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10667,27 +5132,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Uint8(et *typex.EventTime, key *uint16, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETUint16Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Uint8
-	return ret
-}
-
 func (v *iterNative) readUint16Uint16(key *uint16, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10707,27 +5151,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Uint16(et *typex.EventTime, key *uint16, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETUint16Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Uint16
-	return ret
-}
-
 func (v *iterNative) readUint16Uint32(key *uint16, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10747,27 +5170,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Uint32(et *typex.EventTime, key *uint16, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETUint16Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Uint32
-	return ret
-}
-
 func (v *iterNative) readUint16Uint64(key *uint16, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10787,27 +5189,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Uint64(et *typex.EventTime, key *uint16, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETUint16Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Uint64
-	return ret
-}
-
 func (v *iterNative) readUint16Float32(key *uint16, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10827,27 +5208,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Float32(et *typex.EventTime, key *uint16, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETUint16Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Float32
-	return ret
-}
-
 func (v *iterNative) readUint16Float64(key *uint16, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10867,27 +5227,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Float64(et *typex.EventTime, key *uint16, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETUint16Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Float64
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_T(key *uint16, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10907,27 +5246,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_T(et *typex.EventTime, key *uint16, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETUint16Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_T
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_U(key *uint16, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10947,27 +5265,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_U(et *typex.EventTime, key *uint16, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETUint16Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_U
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_V(key *uint16, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -10987,27 +5284,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_V(et *typex.EventTime, key *uint16, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETUint16Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_V
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_W(key *uint16, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11027,27 +5303,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_W(et *typex.EventTime, key *uint16, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETUint16Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_W
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_X(key *uint16, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11067,27 +5322,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_X(et *typex.EventTime, key *uint16, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETUint16Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_X
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_Y(key *uint16, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11107,27 +5341,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_Y(et *typex.EventTime, key *uint16, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETUint16Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_Y
-	return ret
-}
-
 func (v *iterNative) readUint16Typex_Z(key *uint16, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11147,27 +5360,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint16Typex_Z(et *typex.EventTime, key *uint16, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint16)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETUint16Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint16Typex_Z
-	return ret
-}
-
 func (v *iterNative) readUint32(val *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11186,26 +5378,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32(et *typex.EventTime, val *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(uint32)
-	return true
-}
-
-func iterMakerETUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32
-	return ret
-}
-
 func (v *iterNative) readUint32ByteSlice(key *uint32, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11225,27 +5397,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32ByteSlice(et *typex.EventTime, key *uint32, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETUint32ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32ByteSlice
-	return ret
-}
-
 func (v *iterNative) readUint32Bool(key *uint32, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11265,27 +5416,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Bool(et *typex.EventTime, key *uint32, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETUint32Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Bool
-	return ret
-}
-
 func (v *iterNative) readUint32String(key *uint32, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11305,27 +5435,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32String(et *typex.EventTime, key *uint32, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETUint32String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32String
-	return ret
-}
-
 func (v *iterNative) readUint32Int(key *uint32, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11345,27 +5454,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Int(et *typex.EventTime, key *uint32, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETUint32Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Int
-	return ret
-}
-
 func (v *iterNative) readUint32Int8(key *uint32, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11385,27 +5473,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Int8(et *typex.EventTime, key *uint32, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETUint32Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Int8
-	return ret
-}
-
 func (v *iterNative) readUint32Int16(key *uint32, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11425,27 +5492,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Int16(et *typex.EventTime, key *uint32, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETUint32Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Int16
-	return ret
-}
-
 func (v *iterNative) readUint32Int32(key *uint32, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11465,27 +5511,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Int32(et *typex.EventTime, key *uint32, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETUint32Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Int32
-	return ret
-}
-
 func (v *iterNative) readUint32Int64(key *uint32, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11505,27 +5530,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Int64(et *typex.EventTime, key *uint32, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETUint32Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Int64
-	return ret
-}
-
 func (v *iterNative) readUint32Uint(key *uint32, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11545,27 +5549,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Uint(et *typex.EventTime, key *uint32, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETUint32Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Uint
-	return ret
-}
-
 func (v *iterNative) readUint32Uint8(key *uint32, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11585,27 +5568,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Uint8(et *typex.EventTime, key *uint32, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETUint32Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Uint8
-	return ret
-}
-
 func (v *iterNative) readUint32Uint16(key *uint32, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11625,27 +5587,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Uint16(et *typex.EventTime, key *uint32, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETUint32Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Uint16
-	return ret
-}
-
 func (v *iterNative) readUint32Uint32(key *uint32, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11665,27 +5606,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Uint32(et *typex.EventTime, key *uint32, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETUint32Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Uint32
-	return ret
-}
-
 func (v *iterNative) readUint32Uint64(key *uint32, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11705,27 +5625,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Uint64(et *typex.EventTime, key *uint32, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETUint32Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Uint64
-	return ret
-}
-
 func (v *iterNative) readUint32Float32(key *uint32, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11745,27 +5644,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Float32(et *typex.EventTime, key *uint32, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETUint32Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Float32
-	return ret
-}
-
 func (v *iterNative) readUint32Float64(key *uint32, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11785,27 +5663,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Float64(et *typex.EventTime, key *uint32, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETUint32Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Float64
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_T(key *uint32, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11825,27 +5682,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_T(et *typex.EventTime, key *uint32, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETUint32Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_T
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_U(key *uint32, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11865,27 +5701,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_U(et *typex.EventTime, key *uint32, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETUint32Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_U
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_V(key *uint32, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11905,27 +5720,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_V(et *typex.EventTime, key *uint32, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETUint32Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_V
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_W(key *uint32, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11945,27 +5739,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_W(et *typex.EventTime, key *uint32, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETUint32Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_W
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_X(key *uint32, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -11985,27 +5758,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_X(et *typex.EventTime, key *uint32, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETUint32Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_X
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_Y(key *uint32, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12025,27 +5777,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_Y(et *typex.EventTime, key *uint32, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETUint32Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_Y
-	return ret
-}
-
 func (v *iterNative) readUint32Typex_Z(key *uint32, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12065,27 +5796,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint32Typex_Z(et *typex.EventTime, key *uint32, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint32)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETUint32Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint32Typex_Z
-	return ret
-}
-
 func (v *iterNative) readUint64(val *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12104,26 +5814,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64(et *typex.EventTime, val *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(uint64)
-	return true
-}
-
-func iterMakerETUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64
-	return ret
-}
-
 func (v *iterNative) readUint64ByteSlice(key *uint64, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12143,27 +5833,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64ByteSlice(et *typex.EventTime, key *uint64, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETUint64ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64ByteSlice
-	return ret
-}
-
 func (v *iterNative) readUint64Bool(key *uint64, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12183,27 +5852,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Bool(et *typex.EventTime, key *uint64, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETUint64Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Bool
-	return ret
-}
-
 func (v *iterNative) readUint64String(key *uint64, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12223,27 +5871,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64String(et *typex.EventTime, key *uint64, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETUint64String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64String
-	return ret
-}
-
 func (v *iterNative) readUint64Int(key *uint64, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12263,27 +5890,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Int(et *typex.EventTime, key *uint64, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETUint64Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Int
-	return ret
-}
-
 func (v *iterNative) readUint64Int8(key *uint64, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12303,27 +5909,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Int8(et *typex.EventTime, key *uint64, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETUint64Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Int8
-	return ret
-}
-
 func (v *iterNative) readUint64Int16(key *uint64, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12343,27 +5928,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Int16(et *typex.EventTime, key *uint64, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETUint64Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Int16
-	return ret
-}
-
 func (v *iterNative) readUint64Int32(key *uint64, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12383,27 +5947,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Int32(et *typex.EventTime, key *uint64, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETUint64Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Int32
-	return ret
-}
-
 func (v *iterNative) readUint64Int64(key *uint64, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12423,27 +5966,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Int64(et *typex.EventTime, key *uint64, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETUint64Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Int64
-	return ret
-}
-
 func (v *iterNative) readUint64Uint(key *uint64, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12463,27 +5985,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Uint(et *typex.EventTime, key *uint64, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETUint64Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Uint
-	return ret
-}
-
 func (v *iterNative) readUint64Uint8(key *uint64, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12503,27 +6004,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Uint8(et *typex.EventTime, key *uint64, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETUint64Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Uint8
-	return ret
-}
-
 func (v *iterNative) readUint64Uint16(key *uint64, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12543,27 +6023,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Uint16(et *typex.EventTime, key *uint64, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETUint64Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Uint16
-	return ret
-}
-
 func (v *iterNative) readUint64Uint32(key *uint64, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12583,27 +6042,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Uint32(et *typex.EventTime, key *uint64, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETUint64Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Uint32
-	return ret
-}
-
 func (v *iterNative) readUint64Uint64(key *uint64, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12623,27 +6061,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Uint64(et *typex.EventTime, key *uint64, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETUint64Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Uint64
-	return ret
-}
-
 func (v *iterNative) readUint64Float32(key *uint64, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12663,27 +6080,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Float32(et *typex.EventTime, key *uint64, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETUint64Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Float32
-	return ret
-}
-
 func (v *iterNative) readUint64Float64(key *uint64, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12703,27 +6099,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Float64(et *typex.EventTime, key *uint64, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETUint64Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Float64
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_T(key *uint64, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12743,27 +6118,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_T(et *typex.EventTime, key *uint64, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETUint64Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_T
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_U(key *uint64, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12783,27 +6137,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_U(et *typex.EventTime, key *uint64, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETUint64Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_U
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_V(key *uint64, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12823,27 +6156,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_V(et *typex.EventTime, key *uint64, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETUint64Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_V
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_W(key *uint64, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12863,27 +6175,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_W(et *typex.EventTime, key *uint64, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETUint64Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_W
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_X(key *uint64, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12903,27 +6194,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_X(et *typex.EventTime, key *uint64, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETUint64Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_X
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_Y(key *uint64, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12943,27 +6213,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_Y(et *typex.EventTime, key *uint64, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETUint64Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_Y
-	return ret
-}
-
 func (v *iterNative) readUint64Typex_Z(key *uint64, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -12983,27 +6232,6 @@
 	return ret
 }
 
-func (v *iterNative) readETUint64Typex_Z(et *typex.EventTime, key *uint64, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(uint64)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETUint64Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETUint64Typex_Z
-	return ret
-}
-
 func (v *iterNative) readFloat32(val *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13022,26 +6250,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32(et *typex.EventTime, val *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(float32)
-	return true
-}
-
-func iterMakerETFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32
-	return ret
-}
-
 func (v *iterNative) readFloat32ByteSlice(key *float32, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13061,27 +6269,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32ByteSlice(et *typex.EventTime, key *float32, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETFloat32ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32ByteSlice
-	return ret
-}
-
 func (v *iterNative) readFloat32Bool(key *float32, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13101,27 +6288,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Bool(et *typex.EventTime, key *float32, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETFloat32Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Bool
-	return ret
-}
-
 func (v *iterNative) readFloat32String(key *float32, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13141,27 +6307,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32String(et *typex.EventTime, key *float32, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETFloat32String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32String
-	return ret
-}
-
 func (v *iterNative) readFloat32Int(key *float32, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13181,27 +6326,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Int(et *typex.EventTime, key *float32, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETFloat32Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Int
-	return ret
-}
-
 func (v *iterNative) readFloat32Int8(key *float32, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13221,27 +6345,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Int8(et *typex.EventTime, key *float32, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETFloat32Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Int8
-	return ret
-}
-
 func (v *iterNative) readFloat32Int16(key *float32, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13261,27 +6364,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Int16(et *typex.EventTime, key *float32, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETFloat32Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Int16
-	return ret
-}
-
 func (v *iterNative) readFloat32Int32(key *float32, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13301,27 +6383,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Int32(et *typex.EventTime, key *float32, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETFloat32Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Int32
-	return ret
-}
-
 func (v *iterNative) readFloat32Int64(key *float32, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13341,27 +6402,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Int64(et *typex.EventTime, key *float32, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETFloat32Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Int64
-	return ret
-}
-
 func (v *iterNative) readFloat32Uint(key *float32, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13381,27 +6421,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Uint(et *typex.EventTime, key *float32, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETFloat32Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Uint
-	return ret
-}
-
 func (v *iterNative) readFloat32Uint8(key *float32, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13421,27 +6440,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Uint8(et *typex.EventTime, key *float32, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETFloat32Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Uint8
-	return ret
-}
-
 func (v *iterNative) readFloat32Uint16(key *float32, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13461,27 +6459,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Uint16(et *typex.EventTime, key *float32, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETFloat32Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Uint16
-	return ret
-}
-
 func (v *iterNative) readFloat32Uint32(key *float32, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13501,27 +6478,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Uint32(et *typex.EventTime, key *float32, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETFloat32Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Uint32
-	return ret
-}
-
 func (v *iterNative) readFloat32Uint64(key *float32, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13541,27 +6497,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Uint64(et *typex.EventTime, key *float32, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETFloat32Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Uint64
-	return ret
-}
-
 func (v *iterNative) readFloat32Float32(key *float32, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13581,27 +6516,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Float32(et *typex.EventTime, key *float32, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETFloat32Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Float32
-	return ret
-}
-
 func (v *iterNative) readFloat32Float64(key *float32, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13621,27 +6535,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Float64(et *typex.EventTime, key *float32, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETFloat32Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Float64
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_T(key *float32, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13661,27 +6554,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_T(et *typex.EventTime, key *float32, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETFloat32Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_T
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_U(key *float32, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13701,27 +6573,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_U(et *typex.EventTime, key *float32, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETFloat32Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_U
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_V(key *float32, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13741,27 +6592,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_V(et *typex.EventTime, key *float32, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETFloat32Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_V
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_W(key *float32, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13781,27 +6611,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_W(et *typex.EventTime, key *float32, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETFloat32Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_W
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_X(key *float32, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13821,27 +6630,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_X(et *typex.EventTime, key *float32, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETFloat32Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_X
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_Y(key *float32, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13861,27 +6649,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_Y(et *typex.EventTime, key *float32, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETFloat32Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_Y
-	return ret
-}
-
 func (v *iterNative) readFloat32Typex_Z(key *float32, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13901,27 +6668,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat32Typex_Z(et *typex.EventTime, key *float32, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float32)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETFloat32Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat32Typex_Z
-	return ret
-}
-
 func (v *iterNative) readFloat64(val *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13940,26 +6686,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64(et *typex.EventTime, val *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(float64)
-	return true
-}
-
-func iterMakerETFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64
-	return ret
-}
-
 func (v *iterNative) readFloat64ByteSlice(key *float64, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -13979,27 +6705,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64ByteSlice(et *typex.EventTime, key *float64, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETFloat64ByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64ByteSlice
-	return ret
-}
-
 func (v *iterNative) readFloat64Bool(key *float64, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14019,27 +6724,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Bool(et *typex.EventTime, key *float64, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETFloat64Bool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Bool
-	return ret
-}
-
 func (v *iterNative) readFloat64String(key *float64, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14059,27 +6743,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64String(et *typex.EventTime, key *float64, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETFloat64String(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64String
-	return ret
-}
-
 func (v *iterNative) readFloat64Int(key *float64, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14099,27 +6762,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Int(et *typex.EventTime, key *float64, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETFloat64Int(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Int
-	return ret
-}
-
 func (v *iterNative) readFloat64Int8(key *float64, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14139,27 +6781,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Int8(et *typex.EventTime, key *float64, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETFloat64Int8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Int8
-	return ret
-}
-
 func (v *iterNative) readFloat64Int16(key *float64, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14179,27 +6800,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Int16(et *typex.EventTime, key *float64, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETFloat64Int16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Int16
-	return ret
-}
-
 func (v *iterNative) readFloat64Int32(key *float64, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14219,27 +6819,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Int32(et *typex.EventTime, key *float64, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETFloat64Int32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Int32
-	return ret
-}
-
 func (v *iterNative) readFloat64Int64(key *float64, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14259,27 +6838,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Int64(et *typex.EventTime, key *float64, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETFloat64Int64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Int64
-	return ret
-}
-
 func (v *iterNative) readFloat64Uint(key *float64, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14299,27 +6857,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Uint(et *typex.EventTime, key *float64, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETFloat64Uint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Uint
-	return ret
-}
-
 func (v *iterNative) readFloat64Uint8(key *float64, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14339,27 +6876,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Uint8(et *typex.EventTime, key *float64, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETFloat64Uint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Uint8
-	return ret
-}
-
 func (v *iterNative) readFloat64Uint16(key *float64, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14379,27 +6895,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Uint16(et *typex.EventTime, key *float64, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETFloat64Uint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Uint16
-	return ret
-}
-
 func (v *iterNative) readFloat64Uint32(key *float64, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14419,27 +6914,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Uint32(et *typex.EventTime, key *float64, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETFloat64Uint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Uint32
-	return ret
-}
-
 func (v *iterNative) readFloat64Uint64(key *float64, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14459,27 +6933,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Uint64(et *typex.EventTime, key *float64, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETFloat64Uint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Uint64
-	return ret
-}
-
 func (v *iterNative) readFloat64Float32(key *float64, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14499,27 +6952,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Float32(et *typex.EventTime, key *float64, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETFloat64Float32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Float32
-	return ret
-}
-
 func (v *iterNative) readFloat64Float64(key *float64, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14539,27 +6971,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Float64(et *typex.EventTime, key *float64, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETFloat64Float64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Float64
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_T(key *float64, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14579,27 +6990,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_T(et *typex.EventTime, key *float64, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETFloat64Typex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_T
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_U(key *float64, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14619,27 +7009,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_U(et *typex.EventTime, key *float64, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETFloat64Typex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_U
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_V(key *float64, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14659,27 +7028,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_V(et *typex.EventTime, key *float64, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETFloat64Typex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_V
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_W(key *float64, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14699,27 +7047,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_W(et *typex.EventTime, key *float64, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETFloat64Typex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_W
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_X(key *float64, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14739,27 +7066,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_X(et *typex.EventTime, key *float64, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETFloat64Typex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_X
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_Y(key *float64, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14779,27 +7085,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_Y(et *typex.EventTime, key *float64, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETFloat64Typex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_Y
-	return ret
-}
-
 func (v *iterNative) readFloat64Typex_Z(key *float64, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14819,27 +7104,6 @@
 	return ret
 }
 
-func (v *iterNative) readETFloat64Typex_Z(et *typex.EventTime, key *float64, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(float64)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETFloat64Typex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETFloat64Typex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_T(val *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14858,26 +7122,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_T(et *typex.EventTime, val *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_TByteSlice(key *typex.T, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14897,27 +7141,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TByteSlice(et *typex.EventTime, key *typex.T, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_TByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_TBool(key *typex.T, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14937,27 +7160,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TBool(et *typex.EventTime, key *typex.T, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_TBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TBool
-	return ret
-}
-
 func (v *iterNative) readTypex_TString(key *typex.T, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -14977,27 +7179,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TString(et *typex.EventTime, key *typex.T, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_TString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TString
-	return ret
-}
-
 func (v *iterNative) readTypex_TInt(key *typex.T, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15017,27 +7198,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TInt(et *typex.EventTime, key *typex.T, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_TInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TInt
-	return ret
-}
-
 func (v *iterNative) readTypex_TInt8(key *typex.T, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15057,27 +7217,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TInt8(et *typex.EventTime, key *typex.T, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_TInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_TInt16(key *typex.T, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15097,27 +7236,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TInt16(et *typex.EventTime, key *typex.T, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_TInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_TInt32(key *typex.T, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15137,27 +7255,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TInt32(et *typex.EventTime, key *typex.T, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_TInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_TInt64(key *typex.T, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15177,27 +7274,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TInt64(et *typex.EventTime, key *typex.T, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_TInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_TUint(key *typex.T, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15217,27 +7293,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TUint(et *typex.EventTime, key *typex.T, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_TUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TUint
-	return ret
-}
-
 func (v *iterNative) readTypex_TUint8(key *typex.T, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15257,27 +7312,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TUint8(et *typex.EventTime, key *typex.T, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_TUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_TUint16(key *typex.T, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15297,27 +7331,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TUint16(et *typex.EventTime, key *typex.T, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_TUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_TUint32(key *typex.T, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15337,27 +7350,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TUint32(et *typex.EventTime, key *typex.T, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_TUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_TUint64(key *typex.T, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15377,27 +7369,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TUint64(et *typex.EventTime, key *typex.T, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_TUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_TFloat32(key *typex.T, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15417,27 +7388,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TFloat32(et *typex.EventTime, key *typex.T, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_TFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_TFloat64(key *typex.T, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15457,27 +7407,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TFloat64(et *typex.EventTime, key *typex.T, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_TFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_T(key *typex.T, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15497,27 +7426,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_T(et *typex.EventTime, key *typex.T, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_TTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_U(key *typex.T, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15537,27 +7445,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_U(et *typex.EventTime, key *typex.T, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_TTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_V(key *typex.T, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15577,27 +7464,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_V(et *typex.EventTime, key *typex.T, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_TTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_W(key *typex.T, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15617,27 +7483,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_W(et *typex.EventTime, key *typex.T, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_TTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_X(key *typex.T, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15657,27 +7502,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_X(et *typex.EventTime, key *typex.T, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_TTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_Y(key *typex.T, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15697,27 +7521,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_Y(et *typex.EventTime, key *typex.T, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_TTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_TTypex_Z(key *typex.T, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15737,27 +7540,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_TTypex_Z(et *typex.EventTime, key *typex.T, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.T)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_TTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_TTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_U(val *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15776,26 +7558,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_U(et *typex.EventTime, val *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_UByteSlice(key *typex.U, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15815,27 +7577,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UByteSlice(et *typex.EventTime, key *typex.U, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_UByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_UBool(key *typex.U, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15855,27 +7596,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UBool(et *typex.EventTime, key *typex.U, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_UBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UBool
-	return ret
-}
-
 func (v *iterNative) readTypex_UString(key *typex.U, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15895,27 +7615,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UString(et *typex.EventTime, key *typex.U, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_UString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UString
-	return ret
-}
-
 func (v *iterNative) readTypex_UInt(key *typex.U, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15935,27 +7634,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UInt(et *typex.EventTime, key *typex.U, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_UInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UInt
-	return ret
-}
-
 func (v *iterNative) readTypex_UInt8(key *typex.U, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -15975,27 +7653,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UInt8(et *typex.EventTime, key *typex.U, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_UInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_UInt16(key *typex.U, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16015,27 +7672,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UInt16(et *typex.EventTime, key *typex.U, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_UInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_UInt32(key *typex.U, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16055,27 +7691,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UInt32(et *typex.EventTime, key *typex.U, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_UInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_UInt64(key *typex.U, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16095,27 +7710,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UInt64(et *typex.EventTime, key *typex.U, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_UInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_UUint(key *typex.U, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16135,27 +7729,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UUint(et *typex.EventTime, key *typex.U, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_UUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UUint
-	return ret
-}
-
 func (v *iterNative) readTypex_UUint8(key *typex.U, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16175,27 +7748,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UUint8(et *typex.EventTime, key *typex.U, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_UUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_UUint16(key *typex.U, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16215,27 +7767,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UUint16(et *typex.EventTime, key *typex.U, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_UUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_UUint32(key *typex.U, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16255,27 +7786,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UUint32(et *typex.EventTime, key *typex.U, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_UUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_UUint64(key *typex.U, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16295,27 +7805,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UUint64(et *typex.EventTime, key *typex.U, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_UUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_UFloat32(key *typex.U, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16335,27 +7824,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UFloat32(et *typex.EventTime, key *typex.U, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_UFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_UFloat64(key *typex.U, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16375,27 +7843,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UFloat64(et *typex.EventTime, key *typex.U, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_UFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_T(key *typex.U, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16415,27 +7862,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_T(et *typex.EventTime, key *typex.U, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_UTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_U(key *typex.U, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16455,27 +7881,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_U(et *typex.EventTime, key *typex.U, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_UTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_V(key *typex.U, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16495,27 +7900,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_V(et *typex.EventTime, key *typex.U, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_UTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_W(key *typex.U, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16535,27 +7919,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_W(et *typex.EventTime, key *typex.U, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_UTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_X(key *typex.U, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16575,27 +7938,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_X(et *typex.EventTime, key *typex.U, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_UTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_Y(key *typex.U, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16615,27 +7957,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_Y(et *typex.EventTime, key *typex.U, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_UTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_UTypex_Z(key *typex.U, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16655,27 +7976,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_UTypex_Z(et *typex.EventTime, key *typex.U, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.U)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_UTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_UTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_V(val *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16694,26 +7994,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_V(et *typex.EventTime, val *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_VByteSlice(key *typex.V, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16733,27 +8013,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VByteSlice(et *typex.EventTime, key *typex.V, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_VByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_VBool(key *typex.V, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16773,27 +8032,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VBool(et *typex.EventTime, key *typex.V, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_VBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VBool
-	return ret
-}
-
 func (v *iterNative) readTypex_VString(key *typex.V, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16813,27 +8051,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VString(et *typex.EventTime, key *typex.V, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_VString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VString
-	return ret
-}
-
 func (v *iterNative) readTypex_VInt(key *typex.V, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16853,27 +8070,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VInt(et *typex.EventTime, key *typex.V, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_VInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VInt
-	return ret
-}
-
 func (v *iterNative) readTypex_VInt8(key *typex.V, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16893,27 +8089,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VInt8(et *typex.EventTime, key *typex.V, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_VInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_VInt16(key *typex.V, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16933,27 +8108,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VInt16(et *typex.EventTime, key *typex.V, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_VInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_VInt32(key *typex.V, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -16973,27 +8127,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VInt32(et *typex.EventTime, key *typex.V, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_VInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_VInt64(key *typex.V, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17013,27 +8146,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VInt64(et *typex.EventTime, key *typex.V, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_VInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_VUint(key *typex.V, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17053,27 +8165,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VUint(et *typex.EventTime, key *typex.V, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_VUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VUint
-	return ret
-}
-
 func (v *iterNative) readTypex_VUint8(key *typex.V, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17093,27 +8184,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VUint8(et *typex.EventTime, key *typex.V, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_VUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_VUint16(key *typex.V, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17133,27 +8203,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VUint16(et *typex.EventTime, key *typex.V, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_VUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_VUint32(key *typex.V, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17173,27 +8222,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VUint32(et *typex.EventTime, key *typex.V, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_VUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_VUint64(key *typex.V, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17213,27 +8241,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VUint64(et *typex.EventTime, key *typex.V, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_VUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_VFloat32(key *typex.V, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17253,27 +8260,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VFloat32(et *typex.EventTime, key *typex.V, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_VFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_VFloat64(key *typex.V, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17293,27 +8279,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VFloat64(et *typex.EventTime, key *typex.V, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_VFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_T(key *typex.V, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17333,27 +8298,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_T(et *typex.EventTime, key *typex.V, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_VTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_U(key *typex.V, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17373,27 +8317,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_U(et *typex.EventTime, key *typex.V, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_VTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_V(key *typex.V, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17413,27 +8336,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_V(et *typex.EventTime, key *typex.V, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_VTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_W(key *typex.V, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17453,27 +8355,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_W(et *typex.EventTime, key *typex.V, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_VTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_X(key *typex.V, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17493,27 +8374,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_X(et *typex.EventTime, key *typex.V, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_VTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_Y(key *typex.V, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17533,27 +8393,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_Y(et *typex.EventTime, key *typex.V, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_VTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_VTypex_Z(key *typex.V, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17573,27 +8412,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_VTypex_Z(et *typex.EventTime, key *typex.V, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.V)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_VTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_VTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_W(val *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17612,26 +8430,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_W(et *typex.EventTime, val *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_WByteSlice(key *typex.W, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17651,27 +8449,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WByteSlice(et *typex.EventTime, key *typex.W, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_WByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_WBool(key *typex.W, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17691,27 +8468,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WBool(et *typex.EventTime, key *typex.W, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_WBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WBool
-	return ret
-}
-
 func (v *iterNative) readTypex_WString(key *typex.W, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17731,27 +8487,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WString(et *typex.EventTime, key *typex.W, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_WString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WString
-	return ret
-}
-
 func (v *iterNative) readTypex_WInt(key *typex.W, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17771,27 +8506,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WInt(et *typex.EventTime, key *typex.W, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_WInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WInt
-	return ret
-}
-
 func (v *iterNative) readTypex_WInt8(key *typex.W, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17811,27 +8525,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WInt8(et *typex.EventTime, key *typex.W, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_WInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_WInt16(key *typex.W, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17851,27 +8544,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WInt16(et *typex.EventTime, key *typex.W, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_WInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_WInt32(key *typex.W, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17891,27 +8563,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WInt32(et *typex.EventTime, key *typex.W, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_WInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_WInt64(key *typex.W, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17931,27 +8582,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WInt64(et *typex.EventTime, key *typex.W, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_WInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_WUint(key *typex.W, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -17971,27 +8601,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WUint(et *typex.EventTime, key *typex.W, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_WUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WUint
-	return ret
-}
-
 func (v *iterNative) readTypex_WUint8(key *typex.W, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18011,27 +8620,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WUint8(et *typex.EventTime, key *typex.W, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_WUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_WUint16(key *typex.W, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18051,27 +8639,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WUint16(et *typex.EventTime, key *typex.W, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_WUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_WUint32(key *typex.W, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18091,27 +8658,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WUint32(et *typex.EventTime, key *typex.W, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_WUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_WUint64(key *typex.W, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18131,27 +8677,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WUint64(et *typex.EventTime, key *typex.W, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_WUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_WFloat32(key *typex.W, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18171,27 +8696,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WFloat32(et *typex.EventTime, key *typex.W, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_WFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_WFloat64(key *typex.W, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18211,27 +8715,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WFloat64(et *typex.EventTime, key *typex.W, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_WFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_T(key *typex.W, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18251,27 +8734,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_T(et *typex.EventTime, key *typex.W, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_WTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_U(key *typex.W, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18291,27 +8753,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_U(et *typex.EventTime, key *typex.W, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_WTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_V(key *typex.W, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18331,27 +8772,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_V(et *typex.EventTime, key *typex.W, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_WTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_W(key *typex.W, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18371,27 +8791,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_W(et *typex.EventTime, key *typex.W, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_WTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_X(key *typex.W, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18411,27 +8810,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_X(et *typex.EventTime, key *typex.W, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_WTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_Y(key *typex.W, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18451,27 +8829,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_Y(et *typex.EventTime, key *typex.W, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_WTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_WTypex_Z(key *typex.W, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18491,27 +8848,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_WTypex_Z(et *typex.EventTime, key *typex.W, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.W)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_WTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_WTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_X(val *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18530,26 +8866,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_X(et *typex.EventTime, val *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_XByteSlice(key *typex.X, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18569,27 +8885,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XByteSlice(et *typex.EventTime, key *typex.X, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_XByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_XBool(key *typex.X, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18609,27 +8904,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XBool(et *typex.EventTime, key *typex.X, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_XBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XBool
-	return ret
-}
-
 func (v *iterNative) readTypex_XString(key *typex.X, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18649,27 +8923,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XString(et *typex.EventTime, key *typex.X, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_XString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XString
-	return ret
-}
-
 func (v *iterNative) readTypex_XInt(key *typex.X, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18689,27 +8942,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XInt(et *typex.EventTime, key *typex.X, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_XInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XInt
-	return ret
-}
-
 func (v *iterNative) readTypex_XInt8(key *typex.X, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18729,27 +8961,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XInt8(et *typex.EventTime, key *typex.X, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_XInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_XInt16(key *typex.X, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18769,27 +8980,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XInt16(et *typex.EventTime, key *typex.X, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_XInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_XInt32(key *typex.X, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18809,27 +8999,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XInt32(et *typex.EventTime, key *typex.X, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_XInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_XInt64(key *typex.X, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18849,27 +9018,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XInt64(et *typex.EventTime, key *typex.X, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_XInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_XUint(key *typex.X, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18889,27 +9037,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XUint(et *typex.EventTime, key *typex.X, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_XUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XUint
-	return ret
-}
-
 func (v *iterNative) readTypex_XUint8(key *typex.X, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18929,27 +9056,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XUint8(et *typex.EventTime, key *typex.X, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_XUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_XUint16(key *typex.X, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -18969,27 +9075,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XUint16(et *typex.EventTime, key *typex.X, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_XUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_XUint32(key *typex.X, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19009,27 +9094,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XUint32(et *typex.EventTime, key *typex.X, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_XUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_XUint64(key *typex.X, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19049,27 +9113,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XUint64(et *typex.EventTime, key *typex.X, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_XUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_XFloat32(key *typex.X, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19089,27 +9132,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XFloat32(et *typex.EventTime, key *typex.X, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_XFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_XFloat64(key *typex.X, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19129,27 +9151,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XFloat64(et *typex.EventTime, key *typex.X, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_XFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_T(key *typex.X, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19169,27 +9170,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_T(et *typex.EventTime, key *typex.X, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_XTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_U(key *typex.X, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19209,27 +9189,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_U(et *typex.EventTime, key *typex.X, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_XTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_V(key *typex.X, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19249,27 +9208,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_V(et *typex.EventTime, key *typex.X, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_XTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_W(key *typex.X, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19289,27 +9227,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_W(et *typex.EventTime, key *typex.X, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_XTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_X(key *typex.X, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19329,27 +9246,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_X(et *typex.EventTime, key *typex.X, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_XTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_Y(key *typex.X, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19369,27 +9265,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_Y(et *typex.EventTime, key *typex.X, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_XTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_XTypex_Z(key *typex.X, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19409,27 +9284,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_XTypex_Z(et *typex.EventTime, key *typex.X, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.X)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_XTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_XTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_Y(val *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19448,26 +9302,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_Y(et *typex.EventTime, val *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_YByteSlice(key *typex.Y, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19487,27 +9321,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YByteSlice(et *typex.EventTime, key *typex.Y, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_YByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_YBool(key *typex.Y, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19527,27 +9340,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YBool(et *typex.EventTime, key *typex.Y, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_YBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YBool
-	return ret
-}
-
 func (v *iterNative) readTypex_YString(key *typex.Y, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19567,27 +9359,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YString(et *typex.EventTime, key *typex.Y, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_YString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YString
-	return ret
-}
-
 func (v *iterNative) readTypex_YInt(key *typex.Y, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19607,27 +9378,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YInt(et *typex.EventTime, key *typex.Y, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_YInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YInt
-	return ret
-}
-
 func (v *iterNative) readTypex_YInt8(key *typex.Y, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19647,27 +9397,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YInt8(et *typex.EventTime, key *typex.Y, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_YInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_YInt16(key *typex.Y, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19687,27 +9416,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YInt16(et *typex.EventTime, key *typex.Y, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_YInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_YInt32(key *typex.Y, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19727,27 +9435,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YInt32(et *typex.EventTime, key *typex.Y, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_YInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_YInt64(key *typex.Y, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19767,27 +9454,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YInt64(et *typex.EventTime, key *typex.Y, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_YInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_YUint(key *typex.Y, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19807,27 +9473,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YUint(et *typex.EventTime, key *typex.Y, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_YUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YUint
-	return ret
-}
-
 func (v *iterNative) readTypex_YUint8(key *typex.Y, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19847,27 +9492,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YUint8(et *typex.EventTime, key *typex.Y, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_YUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_YUint16(key *typex.Y, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19887,27 +9511,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YUint16(et *typex.EventTime, key *typex.Y, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_YUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_YUint32(key *typex.Y, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19927,27 +9530,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YUint32(et *typex.EventTime, key *typex.Y, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_YUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_YUint64(key *typex.Y, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -19967,27 +9549,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YUint64(et *typex.EventTime, key *typex.Y, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_YUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_YFloat32(key *typex.Y, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20007,27 +9568,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YFloat32(et *typex.EventTime, key *typex.Y, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_YFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_YFloat64(key *typex.Y, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20047,27 +9587,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YFloat64(et *typex.EventTime, key *typex.Y, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_YFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_T(key *typex.Y, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20087,27 +9606,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_T(et *typex.EventTime, key *typex.Y, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_YTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_U(key *typex.Y, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20127,27 +9625,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_U(et *typex.EventTime, key *typex.Y, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_YTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_V(key *typex.Y, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20167,27 +9644,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_V(et *typex.EventTime, key *typex.Y, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_YTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_W(key *typex.Y, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20207,27 +9663,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_W(et *typex.EventTime, key *typex.Y, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_YTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_X(key *typex.Y, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20247,27 +9682,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_X(et *typex.EventTime, key *typex.Y, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_YTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_Y(key *typex.Y, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20287,27 +9701,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_Y(et *typex.EventTime, key *typex.Y, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_YTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_YTypex_Z(key *typex.Y, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20327,27 +9720,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_YTypex_Z(et *typex.EventTime, key *typex.Y, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Y)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_YTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_YTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_Z(val *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20366,26 +9738,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_Z(et *typex.EventTime, val *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*val = elm.Elm.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_Z
-	return ret
-}
-
 func (v *iterNative) readTypex_ZByteSlice(key *typex.Z, value *[]byte) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20405,27 +9757,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZByteSlice(et *typex.EventTime, key *typex.Z, value *[]byte) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.([]byte)
-	return true
-}
-
-func iterMakerETTypex_ZByteSlice(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZByteSlice
-	return ret
-}
-
 func (v *iterNative) readTypex_ZBool(key *typex.Z, value *bool) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20445,27 +9776,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZBool(et *typex.EventTime, key *typex.Z, value *bool) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(bool)
-	return true
-}
-
-func iterMakerETTypex_ZBool(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZBool
-	return ret
-}
-
 func (v *iterNative) readTypex_ZString(key *typex.Z, value *string) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20485,27 +9795,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZString(et *typex.EventTime, key *typex.Z, value *string) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(string)
-	return true
-}
-
-func iterMakerETTypex_ZString(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZString
-	return ret
-}
-
 func (v *iterNative) readTypex_ZInt(key *typex.Z, value *int) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20525,27 +9814,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZInt(et *typex.EventTime, key *typex.Z, value *int) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(int)
-	return true
-}
-
-func iterMakerETTypex_ZInt(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZInt
-	return ret
-}
-
 func (v *iterNative) readTypex_ZInt8(key *typex.Z, value *int8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20565,27 +9833,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZInt8(et *typex.EventTime, key *typex.Z, value *int8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(int8)
-	return true
-}
-
-func iterMakerETTypex_ZInt8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZInt8
-	return ret
-}
-
 func (v *iterNative) readTypex_ZInt16(key *typex.Z, value *int16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20605,27 +9852,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZInt16(et *typex.EventTime, key *typex.Z, value *int16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(int16)
-	return true
-}
-
-func iterMakerETTypex_ZInt16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZInt16
-	return ret
-}
-
 func (v *iterNative) readTypex_ZInt32(key *typex.Z, value *int32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20645,27 +9871,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZInt32(et *typex.EventTime, key *typex.Z, value *int32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(int32)
-	return true
-}
-
-func iterMakerETTypex_ZInt32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZInt32
-	return ret
-}
-
 func (v *iterNative) readTypex_ZInt64(key *typex.Z, value *int64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20685,27 +9890,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZInt64(et *typex.EventTime, key *typex.Z, value *int64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(int64)
-	return true
-}
-
-func iterMakerETTypex_ZInt64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZInt64
-	return ret
-}
-
 func (v *iterNative) readTypex_ZUint(key *typex.Z, value *uint) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20725,27 +9909,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZUint(et *typex.EventTime, key *typex.Z, value *uint) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(uint)
-	return true
-}
-
-func iterMakerETTypex_ZUint(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZUint
-	return ret
-}
-
 func (v *iterNative) readTypex_ZUint8(key *typex.Z, value *uint8) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20765,27 +9928,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZUint8(et *typex.EventTime, key *typex.Z, value *uint8) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(uint8)
-	return true
-}
-
-func iterMakerETTypex_ZUint8(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZUint8
-	return ret
-}
-
 func (v *iterNative) readTypex_ZUint16(key *typex.Z, value *uint16) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20805,27 +9947,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZUint16(et *typex.EventTime, key *typex.Z, value *uint16) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(uint16)
-	return true
-}
-
-func iterMakerETTypex_ZUint16(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZUint16
-	return ret
-}
-
 func (v *iterNative) readTypex_ZUint32(key *typex.Z, value *uint32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20845,27 +9966,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZUint32(et *typex.EventTime, key *typex.Z, value *uint32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(uint32)
-	return true
-}
-
-func iterMakerETTypex_ZUint32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZUint32
-	return ret
-}
-
 func (v *iterNative) readTypex_ZUint64(key *typex.Z, value *uint64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20885,27 +9985,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZUint64(et *typex.EventTime, key *typex.Z, value *uint64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(uint64)
-	return true
-}
-
-func iterMakerETTypex_ZUint64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZUint64
-	return ret
-}
-
 func (v *iterNative) readTypex_ZFloat32(key *typex.Z, value *float32) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20925,27 +10004,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZFloat32(et *typex.EventTime, key *typex.Z, value *float32) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(float32)
-	return true
-}
-
-func iterMakerETTypex_ZFloat32(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZFloat32
-	return ret
-}
-
 func (v *iterNative) readTypex_ZFloat64(key *typex.Z, value *float64) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -20965,27 +10023,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZFloat64(et *typex.EventTime, key *typex.Z, value *float64) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(float64)
-	return true
-}
-
-func iterMakerETTypex_ZFloat64(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZFloat64
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_T(key *typex.Z, value *typex.T) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21005,27 +10042,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_T(et *typex.EventTime, key *typex.Z, value *typex.T) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.T)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_T(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_T
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_U(key *typex.Z, value *typex.U) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21045,27 +10061,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_U(et *typex.EventTime, key *typex.Z, value *typex.U) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.U)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_U(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_U
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_V(key *typex.Z, value *typex.V) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21085,27 +10080,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_V(et *typex.EventTime, key *typex.Z, value *typex.V) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.V)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_V(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_V
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_W(key *typex.Z, value *typex.W) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21125,27 +10099,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_W(et *typex.EventTime, key *typex.Z, value *typex.W) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.W)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_W(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_W
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_X(key *typex.Z, value *typex.X) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21165,27 +10118,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_X(et *typex.EventTime, key *typex.Z, value *typex.X) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.X)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_X(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_X
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_Y(key *typex.Z, value *typex.Y) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21205,27 +10137,6 @@
 	return ret
 }
 
-func (v *iterNative) readETTypex_ZTypex_Y(et *typex.EventTime, key *typex.Z, value *typex.Y) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.Y)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_Y(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_Y
-	return ret
-}
-
 func (v *iterNative) readTypex_ZTypex_Z(key *typex.Z, value *typex.Z) bool {
 	elm, err := v.cur.Read()
 	if err != nil {
@@ -21244,24 +10155,3 @@
 	ret.fn = ret.readTypex_ZTypex_Z
 	return ret
 }
-
-func (v *iterNative) readETTypex_ZTypex_Z(et *typex.EventTime, key *typex.Z, value *typex.Z) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-	*et = elm.Timestamp
-	*key = elm.Elm.(typex.Z)
-	*value = elm.Elm2.(typex.Z)
-	return true
-}
-
-func iterMakerETTypex_ZTypex_Z(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readETTypex_ZTypex_Z
-	return ret
-}
diff --git a/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.tmpl b/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.tmpl
index 338ce41..f9d78dc 100644
--- a/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.tmpl
+++ b/sdks/go/pkg/beam/core/runtime/exec/optimized/inputs.tmpl
@@ -30,10 +30,8 @@
 func init() {
 {{- range $x := .X}}
     exec.RegisterInput(reflect.TypeOf((*func (*{{$x.Type}})bool)(nil)).Elem(), iterMaker{{$x.Name}})
-    exec.RegisterInput(reflect.TypeOf((*func (*typex.EventTime, *{{$x.Type}})bool)(nil)).Elem(), iterMakerET{{$x.Name}})
 {{- range $y := .Y}}
     exec.RegisterInput(reflect.TypeOf((*func (*{{$x.Type}}, *{{$y.Type}})bool)(nil)).Elem(), iterMaker{{$x.Name}}{{$y.Name}})
-    exec.RegisterInput(reflect.TypeOf((*func (*typex.EventTime, *{{$x.Type}}, *{{$y.Type}})bool)(nil)).Elem(), iterMakerET{{$x.Name}}{{$y.Name}})
 {{- end}}
 {{- end}}
 }
@@ -86,26 +84,6 @@
 	return ret
 }
 
-func (v *iterNative) readET{{$x.Name}}(et *typex.EventTime, val *{{$x.Type}}) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-    *et = elm.Timestamp
-    *val = elm.Elm.({{$x.Type}})
-    return true
-}
-
-func iterMakerET{{$x.Name}}(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readET{{$x.Name}}
-	return ret
-}
-
 {{range $y := .Y}}
 func (v *iterNative) read{{$x.Name}}{{$y.Name}}(key *{{$x.Type}}, value *{{$y.Type}}) bool {
 	elm, err := v.cur.Read()
@@ -125,26 +103,5 @@
 	ret.fn = ret.read{{$x.Name}}{{$y.Name}}
 	return ret
 }
-
-func (v *iterNative) readET{{$x.Name}}{{$y.Name}}(et *typex.EventTime, key *{{$x.Type}}, value *{{$y.Type}}) bool {
-	elm, err := v.cur.Read()
-	if err != nil {
-		if err == io.EOF {
-			return false
-		}
-		panic(fmt.Sprintf("broken stream: %v", err))
-	}
-
-    *et = elm.Timestamp
-    *key = elm.Elm.({{$x.Type}})
-    *value = elm.Elm2.({{$y.Type}})
-    return true
-}
-
-func iterMakerET{{$x.Name}}{{$y.Name}}(s exec.ReStream) exec.ReusableInput {
-	ret := &iterNative{s: s}
-	ret.fn = ret.readET{{$x.Name}}{{$y.Name}}
-	return ret
-}
 {{end}}
 {{end}}
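For context (not part of the diff): the generated readers above, and the template that emits them, implement the iterator-style inputs that a Go SDK DoFn receives as a func(*V) bool or func(*K, *V) bool parameter, for example after a GroupByKey. A minimal sketch of a DoFn consuming one such iterator, assuming string keys and int values; the names are illustrative:

	// sumPerKey folds the grouped values for one key. Each call to values
	// fills v and reports whether another element was available, which is
	// the contract the generated read* methods above implement.
	func sumPerKey(key string, values func(*int) bool) (string, int) {
		var v, total int
		for values(&v) {
			total += v
		}
		return key, total
	}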
diff --git a/sdks/go/pkg/beam/core/runtime/exec/translate.go b/sdks/go/pkg/beam/core/runtime/exec/translate.go
index 0539173..d9d56dd 100644
--- a/sdks/go/pkg/beam/core/runtime/exec/translate.go
+++ b/sdks/go/pkg/beam/core/runtime/exec/translate.go
@@ -28,7 +28,6 @@
 	v1pb "github.com/apache/beam/sdks/v2/go/pkg/beam/core/runtime/graphx/v1"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/typex"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/util/protox"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/util/stringx"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
 	fnpb "github.com/apache/beam/sdks/v2/go/pkg/beam/model/fnexecution_v1"
 	pipepb "github.com/apache/beam/sdks/v2/go/pkg/beam/model/pipeline_v1"
@@ -689,7 +688,7 @@
 	if len(m) == 0 {
 		return nil
 	}
-	if len(m) == 1 && stringx.Keys(m)[0] == "bogus" {
+	if _, ok := m["bogus"]; ok && len(m) == 1 {
 		return nil // Ignore special bogus node for legacy Dataflow.
 	}
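For context (illustrative only, not part of the diff): the replacement above uses Go's comma-ok map lookup, so the "bogus" check no longer needs a helper that materializes every key just to inspect one. A generic sketch of the idiom:

	// isOnlyKey reports whether m consists of exactly one entry stored under key,
	// without building an intermediate slice of keys.
	func isOnlyKey(m map[string]string, key string) bool {
		_, ok := m[key]
		return ok && len(m) == 1
	}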
 
diff --git a/sdks/go/pkg/beam/core/runtime/graphx/translate.go b/sdks/go/pkg/beam/core/runtime/graphx/translate.go
index add3254..77797d7 100644
--- a/sdks/go/pkg/beam/core/runtime/graphx/translate.go
+++ b/sdks/go/pkg/beam/core/runtime/graphx/translate.go
@@ -834,7 +834,7 @@
 				MergeStatus:     pipepb.MergeStatus_NON_MERGING,
 				WindowCoderId:   windowCoderId,
 				ClosingBehavior: pipepb.ClosingBehavior_EMIT_IF_NONEMPTY,
-				AllowedLateness: 0,
+				AllowedLateness: int64(in.From.WindowingStrategy().AllowedLateness),
 				OnTimeBehavior:  pipepb.OnTimeBehavior_FIRE_ALWAYS,
 			})
 	}
@@ -1053,7 +1053,7 @@
 		AccumulationMode: makeAccumulationMode(w.AccumulationMode),
 		OutputTime:       pipepb.OutputTime_END_OF_WINDOW,
 		ClosingBehavior:  pipepb.ClosingBehavior_EMIT_IF_NONEMPTY,
-		AllowedLateness:  0,
+		AllowedLateness:  int64(w.AllowedLateness),
 		OnTimeBehavior:   pipepb.OnTimeBehavior_FIRE_IF_NONEMPTY,
 	}
 	return ws, nil
diff --git a/sdks/go/pkg/beam/core/runtime/harness/init/init.go b/sdks/go/pkg/beam/core/runtime/harness/init/init.go
index 6dd2f81..7cbf215 100644
--- a/sdks/go/pkg/beam/core/runtime/harness/init/init.go
+++ b/sdks/go/pkg/beam/core/runtime/harness/init/init.go
@@ -31,6 +31,8 @@
 
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/runtime"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/runtime/harness"
+
+	// Import gcs filesystem so that it can be used to upload heap dumps.
 	_ "github.com/apache/beam/sdks/v2/go/pkg/beam/io/filesystem/gcs"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/util/grpcx"
 	"github.com/apache/beam/sdks/v2/go/pkg/beam/util/syscallx"
diff --git a/sdks/go/pkg/beam/core/util/stringx/bytes.go b/sdks/go/pkg/beam/core/util/stringx/bytes.go
deleted file mode 100644
index bf4cd11..0000000
--- a/sdks/go/pkg/beam/core/util/stringx/bytes.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package stringx contains utilities for working with strings. It
-// complements the standard "strings" package.
-//
-// Deprecated: the utilities in this package are unused within the code base
-// and will be removed in a future Beam release.
-package stringx
-
-// ToBytes converts a string to a byte slice.
-func ToBytes(s string) []byte {
-	return ([]byte)(s)
-}
-
-// FromBytes converts a byte slice to a string.
-func FromBytes(b []byte) string {
-	return (string)(b)
-}
diff --git a/sdks/go/pkg/beam/core/util/stringx/map.go b/sdks/go/pkg/beam/core/util/stringx/map.go
deleted file mode 100644
index 3dec662..0000000
--- a/sdks/go/pkg/beam/core/util/stringx/map.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stringx
-
-// Keys returns the domain of a map[string]string.
-func Keys(m map[string]string) []string {
-	var keys []string
-	for k := range m {
-		keys = append(keys, k)
-	}
-	return keys
-}
-
-// Values returns the values of a map[string]string.
-func Values(m map[string]string) []string {
-	var values []string
-	for _, v := range m {
-		values = append(values, v)
-	}
-	return values
-}
-
-// AnyValue returns a value of a map[string]string. Panics
-// if the map is empty.
-func AnyValue(m map[string]string) string {
-	for _, v := range m {
-		return v
-	}
-	panic("map empty")
-}
-
-// SingleValue returns the single value of a map[string]string.
-// Panics if the map does not contain exactly one value.
-func SingleValue(m map[string]string) string {
-	if len(m) != 1 {
-		panic("map not singleton")
-	}
-	return AnyValue(m)
-}
diff --git a/sdks/go/pkg/beam/io/fhirio/common.go b/sdks/go/pkg/beam/io/fhirio/common.go
index 4c5a8dc..b2a26c7 100644
--- a/sdks/go/pkg/beam/io/fhirio/common.go
+++ b/sdks/go/pkg/beam/io/fhirio/common.go
@@ -24,6 +24,7 @@
 	"encoding/json"
 	"io"
 	"net/http"
+	"strings"
 	"time"
 
 	"github.com/apache/beam/sdks/v2/go/pkg/beam"
@@ -67,16 +68,26 @@
 	return string(bodyBytes), nil
 }
 
+type operationCounters struct {
+	successCount, errorCount beam.Counter
+}
+
+func (c *operationCounters) setup(namespace string) {
+	c.successCount = beam.NewCounter(namespace, operationSuccessCounterName)
+	c.errorCount = beam.NewCounter(namespace, operationErrorCounterName)
+}
+
 type operationResults struct {
 	Successes int64 `json:"success,string"`
 	Failures  int64 `json:"failure,string"`
 }
 
 type fhirStoreClient interface {
-	readResource(resourcePath string) (*http.Response, error)
-	executeBundle(storePath string, bundle []byte) (*http.Response, error)
+	readResource(resourcePath []byte) (*http.Response, error)
+	executeBundle(storePath string, bundle string) (*http.Response, error)
 	search(storePath, resourceType string, queries map[string]string, pageToken string) (*http.Response, error)
 	deidentify(srcStorePath, dstStorePath string, deidConfig *healthcare.DeidentifyConfig) (operationResults, error)
+	importResources(storePath, gcsURI string, contentStructure ContentStructure) (operationResults, error)
 }
 
 type fhirStoreClientImpl struct {
@@ -95,12 +106,16 @@
 	return c.healthcareService.Projects.Locations.Datasets.FhirStores.Fhir
 }
 
-func (c *fhirStoreClientImpl) readResource(resourcePath string) (*http.Response, error) {
-	return c.fhirService().Read(resourcePath).Do()
+func (c *fhirStoreClientImpl) fhirStoreService() *healthcare.ProjectsLocationsDatasetsFhirStoresService {
+	return c.healthcareService.Projects.Locations.Datasets.FhirStores
 }
 
-func (c *fhirStoreClientImpl) executeBundle(storePath string, bundle []byte) (*http.Response, error) {
-	return c.fhirService().ExecuteBundle(storePath, bytes.NewReader(bundle)).Do()
+func (c *fhirStoreClientImpl) readResource(resourcePath []byte) (*http.Response, error) {
+	return c.fhirService().Read(string(resourcePath)).Do()
+}
+
+func (c *fhirStoreClientImpl) executeBundle(storePath, bundle string) (*http.Response, error) {
+	return c.fhirService().ExecuteBundle(storePath, strings.NewReader(bundle)).Do()
 }
 
 func (c *fhirStoreClientImpl) search(storePath, resourceType string, queries map[string]string, pageToken string) (*http.Response, error) {
@@ -125,7 +140,19 @@
 		Config:           deidConfig,
 		DestinationStore: dstStorePath,
 	}
-	operation, err := c.healthcareService.Projects.Locations.Datasets.FhirStores.Deidentify(srcStorePath, deidRequest).Do()
+	operation, err := c.fhirStoreService().Deidentify(srcStorePath, deidRequest).Do()
+	if err != nil {
+		return operationResults{}, err
+	}
+	return c.pollTilCompleteAndCollectResults(operation)
+}
+
+func (c *fhirStoreClientImpl) importResources(storePath, gcsURI string, contentStructure ContentStructure) (operationResults, error) {
+	importRequest := &healthcare.ImportResourcesRequest{
+		ContentStructure: contentStructure.String(),
+		GcsSource:        &healthcare.GoogleCloudHealthcareV1FhirGcsSource{Uri: gcsURI},
+	}
+	operation, err := c.fhirStoreService().Import(storePath, importRequest).Do()
 	if err != nil {
 		return operationResults{}, err
 	}
diff --git a/sdks/go/pkg/beam/io/fhirio/deidentify.go b/sdks/go/pkg/beam/io/fhirio/deidentify.go
index 1655798..2c37749 100644
--- a/sdks/go/pkg/beam/io/fhirio/deidentify.go
+++ b/sdks/go/pkg/beam/io/fhirio/deidentify.go
@@ -34,10 +34,9 @@
 
 type deidentifyFn struct {
 	fnCommonVariables
+	operationCounters
 	SourceStorePath, DestinationStorePath string
 	DeidentifyConfig                      *healthcare.DeidentifyConfig
-	operationErrorCount                   beam.Counter
-	operationSuccessCount                 beam.Counter
 }
 
 func (fn deidentifyFn) String() string {
@@ -46,8 +45,7 @@
 
 func (fn *deidentifyFn) Setup() {
 	fn.fnCommonVariables.setup(fn.String())
-	fn.operationErrorCount = beam.NewCounter(fn.String(), operationErrorCounterName)
-	fn.operationSuccessCount = beam.NewCounter(fn.String(), operationSuccessCounterName)
+	fn.operationCounters.setup(fn.String())
 }
 
 func (fn *deidentifyFn) ProcessElement(ctx context.Context, _ []byte, emitDstStore func(string)) {
@@ -56,11 +54,11 @@
 	})
 	if err != nil {
 		log.Warnf(ctx, "Deidentify operation failed. Reason: %v", err)
-		fn.operationErrorCount.Inc(ctx, 1)
+		fn.operationCounters.errorCount.Inc(ctx, 1)
 		return
 	}
 
-	fn.operationSuccessCount.Inc(ctx, 1)
+	fn.operationCounters.successCount.Inc(ctx, 1)
 	fn.resourcesSuccessCount.Inc(ctx, result.Successes)
 	fn.resourcesErrorCount.Inc(ctx, result.Failures)
 	emitDstStore(fn.DestinationStorePath)
diff --git a/sdks/go/pkg/beam/io/fhirio/deidentify_test.go b/sdks/go/pkg/beam/io/fhirio/deidentify_test.go
index d47ad6c..10f281c 100644
--- a/sdks/go/pkg/beam/io/fhirio/deidentify_test.go
+++ b/sdks/go/pkg/beam/io/fhirio/deidentify_test.go
@@ -40,10 +40,7 @@
 	p, s := beam.NewPipelineWithRoot()
 	out := deidentify(s, "src", "dst", nil, &fakeFhirStoreClient{
 		fakeDeidentify: func(string, string, *healthcare.DeidentifyConfig) (operationResults, error) {
-			return operationResults{
-				Successes: 5,
-				Failures:  2,
-			}, nil
+			return testOperationResult, nil
 		},
 	})
 	passert.Count(s, out, "", 1)
@@ -51,6 +48,6 @@
 	result := ptest.RunAndValidate(t, p)
 	validateCounter(t, result, operationErrorCounterName, 0)
 	validateCounter(t, result, operationSuccessCounterName, 1)
-	validateCounter(t, result, errorCounterName, 2)
-	validateCounter(t, result, successCounterName, 5)
+	validateCounter(t, result, errorCounterName, int(testOperationResult.Failures))
+	validateCounter(t, result, successCounterName, int(testOperationResult.Successes))
 }
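For context (an assumption; the definition lives in the package's shared test helpers and is not shown in this diff): the fixture referenced above presumably consolidates the literals the test used before, along the lines of:

	// Hypothetical shared fixture; the field values mirror the literals
	// previously inlined here (5 successes, 2 failures).
	var testOperationResult = operationResults{Successes: 5, Failures: 2}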
diff --git a/sdks/go/pkg/beam/io/fhirio/execute_bundles.go b/sdks/go/pkg/beam/io/fhirio/execute_bundles.go
index 4b7757b..9ba5919 100644
--- a/sdks/go/pkg/beam/io/fhirio/execute_bundles.go
+++ b/sdks/go/pkg/beam/io/fhirio/execute_bundles.go
@@ -37,7 +37,7 @@
 )
 
 func init() {
-	register.DoFn4x0[context.Context, []byte, func(string), func(string)]((*executeBundleFn)(nil))
+	register.DoFn4x0[context.Context, string, func(string), func(string)]((*executeBundleFn)(nil))
 	register.Emitter1[string]()
 }
 
@@ -57,7 +57,7 @@
 	fn.successesCount = beam.NewCounter(fn.String(), baseMetricPrefix+"success_count")
 }
 
-func (fn *executeBundleFn) ProcessElement(ctx context.Context, inputBundleBody []byte, emitSuccess, emitFailure func(string)) {
+func (fn *executeBundleFn) ProcessElement(ctx context.Context, inputBundleBody string, emitSuccess, emitFailure func(string)) {
 	response, err := executeAndRecordLatency(ctx, &fn.latencyMs, func() (*http.Response, error) {
 		return fn.client.executeBundle(fn.FhirStorePath, inputBundleBody)
 	})
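For context (a sketch with a hypothetical DoFn, not part of the diff): the generic parameters of register.DoFn4x0 mirror the ProcessElement signature, which is why the []byte-to-string change above is made both on the method and in the registration:

	import (
		"context"

		"github.com/apache/beam/sdks/v2/go/pkg/beam/register"
	)

	// passThroughFn is illustrative only; its registration lists the
	// ProcessElement arguments in order: context, element, then emitters.
	type passThroughFn struct{}

	func (fn *passThroughFn) ProcessElement(ctx context.Context, in string, emitOK, emitErr func(string)) {
		emitOK(in)
	}

	func init() {
		register.DoFn4x0[context.Context, string, func(string), func(string)]((*passThroughFn)(nil))
		register.Emitter1[string]()
	}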
diff --git a/sdks/go/pkg/beam/io/fhirio/execute_bundles_test.go b/sdks/go/pkg/beam/io/fhirio/execute_bundles_test.go
index e312269..d0bb5df 100644
--- a/sdks/go/pkg/beam/io/fhirio/execute_bundles_test.go
+++ b/sdks/go/pkg/beam/io/fhirio/execute_bundles_test.go
@@ -54,7 +54,7 @@
 		},
 	}
 
-	testBundles := [][]byte{[]byte("foo"), []byte("bar")}
+	testBundles := []string{"foo", "bar"}
 	for _, testCase := range testCases {
 		t.Run(testCase.name, func(t *testing.T) {
 			p, s, bundles := ptest.CreateList(testBundles)
diff --git a/sdks/go/pkg/beam/io/fhirio/import.go b/sdks/go/pkg/beam/io/fhirio/import.go
new file mode 100644
index 0000000..0d59be4
--- /dev/null
+++ b/sdks/go/pkg/beam/io/fhirio/import.go
@@ -0,0 +1,260 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fhirio provides an API for reading and writing resources to Google
+// Cloud Healthcare FHIR stores.
+// Experimental.
+package fhirio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+
+	"github.com/apache/beam/sdks/v2/go/pkg/beam"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/filesystem"
+	_ "github.com/apache/beam/sdks/v2/go/pkg/beam/io/filesystem/gcs"
+	_ "github.com/apache/beam/sdks/v2/go/pkg/beam/io/filesystem/local"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/register"
+	"github.com/google/uuid"
+)
+
+func init() {
+	register.DoFn3x0[context.Context, string, func(string)]((*importFn)(nil))
+	register.DoFn4x0[context.Context, string, func(string), func(string)]((*createBatchFilesFn)(nil))
+	register.Emitter1[string]()
+}
+
+// ContentStructure represents the content structure of the import source files, as per:
+// https://cloud.google.com/healthcare-api/docs/reference/rest/v1/projects.locations.datasets.fhirStores/import#contentstructure
+type ContentStructure int
+
+const (
+	// ContentStructureUnspecified is to be used as the argument to Import if the
+	// content structure is not specified; in that case the default value BUNDLE is used.
+	ContentStructureUnspecified ContentStructure = iota
+
+	// ContentStructureBundle is to be used as the argument to Import if the source file
+	// contains one or more lines of newline-delimited JSON (ndjson). Each line is a
+	// bundle that contains one or more resources.
+	ContentStructureBundle
+
+	// ContentStructureResource is to be used as the argument to Import if the source
+	// file contains one or more lines of newline-delimited JSON (ndjson). Each line
+	// is a single resource.
+	ContentStructureResource
+)
+
+func (cs ContentStructure) String() string {
+	switch cs {
+	case ContentStructureBundle:
+		return "BUNDLE"
+	case ContentStructureResource:
+		return "RESOURCE"
+	case ContentStructureUnspecified:
+		fallthrough
+	default:
+		return "CONTENT_STRUCTURE_UNSPECIFIED"
+	}
+}
+
+type createBatchFilesFn struct {
+	fs              filesystem.Interface
+	batchFileWriter io.WriteCloser
+	batchFilePath   string
+	TempLocation    string
+}
+
+func (fn *createBatchFilesFn) StartBundle(ctx context.Context, _, _ func(string)) error {
+	fs, err := filesystem.New(ctx, fn.TempLocation)
+	if err != nil {
+		return err
+	}
+	fn.fs = fs
+	fn.batchFilePath = fmt.Sprintf("%s/fhirImportBatch-%v.ndjson", fn.TempLocation, uuid.New())
+	log.Infof(ctx, "Opening to write batch file: %v", fn.batchFilePath)
+	fn.batchFileWriter, err = fn.fs.OpenWrite(ctx, fn.batchFilePath)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (fn *createBatchFilesFn) ProcessElement(ctx context.Context, resource string, _, emitFailedResource func(string)) {
+	_, err := fn.batchFileWriter.Write([]byte(resource + "\n"))
+	if err != nil {
+		log.Warnf(ctx, "Failed to write resource to batch file. Reason: %v", err)
+		emitFailedResource(resource)
+	}
+}
+
+func (fn *createBatchFilesFn) FinishBundle(ctx context.Context, emitBatchFilePath, _ func(string)) {
+	fn.batchFileWriter.Close()
+	fn.batchFileWriter = nil
+	fn.fs.Close()
+	fn.fs = nil
+	emitBatchFilePath(fn.batchFilePath)
+	log.Infof(ctx, "Batch file created: %v", fn.batchFilePath)
+}
+
+type importFn struct {
+	fnCommonVariables
+	operationCounters
+	fs                               filesystem.Interface
+	batchFilesPath                   []string
+	tempBatchDir                     string
+	FhirStorePath                    string
+	TempLocation, DeadLetterLocation string
+	ContentStructure                 ContentStructure
+}
+
+func (fn importFn) String() string {
+	return "importFn"
+}
+
+func (fn *importFn) Setup() {
+	fn.fnCommonVariables.setup(fn.String())
+	fn.operationCounters.setup(fn.String())
+}
+
+func (fn *importFn) StartBundle(ctx context.Context, _ func(string)) error {
+	fs, err := filesystem.New(ctx, fn.TempLocation)
+	if err != nil {
+		return err
+	}
+	fn.fs = fs
+	fn.batchFilesPath = make([]string, 0)
+	fn.tempBatchDir = fmt.Sprintf("%s/tmp-%v", fn.TempLocation, uuid.New())
+	return nil
+}
+
+func (fn *importFn) ProcessElement(ctx context.Context, batchFilePath string, _ func(string)) {
+	updatedBatchFilePath := fmt.Sprintf("%s/%s", fn.tempBatchDir, filepath.Base(batchFilePath))
+	err := filesystem.Rename(ctx, fn.fs, batchFilePath, updatedBatchFilePath)
+	if err != nil {
+		updatedBatchFilePath = batchFilePath
+		log.Warnf(ctx, "Failed to move %v to temp location. Reason: %v", batchFilePath, err)
+	}
+	fn.batchFilesPath = append(fn.batchFilesPath, updatedBatchFilePath)
+}
+
+func (fn *importFn) FinishBundle(ctx context.Context, emitDeadLetter func(string)) {
+	defer func() {
+		fn.fs.Close()
+		fn.fs = nil
+		fn.batchFilesPath = nil
+	}()
+
+	importURI := fn.tempBatchDir + "/*.ndjson"
+	log.Infof(ctx, "About to begin import operation with importURI: %v", importURI)
+	result, err := executeAndRecordLatency(ctx, &fn.latencyMs, func() (operationResults, error) {
+		return fn.client.importResources(fn.FhirStorePath, importURI, fn.ContentStructure)
+	})
+	if err != nil {
+		fn.moveToDeadLetterOrRemoveFailedImportBatchFiles(ctx)
+		fn.operationCounters.errorCount.Inc(ctx, 1)
+		deadLetterMessage := fmt.Sprintf("Failed to import [%v]. Reason: %v", importURI, err)
+		log.Warn(ctx, deadLetterMessage)
+		emitDeadLetter(deadLetterMessage)
+		return
+	}
+
+	log.Infof(ctx, "Imported %v. Results: %v", importURI, result)
+	fn.operationCounters.successCount.Inc(ctx, 1)
+	fn.resourcesSuccessCount.Inc(ctx, result.Successes)
+	fn.resourcesErrorCount.Inc(ctx, result.Failures)
+	fn.removeTempBatchFiles(ctx)
+}
+
+func (fn *importFn) moveToDeadLetterOrRemoveFailedImportBatchFiles(ctx context.Context) {
+	if fn.DeadLetterLocation == "" {
+		log.Info(ctx, "Deadletter path not provided. Remove failed import batch files instead.")
+		fn.removeTempBatchFiles(ctx)
+		return
+	}
+
+	log.Infof(ctx, "Moving failed import files to Deadletter path: [%v]", fn.DeadLetterLocation)
+	for _, p := range fn.batchFilesPath {
+		err := filesystem.Rename(ctx, fn.fs, p, fmt.Sprintf("%s/%s", fn.DeadLetterLocation, filepath.Base(p)))
+		if err != nil {
+			log.Warnf(ctx, "Failed to move failed imported file %v to %v. Reason: %v", p, fn.DeadLetterLocation, err)
+		}
+	}
+}
+
+func (fn *importFn) removeTempBatchFiles(ctx context.Context) {
+	for _, p := range fn.batchFilesPath {
+		err := fn.fs.(filesystem.Remover).Remove(ctx, p)
+		if err != nil {
+			log.Warnf(ctx, "Failed to delete temp batch file [%v]. Reason: %v", p, err)
+		}
+	}
+}
+
+// Import consumes FHIR resources as input PCollection<string> and imports them
+// into a given Google Cloud Healthcare FHIR store. It does so by creating batch
+// files in the provided Google Cloud Storage `tempDir` and importing those files
+// to the store through the FHIR import API: https://cloud.google.com/healthcare-api/docs/concepts/fhir-import.
+// If `tempDir` is not provided, it falls back to the Dataflow temp_location flag.
+// Resources that fail to be written to the batch files are emitted in the
+// first output PCollection. If a batch file fails to be imported, it will
+// be moved to the `deadLetterDir` and an error message will be provided in the
+// second output PCollection. If `deadLetterDir` is not provided, the failed
+// import files will be deleted and irretrievable, but the error message will
+// still be provided.
+func Import(s beam.Scope, fhirStorePath, tempDir, deadLetterDir string, contentStructure ContentStructure, resources beam.PCollection) (beam.PCollection, beam.PCollection) {
+	s = s.Scope("fhirio.Import")
+
+	if tempDir == "" {
+		tempDir = tryFallbackToDataflowTempDirOrPanic()
+	}
+	tempDir = strings.TrimSuffix(tempDir, "/")
+	deadLetterDir = strings.TrimSuffix(deadLetterDir, "/")
+
+	return importResourcesInBatches(s, fhirStorePath, tempDir, deadLetterDir, contentStructure, resources, nil)
+}
+
+// This is useful as an entry point for testing because we can provide a fake FHIR store client.
+func importResourcesInBatches(s beam.Scope, fhirStorePath, tempDir, deadLetterDir string, contentStructure ContentStructure, resources beam.PCollection, client fhirStoreClient) (beam.PCollection, beam.PCollection) {
+	batchFiles, failedResources := beam.ParDo2(s, &createBatchFilesFn{TempLocation: tempDir}, resources)
+	failedImportsDeadLetter := beam.ParDo(
+		s,
+		&importFn{
+			fnCommonVariables:  fnCommonVariables{client: client},
+			FhirStorePath:      fhirStorePath,
+			TempLocation:       tempDir,
+			DeadLetterLocation: deadLetterDir,
+			ContentStructure:   contentStructure,
+		},
+		batchFiles,
+	)
+	return failedResources, failedImportsDeadLetter
+}
+
+func tryFallbackToDataflowTempDirOrPanic() string {
+	beam.PipelineOptions.LoadOptionsFromFlags(nil)
+	if f := beam.PipelineOptions.Get("temp_location"); f != "" {
+		return f
+	}
+
+	// temp_location is optional, so fall back to staging_location.
+	if f := beam.PipelineOptions.Get("staging_location"); f != "" {
+		return f
+	}
+	panic("could not resolve to a temp directory for import batch files")
+}
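
For context, a minimal, hedged usage sketch of the new transform follows. The store path, bucket names, and input resource are placeholders, and the snippet assumes the standard beamx launcher; it is not part of this change.

package main

import (
	"context"

	"github.com/apache/beam/sdks/v2/go/pkg/beam"
	"github.com/apache/beam/sdks/v2/go/pkg/beam/io/fhirio"
	"github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
)

func main() {
	beam.Init()
	p, s := beam.NewPipelineWithRoot()

	// Input PCollection<string> of FHIR resources (placeholder data).
	resources := beam.Create(s, `{"resourceType":"Patient","id":"example-patient-1"}`)

	failedResources, failedImports := fhirio.Import(
		s,
		"projects/PROJECT/locations/LOCATION/datasets/DATASET/fhirStores/STORE", // placeholder
		"gs://my-bucket/tmp",        // tempDir for batch .ndjson files (placeholder)
		"gs://my-bucket/deadletter", // deadLetterDir for failed batches (placeholder)
		fhirio.ContentStructureResource,
		resources,
	)
	_ = failedResources // resources that could not be written to a batch file
	_ = failedImports   // error messages for batch files rejected by the import API

	if err := beamx.Run(context.Background(), p); err != nil {
		panic(err)
	}
}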
diff --git a/sdks/go/pkg/beam/io/fhirio/import_test.go b/sdks/go/pkg/beam/io/fhirio/import_test.go
new file mode 100644
index 0000000..5e5f1ee
--- /dev/null
+++ b/sdks/go/pkg/beam/io/fhirio/import_test.go
@@ -0,0 +1,94 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhirio
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/testing/passert"
+	"github.com/apache/beam/sdks/v2/go/pkg/beam/testing/ptest"
+)
+
+func setupTempAndDeadLetterDirs() (string, string, func()) {
+	tempPath, _ := os.MkdirTemp("", "temp")
+	deadLetterPath, _ := os.MkdirTemp("", "deadletter")
+	return tempPath, deadLetterPath, func() {
+		os.RemoveAll(tempPath)
+		os.RemoveAll(deadLetterPath)
+	}
+}
+
+func TestImport(t *testing.T) {
+	testCases := []struct {
+		name                     string
+		client                   fhirStoreClient
+		expectedErrorCount       int
+		expectedSuccessCount     int
+		expectedOperationResults operationResults
+	}{
+		{
+			name:                     "Import returns error",
+			client:                   requestReturnErrorFakeClient,
+			expectedErrorCount:       1,
+			expectedOperationResults: operationResults{},
+		},
+		{
+			name: "Import returns successfully",
+			client: &fakeFhirStoreClient{fakeImportResources: func(string, string, ContentStructure) (operationResults, error) {
+				return testOperationResult, nil
+			}},
+			expectedErrorCount:       0,
+			expectedOperationResults: testOperationResult,
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			tempDirPath, deadLetterDirPath, teardownDirs := setupTempAndDeadLetterDirs()
+			defer teardownDirs()
+
+			testResources := []string{"foo", "bar"}
+			p, s, testResourcesCol := ptest.CreateList(testResources)
+			failedResources, deadLetter := importResourcesInBatches(
+				s,
+				"",
+				tempDirPath,
+				deadLetterDirPath,
+				0,
+				testResourcesCol,
+				testCase.client,
+			)
+
+			passert.Empty(s, failedResources)
+			passert.Count(s, deadLetter, "", testCase.expectedErrorCount)
+
+			pipelineResult := ptest.RunAndValidate(t, p)
+			validateCounter(t, pipelineResult, operationErrorCounterName, testCase.expectedErrorCount)
+			validateCounter(t, pipelineResult, successCounterName, int(testCase.expectedOperationResults.Successes))
+			validateCounter(t, pipelineResult, errorCounterName, int(testCase.expectedOperationResults.Failures))
+
+			if filesInTemp, _ := ioutil.ReadDir(tempDirPath); len(filesInTemp) != 0 {
+				t.Fatalf("expected 0 files in temp path, but got %v", len(filesInTemp))
+			}
+
+			if filesInDeadLetter, _ := ioutil.ReadDir(deadLetterDirPath); len(filesInDeadLetter) != testCase.expectedErrorCount {
+				t.Fatalf("expected 1 file in deadletter path, but got %v", len(filesInDeadLetter))
+			}
+		})
+	}
+}
diff --git a/sdks/go/pkg/beam/io/fhirio/read.go b/sdks/go/pkg/beam/io/fhirio/read.go
index d604161..04e81e0 100644
--- a/sdks/go/pkg/beam/io/fhirio/read.go
+++ b/sdks/go/pkg/beam/io/fhirio/read.go
@@ -28,7 +28,7 @@
 )
 
 func init() {
-	register.DoFn4x0[context.Context, string, func(string), func(string)]((*readResourceFn)(nil))
+	register.DoFn4x0[context.Context, []byte, func(string), func(string)]((*readResourceFn)(nil))
 	register.Emitter1[string]()
 }
 
@@ -44,7 +44,7 @@
 	fn.fnCommonVariables.setup(fn.String())
 }
 
-func (fn *readResourceFn) ProcessElement(ctx context.Context, resourcePath string, emitResource, emitDeadLetter func(string)) {
+func (fn *readResourceFn) ProcessElement(ctx context.Context, resourcePath []byte, emitResource, emitDeadLetter func(string)) {
 	response, err := executeAndRecordLatency(ctx, &fn.latencyMs, func() (*http.Response, error) {
 		return fn.client.readResource(resourcePath)
 	})
diff --git a/sdks/go/pkg/beam/io/fhirio/read_test.go b/sdks/go/pkg/beam/io/fhirio/read_test.go
index cfbfbe6..70c7d79 100644
--- a/sdks/go/pkg/beam/io/fhirio/read_test.go
+++ b/sdks/go/pkg/beam/io/fhirio/read_test.go
@@ -48,7 +48,7 @@
 		},
 	}
 
-	testResourcePaths := []string{"foo", "bar"}
+	testResourcePaths := [][]byte{[]byte("foo"), []byte("bar")}
 	for _, testCase := range testCases {
 		t.Run(testCase.name, func(t *testing.T) {
 			p, s, resourcePaths := ptest.CreateList(testResourcePaths)
diff --git a/sdks/go/pkg/beam/io/fhirio/utils_test.go b/sdks/go/pkg/beam/io/fhirio/utils_test.go
index 154d607..aa9fbc0 100644
--- a/sdks/go/pkg/beam/io/fhirio/utils_test.go
+++ b/sdks/go/pkg/beam/io/fhirio/utils_test.go
@@ -27,12 +27,14 @@
 )
 
 var (
+	testOperationResult = operationResults{Successes: 5, Failures: 2}
+
 	fakeRequestReturnErrorMessage = "internal error"
 	requestReturnErrorFakeClient  = &fakeFhirStoreClient{
-		fakeReadResources: func(string) (*http.Response, error) {
+		fakeReadResources: func([]byte) (*http.Response, error) {
 			return nil, errors.New(fakeRequestReturnErrorMessage)
 		},
-		fakeExecuteBundles: func(string, []byte) (*http.Response, error) {
+		fakeExecuteBundles: func(string, string) (*http.Response, error) {
 			return nil, errors.New(fakeRequestReturnErrorMessage)
 		},
 		fakeSearch: func(string, string, map[string]string, string) (*http.Response, error) {
@@ -41,6 +43,9 @@
 		fakeDeidentify: func(string, string, *healthcare.DeidentifyConfig) (operationResults, error) {
 			return operationResults{}, errors.New(fakeRequestReturnErrorMessage)
 		},
+		fakeImportResources: func(string, string, ContentStructure) (operationResults, error) {
+			return operationResults{}, errors.New(fakeRequestReturnErrorMessage)
+		},
 	}
 
 	badStatusFakeResponse = &http.Response{
@@ -48,10 +53,10 @@
 		StatusCode: http.StatusForbidden,
 	}
 	badStatusFakeClient = &fakeFhirStoreClient{
-		fakeReadResources: func(string) (*http.Response, error) {
+		fakeReadResources: func([]byte) (*http.Response, error) {
 			return badStatusFakeResponse, nil
 		},
-		fakeExecuteBundles: func(string, []byte) (*http.Response, error) {
+		fakeExecuteBundles: func(string, string) (*http.Response, error) {
 			return badStatusFakeResponse, nil
 		},
 		fakeSearch: func(string, string, map[string]string, string) (*http.Response, error) {
@@ -69,10 +74,10 @@
 		StatusCode: http.StatusOK,
 	}
 	bodyReaderErrorFakeClient = &fakeFhirStoreClient{
-		fakeReadResources: func(string) (*http.Response, error) {
+		fakeReadResources: func([]byte) (*http.Response, error) {
 			return bodyReaderErrorFakeResponse, nil
 		},
-		fakeExecuteBundles: func(string, []byte) (*http.Response, error) {
+		fakeExecuteBundles: func(string, string) (*http.Response, error) {
 			return bodyReaderErrorFakeResponse, nil
 		},
 		fakeSearch: func(string, string, map[string]string, string) (*http.Response, error) {
@@ -85,7 +90,7 @@
 		StatusCode: http.StatusOK,
 	}
 	emptyResponseBodyFakeClient = &fakeFhirStoreClient{
-		fakeExecuteBundles: func(string, []byte) (*http.Response, error) {
+		fakeExecuteBundles: func(string, string) (*http.Response, error) {
 			return emptyBodyReaderFakeResponse, nil
 		},
 		fakeSearch: func(string, string, map[string]string, string) (*http.Response, error) {
@@ -95,17 +100,18 @@
 )
 
 type fakeFhirStoreClient struct {
-	fakeReadResources  func(string) (*http.Response, error)
-	fakeExecuteBundles func(string, []byte) (*http.Response, error)
-	fakeSearch         func(string, string, map[string]string, string) (*http.Response, error)
-	fakeDeidentify     func(string, string, *healthcare.DeidentifyConfig) (operationResults, error)
+	fakeReadResources   func([]byte) (*http.Response, error)
+	fakeExecuteBundles  func(string, string) (*http.Response, error)
+	fakeSearch          func(string, string, map[string]string, string) (*http.Response, error)
+	fakeDeidentify      func(string, string, *healthcare.DeidentifyConfig) (operationResults, error)
+	fakeImportResources func(string, string, ContentStructure) (operationResults, error)
 }
 
-func (c *fakeFhirStoreClient) executeBundle(storePath string, bundle []byte) (*http.Response, error) {
+func (c *fakeFhirStoreClient) executeBundle(storePath, bundle string) (*http.Response, error) {
 	return c.fakeExecuteBundles(storePath, bundle)
 }
 
-func (c *fakeFhirStoreClient) readResource(resourcePath string) (*http.Response, error) {
+func (c *fakeFhirStoreClient) readResource(resourcePath []byte) (*http.Response, error) {
 	return c.fakeReadResources(resourcePath)
 }
 
@@ -117,6 +123,10 @@
 	return c.fakeDeidentify(srcStorePath, dstStorePath, deidConfig)
 }
 
+func (c *fakeFhirStoreClient) importResources(storePath, gcsURI string, contentStructure ContentStructure) (operationResults, error) {
+	return c.fakeImportResources(storePath, gcsURI, contentStructure)
+}
+
 // Useful to fake the Body of a http.Response.
 type fakeReaderCloser struct {
 	io.Closer
diff --git a/sdks/go/pkg/beam/runners/direct/direct.go b/sdks/go/pkg/beam/runners/direct/direct.go
index 01bb46e..21cbb11 100644
--- a/sdks/go/pkg/beam/runners/direct/direct.go
+++ b/sdks/go/pkg/beam/runners/direct/direct.go
@@ -333,6 +333,9 @@
 	case graph.WindowInto:
 		u = &exec.WindowInto{UID: b.idgen.New(), Fn: edge.WindowFn, Out: out[0]}
 
+	case graph.External:
+		return nil, errors.Errorf("external transforms like %v are not supported in the Go direct runner, please execute your pipeline on a different runner", edge)
+
 	default:
 		return nil, errors.Errorf("unexpected edge: %v", edge)
 	}
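
A brief, hedged illustration of what this new error path means for users: a Go pipeline that includes an external (cross-language) transform can no longer run on the direct runner and has to be launched on another runner, for example via the --runner flag handled by beamx. Runner name and flags below are deployment-specific placeholders.

package main

import (
	"context"
	"log"

	"github.com/apache/beam/sdks/v2/go/pkg/beam"
	"github.com/apache/beam/sdks/v2/go/pkg/beam/x/beamx"
)

func main() {
	beam.Init()
	p, s := beam.NewPipelineWithRoot()
	_ = s // ... build a pipeline that uses an external/cross-language transform ...

	// Launch with e.g. `--runner=dataflow` plus that runner's required flags;
	// executing this on the direct runner now fails with the error added above.
	if err := beamx.Run(context.Background(), p); err != nil {
		log.Fatalf("pipeline failed: %v", err)
	}
}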
diff --git a/sdks/go/pkg/beam/runners/session/session.go b/sdks/go/pkg/beam/runners/session/session.go
deleted file mode 100644
index eb4d291..0000000
--- a/sdks/go/pkg/beam/runners/session/session.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Deprecated: the session runner is no longer maintained and will be removed
-// in a later release.
-package session
-
-import (
-	"bufio"
-	"context"
-	"flag"
-	"fmt"
-	"io"
-	"net"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/apache/beam/sdks/v2/go/pkg/beam"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/core/runtime/harness/session"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
-	"github.com/apache/beam/sdks/v2/go/pkg/beam/log"
-	fnpb "github.com/apache/beam/sdks/v2/go/pkg/beam/model/fnexecution_v1"
-	pipepb "github.com/apache/beam/sdks/v2/go/pkg/beam/model/pipeline_v1"
-	"github.com/golang/protobuf/proto"
-	"google.golang.org/grpc"
-)
-
-const (
-	// The maximum length of an encoded varint. We can Peek this much data
-	// and find a value to decode.
-	peekLen = 9
-)
-
-func init() {
-	beam.RegisterRunner("session", Execute)
-}
-
-var sessionFile = flag.String("session_file", "", "Session file for the runner")
-
-// controlServer manages the FnAPI control channel.
-type controlServer struct {
-	fnpb.UnimplementedBeamFnControlServer
-
-	filename   string
-	wg         *sync.WaitGroup // used to signal when the session is completed
-	ctrlStream fnpb.BeamFnControl_ControlServer
-	dataServer *grpc.Server
-	dataStream fnpb.BeamFnData_DataServer
-	dwg        *sync.WaitGroup
-}
-
-func (c *controlServer) Control(stream fnpb.BeamFnControl_ControlServer) error {
-	fmt.Println("Go SDK connected")
-	c.ctrlStream = stream
-	// We have a connected worker. Start reading the session file and issuing
-	// commands.
-
-	c.readSession(c.filename)
-	c.wg.Done()
-	fmt.Println("session replay complete")
-	return nil
-}
-
-func (c *controlServer) GetProcessBundleDescriptor(ctx context.Context, r *fnpb.GetProcessBundleDescriptorRequest) (*fnpb.ProcessBundleDescriptor, error) {
-	return nil, nil
-}
-
-func (c *controlServer) establishDataChannel(beamPort, tcpPort string) {
-	if c.dataServer != nil {
-		// Already a data server, we're done
-		return
-	}
-
-	// grpc can allow a grpc service running on two different ports, but there's
-	// no way (in Go at least) to differentiate the two of them to identify the
-	// source of the incoming data. So we don't even try. Session files that
-	// specify data ports have the content rewritten to use the port that
-	// the data server is listening on.
-
-	c.dataServer = grpc.NewServer()
-	fnpb.RegisterBeamFnDataServer(c.dataServer, &dataServer{ctrl: c})
-	dp, err := net.Listen("tcp", tcpPort)
-	if err != nil {
-		panic(err)
-	}
-	c.dwg = &sync.WaitGroup{}
-	c.dwg.Add(1)
-	go c.dataServer.Serve(dp)
-}
-
-func (c *controlServer) registerStream(stream fnpb.BeamFnData_DataServer) {
-	c.dataStream = stream
-	c.dwg.Done()
-}
-
-// TODO(wcn): move this code to a session file framework. I imagine this will
-// take an additional function argument that performs the handleEntry() work.
-func (c *controlServer) readSession(filename string) {
-	// Keep the reading simple by ensuring the buffer is large enough
-	// to hold any single recorded message. Since grpc has a message
-	// cap of 4 megs, we make our buffer larger. Future versions of the
-	// header will include this constant, so we can read the header
-	// unbuffered, then move to the appropriately sized buffer reader.
-	f, err := os.Open(filename)
-	if err != nil {
-		panic(err)
-	}
-
-	br := bufio.NewReaderSize(f, 5000000)
-	for {
-		b, err := br.Peek(peekLen)
-		if err != nil && err != io.EOF {
-			panic(errors.Wrap(err, "Problem peeking length value"))
-		}
-		if err == io.EOF {
-			break
-		}
-		l, inc := proto.DecodeVarint(b)
-		br.Discard(inc)
-
-		// Read out the entry header message.
-		b, err = br.Peek(int(l))
-		if err != nil {
-			panic(errors.Wrap(err, "Problem peeking entry header value"))
-		}
-		var hMsg session.EntryHeader
-		if err := proto.Unmarshal(b, &hMsg); err != nil {
-			panic(errors.Wrap(err, "Error decoding entry header"))
-		}
-		br.Discard(int(l))
-
-		msgBytes, err := br.Peek(int(hMsg.Len))
-		if err != nil {
-			panic(errors.Wrap(err, "Couldn't peek message"))
-		}
-
-		var bMsg session.Entry
-		if err := proto.Unmarshal(msgBytes, &bMsg); err != nil {
-			panic(errors.Wrap(err, "Error decoding message"))
-		}
-		c.handleEntry(&bMsg)
-		br.Discard(int(hMsg.Len))
-	}
-}
-
-func (c *controlServer) handleEntry(msg *session.Entry) {
-	/*
-		if msg.Kind != session.Entry_LOG_ENTRIES {
-			fmt.Printf("handleEntry: %v\n", msg.Kind.String())
-		}
-	*/
-	switch msg.Msg.(type) {
-	case *session.Entry_Elems:
-		if msg.GetKind() == session.Kind_DATA_RECEIVED {
-			c.dwg.Wait()
-			c.dataStream.Send(msg.GetElems())
-		}
-	case *session.Entry_InstResp:
-		_, err := c.ctrlStream.Recv()
-		if err == io.EOF {
-			panic("SDK closed connection but work remaining")
-		}
-
-		if err != nil {
-			return
-		}
-
-	case *session.Entry_InstReq:
-		// Look for the register requests and extract the port information.
-		ir := msg.GetInstReq()
-		c.ctrlStream.Send(ir)
-
-		if rr := ir.GetRegister(); rr != nil {
-			for _, desc := range rr.GetProcessBundleDescriptor() {
-				for beamPort, t := range desc.GetTransforms() {
-					s := t.GetSpec()
-					if s.GetUrn() == "beam:runner:source:v1" {
-						tcpPort := extractPortSpec(s)
-						c.establishDataChannel(beamPort, tcpPort)
-					}
-					if s.GetUrn() == "beam:runner:sink:v1" {
-						tcpPort := extractPortSpec(s)
-						c.establishDataChannel(beamPort, tcpPort)
-					}
-				}
-			}
-		}
-	}
-}
-
-func extractPortSpec(spec *pipepb.FunctionSpec) string {
-	var port fnpb.RemoteGrpcPort
-	if err := proto.Unmarshal(spec.GetPayload(), &port); err != nil {
-		panic(err)
-	}
-	lp := port.ApiServiceDescriptor.Url
-	// Leave the colon, so as to match the form net.Listen uses.
-	bp := strings.Replace(lp, "localhost", "", 1)
-	if bp != lp {
-		return bp
-	}
-	panic("unable to extract port")
-}
-
-// dataServer manages the FnAPI data channel.
-type dataServer struct {
-	fnpb.UnimplementedBeamFnDataServer
-
-	ctrl *controlServer
-}
-
-func (d *dataServer) Data(stream fnpb.BeamFnData_DataServer) error {
-	// This goroutine is only used for reading data. The stream object
-	// is passed to the control server so that all data is sent from
-	// a single goroutine to ensure proper ordering.
-
-	d.ctrl.registerStream(stream)
-
-	// Consume data messages that are received
-	for {
-		in, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-
-		if err != nil {
-			return err
-		}
-
-		_ = in
-		//log.Printf("Data received: %v", in)
-	}
-}
-
-// loggingServer manages the FnAPI logging channel.
-type loggingServer struct {
-	fnpb.UnimplementedBeamFnLoggingServer
-}
-
-func (l *loggingServer) Logging(stream fnpb.BeamFnLogging_LoggingServer) error {
-	// This stream object is only used here. The stream is used for receiving, and
-	// no sends happen on it.
-	for {
-		in, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-
-		if err != nil {
-			return err
-		}
-
-		for _, e := range in.GetLogEntries() {
-			log.Info(stream.Context(), e.GetMessage())
-		}
-	}
-}
-
-// Execute launches the supplied pipeline using a session file as the source of inputs.
-func Execute(ctx context.Context, p *beam.Pipeline) (beam.PipelineResult, error) {
-	worker, err := buildLocalBinary(ctx)
-	if err != nil {
-		return nil, errors.WithContext(err, "building worker binary")
-	}
-
-	log.Infof(ctx, "built worker binary at %s\n", worker)
-
-	// Start up the grpc logging service.
-	ls := grpc.NewServer()
-	fnpb.RegisterBeamFnLoggingServer(ls, &loggingServer{})
-	logPort, err := net.Listen("tcp", ":0")
-	if err != nil {
-		panic("No logging port")
-	}
-	go ls.Serve(logPort)
-
-	// The wait group is used by the control service goroutine to signal
-	// completion.
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	cs := grpc.NewServer()
-	fnpb.RegisterBeamFnControlServer(cs, &controlServer{
-		filename: *sessionFile,
-		wg:       &wg,
-	})
-
-	ctrlPort, err := net.Listen("tcp", ":0")
-	if err != nil {
-		panic("No control port")
-	}
-	go cs.Serve(ctrlPort)
-
-	fmt.Println("fake harness initialized")
-	cmd := exec.Command(
-		worker,
-		"--worker",
-		fmt.Sprintf("--logging_endpoint=%s", logPort.Addr().String()),
-		fmt.Sprintf("--control_endpoint=%s", ctrlPort.Addr().String()),
-		"--persist_dir=/tmp/worker")
-	go cmd.Start()
-
-	wg.Wait()
-	return nil, nil
-}
-
-// buildLocalBinary is cribbed from the Dataflow runner, but doesn't force the
-// Linux architecture, since the worker runs in the pipeline launch
-// environment.
-func buildLocalBinary(ctx context.Context) (string, error) {
-	ret := filepath.Join(os.TempDir(), fmt.Sprintf("session-runner-%v", time.Now().UnixNano()))
-
-	program := ""
-	for i := 3; ; i++ {
-		_, file, _, ok := runtime.Caller(i)
-		if !ok || strings.HasSuffix(file, "runtime/proc.go") {
-			break
-		}
-		program = file
-	}
-	if program == "" {
-		return "", errors.New("could not detect user main")
-	}
-
-	log.Infof(ctx, "Compiling %v as %v", program, ret)
-
-	// Cross-compile given go program. Not awesome.
-	build := []string{"go", "build", "-o", ret, program}
-
-	cmd := exec.Command(build[0], build[1:]...)
-	if out, err := cmd.CombinedOutput(); err != nil {
-		log.Info(ctx, string(out))
-		return "", errors.Wrapf(err, "failed to compile %v", program)
-	}
-	return ret, nil
-}
diff --git a/sdks/go/pkg/beam/util/syscallx/syscall_default.go b/sdks/go/pkg/beam/util/syscallx/syscall_default.go
index 67d188d..be9dc29 100644
--- a/sdks/go/pkg/beam/util/syscallx/syscall_default.go
+++ b/sdks/go/pkg/beam/util/syscallx/syscall_default.go
@@ -28,6 +28,7 @@
 	return 0, ErrUnsupported
 }
 
+// SetProcessMemoryCeiling sets the current (soft) and maximum (hard) process memory limits.
 func SetProcessMemoryCeiling(softCeiling, hardCeiling uint64) error {
 	return ErrUnsupported
 }
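
A small, hedged sketch of how the newly documented function is meant to be called; the limit values are arbitrary examples, and the error handling mirrors the ErrUnsupported fallback shown above.

package main

import (
	"log"

	"github.com/apache/beam/sdks/v2/go/pkg/beam/util/syscallx"
)

func main() {
	// Arbitrary example limits: 4 GiB soft (current) and 8 GiB hard (max).
	if err := syscallx.SetProcessMemoryCeiling(4<<30, 8<<30); err != nil {
		if err == syscallx.ErrUnsupported {
			log.Println("process memory ceiling is not supported on this platform")
		} else {
			log.Printf("failed to set process memory ceiling: %v", err)
		}
	}
}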
diff --git a/sdks/go/pkg/beam/util/syscallx/syscall_linux.go b/sdks/go/pkg/beam/util/syscallx/syscall_linux.go
index f18ee87..ee5624e 100644
--- a/sdks/go/pkg/beam/util/syscallx/syscall_linux.go
+++ b/sdks/go/pkg/beam/util/syscallx/syscall_linux.go
@@ -38,6 +38,7 @@
 	return stat.Bavail * uint64(stat.Bsize), nil
 }
 
+// SetProcessMemoryCeiling sets the current (soft) and maximum (hard) process memory limits.
 func SetProcessMemoryCeiling(softCeiling, hardCeiling uint64) error {
 	var rLimit unix.Rlimit
 
diff --git a/sdks/go/pkg/beam/validate.go b/sdks/go/pkg/beam/validate.go
index 9987bfe..2bc00fd 100644
--- a/sdks/go/pkg/beam/validate.go
+++ b/sdks/go/pkg/beam/validate.go
@@ -71,8 +71,8 @@
 		if !typex.IsUniversal(v.Var) {
 			return nil, errors.Errorf("type var %s must be a universal type", v.Var)
 		}
-		if !typex.IsConcrete(v.T) {
-			return nil, errors.Errorf("type value %s must be a concrete type", v.T)
+		if ok, err := typex.CheckConcrete(v.T); !ok {
+			return nil, errors.Wrapf(err, "type value %s must be a concrete type", v.T)
 		}
 		typedefs[v.Var.Name()] = v.T
 	}
diff --git a/sdks/go/test/integration/io/fhirio/fhirio_test.go b/sdks/go/test/integration/io/fhirio/fhirio_test.go
index 20fdc33..03e3654 100644
--- a/sdks/go/test/integration/io/fhirio/fhirio_test.go
+++ b/sdks/go/test/integration/io/fhirio/fhirio_test.go
@@ -16,7 +16,6 @@
 package fhirio
 
 import (
-	"bytes"
 	"context"
 	"crypto/rand"
 	"encoding/json"
@@ -43,8 +42,9 @@
 )
 
 const (
-	datasetPathFmt = "projects/%s/locations/%s/datasets/apache-beam-integration-testing"
-	testDataDir    = "../../../../data/fhir_bundles/"
+	datasetPathFmt  = "projects/%s/locations/%s/datasets/apache-beam-integration-testing"
+	tempStoragePath = "gs://temp-storage-for-end-to-end-tests"
+	testDataDir     = "../../../../data/fhir_bundles/"
 )
 
 var (
@@ -54,7 +54,7 @@
 
 type fhirStoreInfo struct {
 	path           string
-	resourcesPaths []string
+	resourcesPaths [][]byte
 }
 
 func checkFlags(t *testing.T) {
@@ -94,7 +94,7 @@
 	}
 	createdFhirStorePath := createdFhirStore.Name
 
-	var resourcePaths []string
+	var resourcePaths [][]byte
 	if shouldPopulateStore {
 		resourcePaths = populateStore(createdFhirStorePath)
 		if len(resourcePaths) == 0 {
@@ -127,10 +127,10 @@
 
 // Populates fhir store with data. Note that failure to populate some data is not
 // detrimental to the tests, so it is fine to ignore.
-func populateStore(storePath string) []string {
-	resourcePaths := make([]string, 0)
+func populateStore(storePath string) [][]byte {
+	resourcePaths := make([][]byte, 0)
 	for _, bundle := range readPrettyBundles() {
-		response, err := storeService.ExecuteBundle(storePath, bytes.NewReader(bundle)).Do()
+		response, err := storeService.ExecuteBundle(storePath, strings.NewReader(bundle)).Do()
 		if err != nil {
 			continue
 		}
@@ -164,32 +164,33 @@
 	return resourcePaths
 }
 
-func readPrettyBundles() [][]byte {
+func readPrettyBundles() []string {
 	files, _ := os.ReadDir(testDataDir)
-	bundles := make([][]byte, len(files))
+	bundles := make([]string, len(files))
 	for i, file := range files {
-		bundles[i], _ = os.ReadFile(testDataDir + file.Name())
+		bundle, _ := os.ReadFile(testDataDir + file.Name())
+		bundles[i] = string(bundle)
 	}
 	return bundles
 }
 
-func extractResourcePathFrom(resourceLocationURL string) (string, error) {
+func extractResourcePathFrom(resourceLocationURL string) ([]byte, error) {
 	// The resource location url is in the following format:
 	// https://healthcare.googleapis.com/v1/projects/PROJECT_ID/locations/LOCATION/datasets/DATASET_ID/fhirStores/STORE_ID/fhir/RESOURCE_NAME/RESOURCE_ID/_history/HISTORY_ID
 	// But the API calls use this format: projects/PROJECT_ID/locations/LOCATION/datasets/DATASET_ID/fhirStores/STORE_ID/fhir/RESOURCE_NAME/RESOURCE_ID
 	startIdx := strings.Index(resourceLocationURL, "projects/")
 	endIdx := strings.Index(resourceLocationURL, "/_history")
 	if startIdx == -1 || endIdx == -1 {
-		return "", errors.New("resource location url is invalid")
+		return nil, errors.New("resource location url is invalid")
 	}
-	return resourceLocationURL[startIdx:endIdx], nil
+	return []byte(resourceLocationURL[startIdx:endIdx]), nil
 }
 
 func readTestTask(t *testing.T, s beam.Scope, testStoreInfo fhirStoreInfo) func() {
 	t.Helper()
 
 	s = s.Scope("fhirio_test.readTestTask")
-	testResources := append(testStoreInfo.resourcesPaths, testStoreInfo.path+"/fhir/Patient/invalid")
+	testResources := append(testStoreInfo.resourcesPaths, []byte(testStoreInfo.path+"/fhir/Patient/invalid"))
 	resourcePathsPCollection := beam.CreateList(s, testResources)
 	resources, failedReads := fhirio.Read(s, resourcePathsPCollection)
 	passert.Count(s, resources, "", len(testStoreInfo.resourcesPaths))
@@ -197,7 +198,7 @@
 	return nil
 }
 
-func executeBundlesTestTask(t *testing.T, s beam.Scope, testStoreInfo fhirStoreInfo) func() {
+func executeBundlesTestTask(t *testing.T, s beam.Scope, _ fhirStoreInfo) func() {
 	t.Helper()
 
 	s = s.Scope("fhirio_test.executeBundlesTestTask")
@@ -244,6 +245,24 @@
 	return teardownDstFhirStore
 }
 
+func importTestTask(t *testing.T, s beam.Scope, _ fhirStoreInfo) func() {
+	t.Helper()
+
+	s = s.Scope("fhirio_test.importTestTask")
+
+	fhirStorePath, teardownFhirStore := setupEmptyFhirStore(t)
+
+	patientTestResource := `{"resourceType":"Patient","id":"c1q34623-b02c-3f8b-92ea-873fc4db60da","name":[{"use":"official","family":"Smith","given":["Alice"]}],"gender":"female","birthDate":"1970-01-01"}`
+	practitionerTestResource := `{"resourceType":"Practitioner","id":"b0e04623-b02c-3f8b-92ea-943fc4db60da","name":[{"family":"Tillman293","given":["Franklin857"],"prefix":["Dr."]}],"address":[{"line":["295 VARNUM AVENUE"],"city":"LOWELL","state":"MA","postalCode":"01854","country":"US"}],"gender":"male"}`
+	testResources := beam.Create(s, patientTestResource, practitionerTestResource)
+
+	failedResources, deadLetter := fhirio.Import(s, fhirStorePath, tempStoragePath, tempStoragePath, fhirio.ContentStructureResource, testResources)
+	passert.Empty(s, failedResources)
+	passert.Empty(s, deadLetter)
+
+	return teardownFhirStore
+}
+
 func TestFhirIO(t *testing.T) {
 	integration.CheckFilters(t)
 	checkFlags(t)
@@ -259,6 +278,7 @@
 		executeBundlesTestTask,
 		searchTestTask,
 		deidentifyTestTask,
+		importTestTask,
 	}
 	teardownTasks := make([]func(), len(testTasks))
 	for i, testTaskCallable := range testTasks {
diff --git a/sdks/java/container/license_scripts/pull_licenses_java.py b/sdks/java/container/license_scripts/pull_licenses_java.py
index cfa23f8..f453b6c 100644
--- a/sdks/java/container/license_scripts/pull_licenses_java.py
+++ b/sdks/java/container/license_scripts/pull_licenses_java.py
@@ -55,7 +55,12 @@
         logging.info('Replaced local file URL with {url} for {dep}'.format(url=url, dep=dep))
 
     try:
-        url_read = urlopen(Request(url, headers={'User-Agent': 'Apache Beam'}))
+        url_read = urlopen(Request(url, headers={
+            'User-Agent': 'Apache Beam',
+            # MPL license fails to resolve redirects without this header
+            # see https://github.com/apache/beam/issues/22394
+            'accept-language': 'en-US,en;q=0.9',
+        }))
         with open(file_name, 'wb') as temp_write:
             shutil.copyfileobj(url_read, temp_write)
         logging.debug(
diff --git a/sdks/java/core/jmh/build.gradle b/sdks/java/core/jmh/build.gradle
new file mode 100644
index 0000000..06df6ab
--- /dev/null
+++ b/sdks/java/core/jmh/build.gradle
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+plugins { id 'org.apache.beam.module' }
+
+applyJavaNature(
+  automaticModuleName: 'org.apache.beam.sdk.jmh',
+  enableJmh: true,
+  publish: false)
+
+description = "Apache Beam :: SDKs :: Java :: Core :: JMH"
+ext.summary = "This contains JMH benchmarks for the SDK Core for Beam Java"
+
+dependencies {
+  implementation project(path: ":sdks:java:core", configuration: "shadow")
+  implementation library.java.joda_time
+  implementation library.java.vendored_grpc_1_43_2
+  implementation library.java.vendored_guava_26_0_jre
+  runtimeOnly library.java.slf4j_jdk14
+  testImplementation library.java.junit
+  testImplementation library.java.hamcrest
+}
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/GetterBasedSchemaProviderBenchmark.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/GetterBasedSchemaProviderBenchmark.java
new file mode 100644
index 0000000..c4fc533
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/GetterBasedSchemaProviderBenchmark.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.jmh.schemas;
+
+import org.apache.beam.sdk.jmh.schemas.RowBundles.ArrayOfNestedStringBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.ArrayOfStringBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.ByteBufferBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.BytesBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.DateTimeBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.IntBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.MapOfIntBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.MapOfNestedIntBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.NestedBytesBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.NestedIntBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.StringBuilderBundle;
+import org.apache.beam.sdk.jmh.schemas.RowBundles.StringBundle;
+import org.apache.beam.sdk.schemas.GetterBasedSchemaProvider;
+import org.apache.beam.sdk.values.RowWithGetters;
+import org.apache.beam.sdk.values.TypeDescriptor;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+/**
+ * Benchmarks for {@link GetterBasedSchemaProvider} on reading / writing fields based on {@link
+ * GetterBasedSchemaProvider#toRowFunction(TypeDescriptor) toRowFunction} / {@link
+ * GetterBasedSchemaProvider#fromRowFunction(TypeDescriptor) fromRowFunction}.
+ *
+ * <p>Each benchmark method invocation, depending on {@link RowBundle#action}, either reads a single
+ * field of a bundle of {@link RowBundle#bundleSize n} rows using the corresponding getter via
+ * {@link RowWithGetters#getValue} or writes that field to a new object instance using the
+ * corresponding setter.
+ *
+ * <p>Rows are created upfront and provided as JMH {@link State} to exclude initialization costs
+ * from the measurement.
+ *
+ * <ul>
+ *   <li>The score doesn't reflect read / write access only, measurement includes iterating over a
+ *       large number of rows.
+ *   <li>All rows contain just a single field. Nevertheless it is tricky to compare scores between
+ *       different benchmarks: nested structures are read recursively and collections / maps are
+ *       iterated through.
+ * </ul>
+ */
+public class GetterBasedSchemaProviderBenchmark {
+  @Benchmark
+  public void processIntField(IntBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processNestedIntField(NestedIntBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processStringField(StringBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processStringBuilderField(StringBuilderBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processDateTimeField(DateTimeBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processBytesField(BytesBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processNestedBytesField(NestedBytesBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processByteBufferField(ByteBufferBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processArrayOfStringField(ArrayOfStringBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processArrayOfNestedStringField(ArrayOfNestedStringBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processMapOfIntField(MapOfIntBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+
+  @Benchmark
+  public void processMapOfNestedIntField(MapOfNestedIntBundle state, Blackhole bh) {
+    state.processRows(bh);
+  }
+}
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundle.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundle.java
new file mode 100644
index 0000000..4368fd7
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundle.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.jmh.schemas;
+
+import java.nio.charset.StandardCharsets;
+import org.apache.beam.sdk.schemas.Factory;
+import org.apache.beam.sdk.schemas.GetterBasedSchemaProvider;
+import org.apache.beam.sdk.schemas.NoSuchSchemaException;
+import org.apache.beam.sdk.schemas.Schema;
+import org.apache.beam.sdk.schemas.Schema.FieldType;
+import org.apache.beam.sdk.schemas.SchemaCoder;
+import org.apache.beam.sdk.schemas.SchemaRegistry;
+import org.apache.beam.sdk.transforms.SerializableFunction;
+import org.apache.beam.sdk.values.Row;
+import org.apache.beam.sdk.values.RowWithGetters;
+import org.apache.beam.sdk.values.RowWithStorage;
+import org.apache.beam.sdk.values.TypeDescriptor;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
+import org.joda.time.DateTime;
+import org.joda.time.Duration;
+import org.joda.time.Instant;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+/**
+ * Bundle of rows according to the configured {@link Factory} as input for benchmarks.
+ *
+ * <p>When reading, rows are created during {@link #setup()} to exclude initialization costs from
+ * the measurement. To prevent unintended cache hits in {@link RowWithGetters}, a new bundle of rows
+ * must be generated before every invocation.
+ *
+ * <p>Setup per {@link Level#Invocation} has considerable drawbacks. Though, given that processing
+ * bundles of rows (n={@link #bundleSize}) takes well above 1 ms, each individual invocation can be
+ * adequately timestamped without risking generating wrong results.
+ */
+@State(Scope.Benchmark)
+public class RowBundle<T> {
+  public enum Action {
+    /**
+     * Write field to object using {@link
+     * GetterBasedSchemaProvider#fromRowFunction(TypeDescriptor)}.
+     *
+     * <p>Use {@link RowWithStorage} to bypass optimizations in RowWithGetters for writes.
+     */
+    WRITE,
+
+    /**
+     * Read field from {@link RowWithGetters} provided by {@link
+     * GetterBasedSchemaProvider#toRowFunction(TypeDescriptor)}.
+     */
+    READ_ONCE,
+
+    /**
+     * Repeatedly (3x) read field from {@link RowWithGetters} provided by {@link
+     * GetterBasedSchemaProvider#toRowFunction(TypeDescriptor)}.
+     */
+    READ_REPEATED
+  }
+
+  private static final SchemaRegistry REGISTRY = SchemaRegistry.createDefault();
+
+  private final SerializableFunction<Row, T> fromRow;
+  private final SerializableFunction<T, Row> toRow;
+
+  private final Row rowWithStorage;
+
+  private final T rowTarget;
+
+  private Row[] rows;
+
+  @Param("1000000")
+  int bundleSize;
+
+  @Param({"READ_ONCE", "READ_REPEATED", "WRITE"})
+  Action action;
+
+  public RowBundle() {
+    this(null); // unused, just to prevent warnings
+  }
+
+  public RowBundle(Class<T> clazz) {
+    try {
+      SchemaCoder<T> coder = REGISTRY.getSchemaCoder(clazz);
+      if (coder.getSchema().getFieldCount() != 1) {
+        throw new IllegalArgumentException("Expected class with a single field");
+      }
+      fromRow = coder.getFromRowFunction();
+      toRow = coder.getToRowFunction();
+      rowWithStorage = createRowWithStorage(coder.getSchema());
+      rowTarget = fromRow.apply(rowWithStorage);
+    } catch (NoSuchSchemaException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Setup(Level.Invocation)
+  public void setup() {
+    // no mutable state in case of writes, skip setup
+    if (action == Action.WRITE) {
+      return;
+    }
+    if (rows == null) {
+      rows = new Row[bundleSize];
+    }
+    // new rows (with getters) for each invocation to prevent accidental cache hits
+    for (int i = 0; i < bundleSize; i++) {
+      rows[i] = toRow.apply(rowTarget);
+    }
+  }
+
+  /** Runs benchmark iteration on a bundle of rows. */
+  public void processRows(Blackhole blackhole) {
+    if (action == Action.READ_ONCE) {
+      readRowsOnce(blackhole);
+    } else if (action == Action.READ_REPEATED) {
+      readRowsRepeatedly(blackhole);
+    } else {
+      writeRows(blackhole);
+    }
+  }
+
+  /** Reads single field from row (of type {@link RowWithGetters}). */
+  protected void readField(Row row, Blackhole blackhole) {
+    blackhole.consume(row.getValue(0));
+  }
+
+  private void readRowsOnce(Blackhole blackhole) {
+    for (Row row : rows) {
+      readField(row, blackhole);
+    }
+  }
+
+  private void readRowsRepeatedly(Blackhole blackhole) {
+    for (Row row : rows) {
+      readField(row, blackhole);
+      readField(row, blackhole);
+      readField(row, blackhole);
+    }
+  }
+
+  private void writeRows(Blackhole blackhole) {
+    for (int i = 0; i < bundleSize; i++) {
+      blackhole.consume(fromRow.apply(rowWithStorage));
+    }
+  }
+
+  private static final Instant TODAY = DateTime.now().withTimeAtStartOfDay().toInstant();
+
+  /** Creates row of type {@link RowWithStorage} with single field matching the provided schema. */
+  private static Row createRowWithStorage(Schema schema) {
+    return RowWithStorage.withSchema(schema)
+        .attachValues(createValue(42, schema.getField(0).getType()));
+  }
+
+  private static Object createValue(int val, FieldType type) {
+    switch (type.getTypeName()) {
+      case STRING:
+        return String.valueOf(val);
+      case INT32:
+        return val;
+      case BYTES:
+        return String.valueOf(val).getBytes(StandardCharsets.UTF_8);
+      case DATETIME:
+        return TODAY.minus(Duration.standardHours(val));
+      case ROW:
+        return createRowWithStorage(type.getRowSchema());
+      case ARRAY:
+      case ITERABLE:
+        return ImmutableList.of(createValue(val, type.getCollectionElementType()));
+      case MAP:
+        return ImmutableMap.of(
+            createValue(val, type.getMapKeyType()), createValue(val, type.getMapValueType()));
+      default:
+        throw new RuntimeException("No value factory for type " + type);
+    }
+  }
+}
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundles.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundles.java
new file mode 100644
index 0000000..a1a8ca7
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/RowBundles.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.jmh.schemas;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+import org.apache.beam.sdk.schemas.JavaFieldSchema;
+import org.apache.beam.sdk.schemas.annotations.DefaultSchema;
+import org.apache.beam.sdk.values.Row;
+import org.joda.time.DateTime;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+public interface RowBundles {
+  @State(Scope.Benchmark)
+  class IntBundle extends RowBundle<IntBundle.Field> {
+    public IntBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public int field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class NestedIntBundle extends RowBundle<NestedIntBundle.Field> {
+    public NestedIntBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public IntBundle.Field field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      bh.consume(row.getRow(0).getValue(0));
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class StringBundle extends RowBundle<StringBundle.Field> {
+    public StringBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public String field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class StringBuilderBundle extends RowBundle<StringBuilderBundle.Field> {
+    public StringBuilderBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public StringBuilder field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class DateTimeBundle extends RowBundle<DateTimeBundle.Field> {
+    public DateTimeBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public DateTime field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class BytesBundle extends RowBundle<BytesBundle.Field> {
+    public BytesBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public byte[] field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class NestedBytesBundle extends RowBundle<NestedBytesBundle.Field> {
+    public NestedBytesBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public BytesBundle.Field field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      bh.consume(row.getRow(0).getValue(0));
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class ByteBufferBundle extends RowBundle<ByteBufferBundle.Field> {
+    public ByteBufferBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public ByteBuffer field;
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class ArrayOfStringBundle extends RowBundle<ArrayOfStringBundle.Field> {
+    public ArrayOfStringBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public String[] field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      bh.consume(((List<String>) row.getValue(0)).get(0));
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class ArrayOfNestedStringBundle extends RowBundle<ArrayOfNestedStringBundle.Field> {
+    public ArrayOfNestedStringBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public StringBundle.Field[] field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      bh.consume(((List<Row>) row.getValue(0)).get(0).getValue(0));
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class MapOfIntBundle extends RowBundle<MapOfIntBundle.Field> {
+    public MapOfIntBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public Map<Integer, Integer> field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      Map.Entry<?, ?> entry = row.getMap(0).entrySet().iterator().next();
+      bh.consume(entry.getKey());
+      bh.consume(entry.getValue());
+    }
+  }
+
+  @State(Scope.Benchmark)
+  class MapOfNestedIntBundle extends RowBundle<MapOfNestedIntBundle.Field> {
+    public MapOfNestedIntBundle() {
+      super(Field.class);
+    }
+
+    @DefaultSchema(JavaFieldSchema.class)
+    public static class Field {
+      public Map<Integer, NestedIntBundle.Field> field;
+    }
+
+    @Override
+    protected final void readField(Row row, Blackhole bh) {
+      Map.Entry<Integer, Row> entry = row.<Integer, Row>getMap(0).entrySet().iterator().next();
+      bh.consume(entry.getKey());
+      bh.consume(entry.getValue().getValue(0));
+    }
+  }
+}
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/package-info.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/package-info.java
new file mode 100644
index 0000000..f192347
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/schemas/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Benchmarks for schemas. */
+package org.apache.beam.sdk.jmh.schemas;
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmark.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmark.java
new file mode 100644
index 0000000..2a33c76
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmark.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.jmh.util;
+
+import java.util.List;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
+import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
+import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.UnsafeByteOperations;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Splitter;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.infra.Blackhole;
+
+/** Benchmarks for {@link ByteStringOutputStream}. */
+public class ByteStringOutputStreamBenchmark {
+
+  private static final int MANY_WRITES = 10_000;
+  private static final int FEW_WRITES = 5;
+  private static final byte[] LARGE_BUFFER = new byte[1000];
+  private static final byte[] SMALL_BUFFER = new byte[20];
+
+  @State(Scope.Thread)
+  public static class ProtobufByteStringOutputStream {
+    final ByteString.Output output = ByteString.newOutput();
+
+    @TearDown
+    public void tearDown() throws Exception {
+      output.close();
+    }
+  }
+
+  @State(Scope.Thread)
+  public static class SdkCoreByteStringOutputStream {
+    final ByteStringOutputStream output = new ByteStringOutputStream();
+
+    @TearDown
+    public void tearDown() throws Exception {
+      output.close();
+    }
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamManyMixedWritesWithoutReuse() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < MANY_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * MANY_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamFewMixedWritesWithoutReuse() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < FEW_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamManyMixedWritesWithoutReuse() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < MANY_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * MANY_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamFewMixedWritesWithoutReuse() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < FEW_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamManyTinyWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(1);
+    }
+    if (output.toByteString().size() != MANY_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamManySmallWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(SMALL_BUFFER);
+    }
+    if (output.toByteString().size() != MANY_WRITES * SMALL_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamManyLargeWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(LARGE_BUFFER);
+    }
+    if (output.toByteString().size() != MANY_WRITES * LARGE_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamFewTinyWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(1);
+    }
+    if (output.toByteString().size() != FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamFewSmallWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(SMALL_BUFFER);
+    }
+    if (output.toByteString().size() != FEW_WRITES * SMALL_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamFewLargeWrites() throws Exception {
+    ByteString.Output output = ByteString.newOutput();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(LARGE_BUFFER);
+    }
+    if (output.toByteString().size() != FEW_WRITES * LARGE_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamManyMixedWritesWithReuse(
+      ProtobufByteStringOutputStream state) throws Exception {
+    ByteString.Output output = state.output;
+    for (int i = 0; i < 9850; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * 9850) {
+      throw new IllegalArgumentException();
+    }
+    output.reset();
+  }
+
+  @Benchmark
+  public void testProtobufByteStringOutputStreamFewMixedWritesWithReuse(
+      ProtobufByteStringOutputStream state) throws Exception {
+    ByteString.Output output = state.output;
+    for (int i = 0; i < FEW_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteString().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.reset();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamManyTinyWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(1);
+    }
+    if (output.toByteString().size() != MANY_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamManySmallWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(SMALL_BUFFER);
+    }
+    if (output.toByteString().size() != MANY_WRITES * SMALL_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamManyLargeWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < MANY_WRITES; ++i) {
+      output.write(LARGE_BUFFER);
+    }
+    if (output.toByteString().size() != MANY_WRITES * LARGE_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamFewTinyWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(1);
+    }
+    if (output.toByteString().size() != FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamFewSmallWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(SMALL_BUFFER);
+    }
+    if (output.toByteString().size() != FEW_WRITES * SMALL_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamFewLargeWrites() throws Exception {
+    ByteStringOutputStream output = new ByteStringOutputStream();
+    for (int i = 0; i < FEW_WRITES; ++i) {
+      output.write(LARGE_BUFFER);
+    }
+    if (output.toByteString().size() != FEW_WRITES * LARGE_BUFFER.length) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamManyMixedWritesWithReuse(
+      SdkCoreByteStringOutputStream state) throws Exception {
+    ByteStringOutputStream output = state.output;
+    for (int i = 0; i < 9850; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteStringAndReset().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * 9850) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  @Benchmark
+  public void testSdkCoreByteStringOutputStreamFewMixedWritesWithReuse(
+      SdkCoreByteStringOutputStream state) throws Exception {
+    ByteStringOutputStream output = state.output;
+    for (int i = 0; i < FEW_WRITES; i++) {
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+      output.write(LARGE_BUFFER);
+      output.write(1);
+      output.write(SMALL_BUFFER);
+      output.write(1);
+    }
+    if (output.toByteStringAndReset().size()
+        != (4 + 2 * SMALL_BUFFER.length + LARGE_BUFFER.length) * FEW_WRITES) {
+      throw new IllegalArgumentException();
+    }
+    output.close();
+  }
+
+  /**
+   * The benchmarks below detail the cost of creating a new buffer versus copying a subset of the
+   * existing one and re-using the larger buffer.
+   */
+  public static class NewVsCopy {
+    @State(Scope.Thread)
+    public static class ArrayCopyState {
+      @Param({
+        "512/1024", "640/1024", "768/1024", "896/1024",
+        "4096/8192", "5120/8192", "6144/8192", "7168/8192",
+        "20480/65536", "24576/65536", "28672/65536", "32768/65536",
+        "131072/262144", "163840/262144", "196608/262144", "229376/262144",
+        "524288/1048576", "655360/1048576", "786432/1048576", "917504/1048576"
+      })
+      String copyVsNew;
+
+      int copyThreshold;
+      int byteArraySize;
+      public byte[] src;
+
+      @Setup
+      public void setup() {
+        List<String> parts = Splitter.on('/').splitToList(copyVsNew);
+        copyThreshold = Integer.parseInt(parts.get(0));
+        byteArraySize = Integer.parseInt(parts.get(1));
+        src = new byte[byteArraySize];
+      }
+    }
+
+    @Benchmark
+    public void testCopyArray(ArrayCopyState state, Blackhole bh) {
+      byte[] dest = new byte[state.copyThreshold];
+      System.arraycopy(state.src, 0, dest, 0, state.copyThreshold);
+      bh.consume(UnsafeByteOperations.unsafeWrap(dest));
+    }
+
+    @State(Scope.Benchmark)
+    public static class ArrayNewState {
+      @Param({"1024", "8192", "65536", "262144", "1048576"})
+      int byteArraySize;
+
+      public byte[] src;
+
+      @Setup
+      public void setup() {
+        src = new byte[byteArraySize];
+      }
+    }
+
+    @Benchmark
+    public void testNewArray(ArrayNewState state, Blackhole bh) {
+      bh.consume(UnsafeByteOperations.unsafeWrap(state.src, 0, state.byteArraySize));
+      state.src = new byte[state.byteArraySize];
+    }
+  }
+}
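
A minimal sketch of how these benchmarks could be launched programmatically through the standard JMH Runner API (not part of this change; the launcher class name, package placement alongside the benchmark class, and the fork/iteration counts are illustrative assumptions):

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class ByteStringBenchmarkLauncher {
  public static void main(String[] args) throws RunnerException {
    // Select every benchmark method declared in ByteStringOutputStreamBenchmark, including NewVsCopy.
    Options options =
        new OptionsBuilder()
            .include(ByteStringOutputStreamBenchmark.class.getSimpleName())
            .forks(1)               // illustrative values, not taken from this change
            .warmupIterations(2)
            .measurementIterations(3)
            .build();
    new Runner(options).run();
  }
}
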
diff --git a/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/package-info.java b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/package-info.java
new file mode 100644
index 0000000..647f31b
--- /dev/null
+++ b/sdks/java/core/jmh/src/main/java/org/apache/beam/sdk/jmh/util/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Benchmarks for core SDK utility classes. */
+@DefaultAnnotation(NonNull.class)
+package org.apache.beam.sdk.jmh.util;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import org.checkerframework.checker.nullness.qual.NonNull;
diff --git a/sdks/java/core/jmh/src/test/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmarkTest.java b/sdks/java/core/jmh/src/test/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmarkTest.java
new file mode 100644
index 0000000..31e15fb
--- /dev/null
+++ b/sdks/java/core/jmh/src/test/java/org/apache/beam/sdk/jmh/util/ByteStringOutputStreamBenchmarkTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.jmh.util;
+
+import org.apache.beam.sdk.jmh.util.ByteStringOutputStreamBenchmark.NewVsCopy.ArrayCopyState;
+import org.apache.beam.sdk.jmh.util.ByteStringOutputStreamBenchmark.NewVsCopy.ArrayNewState;
+import org.apache.beam.sdk.jmh.util.ByteStringOutputStreamBenchmark.ProtobufByteStringOutputStream;
+import org.apache.beam.sdk.jmh.util.ByteStringOutputStreamBenchmark.SdkCoreByteStringOutputStream;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.openjdk.jmh.infra.Blackhole;
+
+/** Tests for {@link ByteStringOutputStreamBenchmark}. */
+@RunWith(JUnit4.class)
+public class ByteStringOutputStreamBenchmarkTest {
+  @Test
+  public void testProtobufByteStringOutputStream() throws Exception {
+    new ByteStringOutputStreamBenchmark()
+        .testProtobufByteStringOutputStreamFewMixedWritesWithoutReuse();
+    new ByteStringOutputStreamBenchmark()
+        .testProtobufByteStringOutputStreamFewMixedWritesWithReuse(
+            new ProtobufByteStringOutputStream());
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamFewLargeWrites();
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamFewSmallWrites();
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamFewTinyWrites();
+    new ByteStringOutputStreamBenchmark()
+        .testProtobufByteStringOutputStreamManyMixedWritesWithoutReuse();
+    new ByteStringOutputStreamBenchmark()
+        .testProtobufByteStringOutputStreamManyMixedWritesWithReuse(
+            new ProtobufByteStringOutputStream());
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamManyLargeWrites();
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamManySmallWrites();
+    new ByteStringOutputStreamBenchmark().testProtobufByteStringOutputStreamManyTinyWrites();
+  }
+
+  @Test
+  public void testSdkCoreByteStringOutputStream() throws Exception {
+    new ByteStringOutputStreamBenchmark()
+        .testSdkCoreByteStringOutputStreamFewMixedWritesWithoutReuse();
+    new ByteStringOutputStreamBenchmark()
+        .testSdkCoreByteStringOutputStreamFewMixedWritesWithReuse(
+            new SdkCoreByteStringOutputStream());
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamFewLargeWrites();
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamFewSmallWrites();
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamFewTinyWrites();
+    new ByteStringOutputStreamBenchmark()
+        .testSdkCoreByteStringOutputStreamManyMixedWritesWithoutReuse();
+    new ByteStringOutputStreamBenchmark()
+        .testSdkCoreByteStringOutputStreamManyMixedWritesWithReuse(
+            new SdkCoreByteStringOutputStream());
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamManyLargeWrites();
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamManySmallWrites();
+    new ByteStringOutputStreamBenchmark().testSdkCoreByteStringOutputStreamManyTinyWrites();
+  }
+
+  @Test
+  public void testNewVsCopy() throws Exception {
+    Blackhole bh =
+        new Blackhole(
+            "Today's password is swordfish. I understand instantiating Blackholes directly is dangerous.");
+    ArrayCopyState copyState = new ArrayCopyState();
+    copyState.copyVsNew = "512/2048";
+    copyState.setup();
+
+    ArrayNewState newState = new ArrayNewState();
+    newState.byteArraySize = 2048;
+    newState.setup();
+
+    new ByteStringOutputStreamBenchmark.NewVsCopy().testCopyArray(copyState, bh);
+    new ByteStringOutputStreamBenchmark.NewVsCopy().testNewArray(newState, bh);
+  }
+}
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ByteStringOutputStream.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ByteStringOutputStream.java
new file mode 100644
index 0000000..112032e
--- /dev/null
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ByteStringOutputStream.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.util;
+
+import java.io.OutputStream;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
+import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.UnsafeByteOperations;
+
+/**
+ * An {@link OutputStream} that produces {@link ByteString}s.
+ *
+ * <p>Closing this output stream does nothing.
+ *
+ * <p>This class is not thread-safe; callers are expected to provide appropriate locking. This
+ * differs from {@link ByteString.Output}, which synchronizes its writes.
+ */
+@NotThreadSafe
+public final class ByteStringOutputStream extends OutputStream {
+
+  // This constant was chosen based upon Protobuf's ByteString#CONCATENATE_BY_COPY (which
+  // isn't public) to prevent copying the bytes again when concatenating ByteStrings instead
+  // of appending.
+  private static final int DEFAULT_CAPACITY = 128;
+
+  // ByteStringOutputStreamBenchmark.NewVsCopy shows that creating 4 new arrays of 256k each
+  // is almost twice as fast as creating a single array of 1024k, hence the 256k chunk cap
+  // below.
+  //
+  // This number should be tuned periodically as hardware changes.
+  private static final int MAX_CHUNK_SIZE = 256 * 1024;
+
+  // ByteString to be concatenated to create the result
+  private ByteString result;
+
+  // Current buffer to which we are writing
+  private byte[] buffer;
+
+  // Location in buffer[] to which we write the next byte.
+  private int bufferPos;
+
+  /** Creates a new output stream with a default capacity. */
+  public ByteStringOutputStream() {
+    this(DEFAULT_CAPACITY);
+  }
+
+  /**
+   * Creates a new output stream with the specified initial capacity.
+   *
+   * @param initialCapacity the initial capacity of the output stream.
+   */
+  public ByteStringOutputStream(int initialCapacity) {
+    if (initialCapacity < 0) {
+      throw new IllegalArgumentException("Initial capacity < 0");
+    }
+    this.buffer = new byte[initialCapacity];
+    this.result = ByteString.EMPTY;
+  }
+
+  @Override
+  public void write(int b) {
+    if (bufferPos == buffer.length) {
+      // Grow the total capacity (roughly doubling it), but cap the new chunk at the max chunk size.
+      result = result.concat(UnsafeByteOperations.unsafeWrap(buffer));
+      buffer = new byte[Math.min(Math.max(1, result.size()), MAX_CHUNK_SIZE)];
+      bufferPos = 0;
+    }
+    buffer[bufferPos++] = (byte) b;
+  }
+
+  @Override
+  public void write(byte[] b, int offset, int length) {
+    int remainingSpaceInBuffer = buffer.length - bufferPos;
+    while (length > remainingSpaceInBuffer) {
+      // Use up the current buffer
+      System.arraycopy(b, offset, buffer, bufferPos, remainingSpaceInBuffer);
+      offset += remainingSpaceInBuffer;
+      length -= remainingSpaceInBuffer;
+
+      result = result.concat(UnsafeByteOperations.unsafeWrap(buffer));
+      // Grow the total capacity, but cap the new chunk at the max chunk size.
+      remainingSpaceInBuffer = Math.min(Math.max(length, result.size()), MAX_CHUNK_SIZE);
+      buffer = new byte[remainingSpaceInBuffer];
+      bufferPos = 0;
+    }
+
+    System.arraycopy(b, offset, buffer, bufferPos, length);
+    bufferPos += length;
+  }
+
+  /**
+   * Creates a byte string with the size and contents of this output stream.
+   *
+   * <p>Note that the caller must not invoke {@link #toByteStringAndReset} afterwards, as the
+   * internal buffer may be mutated by a future {@link #write}, mutating {@link ByteString}s
+   * returned in the past.
+   */
+  public ByteString toByteString() {
+    // We specifically choose to concatenate here since the user won't be re-using the buffer.
+    return result.concat(UnsafeByteOperations.unsafeWrap(buffer, 0, bufferPos));
+  }
+
+  /**
+   * Creates a byte string with the size and contents of this output stream, and resets the output
+   * stream so that it can be re-used, possibly re-using any existing buffers.
+   */
+  public ByteString toByteStringAndReset() {
+    ByteString rval;
+    if (bufferPos > 0) {
+      final boolean copy;
+      // These thresholds are from the results of ByteStringOutputStreamBenchmark.NewVsCopy,
+      // which show that at or below these thresholds we should copy the bytes and re-use the
+      // existing buffer, since creating a new one is more expensive.
+      if (buffer.length <= 128) {
+        // Always copy small byte arrays to prevent large chunks of wasted space
+        // when dealing with very small amounts of data.
+        copy = true;
+      } else if (buffer.length <= 1024) {
+        copy = bufferPos <= buffer.length * 0.875;
+      } else if (buffer.length <= 8192) {
+        copy = bufferPos <= buffer.length * 0.75;
+      } else {
+        copy = bufferPos <= buffer.length * 0.4375;
+      }
+      if (copy) {
+        byte[] bufferCopy = new byte[bufferPos];
+        System.arraycopy(buffer, 0, bufferCopy, 0, bufferPos);
+        rval = result.concat(UnsafeByteOperations.unsafeWrap(bufferCopy));
+      } else {
+        rval = result.concat(UnsafeByteOperations.unsafeWrap(buffer, 0, bufferPos));
+        buffer = new byte[Math.min(rval.size(), MAX_CHUNK_SIZE)];
+      }
+      bufferPos = 0;
+    } else {
+      rval = result;
+    }
+    result = ByteString.EMPTY;
+    return rval;
+  }
+
+  /**
+   * Returns the current size of the output stream.
+   *
+   * @return the current size of the output stream
+   */
+  public int size() {
+    return result.size() + bufferPos;
+  }
+
+  @Override
+  public String toString() {
+    return String.format(
+        "<ByteStringOutputStream@%s size=%d>",
+        Integer.toHexString(System.identityHashCode(this)), size());
+  }
+}
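
A brief usage sketch of the new stream, with a hypothetical caller class; it shows the re-use pattern exercised by the *WithReuse benchmarks above (toByteStringAndReset between logical records). Per the class javadoc, once toByteString has been called, toByteStringAndReset must not be invoked on the same stream:

import org.apache.beam.sdk.util.ByteStringOutputStream;
import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;

public class ByteStringOutputStreamUsage {
  public static void main(String[] args) throws Exception {
    ByteStringOutputStream out = new ByteStringOutputStream();

    // Encode two logical records, re-using the stream (and possibly its buffer) in between.
    out.write(new byte[] {1, 2, 3});
    ByteString first = out.toByteStringAndReset();
    out.write(new byte[] {4, 5});
    ByteString second = out.toByteStringAndReset();

    System.out.println(first.size() + " " + second.size()); // prints "3 2"
    out.close();
  }
}
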
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/PipelineTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/PipelineTest.java
index 1bb4caa..5e19fa0 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/PipelineTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/PipelineTest.java
@@ -18,7 +18,6 @@
 package org.apache.beam.sdk;
 
 import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.instanceOf;
@@ -407,13 +406,9 @@
         new PipelineVisitor.Defaults() {
           @Override
           public CompositeBehavior enterCompositeTransform(Node node) {
-            if (!node.isRootNode()) {
-              assertThat(
-                  node.getTransform().getClass(),
-                  not(
-                      anyOf(
-                          Matchers.equalTo(GenerateSequence.class),
-                          Matchers.equalTo(Create.Values.class))));
+            String fullName = node.getFullName();
+            if (fullName.equals("unbounded") || fullName.equals("bounded")) {
+              assertThat(node.getTransform(), Matchers.instanceOf(EmptyFlatten.class));
             }
             return CompositeBehavior.ENTER_TRANSFORM;
           }
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/GroupIntoBatchesTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/GroupIntoBatchesTest.java
index 9ef4c10..9a5bad2 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/GroupIntoBatchesTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/GroupIntoBatchesTest.java
@@ -35,10 +35,12 @@
 import org.apache.beam.sdk.testing.TestStream.Event;
 import org.apache.beam.sdk.testing.TestStream.ProcessingTimeEvent;
 import org.apache.beam.sdk.testing.TestStream.WatermarkEvent;
+import org.apache.beam.sdk.testing.UsesOnWindowExpiration;
 import org.apache.beam.sdk.testing.UsesStatefulParDo;
 import org.apache.beam.sdk.testing.UsesTestStream;
 import org.apache.beam.sdk.testing.UsesTestStreamWithProcessingTime;
 import org.apache.beam.sdk.testing.UsesTimersInParDo;
+import org.apache.beam.sdk.testing.ValidatesRunner;
 import org.apache.beam.sdk.transforms.windowing.AfterPane;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.FixedWindows;
@@ -97,7 +99,13 @@
   }
 
   @Test
-  @Category({NeedsRunner.class, UsesTimersInParDo.class, UsesStatefulParDo.class})
+  @Category({
+    ValidatesRunner.class,
+    NeedsRunner.class,
+    UsesTimersInParDo.class,
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
+  })
   public void testInGlobalWindowBatchSizeCount() {
     PCollection<KV<String, Iterable<String>>> collection =
         pipeline
@@ -130,7 +138,13 @@
   }
 
   @Test
-  @Category({NeedsRunner.class, UsesTimersInParDo.class, UsesStatefulParDo.class})
+  @Category({
+    ValidatesRunner.class,
+    NeedsRunner.class,
+    UsesTimersInParDo.class,
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
+  })
   public void testInGlobalWindowBatchSizeByteSize() {
     PCollection<KV<String, Iterable<String>>> collection =
         pipeline
@@ -172,7 +186,13 @@
   }
 
   @Test
-  @Category({NeedsRunner.class, UsesTimersInParDo.class, UsesStatefulParDo.class})
+  @Category({
+    ValidatesRunner.class,
+    NeedsRunner.class,
+    UsesTimersInParDo.class,
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
+  })
   public void testInGlobalWindowBatchSizeByteSizeFn() {
     PCollection<KV<String, Iterable<String>>> collection =
         pipeline
@@ -223,7 +243,13 @@
   }
 
   @Test
-  @Category({NeedsRunner.class, UsesTimersInParDo.class, UsesStatefulParDo.class})
+  @Category({
+    ValidatesRunner.class,
+    NeedsRunner.class,
+    UsesTimersInParDo.class,
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
+  })
   public void testWithShardedKeyInGlobalWindow() {
     // Since with default sharding, the number of subshards of a key is nondeterministic, create
     // a large number of input elements and a small batch size and check there is no batch larger
@@ -300,14 +326,22 @@
                       numFullBatches > totalNumBatches / 2);
                   return null;
                 });
-    pipeline
-        .runWithAdditionalOptionArgs(ImmutableList.of("--targetParallelism=1"))
-        .waitUntilFinish();
+    if (pipeline.getOptions().getRunner().getSimpleName().equals("DirectRunner")) {
+      pipeline.runWithAdditionalOptionArgs(ImmutableList.of("--targetParallelism=1"));
+    } else {
+      pipeline.run();
+    }
   }
 
   /** test behavior when the number of input elements is not evenly divisible by batch size. */
   @Test
-  @Category({NeedsRunner.class, UsesTimersInParDo.class, UsesStatefulParDo.class})
+  @Category({
+    ValidatesRunner.class,
+    NeedsRunner.class,
+    UsesTimersInParDo.class,
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
+  })
   public void testWithUnevenBatches() {
     PCollection<KV<String, Iterable<String>>> collection =
         pipeline
@@ -315,6 +349,7 @@
             .apply(GroupIntoBatches.ofSize(BATCH_SIZE))
             // set output coder
             .setCoder(KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(StringUtf8Coder.of())));
+
     PAssert.that("Incorrect batch size in one or more elements", collection)
         .satisfies(
             new SerializableFunction<Iterable<KV<String, Iterable<String>>>, Void>() {
@@ -345,10 +380,12 @@
 
   @Test
   @Category({
+    ValidatesRunner.class,
     NeedsRunner.class,
     UsesTimersInParDo.class,
     UsesTestStream.class,
-    UsesStatefulParDo.class
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
   })
   public void testInStreamingMode() {
     int timestampInterval = 1;
@@ -449,11 +486,13 @@
 
   @Test
   @Category({
+    ValidatesRunner.class,
     NeedsRunner.class,
     UsesTimersInParDo.class,
     UsesTestStream.class,
     UsesTestStreamWithProcessingTime.class,
-    UsesStatefulParDo.class
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
   })
   public void testBufferingTimerInFixedWindow() {
     final Duration windowDuration = Duration.standardSeconds(4);
@@ -572,11 +611,13 @@
 
   @Test
   @Category({
+    ValidatesRunner.class,
     NeedsRunner.class,
     UsesTimersInParDo.class,
     UsesTestStream.class,
     UsesTestStreamWithProcessingTime.class,
-    UsesStatefulParDo.class
+    UsesStatefulParDo.class,
+    UsesOnWindowExpiration.class
   })
   public void testBufferingTimerInGlobalWindow() {
     final Duration maxBufferingDuration = Duration.standardSeconds(5);
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/util/ByteStringOutputStreamTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/util/ByteStringOutputStreamTest.java
new file mode 100644
index 0000000..faa77cf
--- /dev/null
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/util/ByteStringOutputStreamTest.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.UnsafeByteOperations;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class ByteStringOutputStreamTest {
+
+  @Test
+  public void testInvalidInitialCapacity() throws Exception {
+    assertThrows(
+        "Initial capacity < 0",
+        IllegalArgumentException.class,
+        () -> new ByteStringOutputStream(-1));
+  }
+
+  @Test
+  public void testWriteBytes() throws Exception {
+    ByteStringOutputStream out = new ByteStringOutputStream();
+    assertEquals(0, out.size());
+    for (int numElements = 0; numElements < 1024 * 1024; numElements = next(numElements)) {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      DataOutputStream dataOut = new DataOutputStream(baos);
+      try {
+        for (int i = 0; i < numElements; ++i) {
+          dataOut.writeInt(i);
+        }
+        dataOut.close();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      dataOut.close();
+      byte[] testBuffer = baos.toByteArray();
+
+      for (int pos = 0; pos < testBuffer.length; ) {
+        if (testBuffer[pos] == 0) {
+          out.write(testBuffer[pos]);
+          pos += 1;
+        } else {
+          int len = Math.min(testBuffer.length - pos, Math.abs(testBuffer[pos]));
+          out.write(testBuffer, pos, len);
+          pos += len;
+        }
+        assertEquals(pos, out.size());
+      }
+      assertEquals(UnsafeByteOperations.unsafeWrap(testBuffer), out.toByteString());
+      assertEquals(UnsafeByteOperations.unsafeWrap(testBuffer), out.toByteStringAndReset());
+    }
+  }
+
+  @Test
+  public void testWriteBytesWithZeroInitialCapacity() throws Exception {
+    for (int numElements = 0; numElements < 1024 * 1024; numElements = next(numElements)) {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      DataOutputStream dataOut = new DataOutputStream(baos);
+      try {
+        for (int i = 0; i < numElements; ++i) {
+          dataOut.writeInt(i);
+        }
+        dataOut.close();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      dataOut.close();
+      byte[] testBuffer = baos.toByteArray();
+
+      ByteStringOutputStream out = new ByteStringOutputStream(0);
+      assertEquals(0, out.size());
+
+      for (int pos = 0; pos < testBuffer.length; ) {
+        if (testBuffer[pos] == 0) {
+          out.write(testBuffer[pos]);
+          pos += 1;
+        } else {
+          int len = Math.min(testBuffer.length - pos, Math.abs(testBuffer[pos]));
+          out.write(testBuffer, pos, len);
+          pos += len;
+        }
+        assertEquals(pos, out.size());
+      }
+      assertEquals(UnsafeByteOperations.unsafeWrap(testBuffer), out.toByteString());
+      assertEquals(UnsafeByteOperations.unsafeWrap(testBuffer), out.toByteStringAndReset());
+    }
+  }
+
+  // Grow the number of elements based upon an approximation of the Fibonacci sequence.
+  private static int next(int current) {
+    double a = Math.max(1, current * (1 + Math.sqrt(5)) / 2.0);
+    return (int) Math.round(a);
+  }
+}
diff --git a/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/ExpansionServiceTest.java b/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/ExpansionServiceTest.java
index b9cbdc7..39076d0 100644
--- a/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/ExpansionServiceTest.java
+++ b/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/ExpansionServiceTest.java
@@ -53,8 +53,8 @@
 import org.apache.beam.sdk.schemas.annotations.DefaultSchema;
 import org.apache.beam.sdk.transforms.Count;
 import org.apache.beam.sdk.transforms.Impulse;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.Row;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Charsets;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
@@ -353,7 +353,7 @@
 
   private static ExternalTransforms.ExternalConfigurationPayload
       encodeRowIntoExternalConfigurationPayload(Row row) {
-    ByteString.Output outputStream = ByteString.newOutput();
+    ByteStringOutputStream outputStream = new ByteStringOutputStream();
     try {
       SchemaCoder.of(row.getSchema()).encode(row, outputStream);
     } catch (IOException e) {
diff --git a/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/JavaClassLookupTransformProviderTest.java b/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/JavaClassLookupTransformProviderTest.java
index 631d208..26b6a59 100644
--- a/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/JavaClassLookupTransformProviderTest.java
+++ b/sdks/java/expansion-service/src/test/java/org/apache/beam/sdk/expansion/service/JavaClassLookupTransformProviderTest.java
@@ -59,6 +59,7 @@
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.PBegin;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.Row;
@@ -1144,7 +1145,7 @@
   }
 
   private ByteString getProtoPayloadFromRow(Row row) {
-    ByteString.Output outputStream = ByteString.newOutput();
+    ByteStringOutputStream outputStream = new ByteStringOutputStream();
     try {
       SchemaCoder.of(row.getSchema()).encode(row, outputStream);
     } catch (IOException e) {
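
Both expansion-service test changes (this file and ExpansionServiceTest above) make the same substitution; a self-contained sketch of the resulting pattern, using a hypothetical helper name and assuming a Row whose Schema is attached:

import java.io.IOException;
import org.apache.beam.sdk.schemas.SchemaCoder;
import org.apache.beam.sdk.util.ByteStringOutputStream;
import org.apache.beam.sdk.values.Row;
import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;

class RowEncodingExample {
  // Encodes a Row to a ByteString via the SDK-owned stream rather than the vendored ByteString.Output.
  static ByteString encodeRow(Row row) {
    ByteStringOutputStream outputStream = new ByteStringOutputStream();
    try {
      SchemaCoder.of(row.getSchema()).encode(row, outputStream);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return outputStream.toByteString();
  }
}
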
diff --git a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtil.java b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtil.java
index 30ce9db..c5d92a1 100644
--- a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtil.java
+++ b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtil.java
@@ -57,6 +57,7 @@
 import java.nio.file.AccessDeniedException;
 import java.nio.file.FileAlreadyExistsException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -915,6 +916,33 @@
         } else {
           throw new FileNotFoundException(e.getMessage());
         }
+      } else if (e.getCode() == 403
+          && e.getErrors().size() == 1
+          && e.getErrors().get(0).getReason().equals("retentionPolicyNotMet")) {
+        List<StorageObjectOrIOException> srcAndDestObjects = getObjects(Arrays.asList(from, to));
+        String srcHash = srcAndDestObjects.get(0).storageObject().getMd5Hash();
+        String destHash = srcAndDestObjects.get(1).storageObject().getMd5Hash();
+        if (srcHash != null && srcHash.equals(destHash)) {
+          // Source and destination are identical. Treat this as a successful rewrite
+          LOG.warn(
+              "Caught retentionPolicyNotMet error while rewriting to a bucket with retention "
+                  + "policy. Skipping because destination {} and source {} are considered identical "
+                  + "because their MD5 Hashes are equal.",
+              getFrom(),
+              getTo());
+
+          if (deleteSource) {
+            readyToEnqueue = true;
+            performDelete = true;
+          } else {
+            readyToEnqueue = false;
+          }
+          lastError = null;
+        } else {
+          // User is attempting to write to a file that hasn't met its retention policy yet.
+          // Not a transient error so likely will not be fixed by a retry
+          throw new IOException(e.getMessage());
+        }
       } else {
         lastError = e;
         readyToEnqueue = true;
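
The condition added above can be read as a small predicate; a sketch with an illustrative class and method name (not part of the change):

class RetentionPolicyCheck {
  // Skipping a retentionPolicyNotMet rewrite is only safe when source and destination are
  // considered identical, approximated here (as above) by equal, non-null MD5 hashes.
  static boolean canSkipRewrite(String srcMd5Hash, String destMd5Hash) {
    return srcMd5Hash != null && srcMd5Hash.equals(destMd5Hash);
  }
}
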
diff --git a/sdks/java/extensions/google-cloud-platform-core/src/test/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtilTest.java b/sdks/java/extensions/google-cloud-platform-core/src/test/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtilTest.java
index 7d19053..8c02d21 100644
--- a/sdks/java/extensions/google-cloud-platform-core/src/test/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtilTest.java
+++ b/sdks/java/extensions/google-cloud-platform-core/src/test/java/org/apache/beam/sdk/extensions/gcp/util/GcsUtilTest.java
@@ -1099,6 +1099,8 @@
                 GoogleJsonError error = new GoogleJsonError();
                 error.setCode(HttpStatusCodes.STATUS_CODE_NOT_FOUND);
                 cb.onFailure(error, null);
+              } catch (GoogleJsonResponseException e) {
+                cb.onFailure(e.getDetails(), null);
               } catch (Exception e) {
                 System.out.println("Propagating exception as server error " + e);
                 e.printStackTrace();
@@ -1260,6 +1262,82 @@
   }
 
   @Test
+  public void testThrowRetentionPolicyNotMetErrorWhenUnequalChecksum() throws IOException {
+    // ./gradlew sdks:java:extensions:google-cloud-platform-core:test --tests
+    // org.apache.beam.sdk.extensions.gcp.util.GcsUtilTest.testThrowRetentionPolicyNotMetErrorWhenUnequalChecksum
+    GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil();
+
+    Storage mockStorage = Mockito.mock(Storage.class);
+    gcsUtil.setStorageClient(mockStorage);
+    gcsUtil.setBatchRequestSupplier(() -> new FakeBatcher());
+
+    Storage.Objects mockStorageObjects = Mockito.mock(Storage.Objects.class);
+    Storage.Objects.Get mockGetRequest1 = Mockito.mock(Storage.Objects.Get.class);
+    Storage.Objects.Get mockGetRequest2 = Mockito.mock(Storage.Objects.Get.class);
+    Storage.Objects.Rewrite mockStorageRewrite = Mockito.mock(Storage.Objects.Rewrite.class);
+
+    // GCS objects used to check the hashes of the source and destination when the rewrite fails.
+    StorageObject srcObject = new StorageObject().setMd5Hash("a");
+    StorageObject destObject = new StorageObject().setMd5Hash("b");
+
+    when(mockStorage.objects()).thenReturn(mockStorageObjects);
+    when(mockStorageObjects.rewrite("bucket", "s0", "bucket", "d0", null))
+        .thenReturn(mockStorageRewrite);
+    when(mockStorageRewrite.execute())
+        .thenThrow(googleJsonResponseException(403, "retentionPolicyNotMet", "Too soon"));
+    when(mockStorageObjects.get("bucket", "s0")).thenReturn(mockGetRequest1);
+    when(mockGetRequest1.execute()).thenReturn(srcObject);
+    when(mockStorageObjects.get("bucket", "d0")).thenReturn(mockGetRequest2);
+    when(mockGetRequest2.execute()).thenReturn(destObject);
+
+    assertThrows(IOException.class, () -> gcsUtil.rename(makeStrings("s", 1), makeStrings("d", 1)));
+
+    verify(mockStorageRewrite, times(1)).execute();
+  }
+
+  @Test
+  public void testIgnoreRetentionPolicyNotMetErrorWhenEqualChecksum() throws IOException {
+    GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil();
+
+    Storage mockStorage = Mockito.mock(Storage.class);
+    gcsUtil.setStorageClient(mockStorage);
+    gcsUtil.setBatchRequestSupplier(() -> new FakeBatcher());
+
+    Storage.Objects mockStorageObjects = Mockito.mock(Storage.Objects.class);
+    Storage.Objects.Get mockGetRequest = Mockito.mock(Storage.Objects.Get.class);
+    Storage.Objects.Rewrite mockStorageRewrite1 = Mockito.mock(Storage.Objects.Rewrite.class);
+    Storage.Objects.Rewrite mockStorageRewrite2 = Mockito.mock(Storage.Objects.Rewrite.class);
+    Storage.Objects.Delete mockStorageDelete = Mockito.mock(Storage.Objects.Delete.class);
+
+    // GCS object used to check the hashes of the files when the rewrite fails.
+    StorageObject gcsObject = new StorageObject().setMd5Hash("a");
+
+    when(mockStorage.objects()).thenReturn(mockStorageObjects);
+    // First rewrite with retentionPolicyNotMet error.
+    when(mockStorageObjects.rewrite("bucket", "s0", "bucket", "d0", null))
+        .thenReturn(mockStorageRewrite1);
+    when(mockStorageRewrite1.execute())
+        .thenThrow(googleJsonResponseException(403, "retentionPolicyNotMet", "Too soon"));
+    when(mockStorageObjects.get(any(), any())) // to access object hash during error handling
+        .thenReturn(mockGetRequest);
+    when(mockGetRequest.execute())
+        .thenReturn(gcsObject); // both source and destination will get the same hash
+    when(mockStorageObjects.delete("bucket", "s0")).thenReturn(mockStorageDelete);
+
+    // Second rewrite should not be affected.
+    when(mockStorageObjects.rewrite("bucket", "s1", "bucket", "d1", null))
+        .thenReturn(mockStorageRewrite2);
+    when(mockStorageRewrite2.execute()).thenReturn(new RewriteResponse().setDone(true));
+    when(mockStorageObjects.delete("bucket", "s1")).thenReturn(mockStorageDelete);
+
+    gcsUtil.rename(makeStrings("s", 2), makeStrings("d", 2));
+
+    verify(mockStorageRewrite1, times(1)).execute();
+    verify(mockStorageRewrite2, times(1)).execute();
+    verify(mockStorageDelete, times(2)).execute();
+  }
+
+  @Test
   public void testMakeRemoveBatches() throws IOException {
     GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil();
 
diff --git a/sdks/java/extensions/protobuf/src/main/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtils.java b/sdks/java/extensions/protobuf/src/main/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtils.java
index 534a2e7..84712eb 100644
--- a/sdks/java/extensions/protobuf/src/main/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtils.java
+++ b/sdks/java/extensions/protobuf/src/main/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtils.java
@@ -103,8 +103,9 @@
 import org.apache.beam.sdk.util.common.ReflectHelpers;
 import org.apache.beam.sdk.values.Row;
 import org.apache.beam.sdk.values.TypeDescriptor;
-import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.CaseFormat;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Lists;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps;
@@ -190,15 +191,59 @@
   private static final String DEFAULT_PROTO_GETTER_PREFIX = "get";
   private static final String DEFAULT_PROTO_SETTER_PREFIX = "set";
 
+  // https://github.com/apache/beam/issues/21626: there is a slight difference between 'protoc' and
+  // Guava CaseFormat regarding camel case conversion:
+  // - Guava keeps the first character after a number lower case
+  // - protoc makes it upper case
+  // This implementation is based on
+  // https://github.com/protocolbuffers/protobuf/blob/ec79d0d328c7e6cea15cc27fbeb9b018ca289590/src/google/protobuf/compiler/java/helpers.cc#L173-L208
+  @VisibleForTesting
+  static String convertProtoPropertyNameToJavaPropertyName(String input) {
+    boolean capitalizeNextLetter = true;
+    Preconditions.checkArgument(!Strings.isNullOrEmpty(input));
+    StringBuilder result = new StringBuilder(input.length());
+    for (int i = 0; i < input.length(); i++) {
+      final char c = input.charAt(i);
+      if (Character.isLowerCase(c)) {
+        if (capitalizeNextLetter) {
+          result.append(Character.toUpperCase(c));
+        } else {
+          result.append(c);
+        }
+        capitalizeNextLetter = false;
+      } else if (Character.isUpperCase(c)) {
+        if (i == 0 && !capitalizeNextLetter) {
+          // Force first letter to lower-case unless explicitly told to
+          // capitalize it.
+          result.append(Character.toLowerCase(c));
+        } else {
+          // Capital letters after the first are left as-is.
+          result.append(c);
+        }
+        capitalizeNextLetter = false;
+      } else if ('0' <= c && c <= '9') {
+        result.append(c);
+        capitalizeNextLetter = true;
+      } else {
+        capitalizeNextLetter = true;
+      }
+    }
+    // Add a trailing "_" if the name should be altered.
+    if (input.charAt(input.length() - 1) == '#') {
+      result.append('_');
+    }
+    return result.toString();
+  }
+
   static String protoGetterName(String name, FieldType fieldType) {
-    final String camel = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, name);
+    final String camel = convertProtoPropertyNameToJavaPropertyName(name);
     return DEFAULT_PROTO_GETTER_PREFIX
         + camel
         + PROTO_GETTER_SUFFIX.getOrDefault(fieldType.getTypeName(), "");
   }
 
   static String protoSetterName(String name, FieldType fieldType) {
-    final String camel = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, name);
+    final String camel = convertProtoPropertyNameToJavaPropertyName(name);
     return protoSetterPrefix(fieldType) + camel;
   }
 
diff --git a/sdks/java/extensions/protobuf/src/test/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtilsTest.java b/sdks/java/extensions/protobuf/src/test/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtilsTest.java
new file mode 100644
index 0000000..e722a77
--- /dev/null
+++ b/sdks/java/extensions/protobuf/src/test/java/org/apache/beam/sdk/extensions/protobuf/ProtoByteBuddyUtilsTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.extensions.protobuf;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class ProtoByteBuddyUtilsTest {
+
+  private static final String PROTO_PROPERTY_WITH_UNDERSCORE = "foo_bar_id";
+  private static final String PROTO_PROPERTY_WITH_NUMBER = "foo2bar_id";
+
+  private static final String JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_UNDERSCORE = "FooBarId";
+  private static final String JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_NUMBER = "Foo2BarId";
+
+  @Test
+  public void testGetterNameCreationForProtoPropertyWithUnderscore() {
+    Assert.assertEquals(
+        JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_UNDERSCORE,
+        ProtoByteBuddyUtils.convertProtoPropertyNameToJavaPropertyName(
+            PROTO_PROPERTY_WITH_UNDERSCORE));
+  }
+
+  @Test
+  public void testGetterNameCreationForProtoPropertyWithNumber() {
+    Assert.assertEquals(
+        JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_NUMBER,
+        ProtoByteBuddyUtils.convertProtoPropertyNameToJavaPropertyName(PROTO_PROPERTY_WITH_NUMBER));
+  }
+
+  @Test
+  public void testGetterExistenceForProtoPropertyWithUnderscore() {
+    try {
+      Assert.assertNotNull(
+          ProtoByteBuddyUtilsMessages.ProtoByteBuddyUtilsMessageWithUnderscore.class.getMethod(
+              "get" + JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_UNDERSCORE));
+    } catch (NoSuchMethodException e) {
+      Assert.fail(
+          "Unable to find expected getter method for "
+              + PROTO_PROPERTY_WITH_UNDERSCORE
+              + " -> "
+              + e);
+    }
+  }
+
+  @Test
+  public void testGetterExistenceForProtoPropertyWithNumber() {
+    try {
+      Assert.assertNotNull(
+          ProtoByteBuddyUtilsMessages.ProtoByteBuddyUtilsMessageWithNumber.class.getMethod(
+              "get" + JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_NUMBER));
+    } catch (NoSuchMethodException e) {
+      Assert.fail(
+          "Unable to find expected getter method for "
+              + JAVA_PROPERTY_FOR_PROTO_PROPERTY_WITH_NUMBER
+              + " -> "
+              + e);
+    }
+  }
+}
diff --git a/sdks/java/extensions/protobuf/src/test/proto/proto_byte_buddy_utils_messages.proto b/sdks/java/extensions/protobuf/src/test/proto/proto_byte_buddy_utils_messages.proto
new file mode 100644
index 0000000..efade15
--- /dev/null
+++ b/sdks/java/extensions/protobuf/src/test/proto/proto_byte_buddy_utils_messages.proto
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Protocol Buffer messages used for testing ProtoByteBuddyUtils implementation.
+ */
+
+syntax = "proto2";
+
+package proto2_schema_messages;
+
+option java_package = "org.apache.beam.sdk.extensions.protobuf";
+
+message ProtoByteBuddyUtilsMessageWithUnderscore {
+  optional int32 foo_bar_id = 1;
+}
+
+message ProtoByteBuddyUtilsMessageWithNumber {
+  optional int32 foo2bar_id = 1;
+}
diff --git a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/DataframeTransform.java b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/DataframeTransform.java
index 1c95669..720adbd 100644
--- a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/DataframeTransform.java
+++ b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/DataframeTransform.java
@@ -23,7 +23,7 @@
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.Row;
 
-/** Wrapper for invoking external Python DataframeTransform. */
+/** Wrapper for invoking external Python {@code DataframeTransform}. @Experimental */
 public class DataframeTransform extends PTransform<PCollection<Row>, PCollection<Row>> {
   private final String func;
   private final boolean includeIndexes;
diff --git a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/PythonMap.java b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/PythonMap.java
index e1eb9cb..d2e5cb5 100644
--- a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/PythonMap.java
+++ b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/PythonMap.java
@@ -24,6 +24,7 @@
 import org.apache.beam.sdk.values.PCollection;
 import org.checkerframework.checker.nullness.qual.Nullable;
 
+/** Wrapper for invoking external Python {@code Map} transforms. @Experimental */
 public class PythonMap<InputT, OutputT>
     extends PTransform<PCollection<? extends InputT>, PCollection<OutputT>> {
 
diff --git a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/RunInference.java b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/RunInference.java
index 209c706..ec4191c 100644
--- a/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/RunInference.java
+++ b/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/RunInference.java
@@ -18,21 +18,28 @@
 package org.apache.beam.sdk.extensions.python.transforms;
 
 import java.util.Map;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.KvCoder;
 import org.apache.beam.sdk.coders.RowCoder;
 import org.apache.beam.sdk.extensions.python.PythonExternalTransform;
 import org.apache.beam.sdk.schemas.Schema;
+import org.apache.beam.sdk.schemas.Schema.FieldType;
 import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.util.PythonCallableSource;
+import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.Row;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
+import org.checkerframework.checker.nullness.qual.Nullable;
 
-/** Wrapper for invoking external Python RunInference. */
-public class RunInference extends PTransform<PCollection<?>, PCollection<Row>> {
+/** Wrapper for invoking external Python {@code RunInference}. @Experimental */
+public class RunInference<OutputT> extends PTransform<PCollection<?>, PCollection<OutputT>> {
+
   private final String modelLoader;
   private final Schema schema;
   private final Map<String, Object> kwargs;
   private final String expansionService;
+  private final @Nullable Coder<?> keyCoder;
 
   /**
    * Instantiates a multi-language wrapper for a Python RunInference with a given model loader.
@@ -42,12 +49,39 @@
    * @param inferenceType A schema field type for the inference column in output rows.
    * @return A {@link RunInference} for the given model loader.
    */
-  public static RunInference of(
+  public static RunInference<Row> of(
       String modelLoader, Schema.FieldType exampleType, Schema.FieldType inferenceType) {
     Schema schema =
         Schema.of(
             Schema.Field.of("example", exampleType), Schema.Field.of("inference", inferenceType));
-    return new RunInference(modelLoader, schema, ImmutableMap.of(), "");
+    return new RunInference<>(modelLoader, schema, ImmutableMap.of(), null, "");
+  }
+
+  /**
+   * Similar to {@link RunInference#of(String, FieldType, FieldType)} but the input is a {@link
+   * PCollection} of {@link KV}s.
+   *
+   * <p>Also outputs a {@link PCollection} of {@link KV}s of the same key type.
+   *
+   * <p>For example, use this if you are using Python {@code KeyedModelHandler} as the model
+   * handler.
+   *
+   * @param modelLoader A Python callable for a model loader class object.
+   * @param exampleType A schema field type for the example column in output rows.
+   * @param inferenceType A schema field type for the inference column in output rows.
+   * @param keyCoder A {@link Coder} for the input and output key type.
+   * @param <KeyT> The input and output key type, inferred from the provided coder.
+   * @return A {@link RunInference} for the given model loader.
+   */
+  public static <KeyT> RunInference<KV<KeyT, Row>> ofKVs(
+      String modelLoader,
+      Schema.FieldType exampleType,
+      Schema.FieldType inferenceType,
+      Coder<KeyT> keyCoder) {
+    Schema schema =
+        Schema.of(
+            Schema.Field.of("example", exampleType), Schema.Field.of("inference", inferenceType));
+    return new RunInference<>(modelLoader, schema, ImmutableMap.of(), keyCoder, "");
   }
 
   /**
@@ -57,8 +91,23 @@
    * @param schema A schema for output rows.
    * @return A {@link RunInference} for the given model loader.
    */
-  public static RunInference of(String modelLoader, Schema schema) {
-    return new RunInference(modelLoader, schema, ImmutableMap.of(), "");
+  public static RunInference<Row> of(String modelLoader, Schema schema) {
+    return new RunInference<>(modelLoader, schema, ImmutableMap.of(), null, "");
+  }
+
+  /**
+   * Similar to {@link RunInference#of(String, Schema)} but the input is a {@link PCollection} of
+   * {@link KV}s.
+   *
+   * @param modelLoader A Python callable for a model loader class object.
+   * @param schema A schema for output rows.
+   * @param keyCoder A {@link Coder} for the input and output key type.
+   * @param <KeyT> The input and output key type, inferred from the provided coder.
+   * @return A {@link RunInference} for the given model loader.
+   */
+  public static <KeyT> RunInference<KV<KeyT, Row>> ofKVs(
+      String modelLoader, Schema schema, Coder<KeyT> keyCoder) {
+    return new RunInference<>(modelLoader, schema, ImmutableMap.of(), keyCoder, "");
   }
 
   /**
@@ -66,10 +115,10 @@
    *
    * @return A {@link RunInference} with keyword arguments.
    */
-  public RunInference withKwarg(String key, Object arg) {
+  public RunInference<OutputT> withKwarg(String key, Object arg) {
     ImmutableMap.Builder<String, Object> builder =
         ImmutableMap.<String, Object>builder().putAll(kwargs).put(key, arg);
-    return new RunInference(modelLoader, schema, builder.build(), expansionService);
+    return new RunInference<>(modelLoader, schema, builder.build(), keyCoder, expansionService);
   }
 
   /**
@@ -78,25 +127,38 @@
    * @param expansionService A URL for a Python expansion service.
    * @return A {@link RunInference} for the given expansion service endpoint.
    */
-  public RunInference withExpansionService(String expansionService) {
-    return new RunInference(modelLoader, schema, kwargs, expansionService);
+  public RunInference<OutputT> withExpansionService(String expansionService) {
+    return new RunInference<>(modelLoader, schema, kwargs, keyCoder, expansionService);
   }
 
   private RunInference(
-      String modelLoader, Schema schema, Map<String, Object> kwargs, String expansionService) {
+      String modelLoader,
+      Schema schema,
+      Map<String, Object> kwargs,
+      @Nullable Coder<?> keyCoder,
+      String expansionService) {
     this.modelLoader = modelLoader;
     this.schema = schema;
     this.kwargs = kwargs;
+    this.keyCoder = keyCoder;
     this.expansionService = expansionService;
   }
 
   @Override
-  public PCollection<Row> expand(PCollection<?> input) {
-    return input.apply(
-        PythonExternalTransform.<PCollection<?>, PCollection<Row>>from(
-                "apache_beam.ml.inference.base.RunInference.from_callable", expansionService)
-            .withKwarg("model_handler_provider", PythonCallableSource.of(modelLoader))
-            .withKwargs(kwargs)
-            .withOutputCoder(RowCoder.of(schema)));
+  public PCollection<OutputT> expand(PCollection<?> input) {
+    Coder<OutputT> outputCoder;
+    if (this.keyCoder == null) {
+      outputCoder = (Coder<OutputT>) RowCoder.of(schema);
+    } else {
+      outputCoder = (Coder<OutputT>) KvCoder.of(keyCoder, RowCoder.of(schema));
+    }
+
+    return (PCollection<OutputT>)
+        input.apply(
+            PythonExternalTransform.<PCollection<?>, PCollection<Row>>from(
+                    "apache_beam.ml.inference.base.RunInference.from_callable", expansionService)
+                .withKwarg("model_handler_provider", PythonCallableSource.of(modelLoader))
+                .withOutputCoder(outputCoder)
+                .withKwargs(kwargs));
   }
 }
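
As a usage sketch of the new keyed entry point (mirroring the integration test added below; the model loader script, expansion service address, and model_uri path are placeholders), RunInference.ofKVs keeps the key alongside each inference row:

    import org.apache.beam.sdk.coders.VarLongCoder;
    import org.apache.beam.sdk.extensions.python.transforms.RunInference;
    import org.apache.beam.sdk.schemas.Schema;
    import org.apache.beam.sdk.values.KV;
    import org.apache.beam.sdk.values.PCollection;
    import org.apache.beam.sdk.values.Row;

    class RunInferenceKVsSketch {
      // "examples" is a keyed input, "modelLoaderScript" is a Python callable source that
      // returns a KeyedModelHandler, and "expansionAddr" points at a Python expansion service.
      static PCollection<KV<Long, Row>> runKeyedInference(
          PCollection<KV<Long, Iterable<Long>>> examples,
          String modelLoaderScript,
          String expansionAddr) {
        Schema schema =
            Schema.of(
                Schema.Field.of("example", Schema.FieldType.array(Schema.FieldType.INT64)),
                Schema.Field.of("inference", Schema.FieldType.INT32));
        return examples.apply(
            RunInference.ofKVs(modelLoaderScript, schema, VarLongCoder.of())
                .withKwarg("model_uri", "/tmp/staged/sklearn_model") // placeholder path
                .withExpansionService(expansionAddr));
      }
    }
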
diff --git a/sdks/java/extensions/python/src/main/resources/org/apache/beam/sdk/extensions/python/bootstrap_beam_venv.py b/sdks/java/extensions/python/src/main/resources/org/apache/beam/sdk/extensions/python/bootstrap_beam_venv.py
index cb5870a..54aeb6a 100644
--- a/sdks/java/extensions/python/src/main/resources/org/apache/beam/sdk/extensions/python/bootstrap_beam_venv.py
+++ b/sdks/java/extensions/python/src/main/resources/org/apache/beam/sdk/extensions/python/bootstrap_beam_venv.py
@@ -24,13 +24,13 @@
 """
 
 import argparse
-import distutils.version
 import hashlib
 import json
 import os
 import shutil
 import subprocess
 import sys
+from pkg_resources import parse_version
 
 
 def main():
@@ -66,9 +66,9 @@
 
         def maybe_strict_version(s):
             try:
-                return distutils.version.StrictVersion(s)
+                return parse_version(s)
             except:
-                return distutils.version.StrictVersion('0.0')
+                return parse_version('0.0')
 
         beam_version = max(info['releases'], key=maybe_strict_version)
         beam_package = 'apache_beam[gcp,aws,asure,dataframe]==' + beam_version
@@ -92,6 +92,17 @@
     if not os.path.exists(venv_python):
         try:
             subprocess.run([executable, '-m', 'venv', venv_dir], check=True)
+
+            # Upgrading pip and setuptools for the virtual environment.
+            subprocess.run([
+                venv_python, '-m', 'pip', 'install', '--upgrade', 'pip'
+            ],
+                           check=True)
+            subprocess.run([
+                venv_python, '-m', 'pip', 'install', '--upgrade', 'setuptools'
+            ],
+                           check=True)
+
             # See https://github.com/apache/beam/issues/21506
             subprocess.run([
                 venv_python, '-m', 'pip', 'install', beam_package,
diff --git a/sdks/java/extensions/python/src/test/java/org/apache/beam/sdk/extensions/python/transforms/RunInferenceTransformTest.java b/sdks/java/extensions/python/src/test/java/org/apache/beam/sdk/extensions/python/transforms/RunInferenceTransformTest.java
index 2e875de..5f7b80f 100644
--- a/sdks/java/extensions/python/src/test/java/org/apache/beam/sdk/extensions/python/transforms/RunInferenceTransformTest.java
+++ b/sdks/java/extensions/python/src/test/java/org/apache/beam/sdk/extensions/python/transforms/RunInferenceTransformTest.java
@@ -18,15 +18,21 @@
 package org.apache.beam.sdk.extensions.python.transforms;
 
 import java.util.Arrays;
+import java.util.List;
 import java.util.Optional;
 import org.apache.beam.runners.core.construction.BaseExternalTest;
 import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.KvCoder;
 import org.apache.beam.sdk.coders.VarLongCoder;
 import org.apache.beam.sdk.schemas.Schema;
 import org.apache.beam.sdk.testing.PAssert;
 import org.apache.beam.sdk.testing.UsesPythonExpansionService;
 import org.apache.beam.sdk.testing.ValidatesRunner;
 import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.Values;
+import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.Row;
 import org.junit.Test;
@@ -65,4 +71,52 @@
                     .withExpansionService(expansionAddr));
     PAssert.that(col).containsInAnyOrder(row0, row1);
   }
+
+  private String getModelLoaderScriptWithKVs() {
+    String s = "from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy\n";
+    s = s + "from apache_beam.ml.inference.base import KeyedModelHandler\n";
+    s = s + "def get_model_handler(model_uri):\n";
+    s = s + "  return KeyedModelHandler(SklearnModelHandlerNumpy(model_uri))\n";
+
+    return s;
+  }
+
+  static class KVFn extends SimpleFunction<Iterable<Long>, KV<Long, Iterable<Long>>> {
+    @Override
+    public KV<Long, Iterable<Long>> apply(Iterable<Long> input) {
+      Long key = (Long) ((List) input).get(0);
+      return KV.of(key, input);
+    }
+  }
+
+  @Test
+  @Category({ValidatesRunner.class, UsesPythonExpansionService.class})
+  public void testRunInferenceWithKVs() {
+    String stagingLocation =
+        Optional.ofNullable(System.getProperty("semiPersistDir")).orElse("/tmp");
+    Schema schema =
+        Schema.of(
+            Schema.Field.of("example", Schema.FieldType.array(Schema.FieldType.INT64)),
+            Schema.Field.of("inference", Schema.FieldType.INT32));
+    Row row0 = Row.withSchema(schema).addArray(0L, 0L).addValue(0).build();
+    Row row1 = Row.withSchema(schema).addArray(1L, 1L).addValue(1).build();
+    PCollection<Row> col =
+        testPipeline
+            .apply(Create.<Iterable<Long>>of(Arrays.asList(0L, 0L), Arrays.asList(1L, 1L)))
+            .apply(MapElements.via(new KVFn()))
+            .setCoder(KvCoder.of(VarLongCoder.of(), IterableCoder.of(VarLongCoder.of())))
+            .apply(
+                RunInference.ofKVs(getModelLoaderScriptWithKVs(), schema, VarLongCoder.of())
+                    .withKwarg(
+                        // The test expansion service creates the test model and saves it to the
+                        // returning external environment as a dependency.
+                        // (sdks/python/apache_beam/runners/portability/expansion_service_test.py)
+                        // The dependencies for Python SDK harness are supposed to be staged to
+                        // $SEMI_PERSIST_DIR/staged directory.
+                        "model_uri", String.format("%s/staged/sklearn_model", stagingLocation))
+                    .withExpansionService(expansionAddr))
+            .apply(Values.<Row>create());
+
+    PAssert.that(col).containsInAnyOrder(row0, row1);
+  }
 }
diff --git a/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregator.java b/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregator.java
index d78191b..09422e0 100644
--- a/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregator.java
+++ b/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregator.java
@@ -35,6 +35,7 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.options.ExperimentalOptions;
 import org.apache.beam.sdk.options.PipelineOptions;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
@@ -236,29 +237,27 @@
   private Elements.Builder convertBufferForTransmission() {
     Elements.Builder bufferedElements = Elements.newBuilder();
     for (Map.Entry<String, Receiver<?>> entry : outputDataReceivers.entrySet()) {
-      if (entry.getValue().getOutput().size() == 0) {
+      if (entry.getValue().bufferedSize() == 0) {
         continue;
       }
-      ByteString bytes = entry.getValue().getOutput().toByteString();
+      ByteString bytes = entry.getValue().toByteStringAndResetBuffer();
       bufferedElements
           .addDataBuilder()
           .setInstructionId(processBundleRequestIdSupplier.get())
           .setTransformId(entry.getKey())
           .setData(bytes);
-      entry.getValue().resetOutput();
     }
     for (Map.Entry<TimerEndpoint, Receiver<?>> entry : outputTimersReceivers.entrySet()) {
-      if (entry.getValue().getOutput().size() == 0) {
+      if (entry.getValue().bufferedSize() == 0) {
         continue;
       }
-      ByteString bytes = entry.getValue().getOutput().toByteString();
+      ByteString bytes = entry.getValue().toByteStringAndResetBuffer();
       bufferedElements
           .addTimersBuilder()
           .setInstructionId(processBundleRequestIdSupplier.get())
           .setTransformId(entry.getKey().pTransformId)
           .setTimerFamilyId(entry.getKey().timerFamilyId)
           .setTimers(bytes);
-      entry.getValue().resetOutput();
     }
     bytesWrittenSinceFlush = 0L;
     return bufferedElements;
@@ -323,13 +322,13 @@
 
   @VisibleForTesting
   class Receiver<T> implements FnDataReceiver<T> {
-    private final ByteString.Output output;
+    private final ByteStringOutputStream output;
     private final Coder<T> coder;
     private long perBundleByteCount;
     private long perBundleElementCount;
 
     public Receiver(Coder<T> coder) {
-      this.output = ByteString.newOutput();
+      this.output = new ByteStringOutputStream();
       this.coder = coder;
       this.perBundleByteCount = 0L;
       this.perBundleElementCount = 0L;
@@ -351,10 +350,6 @@
       }
     }
 
-    public ByteString.Output getOutput() {
-      return output;
-    }
-
     public long getByteCount() {
       return perBundleByteCount;
     }
@@ -363,8 +358,12 @@
       return perBundleElementCount;
     }
 
-    public void resetOutput() {
-      this.output.reset();
+    public int bufferedSize() {
+      return output.size();
+    }
+
+    public ByteString toByteStringAndResetBuffer() {
+      return this.output.toByteStringAndReset();
     }
 
     public void resetStats() {
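
The change above (and the related ones below) replaces the vendored ByteString.Output buffer with the SDK's ByteStringOutputStream, whose toByteStringAndReset() snapshots and clears the buffer in one call instead of toByteString() followed by reset(). A minimal sketch of the new pattern (names are illustrative):

    import java.io.IOException;
    import org.apache.beam.sdk.coders.StringUtf8Coder;
    import org.apache.beam.sdk.util.ByteStringOutputStream;
    import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;

    class ByteStringBufferSketch {
      // Encode values into the SDK-owned stream, then flush the buffered bytes
      // while leaving the stream ready for reuse.
      static ByteString encodeAndFlush(Iterable<String> values) throws IOException {
        ByteStringOutputStream output = new ByteStringOutputStream();
        for (String value : values) {
          StringUtf8Coder.of().encode(value, output);
        }
        if (output.size() == 0) {
          return ByteString.EMPTY;
        }
        return output.toByteStringAndReset();
      }
    }
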
diff --git a/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/stream/DataStreams.java b/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/stream/DataStreams.java
index 9a0b15f..d55c091 100644
--- a/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/stream/DataStreams.java
+++ b/sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/stream/DataStreams.java
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.NoSuchElementException;
 import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 
 /**
@@ -80,7 +81,7 @@
    */
   public static final class ElementDelimitedOutputStream extends OutputStream {
     private final OutputChunkConsumer<ByteString> consumer;
-    private final ByteString.Output output;
+    private final ByteStringOutputStream output;
     private final int maximumChunkSize;
     int previousPosition;
 
@@ -88,7 +89,7 @@
         OutputChunkConsumer<ByteString> consumer, int maximumChunkSize) {
       this.consumer = consumer;
       this.maximumChunkSize = maximumChunkSize;
-      this.output = ByteString.newOutput(maximumChunkSize);
+      this.output = new ByteStringOutputStream(maximumChunkSize);
     }
 
     public void delimitElement() throws IOException {
@@ -139,8 +140,7 @@
 
     /** Can only be called if at least one byte has been written. */
     private void internalFlush() throws IOException {
-      consumer.read(output.toByteString());
-      output.reset();
+      consumer.read(output.toByteStringAndReset());
       // Set the previous position to an invalid position representing that a previous buffer
       // was written to.
       previousPosition = -1;
diff --git a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserver2Test.java b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserver2Test.java
index 82d35e5..c1e8e42 100644
--- a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserver2Test.java
+++ b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserver2Test.java
@@ -35,8 +35,8 @@
 import org.apache.beam.sdk.fn.test.TestExecutors;
 import org.apache.beam.sdk.fn.test.TestExecutors.TestExecutorService;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -203,7 +203,7 @@
   }
 
   private BeamFnApi.Elements dataWith(String... values) throws Exception {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     for (String value : values) {
       CODER.encode(valueInGlobalWindow(value), output);
     }
@@ -222,7 +222,7 @@
   }
 
   private BeamFnApi.Elements timerWith(String... values) throws Exception {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     for (String value : values) {
       CODER.encode(valueInGlobalWindow(value), output);
     }
diff --git a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserverTest.java b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserverTest.java
index 4f4cb08..c0b1a6c 100644
--- a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserverTest.java
+++ b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataInboundObserverTest.java
@@ -31,6 +31,7 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.junit.Rule;
@@ -98,7 +99,7 @@
   }
 
   private ByteString dataWith(String... values) throws Exception {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     for (String value : values) {
       CODER.encode(valueInGlobalWindow(value), output);
     }
diff --git a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregatorTest.java b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregatorTest.java
index 9937fcaf..8b2adc6 100644
--- a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregatorTest.java
+++ b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/data/BeamFnDataOutboundAggregatorTest.java
@@ -40,7 +40,7 @@
 import org.apache.beam.sdk.options.ExperimentalOptions;
 import org.apache.beam.sdk.options.PipelineOptions;
 import org.apache.beam.sdk.options.PipelineOptionsFactory;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.hamcrest.Matchers;
 import org.junit.Test;
@@ -145,7 +145,7 @@
     } else {
       receiver = Iterables.getOnlyElement(aggregator.outputDataReceivers.values());
     }
-    assertEquals(0L, receiver.getOutput().size());
+    assertEquals(0L, receiver.bufferedSize());
     assertEquals(102L, receiver.getByteCount());
     assertEquals(2L, receiver.getElementCount());
 
@@ -155,7 +155,7 @@
     aggregator.sendOrCollectBufferedDataAndFinishOutboundStreams();
     // Test that receiver stats have been reset after
     // sendOrCollectBufferedDataAndFinishOutboundStreams.
-    assertEquals(0L, receiver.getOutput().size());
+    assertEquals(0L, receiver.bufferedSize());
     assertEquals(0L, receiver.getByteCount());
     assertEquals(0L, receiver.getElementCount());
 
@@ -344,7 +344,7 @@
 
   BeamFnApi.Elements.Builder messageWithDataBuilder(LogicalEndpoint endpoint, byte[]... datum)
       throws IOException {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     for (byte[] data : datum) {
       CODER.encode(data, output);
     }
diff --git a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/stream/DataStreamsTest.java b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/stream/DataStreamsTest.java
index 1e12874..dfb1057 100644
--- a/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/stream/DataStreamsTest.java
+++ b/sdks/java/fn-execution/src/test/java/org/apache/beam/sdk/fn/stream/DataStreamsTest.java
@@ -38,6 +38,7 @@
 import org.apache.beam.sdk.fn.stream.DataStreams.DataStreamDecoder;
 import org.apache.beam.sdk.fn.stream.DataStreams.ElementDelimitedOutputStream;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterators;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.io.ByteStreams;
@@ -145,7 +146,7 @@
     }
 
     private ByteString encode(String... values) throws IOException {
-      ByteString.Output out = ByteString.newOutput();
+      ByteStringOutputStream out = new ByteStringOutputStream();
       for (String value : values) {
         StringUtf8Coder.of().encode(value, out);
       }
@@ -153,7 +154,7 @@
     }
 
     private <T> void testDecoderWith(Coder<T> coder, T... expected) throws IOException {
-      ByteString.Output output = ByteString.newOutput();
+      ByteStringOutputStream output = new ByteStringOutputStream();
       for (T value : expected) {
         int size = output.size();
         coder.encode(value, output);
diff --git a/sdks/java/harness/jmh/build.gradle b/sdks/java/harness/jmh/build.gradle
index 4d50c71..17860f1 100644
--- a/sdks/java/harness/jmh/build.gradle
+++ b/sdks/java/harness/jmh/build.gradle
@@ -31,8 +31,14 @@
 }
 
 dependencies {
+    implementation project(path: ":sdks:java:core", configuration: "shadow")
     implementation project(path: ":sdks:java:harness", configuration: "shadow")
-    implementation project(":runners:java-fn-execution")
+    implementation project(path: ":runners:java-fn-execution")
+    implementation project(path: ":model:pipeline", configuration: "shadow")
+    implementation library.java.vendored_grpc_1_43_2
+    implementation library.java.vendored_guava_26_0_jre
+    implementation library.java.slf4j_api
+    implementation library.java.joda_time
     runtimeOnly library.java.slf4j_jdk14
     jammAgent library.java.jamm
 }
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/FnApiDoFnRunner.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/FnApiDoFnRunner.java
index 24b6f8f..50402e4 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/FnApiDoFnRunner.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/FnApiDoFnRunner.java
@@ -106,6 +106,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.UserCodeException;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.util.WindowedValue.WindowedValueCoder;
@@ -728,7 +729,7 @@
               public void reset() {}
 
               private ByteString encodeProgress(double value) throws IOException {
-                ByteString.Output output = ByteString.newOutput();
+                ByteStringOutputStream output = new ByteStringOutputStream();
                 IterableCoder.of(DoubleCoder.of()).encode(Arrays.asList(value), output);
                 return output.toByteString();
               }
@@ -1514,7 +1515,7 @@
     // Encode window splits.
     if (windowedSplitResult != null
         && windowedSplitResult.getPrimaryInFullyProcessedWindowsRoot() != null) {
-      ByteString.Output primaryInOtherWindowsBytes = ByteString.newOutput();
+      ByteStringOutputStream primaryInOtherWindowsBytes = new ByteStringOutputStream();
       try {
         fullInputCoder.encode(
             windowedSplitResult.getPrimaryInFullyProcessedWindowsRoot(),
@@ -1531,7 +1532,7 @@
     }
     if (windowedSplitResult != null
         && windowedSplitResult.getResidualInUnprocessedWindowsRoot() != null) {
-      ByteString.Output bytesOut = ByteString.newOutput();
+      ByteStringOutputStream bytesOut = new ByteStringOutputStream();
       try {
         fullInputCoder.encode(windowedSplitResult.getResidualInUnprocessedWindowsRoot(), bytesOut);
       } catch (IOException e) {
@@ -1564,8 +1565,8 @@
               .build());
     }
 
-    ByteString.Output primaryBytes = ByteString.newOutput();
-    ByteString.Output residualBytes = ByteString.newOutput();
+    ByteStringOutputStream primaryBytes = new ByteStringOutputStream();
+    ByteStringOutputStream residualBytes = new ByteStringOutputStream();
     // Encode element split from windowedSplitResult or from downstream element split. It's possible
     // that there is no element split.
     if (windowedSplitResult != null && windowedSplitResult.getResidualSplitRoot() != null) {
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/BagUserState.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/BagUserState.java
index 39181d9..51d83b5 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/BagUserState.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/BagUserState.java
@@ -32,7 +32,7 @@
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.fn.stream.PrefetchableIterable;
 import org.apache.beam.sdk.fn.stream.PrefetchableIterables;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 
 /**
@@ -128,7 +128,7 @@
           request.toBuilder().setClear(StateClearRequest.getDefaultInstance()));
     }
     if (!newValues.isEmpty()) {
-      ByteString.Output out = ByteString.newOutput();
+      ByteStringOutputStream out = new ByteStringOutputStream();
       for (T newValue : newValues) {
         // TODO: Replace with chunking output stream
         valueCoder.encode(newValue, out);
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiStateAccessor.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiStateAccessor.java
index e3c850e..1974b61 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiStateAccessor.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiStateAccessor.java
@@ -56,6 +56,7 @@
 import org.apache.beam.sdk.transforms.Materializations;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.TimestampCombiner;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CombineFnUtil;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.TupleTag;
@@ -118,7 +119,7 @@
               checkState(
                   keyCoder != null, "Accessing state in unkeyed context, no key coder available");
 
-              ByteString.Output encodedKeyOut = ByteString.newOutput();
+              ByteStringOutputStream encodedKeyOut = new ByteStringOutputStream();
               try {
                 ((Coder) keyCoder).encode(key, encodedKeyOut, Coder.Context.NESTED);
               } catch (IOException e) {
@@ -131,7 +132,7 @@
         memoizeFunction(
             currentWindowSupplier,
             window -> {
-              ByteString.Output encodedWindowOut = ByteString.newOutput();
+              ByteStringOutputStream encodedWindowOut = new ByteStringOutputStream();
               try {
                 windowCoder.encode(window, encodedWindowOut);
               } catch (IOException e) {
@@ -167,7 +168,7 @@
     SideInputSpec sideInputSpec = sideInputSpecMap.get(tag);
     checkArgument(sideInputSpec != null, "Attempting to access unknown side input %s.", view);
 
-    ByteString.Output encodedWindowOut = ByteString.newOutput();
+    ByteStringOutputStream encodedWindowOut = new ByteStringOutputStream();
     try {
       sideInputSpec
           .getWindowCoder()
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiTimerBundleTracker.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiTimerBundleTracker.java
index 39f735d..718b440 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiTimerBundleTracker.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/FnApiTimerBundleTracker.java
@@ -31,6 +31,7 @@
 import org.apache.beam.sdk.fn.data.FnDataReceiver;
 import org.apache.beam.sdk.state.TimeDomain;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.UserCodeException;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ComparisonChain;
@@ -42,7 +43,7 @@
 public class FnApiTimerBundleTracker<K> {
   private final Supplier<ByteString> encodedCurrentKeySupplier;
   private final Supplier<ByteString> encodedCurrentWindowSupplier;
-  private Table<ByteString, ByteString, Modifications<K>> timerModifications;
+  private final Table<ByteString, ByteString, Modifications<K>> timerModifications;
 
   @AutoValue
   public abstract static class TimerInfo<K> {
@@ -116,7 +117,7 @@
           Sets.newTreeSet(comparator),
           HashBasedTable.create());
     }
-  };
+  }
 
   public FnApiTimerBundleTracker(
       Coder<K> keyCoder,
@@ -131,9 +132,9 @@
               checkState(
                   keyCoder != null, "Accessing state in unkeyed context, no key coder available");
 
-              ByteString.Output encodedKeyOut = ByteString.newOutput();
+              ByteStringOutputStream encodedKeyOut = new ByteStringOutputStream();
               try {
-                ((Coder) keyCoder).encode(key, encodedKeyOut, Coder.Context.NESTED);
+                keyCoder.encode(key, encodedKeyOut, Coder.Context.NESTED);
               } catch (IOException e) {
                 throw new IllegalStateException(e);
               }
@@ -143,7 +144,7 @@
         memoizeFunction(
             currentWindowSupplier,
             window -> {
-              ByteString.Output encodedWindowOut = ByteString.newOutput();
+              ByteStringOutputStream encodedWindowOut = new ByteStringOutputStream();
               try {
                 windowCoder.encode(window, encodedWindowOut);
               } catch (IOException e) {
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapSideInput.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapSideInput.java
index f36423a..409a483 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapSideInput.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapSideInput.java
@@ -26,6 +26,7 @@
 import org.apache.beam.model.fnexecution.v1.BeamFnApi.StateRequest;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.transforms.Materializations.MultimapView;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 
 /**
@@ -70,7 +71,7 @@
 
   @Override
   public Iterable<V> get(K k) {
-    ByteString.Output output = ByteString.newOutput();
+    ByteStringOutputStream output = new ByteStringOutputStream();
     try {
       keyCoder.encode(k, output);
     } catch (IOException e) {
diff --git a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapUserState.java b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapUserState.java
index 5757dc9..3078f69 100644
--- a/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapUserState.java
+++ b/sdks/java/harness/src/main/java/org/apache/beam/fn/harness/state/MultimapUserState.java
@@ -41,6 +41,7 @@
 import org.apache.beam.sdk.fn.stream.PrefetchableIterable;
 import org.apache.beam.sdk.fn.stream.PrefetchableIterables;
 import org.apache.beam.sdk.fn.stream.PrefetchableIterator;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps;
@@ -357,7 +358,7 @@
 
   private ByteString encodeValues(Iterable<V> values) {
     try {
-      ByteString.Output output = ByteString.newOutput();
+      ByteStringOutputStream output = new ByteStringOutputStream();
       for (V value : values) {
         valueCoder.encode(value, output);
       }
@@ -373,7 +374,7 @@
 
   private StateRequest createUserStateRequest(K key) {
     try {
-      ByteString.Output output = ByteString.newOutput();
+      ByteStringOutputStream output = new ByteStringOutputStream();
       mapKeyCoder.encode(key, output);
       StateRequest.Builder request = userStateRequest.toBuilder();
       request.getStateKeyBuilder().getMultimapUserStateBuilder().setMapKey(output.toByteString());
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/FnApiDoFnRunnerTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/FnApiDoFnRunnerTest.java
index 27da531..de8be42 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/FnApiDoFnRunnerTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/FnApiDoFnRunnerTest.java
@@ -132,6 +132,7 @@
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
 import org.apache.beam.sdk.transforms.windowing.SlidingWindows;
 import org.apache.beam.sdk.transforms.windowing.Window;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.CoderUtils;
 import org.apache.beam.sdk.util.UserCodeException;
 import org.apache.beam.sdk.util.WindowedValue;
@@ -1337,7 +1338,7 @@
     }
 
     private ByteString encode(String... values) throws IOException {
-      ByteString.Output out = ByteString.newOutput();
+      ByteStringOutputStream out = new ByteStringOutputStream();
       for (String value : values) {
         StringUtf8Coder.of().encode(value, out);
       }
@@ -2997,8 +2998,8 @@
     }
 
     private static SplitResult createSplitResult(double fractionOfRemainder) {
-      ByteString.Output primaryBytes = ByteString.newOutput();
-      ByteString.Output residualBytes = ByteString.newOutput();
+      ByteStringOutputStream primaryBytes = new ByteStringOutputStream();
+      ByteStringOutputStream residualBytes = new ByteStringOutputStream();
       try {
         DoubleCoder.of().encode(fractionOfRemainder, primaryBytes);
         DoubleCoder.of().encode(1 - fractionOfRemainder, residualBytes);
@@ -3181,7 +3182,7 @@
       SplitResult expectedElementSplit = createSplitResult(0);
       BundleApplication expectedElementSplitPrimary =
           Iterables.getOnlyElement(expectedElementSplit.getPrimaryRoots());
-      ByteString.Output primaryBytes = ByteString.newOutput();
+      ByteStringOutputStream primaryBytes = new ByteStringOutputStream();
       inputCoder.encode(
           WindowedValue.of(
               KV.of(
@@ -3198,7 +3199,7 @@
               .build();
       DelayedBundleApplication expectedElementSplitResidual =
           Iterables.getOnlyElement(expectedElementSplit.getResidualRoots());
-      ByteString.Output residualBytes = ByteString.newOutput();
+      ByteStringOutputStream residualBytes = new ByteStringOutputStream();
       inputCoder.encode(
           WindowedValue.of(
               KV.of(
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/control/ProcessBundleHandlerTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/control/ProcessBundleHandlerTest.java
index 450628a..e2c0c3f 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/control/ProcessBundleHandlerTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/control/ProcessBundleHandlerTest.java
@@ -137,6 +137,7 @@
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
 import org.apache.beam.sdk.transforms.windowing.PaneInfo;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.util.DoFnWithExecutionInformation;
 import org.apache.beam.sdk.util.SerializableUtils;
 import org.apache.beam.sdk.values.KV;
@@ -1095,9 +1096,9 @@
     ProcessBundleHandler handler =
         setupProcessBundleHandlerForSimpleRecordingDoFn(dataOutput, timerOutput, false);
 
-    ByteString.Output encodedData = ByteString.newOutput();
+    ByteStringOutputStream encodedData = new ByteStringOutputStream();
     KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()).encode(KV.of("", "data"), encodedData);
-    ByteString.Output encodedTimer = ByteString.newOutput();
+    ByteStringOutputStream encodedTimer = new ByteStringOutputStream();
     Timer.Coder.of(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE)
         .encode(
             Timer.of(
@@ -1160,7 +1161,7 @@
     ProcessBundleHandler handler =
         setupProcessBundleHandlerForSimpleRecordingDoFn(dataOutput, timerOutput, false);
 
-    ByteString.Output encodedData = ByteString.newOutput();
+    ByteStringOutputStream encodedData = new ByteStringOutputStream();
     KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()).encode(KV.of("", "data"), encodedData);
 
     assertThrows(
@@ -1216,7 +1217,7 @@
     ProcessBundleHandler handler =
         setupProcessBundleHandlerForSimpleRecordingDoFn(dataOutput, timerOutput, false);
 
-    ByteString.Output encodedTimer = ByteString.newOutput();
+    ByteStringOutputStream encodedTimer = new ByteStringOutputStream();
     Timer.Coder.of(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE)
         .encode(
             Timer.of(
@@ -1310,7 +1311,7 @@
     ProcessBundleHandler handler =
         setupProcessBundleHandlerForSimpleRecordingDoFn(dataOutput, timerOutput, true);
 
-    ByteString.Output encodedTimer = ByteString.newOutput();
+    ByteStringOutputStream encodedTimer = new ByteStringOutputStream();
     Timer.Coder.of(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE)
         .encode(
             Timer.of(
@@ -1446,7 +1447,7 @@
 
     Mockito.doAnswer(
             (invocation) -> {
-              ByteString.Output encodedData = ByteString.newOutput();
+              ByteStringOutputStream encodedData = new ByteStringOutputStream();
               StringUtf8Coder.of().encode("A", encodedData);
               String instructionId = invocation.getArgument(0, String.class);
               CloseableFnDataReceiver<BeamFnApi.Elements> data =
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/BagUserStateTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/BagUserStateTest.java
index 7b1cef8..7b2bce0 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/BagUserStateTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/BagUserStateTest.java
@@ -29,6 +29,7 @@
 import org.apache.beam.fn.harness.Caches;
 import org.apache.beam.model.fnexecution.v1.BeamFnApi.StateKey;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
@@ -252,7 +253,7 @@
   }
 
   private ByteString encode(String... values) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     for (String value : values) {
       StringUtf8Coder.of().encode(value, out);
     }
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/FakeBeamFnStateClient.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/FakeBeamFnStateClient.java
index 954b37d..17ef134 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/FakeBeamFnStateClient.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/FakeBeamFnStateClient.java
@@ -37,6 +37,7 @@
 import org.apache.beam.model.fnexecution.v1.BeamFnApi.StateRequest.RequestCase;
 import org.apache.beam.model.fnexecution.v1.BeamFnApi.StateResponse;
 import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps;
@@ -67,7 +68,7 @@
                 initialData,
                 (KV<Coder<?>, List<?>> coderAndValues) -> {
                   List<ByteString> chunks = new ArrayList<>();
-                  ByteString.Output output = ByteString.newOutput();
+                  ByteStringOutputStream output = new ByteStringOutputStream();
                   for (Object value : coderAndValues.getValue()) {
                     try {
                       ((Coder<Object>) coderAndValues.getKey()).encode(value, output);
@@ -75,7 +76,7 @@
                       throw new RuntimeException(e);
                     }
                     if (output.size() >= chunkSize) {
-                      ByteString chunk = output.toByteString();
+                      ByteString chunk = output.toByteStringAndReset();
                       int i = 0;
                       for (; i + chunkSize <= chunk.size(); i += chunkSize) {
                         // We specifically use a copy of the bytes instead of a proper substring
@@ -88,7 +89,6 @@
                         chunks.add(
                             ByteString.copyFrom(chunk.substring(i, chunk.size()).toByteArray()));
                       }
-                      output.reset();
                     }
                   }
                   // Add the last chunk
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapSideInputTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapSideInputTest.java
index 6fea5e1..f0412fd 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapSideInputTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapSideInputTest.java
@@ -28,6 +28,7 @@
 import org.apache.beam.sdk.coders.ByteArrayCoder;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
@@ -142,7 +143,7 @@
   }
 
   private StateKey key(byte[] key) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     ByteArrayCoder.of().encode(key, out);
     return StateKey.newBuilder()
         .setMultimapSideInput(
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapUserStateTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapUserStateTest.java
index fccd8e7..dbd2add 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapUserStateTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/MultimapUserStateTest.java
@@ -41,6 +41,7 @@
 import org.apache.beam.sdk.coders.NullableCoder;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
 import org.apache.beam.sdk.fn.stream.PrefetchableIterable;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
@@ -1060,7 +1061,7 @@
   }
 
   private ByteString encode(String... values) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     for (String value : values) {
       StringUtf8Coder.of().encode(value, out);
     }
@@ -1068,7 +1069,7 @@
   }
 
   private ByteString encode(byte[]... values) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     for (byte[] value : values) {
       ByteArrayCoder.of().encode(value, out);
     }
diff --git a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/StateBackedIterableTest.java b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/StateBackedIterableTest.java
index 8b05def..b6e8fe7 100644
--- a/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/StateBackedIterableTest.java
+++ b/sdks/java/harness/src/test/java/org/apache/beam/fn/harness/state/StateBackedIterableTest.java
@@ -35,6 +35,7 @@
 import org.apache.beam.fn.harness.Caches;
 import org.apache.beam.model.fnexecution.v1.BeamFnApi.StateKey;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.FluentIterable;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
@@ -297,7 +298,7 @@
   }
 
   private static ByteString encode(String... values) throws IOException {
-    ByteString.Output out = ByteString.newOutput();
+    ByteStringOutputStream out = new ByteStringOutputStream();
     for (String value : values) {
       StringUtf8Coder.of().encode(value, out);
     }
diff --git a/sdks/java/io/cdap/build.gradle b/sdks/java/io/cdap/build.gradle
index a2781cf..1bcc0ec 100644
--- a/sdks/java/io/cdap/build.gradle
+++ b/sdks/java/io/cdap/build.gradle
@@ -67,7 +67,10 @@
     testImplementation library.java.vendored_guava_26_0_jre
     testImplementation library.java.junit
     testImplementation library.java.mockito_core
+    testImplementation library.java.testcontainers_postgresql
     testImplementation project(":sdks:java:io:hadoop-common")
     testImplementation project(":sdks:java:io:hadoop-format")
+    testImplementation project(path: ":sdks:java:testing:test-utils", configuration: "testRuntimeMigration")
     testImplementation project(path: ":runners:direct-java", configuration: "shadow")
+    testImplementation project(path: ":sdks:java:io:common", configuration: "testRuntimeMigration")
 }
diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java
index f749acf..f265550 100644
--- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java
+++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java
@@ -17,6 +17,7 @@
  */
 package org.apache.beam.sdk.io.cdap;
 
+import static org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull;
 import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
 
 import com.google.auto.value.AutoValue;
@@ -39,7 +40,6 @@
  * href="https://github.com/data-integrations">CDAP</a> plugins.
  */
 @Experimental(Kind.SOURCE_SINK)
-@SuppressWarnings("nullness")
 public class CdapIO {
 
   public static <K, V> Read<K, V> read() {
@@ -54,7 +54,6 @@
   @AutoValue
   @AutoValue.CopyAnnotations
   public abstract static class Read<K, V> extends PTransform<PBegin, PCollection<KV<K, V>>> {
-
     abstract @Nullable PluginConfig getPluginConfig();
 
     abstract @Nullable Plugin getCdapPlugin();
@@ -108,30 +107,24 @@
 
     @Override
     public PCollection<KV<K, V>> expand(PBegin input) {
-      validateTransform();
+      Plugin plugin = checkArgumentNotNull(getCdapPlugin(), "withCdapPluginClass() is required");
+      PluginConfig pluginConfig =
+          checkArgumentNotNull(getPluginConfig(), "withPluginConfig() is required");
+      Class<K> keyClass = checkArgumentNotNull(getKeyClass(), "withKeyClass() is required");
+      Class<V> valueClass = checkArgumentNotNull(getValueClass(), "withValueClass() is required");
 
-      getCdapPlugin()
-          .withConfig(getPluginConfig())
-          .withHadoopConfiguration(getKeyClass(), getValueClass())
-          .prepareRun();
+      plugin.withConfig(pluginConfig).withHadoopConfiguration(keyClass, valueClass).prepareRun();
 
-      if (getCdapPlugin().isUnbounded()) {
+      if (plugin.isUnbounded()) {
         // TODO: implement SparkReceiverIO.<~>read()
         throw new NotImplementedException("Support for unbounded plugins is not implemented!");
       } else {
-        Configuration hConf = getCdapPlugin().getHadoopConfiguration();
+        Configuration hConf = plugin.getHadoopConfiguration();
         HadoopFormatIO.Read<K, V> readFromHadoop =
             HadoopFormatIO.<K, V>read().withConfiguration(hConf);
         return input.apply(readFromHadoop);
       }
     }
-
-    public void validateTransform() {
-      checkArgument(getCdapPlugin() != null, "withCdapPluginClass() is required");
-      checkArgument(getPluginConfig() != null, "withPluginConfig() is required");
-      checkArgument(getKeyClass() != null, "withKeyClass() is required");
-      checkArgument(getValueClass() != null, "withValueClass() is required");
-    }
   }
 
   /** A {@link PTransform} to read from CDAP source. */
@@ -201,32 +194,28 @@
 
     @Override
     public PDone expand(PCollection<KV<K, V>> input) {
-      validateTransform();
-      getCdapPlugin()
-          .withConfig(getPluginConfig())
-          .withHadoopConfiguration(getKeyClass(), getValueClass())
-          .prepareRun();
+      Plugin plugin = checkArgumentNotNull(getCdapPlugin(), "withKeyClass() is required");
+      PluginConfig pluginConfig =
+          checkArgumentNotNull(getPluginConfig(), "withKeyClass() is required");
+      Class<K> keyClass = checkArgumentNotNull(getKeyClass(), "withKeyClass() is required");
+      Class<V> valueClass = checkArgumentNotNull(getValueClass(), "withValueClass() is required");
+      String locksDirPath =
+          checkArgumentNotNull(getLocksDirPath(), "withLocksDirPath() is required");
 
-      if (getCdapPlugin().isUnbounded()) {
+      plugin.withConfig(pluginConfig).withHadoopConfiguration(keyClass, valueClass).prepareRun();
+
+      if (plugin.isUnbounded()) {
         // TODO: implement SparkReceiverIO.<~>write()
         throw new NotImplementedException("Support for unbounded plugins is not implemented!");
       } else {
-        Configuration hConf = getCdapPlugin().getHadoopConfiguration();
+        Configuration hConf = plugin.getHadoopConfiguration();
         HadoopFormatIO.Write<K, V> writeHadoop =
             HadoopFormatIO.<K, V>write()
                 .withConfiguration(hConf)
                 .withPartitioning()
-                .withExternalSynchronization(new HDFSSynchronization(getLocksDirPath()));
+                .withExternalSynchronization(new HDFSSynchronization(locksDirPath));
         return input.apply(writeHadoop);
       }
     }
-
-    public void validateTransform() {
-      checkArgument(getCdapPlugin() != null, "withCdapPluginClass() is required");
-      checkArgument(getPluginConfig() != null, "withPluginConfig() is required");
-      checkArgument(getKeyClass() != null, "withKeyClass() is required");
-      checkArgument(getValueClass() != null, "withValueClass() is required");
-      checkArgument(getLocksDirPath() != null, "withLocksDirPath() is required");
-    }
   }
 }
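
The rewrite above swaps the separate validateTransform() null checks for org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull, which both validates and returns the value, so the nullness checker can treat the local as non-null for the rest of expand() and the class-level @SuppressWarnings("nullness") can be dropped. A small sketch of the idiom (hypothetical method, not from the PR):

    import static org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull;

    import org.checkerframework.checker.nullness.qual.Nullable;

    class NullnessNarrowingSketch {
      // checkArgumentNotNull enforces the argument contract and narrows the type:
      // "name" is non-null below, unlike continued use of the @Nullable parameter
      // after a bare checkArgument(maybeName != null, ...).
      static String describe(@Nullable String maybeName) {
        String name = checkArgumentNotNull(maybeName, "withName() is required");
        return "configured: " + name.trim();
      }
    }
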
diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java
index 88ed2f3..31deb9d 100644
--- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java
+++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java
@@ -132,7 +132,6 @@
   /** Sets a plugin Hadoop configuration. */
   public Plugin withHadoopConfiguration(Configuration hadoopConfiguration) {
     this.hadoopConfiguration = hadoopConfiguration;
-
     return this;
   }
 
diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/PluginConfigInstantiationUtils.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/PluginConfigInstantiationUtils.java
index cfa6f0e..ced1120 100644
--- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/PluginConfigInstantiationUtils.java
+++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/PluginConfigInstantiationUtils.java
@@ -30,11 +30,11 @@
 import java.util.Map;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
+import org.checkerframework.checker.initialization.qual.Initialized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /** Class for getting any filled {@link PluginConfig} configuration object. */
-@SuppressWarnings("nullness")
 public class PluginConfigInstantiationUtils {
 
   private static final Logger LOG = LoggerFactory.getLogger(PluginConfigInstantiationUtils.class);
@@ -66,7 +66,7 @@
     }
     InstantiatorFactory instantiatorFactory = new InstantiatorFactory(false);
 
-    T config = instantiatorFactory.get(TypeToken.of(configClass)).create();
+    @Initialized T config = instantiatorFactory.get(TypeToken.of(configClass)).create();
 
     if (config != null) {
       for (Field field : allFields) {
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java
new file mode 100644
index 0000000..1b30ba7
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import static org.apache.beam.sdk.io.common.IOITHelper.executeWithRetry;
+import static org.apache.beam.sdk.io.common.IOITHelper.readIOTestPipelineOptions;
+import static org.apache.beam.sdk.io.common.TestRow.getExpectedHashForRowCount;
+
+import com.google.cloud.Timestamp;
+import io.cdap.plugin.common.Constants;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.beam.sdk.PipelineResult;
+import org.apache.beam.sdk.io.GenerateSequence;
+import org.apache.beam.sdk.io.common.DatabaseTestHelper;
+import org.apache.beam.sdk.io.common.HashingFn;
+import org.apache.beam.sdk.io.common.PostgresIOTestPipelineOptions;
+import org.apache.beam.sdk.io.common.TestRow;
+import org.apache.beam.sdk.options.Default;
+import org.apache.beam.sdk.options.Description;
+import org.apache.beam.sdk.testing.PAssert;
+import org.apache.beam.sdk.testing.TestPipeline;
+import org.apache.beam.sdk.testutils.NamedTestResult;
+import org.apache.beam.sdk.testutils.metrics.IOITMetrics;
+import org.apache.beam.sdk.testutils.metrics.MetricsReader;
+import org.apache.beam.sdk.testutils.metrics.TimeMonitor;
+import org.apache.beam.sdk.testutils.publishing.InfluxDBSettings;
+import org.apache.beam.sdk.transforms.Combine;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.Reshuffle;
+import org.apache.beam.sdk.transforms.Values;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapred.lib.db.DBInputFormat;
+import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.postgresql.ds.PGSimpleDataSource;
+import org.testcontainers.containers.PostgreSQLContainer;
+import org.testcontainers.utility.DockerImageName;
+
+/**
+ * IO Integration test for {@link org.apache.beam.sdk.io.cdap.CdapIO}.
+ *
+ * <p>See https://beam.apache.org/documentation/io/testing/#i-o-transform-integration-tests for
+ * more details.
+ */
+@RunWith(JUnit4.class)
+@SuppressWarnings("rawtypes")
+public class CdapIOIT {
+
+  private static final String NAMESPACE = CdapIOIT.class.getName();
+  private static final String[] TEST_FIELD_NAMES = new String[] {"id", "name"};
+  private static final String TEST_ORDER_BY = "id ASC";
+
+  private static PGSimpleDataSource dataSource;
+  private static Integer numberOfRows;
+  private static String tableName;
+  private static InfluxDBSettings settings;
+  private static CdapIOITOptions options;
+  private static PostgreSQLContainer postgreSQLContainer;
+
+  @Rule public TestPipeline writePipeline = TestPipeline.create();
+  @Rule public TestPipeline readPipeline = TestPipeline.create();
+  @Rule public TemporaryFolder tmpFolder = new TemporaryFolder();
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    options = readIOTestPipelineOptions(CdapIOITOptions.class);
+    if (options.isWithTestcontainers()) {
+      setPostgresContainer();
+    }
+
+    dataSource = DatabaseTestHelper.getPostgresDataSource(options);
+    numberOfRows = options.getNumberOfRecords();
+    tableName = DatabaseTestHelper.getTestTableName("CdapIOIT");
+    if (!options.isWithTestcontainers()) {
+      settings =
+          InfluxDBSettings.builder()
+              .withHost(options.getInfluxHost())
+              .withDatabase(options.getInfluxDatabase())
+              .withMeasurement(options.getInfluxMeasurement())
+              .get();
+    }
+    executeWithRetry(CdapIOIT::createTable);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    executeWithRetry(CdapIOIT::deleteTable);
+    if (postgreSQLContainer != null) {
+      postgreSQLContainer.stop();
+    }
+  }
+
+  @Test
+  public void testCdapIOReadsAndWritesCorrectlyInBatch() {
+
+    writePipeline
+        .apply("Generate sequence", GenerateSequence.from(0).to(numberOfRows))
+        .apply("Produce db rows", ParDo.of(new TestRow.DeterministicallyConstructTestRowFn()))
+        .apply("Prevent fusion before writing", Reshuffle.viaRandomKey())
+        .apply("Collect write time", ParDo.of(new TimeMonitor<>(NAMESPACE, "write_time")))
+        .apply("Construct rows for DBOutputFormat", ParDo.of(new ConstructDBOutputFormatRowFn()))
+        .apply("Write using CdapIO", writeToDB(getWriteTestParamsFromOptions(options)));
+
+    PipelineResult writeResult = writePipeline.run();
+    writeResult.waitUntilFinish();
+
+    PCollection<String> consolidatedHashcode =
+        readPipeline
+            .apply("Read using CdapIO", readFromDB(getReadTestParamsFromOptions(options)))
+            .apply("Collect read time", ParDo.of(new TimeMonitor<>(NAMESPACE, "read_time")))
+            .apply("Get values only", Values.create())
+            .apply("Values as string", ParDo.of(new TestRow.SelectNameFn()))
+            .apply("Calculate hashcode", Combine.globally(new HashingFn()));
+
+    PAssert.thatSingleton(consolidatedHashcode).isEqualTo(getExpectedHashForRowCount(numberOfRows));
+
+    PipelineResult readResult = readPipeline.run();
+    readResult.waitUntilFinish();
+
+    if (!options.isWithTestcontainers()) {
+      collectAndPublishMetrics(writeResult, readResult);
+    }
+  }
+
+  private CdapIO.Write<TestRowDBWritable, NullWritable> writeToDB(Map<String, Object> params) {
+    DBConfig pluginConfig = new ConfigWrapper<>(DBConfig.class).withParams(params).build();
+
+    return CdapIO.<TestRowDBWritable, NullWritable>write()
+        .withCdapPlugin(
+            Plugin.create(DBBatchSink.class, DBOutputFormat.class, DBOutputFormatProvider.class))
+        .withPluginConfig(pluginConfig)
+        .withKeyClass(TestRowDBWritable.class)
+        .withValueClass(NullWritable.class)
+        .withLocksDirPath(tmpFolder.getRoot().getAbsolutePath());
+  }
+
+  private CdapIO.Read<LongWritable, TestRowDBWritable> readFromDB(Map<String, Object> params) {
+    DBConfig pluginConfig = new ConfigWrapper<>(DBConfig.class).withParams(params).build();
+
+    return CdapIO.<LongWritable, TestRowDBWritable>read()
+        .withCdapPlugin(
+            Plugin.create(DBBatchSource.class, DBInputFormat.class, DBInputFormatProvider.class))
+        .withPluginConfig(pluginConfig)
+        .withKeyClass(LongWritable.class)
+        .withValueClass(TestRowDBWritable.class);
+  }
+
+  private Map<String, Object> getTestParamsFromOptions(CdapIOITOptions options) {
+    Map<String, Object> params = new HashMap<>();
+    params.put(DBConfig.DB_URL, DatabaseTestHelper.getPostgresDBUrl(options));
+    params.put(DBConfig.POSTGRES_USERNAME, options.getPostgresUsername());
+    params.put(DBConfig.POSTGRES_PASSWORD, options.getPostgresPassword());
+    params.put(DBConfig.FIELD_NAMES, StringUtils.arrayToString(TEST_FIELD_NAMES));
+    params.put(DBConfig.TABLE_NAME, tableName);
+    params.put(Constants.Reference.REFERENCE_NAME, "referenceName");
+    return params;
+  }
+
+  private Map<String, Object> getReadTestParamsFromOptions(CdapIOITOptions options) {
+    Map<String, Object> params = getTestParamsFromOptions(options);
+    params.put(DBConfig.ORDER_BY, TEST_ORDER_BY);
+    params.put(DBConfig.VALUE_CLASS_NAME, TestRowDBWritable.class.getName());
+    return params;
+  }
+
+  private Map<String, Object> getWriteTestParamsFromOptions(CdapIOITOptions options) {
+    Map<String, Object> params = getTestParamsFromOptions(options);
+    params.put(DBConfig.FIELD_COUNT, String.valueOf(TEST_FIELD_NAMES.length));
+    return params;
+  }
+
+  /** Pipeline options specific for this test. */
+  public interface CdapIOITOptions extends PostgresIOTestPipelineOptions {
+
+    @Description("Whether to use testcontainers")
+    @Default.Boolean(false)
+    Boolean isWithTestcontainers();
+
+    void setWithTestcontainers(Boolean withTestcontainers);
+  }
+
+  private static void setPostgresContainer() {
+    postgreSQLContainer =
+        new PostgreSQLContainer(DockerImageName.parse("postgres").withTag("latest"))
+            .withDatabaseName(options.getPostgresDatabaseName())
+            .withUsername(options.getPostgresUsername())
+            .withPassword(options.getPostgresPassword());
+    postgreSQLContainer.start();
+    options.setPostgresServerName(postgreSQLContainer.getContainerIpAddress());
+    options.setPostgresPort(postgreSQLContainer.getMappedPort(PostgreSQLContainer.POSTGRESQL_PORT));
+    options.setPostgresSsl(false);
+  }
+
+  private static void createTable() throws SQLException {
+    DatabaseTestHelper.createTable(dataSource, tableName);
+  }
+
+  private static void deleteTable() throws SQLException {
+    DatabaseTestHelper.deleteTable(dataSource, tableName);
+  }
+
+  private void collectAndPublishMetrics(PipelineResult writeResult, PipelineResult readResult) {
+    String uuid = UUID.randomUUID().toString();
+    String timestamp = Timestamp.now().toString();
+
+    Set<Function<MetricsReader, NamedTestResult>> readSuppliers = getReadSuppliers(uuid, timestamp);
+    Set<Function<MetricsReader, NamedTestResult>> writeSuppliers =
+        getWriteSuppliers(uuid, timestamp);
+
+    IOITMetrics readMetrics =
+        new IOITMetrics(readSuppliers, readResult, NAMESPACE, uuid, timestamp);
+    IOITMetrics writeMetrics =
+        new IOITMetrics(writeSuppliers, writeResult, NAMESPACE, uuid, timestamp);
+    readMetrics.publishToInflux(settings);
+    writeMetrics.publishToInflux(settings);
+  }
+
+  private Set<Function<MetricsReader, NamedTestResult>> getReadSuppliers(
+      String uuid, String timestamp) {
+    Set<Function<MetricsReader, NamedTestResult>> suppliers = new HashSet<>();
+    suppliers.add(getTimeMetric(uuid, timestamp, "read_time"));
+    return suppliers;
+  }
+
+  private Set<Function<MetricsReader, NamedTestResult>> getWriteSuppliers(
+      String uuid, String timestamp) {
+    Set<Function<MetricsReader, NamedTestResult>> suppliers = new HashSet<>();
+    suppliers.add(getTimeMetric(uuid, timestamp, "write_time"));
+    suppliers.add(
+        reader ->
+            NamedTestResult.create(
+                uuid,
+                timestamp,
+                "data_size",
+                DatabaseTestHelper.getPostgresTableSize(dataSource, tableName)
+                    .orElseThrow(() -> new IllegalStateException("Unable to fetch table size"))));
+    return suppliers;
+  }
+
+  private Function<MetricsReader, NamedTestResult> getTimeMetric(
+      final String uuid, final String timestamp, final String metricName) {
+    return reader -> {
+      long startTime = reader.getStartTimeMetric(metricName);
+      long endTime = reader.getEndTimeMetric(metricName);
+      return NamedTestResult.create(uuid, timestamp, metricName, (endTime - startTime) / 1e3);
+    };
+  }
+
+  /**
+   * Uses the input {@link TestRow} values as seeds to produce new {@link KV}s for {@link CdapIO}.
+   */
+  static class ConstructDBOutputFormatRowFn
+      extends DoFn<TestRow, KV<TestRowDBWritable, NullWritable>> {
+    @ProcessElement
+    public void processElement(ProcessContext c) {
+      c.output(
+          KV.of(new TestRowDBWritable(c.element().id(), c.element().name()), NullWritable.get()));
+    }
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java
index 3cae6ed..e978f5b 100644
--- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java
@@ -27,12 +27,15 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
 import org.apache.beam.sdk.io.cdap.context.BatchSinkContextImpl;
 import org.apache.beam.sdk.io.cdap.context.BatchSourceContextImpl;
 import org.apache.beam.sdk.testing.PAssert;
 import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.transforms.Create;
 import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PBegin;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -119,9 +122,10 @@
   }
 
   @Test
-  public void testReadValidationFailsMissingCdapPluginClass() {
+  public void testReadExpandingFailsMissingCdapPluginClass() {
+    PBegin testPBegin = PBegin.in(TestPipeline.create());
     CdapIO.Read<String, String> read = CdapIO.read();
-    assertThrows(IllegalArgumentException.class, read::validateTransform);
+    assertThrows(IllegalArgumentException.class, () -> read.expand(testPBegin));
   }
 
   @Test
@@ -221,9 +225,12 @@
   }
 
   @Test
-  public void testWriteValidationFailsMissingCdapPluginClass() {
+  public void testWriteExpandingFailsMissingCdapPluginClass() {
+    PBegin testPBegin = PBegin.in(TestPipeline.create());
+    PCollection<KV<String, String>> testPCollection =
+        Create.empty(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())).expand(testPBegin);
     CdapIO.Write<String, String> write = CdapIO.write();
-    assertThrows(IllegalArgumentException.class, write::validateTransform);
+    assertThrows(IllegalArgumentException.class, () -> write.expand(testPCollection));
   }
 
   @Test
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSink.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSink.java
new file mode 100644
index 0000000..979240f
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSink.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.api.data.batch.Output;
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.dataset.lib.KeyValue;
+import io.cdap.cdap.etl.api.Emitter;
+import io.cdap.cdap.etl.api.FailureCollector;
+import io.cdap.cdap.etl.api.PipelineConfigurer;
+import io.cdap.cdap.etl.api.batch.BatchSink;
+import io.cdap.cdap.etl.api.batch.BatchSinkContext;
+
+/** Imitation of a CDAP {@link BatchSink} plugin. Used to test {@link CdapIO#write()}. */
+@Plugin(type = BatchSink.PLUGIN_TYPE)
+@Name(DBBatchSink.NAME)
+@Description("Plugin writes <ID, NAME> in batch")
+public class DBBatchSink extends BatchSink<StructuredRecord, String, String> {
+
+  public static final String ID_FIELD = "id";
+  public static final String NAME_FIELD = "name";
+  public static final String NAME = "DBSink";
+
+  private final DBConfig config;
+
+  public DBBatchSink(DBConfig config) {
+    this.config = config;
+  }
+
+  @Override
+  public void configurePipeline(PipelineConfigurer pipelineConfigurer) {
+    super.configurePipeline(pipelineConfigurer);
+    FailureCollector collector = pipelineConfigurer.getStageConfigurer().getFailureCollector();
+    config.validate(collector);
+  }
+
+  @Override
+  public void prepareRun(BatchSinkContext context) {
+    FailureCollector collector = context.getFailureCollector();
+    config.validate(collector);
+    collector.getOrThrowException();
+    context.addOutput(Output.of(config.referenceName, new DBOutputFormatProvider(config)));
+  }
+
+  @Override
+  public void transform(StructuredRecord input, Emitter<KeyValue<String, String>> emitter)
+      throws Exception {
+    emitter.emit(new KeyValue<>(input.get(ID_FIELD), input.get(NAME_FIELD)));
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSource.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSource.java
new file mode 100644
index 0000000..ab24b8f
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBBatchSource.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.api.data.batch.Input;
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.cdap.api.dataset.lib.KeyValue;
+import io.cdap.cdap.etl.api.Emitter;
+import io.cdap.cdap.etl.api.FailureCollector;
+import io.cdap.cdap.etl.api.PipelineConfigurer;
+import io.cdap.cdap.etl.api.batch.BatchSource;
+import io.cdap.cdap.etl.api.batch.BatchSourceContext;
+import io.cdap.plugin.common.IdUtils;
+import io.cdap.plugin.common.LineageRecorder;
+import java.util.stream.Collectors;
+
+/**
+ * Imitation of a CDAP {@link BatchSource} plugin. Used to test {@link CdapIO#read()}.
+ */
+@Plugin(type = BatchSource.PLUGIN_TYPE)
+@Name(DBBatchSource.NAME)
+@Description("Plugin reads <ID, NAME> in batch")
+public class DBBatchSource extends BatchSource<String, String, StructuredRecord> {
+
+  private final DBConfig config;
+
+  public static final String NAME = "DBSource";
+
+  public DBBatchSource(DBConfig config) {
+    this.config = config;
+  }
+
+  @Override
+  public void configurePipeline(PipelineConfigurer pipelineConfigurer) {
+    validateConfiguration(pipelineConfigurer.getStageConfigurer().getFailureCollector());
+    pipelineConfigurer.getStageConfigurer().setOutputSchema(config.getSchema());
+  }
+
+  /**
+   * Prepares DB objects as it could be implemented in a CDAP plugin.
+   *
+   * @param context the batch source context
+   */
+  @Override
+  public void prepareRun(BatchSourceContext context) {
+    validateConfiguration(context.getFailureCollector());
+    LineageRecorder lineageRecorder = new LineageRecorder(context, config.referenceName);
+    lineageRecorder.createExternalDataset(config.getSchema());
+    lineageRecorder.recordRead(
+        "Reads",
+        "Reading DB objects",
+        config.getSchema().getFields().stream()
+            .map(Schema.Field::getName)
+            .collect(Collectors.toList()));
+    context.setInput(Input.of(NAME, new DBInputFormatProvider(config)));
+  }
+
+  @Override
+  public void transform(KeyValue<String, String> input, Emitter<StructuredRecord> emitter) {
+    StructuredRecord.Builder builder = StructuredRecord.builder(config.getSchema());
+    builder.set("id", input.getKey());
+    builder.set("name", input.getValue());
+    emitter.emit(builder.build());
+  }
+
+  private void validateConfiguration(FailureCollector failureCollector) {
+    IdUtils.validateReferenceName(config.referenceName, failureCollector);
+    config.validate(failureCollector);
+    failureCollector.getOrThrowException();
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBConfig.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBConfig.java
new file mode 100644
index 0000000..d95c941
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBConfig.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import io.cdap.cdap.api.annotation.Macro;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.cdap.etl.api.FailureCollector;
+import io.cdap.plugin.common.ReferencePluginConfig;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * {@link io.cdap.cdap.api.plugin.PluginConfig} for {@link DBBatchSource} and {@link DBBatchSink}
+ * CDAP plugins. Used to test {@link CdapIO#read()} and {@link CdapIO#write()}.
+ */
+public class DBConfig extends ReferencePluginConfig {
+
+  public static final String DB_URL = "dbUrl";
+  public static final String POSTGRES_USERNAME = "pgUsername";
+  public static final String POSTGRES_PASSWORD = "pgPassword";
+  public static final String TABLE_NAME = "tableName";
+  public static final String FIELD_NAMES = "fieldNames";
+  public static final String FIELD_COUNT = "fieldCount";
+  public static final String ORDER_BY = "orderBy";
+  public static final String VALUE_CLASS_NAME = "valueClassName";
+
+  @Name(DB_URL)
+  @Macro
+  public String dbUrl;
+
+  @Name(POSTGRES_USERNAME)
+  @Macro
+  public String pgUsername;
+
+  @Name(POSTGRES_PASSWORD)
+  @Macro
+  public String pgPassword;
+
+  @Name(TABLE_NAME)
+  @Macro
+  public String tableName;
+
+  @Name(FIELD_NAMES)
+  @Macro
+  public String fieldNames;
+
+  @Name(FIELD_COUNT)
+  @Macro
+  public String fieldCount;
+
+  @Name(ORDER_BY)
+  @Macro
+  public String orderBy;
+
+  @Name(VALUE_CLASS_NAME)
+  @Macro
+  public String valueClassName;
+
+  public DBConfig(
+      String referenceName,
+      String dbUrl,
+      String pgUsername,
+      String pgPassword,
+      String tableName,
+      String fieldNames,
+      String fieldCount,
+      String orderBy,
+      String valueClassName) {
+    super(referenceName);
+    this.dbUrl = dbUrl;
+    this.pgUsername = pgUsername;
+    this.pgPassword = pgPassword;
+    this.tableName = tableName;
+    this.fieldNames = fieldNames;
+    this.fieldCount = fieldCount;
+    this.orderBy = orderBy;
+    this.valueClassName = valueClassName;
+  }
+
+  public Schema getSchema() {
+    Set<Schema.Field> schemaFields = new HashSet<>();
+    schemaFields.add(Schema.Field.of("id", Schema.nullableOf(Schema.of(Schema.Type.STRING))));
+    schemaFields.add(Schema.Field.of("name", Schema.nullableOf(Schema.of(Schema.Type.STRING))));
+    return Schema.recordOf("etlSchemaBody", schemaFields);
+  }
+
+  public void validate(FailureCollector failureCollector) {
+    if (dbUrl == null) {
+      failureCollector.addFailure("DB URL must be not null.", null).withConfigProperty(DB_URL);
+    }
+    if (pgUsername == null) {
+      failureCollector
+          .addFailure("Postgres username must be not null.", null)
+          .withConfigProperty(POSTGRES_USERNAME);
+    }
+    if (pgPassword == null) {
+      failureCollector
+          .addFailure("Postgres password must be not null.", null)
+          .withConfigProperty(POSTGRES_PASSWORD);
+    }
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBInputFormatProvider.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBInputFormatProvider.java
new file mode 100644
index 0000000..f4ddd88
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBInputFormatProvider.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.DRIVER_CLASS_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_CLASS_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_FIELD_NAMES_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_ORDER_BY_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.INPUT_TABLE_NAME_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.PASSWORD_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.URL_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.USERNAME_PROPERTY;
+
+import io.cdap.cdap.api.data.batch.InputFormatProvider;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO;
+import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
+
+/**
+ * {@link InputFormatProvider} for the {@link DBBatchSource} CDAP plugin. Used to test {@link
+ * CdapIO#read()}.
+ */
+public class DBInputFormatProvider implements InputFormatProvider {
+
+  private static final String POSTGRESQL_DRIVER = "org.postgresql.Driver";
+
+  private final Map<String, String> conf;
+
+  DBInputFormatProvider(DBConfig config) {
+    this.conf = new HashMap<>();
+
+    conf.put(DRIVER_CLASS_PROPERTY, POSTGRESQL_DRIVER);
+    conf.put(URL_PROPERTY, config.dbUrl);
+    conf.put(USERNAME_PROPERTY, config.pgUsername);
+    conf.put(PASSWORD_PROPERTY, config.pgPassword);
+
+    conf.put(INPUT_TABLE_NAME_PROPERTY, config.tableName);
+    conf.put(INPUT_FIELD_NAMES_PROPERTY, config.fieldNames);
+    conf.put(INPUT_ORDER_BY_PROPERTY, config.orderBy);
+    conf.put(INPUT_CLASS_PROPERTY, config.valueClassName);
+
+    conf.put(HadoopFormatIO.JOB_ID, String.valueOf(1));
+  }
+
+  @Override
+  public String getInputFormatClassName() {
+    return DBInputFormat.class.getName();
+  }
+
+  @Override
+  public Map<String, String> getInputFormatConfiguration() {
+    return conf;
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBOutputFormatProvider.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBOutputFormatProvider.java
new file mode 100644
index 0000000..24ec982
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/DBOutputFormatProvider.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.DRIVER_CLASS_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.OUTPUT_FIELD_COUNT_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.PASSWORD_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.URL_PROPERTY;
+import static org.apache.hadoop.mapreduce.lib.db.DBConfiguration.USERNAME_PROPERTY;
+
+import io.cdap.cdap.api.data.batch.OutputFormatProvider;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO;
+import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
+
+/**
+ * {@link OutputFormatProvider} for the {@link DBBatchSink} CDAP plugin. Used to test {@link
+ * CdapIO#write()}.
+ */
+public class DBOutputFormatProvider implements OutputFormatProvider {
+
+  private static final String POSTGRESQL_DRIVER = "org.postgresql.Driver";
+
+  private final Map<String, String> conf;
+
+  DBOutputFormatProvider(DBConfig config) {
+    this.conf = new HashMap<>();
+
+    conf.put(DRIVER_CLASS_PROPERTY, POSTGRESQL_DRIVER);
+    conf.put(URL_PROPERTY, config.dbUrl);
+    conf.put(USERNAME_PROPERTY, config.pgUsername);
+    conf.put(PASSWORD_PROPERTY, config.pgPassword);
+
+    conf.put(OUTPUT_TABLE_NAME_PROPERTY, config.tableName);
+    conf.put(OUTPUT_FIELD_COUNT_PROPERTY, config.fieldCount);
+    conf.put(OUTPUT_FIELD_NAMES_PROPERTY, config.fieldNames);
+
+    conf.put(HadoopFormatIO.JOB_ID, String.valueOf(1));
+  }
+
+  @Override
+  public String getOutputFormatClassName() {
+    return DBOutputFormat.class.getName();
+  }
+
+  @Override
+  public Map<String, String> getOutputFormatConfiguration() {
+    return conf;
+  }
+}
diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/TestRowDBWritable.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/TestRowDBWritable.java
new file mode 100644
index 0000000..d85c5ea
--- /dev/null
+++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/TestRowDBWritable.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.cdap;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import org.apache.beam.sdk.coders.AvroCoder;
+import org.apache.beam.sdk.coders.DefaultCoder;
+import org.apache.beam.sdk.io.common.TestRow;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+
+/**
+ * A subclass of {@link TestRow} to be used with {@link
+ * org.apache.hadoop.mapreduce.lib.db.DBInputFormat}.
+ */
+@DefaultCoder(AvroCoder.class)
+class TestRowDBWritable extends TestRow implements DBWritable, Writable {
+
+  private Integer id;
+  private String name;
+
+  public TestRowDBWritable() {}
+
+  public TestRowDBWritable(Integer id, String name) {
+    this.id = id;
+    this.name = name;
+  }
+
+  @Override
+  public Integer id() {
+    return id;
+  }
+
+  @Override
+  public String name() {
+    return name;
+  }
+
+  @Override
+  public void write(PreparedStatement statement) throws SQLException {
+    statement.setInt(1, id);
+    statement.setString(2, name);
+  }
+
+  @Override
+  public void readFields(ResultSet resultSet) throws SQLException {
+    id = resultSet.getInt(1);
+    name = resultSet.getString(2);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(id);
+    out.writeUTF(name); // writeUTF pairs with the readUTF call in readFields(DataInput) below
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    id = in.readInt();
+    name = in.readUTF();
+  }
+}
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BatchedStreamingWrite.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BatchedStreamingWrite.java
index 5e611d6..133fa5b 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BatchedStreamingWrite.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BatchedStreamingWrite.java
@@ -77,6 +77,7 @@
   private final boolean skipInvalidRows;
   private final boolean ignoreUnknownValues;
   private final boolean ignoreInsertIds;
+  private final boolean propagateSuccessful;
   private final @Nullable SerializableFunction<ElementT, TableRow> toTableRow;
   private final @Nullable SerializableFunction<ElementT, TableRow> toFailsafeTableRow;
   private final Set<String> allowedMetricUrns;
@@ -96,6 +97,7 @@
       boolean skipInvalidRows,
       boolean ignoreUnknownValues,
       boolean ignoreInsertIds,
+      boolean propagateSuccessful,
       @Nullable SerializableFunction<ElementT, TableRow> toTableRow,
       @Nullable SerializableFunction<ElementT, TableRow> toFailsafeTableRow) {
     this.bqServices = bqServices;
@@ -106,6 +108,7 @@
     this.skipInvalidRows = skipInvalidRows;
     this.ignoreUnknownValues = ignoreUnknownValues;
     this.ignoreInsertIds = ignoreInsertIds;
+    this.propagateSuccessful = propagateSuccessful;
     this.toTableRow = toTableRow;
     this.toFailsafeTableRow = toFailsafeTableRow;
     this.allowedMetricUrns = getAllowedMetricUrns();
@@ -121,6 +124,7 @@
       boolean skipInvalidRows,
       boolean ignoreUnknownValues,
       boolean ignoreInsertIds,
+      boolean propagateSuccessful,
       @Nullable SerializableFunction<ElementT, TableRow> toTableRow,
       @Nullable SerializableFunction<ElementT, TableRow> toFailsafeTableRow,
       boolean batchViaStateful) {
@@ -132,6 +136,7 @@
     this.skipInvalidRows = skipInvalidRows;
     this.ignoreUnknownValues = ignoreUnknownValues;
     this.ignoreInsertIds = ignoreInsertIds;
+    this.propagateSuccessful = propagateSuccessful;
     this.toTableRow = toTableRow;
     this.toFailsafeTableRow = toFailsafeTableRow;
     this.allowedMetricUrns = getAllowedMetricUrns();
@@ -159,6 +164,7 @@
         skipInvalidRows,
         ignoreUnknownValues,
         ignoreInsertIds,
+        propagateSuccessful,
         toTableRow,
         toFailsafeTableRow,
         false);
@@ -179,6 +185,7 @@
         skipInvalidRows,
         ignoreUnknownValues,
         ignoreInsertIds,
+        propagateSuccessful,
         toTableRow,
         toFailsafeTableRow,
         true);
@@ -197,11 +204,16 @@
     public PCollectionTuple expand(PCollection<KV<String, TableRowInfo<ElementT>>> input) {
       PCollectionTuple result =
           input.apply(
-              ParDo.of(new BatchAndInsertElements())
+              ParDo.of(new BatchAndInsertElements(propagateSuccessful))
                   .withOutputTags(
-                      mainOutputTag, TupleTagList.of(failedOutputTag).and(SUCCESSFUL_ROWS_TAG)));
+                      mainOutputTag,
+                      propagateSuccessful
+                          ? TupleTagList.of(failedOutputTag).and(SUCCESSFUL_ROWS_TAG)
+                          : TupleTagList.of(failedOutputTag)));
       result.get(failedOutputTag).setCoder(failedOutputCoder);
-      result.get(SUCCESSFUL_ROWS_TAG).setCoder(TableRowJsonCoder.of());
+      if (propagateSuccessful) {
+        result.get(SUCCESSFUL_ROWS_TAG).setCoder(TableRowJsonCoder.of());
+      }
       return result;
     }
   }
@@ -218,6 +230,12 @@
 
     private transient @Nullable DatasetService datasetService;
 
+    private final boolean propagateSuccessfulInserts;
+
+    BatchAndInsertElements(boolean propagateSuccessful) {
+      this.propagateSuccessfulInserts = propagateSuccessful;
+    }
+
     private DatasetService getDatasetService(PipelineOptions pipelineOptions) throws IOException {
       if (datasetService == null) {
         datasetService = bqServices.getDatasetService(pipelineOptions.as(BigQueryOptions.class));
@@ -279,8 +297,10 @@
       for (ValueInSingleWindow<ErrorT> row : failedInserts) {
         context.output(failedOutputTag, row.getValue(), row.getTimestamp(), row.getWindow());
       }
-      for (ValueInSingleWindow<TableRow> row : successfulInserts) {
-        context.output(SUCCESSFUL_ROWS_TAG, row.getValue(), row.getTimestamp(), row.getWindow());
+      if (propagateSuccessfulInserts) {
+        for (ValueInSingleWindow<TableRow> row : successfulInserts) {
+          context.output(SUCCESSFUL_ROWS_TAG, row.getValue(), row.getTimestamp(), row.getWindow());
+        }
       }
       reportStreamingApiLogging(options);
     }
@@ -354,12 +374,16 @@
               // opposed to using the annotation @RequiresStableInputs, to avoid potential
               // performance penalty due to extra data shuffling.
               .apply(
-                  ParDo.of(new BatchAndInsertElements())
+                  ParDo.of(new BatchAndInsertElements(propagateSuccessful))
                       .withOutputTags(
                           mainOutputTag,
-                          TupleTagList.of(failedOutputTag).and(SUCCESSFUL_ROWS_TAG)));
+                          propagateSuccessful
+                              ? TupleTagList.of(failedOutputTag).and(SUCCESSFUL_ROWS_TAG)
+                              : TupleTagList.of(failedOutputTag)));
       result.get(failedOutputTag).setCoder(failedOutputCoder);
-      result.get(SUCCESSFUL_ROWS_TAG).setCoder(TableRowJsonCoder.of());
+      if (propagateSuccessful) {
+        result.get(SUCCESSFUL_ROWS_TAG).setCoder(TableRowJsonCoder.of());
+      }
       return result;
     }
   }
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
index 790cb44..e1ad4b4 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
@@ -1766,6 +1766,7 @@
         .setOptimizeWrites(false)
         .setUseBeamSchema(false)
         .setAutoSharding(false)
+        .setPropagateSuccessful(true)
         .setDeterministicRecordIdFn(null)
         .build();
   }
@@ -1840,7 +1841,7 @@
 
     abstract RowWriterFactory.@Nullable AvroRowWriterFactory<T, ?, ?> getAvroRowWriterFactory();
 
-    abstract @Nullable SerializableFunction<TableSchema, org.apache.avro.Schema>
+    abstract @Nullable SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema>
         getAvroSchemaFactory();
 
     abstract boolean getUseAvroLogicalTypes();
@@ -1907,6 +1908,8 @@
     @Experimental
     abstract Boolean getAutoSharding();
 
+    abstract Boolean getPropagateSuccessful();
+
     @Experimental
     abstract @Nullable SerializableFunction<T, String> getDeterministicRecordIdFn();
 
@@ -1930,7 +1933,7 @@
           RowWriterFactory.AvroRowWriterFactory<T, ?, ?> avroRowWriterFactory);
 
       abstract Builder<T> setAvroSchemaFactory(
-          SerializableFunction<TableSchema, org.apache.avro.Schema> avroSchemaFactory);
+          SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema> avroSchemaFactory);
 
       abstract Builder<T> setUseAvroLogicalTypes(boolean useAvroLogicalTypes);
 
@@ -1996,6 +1999,8 @@
       @Experimental
       abstract Builder<T> setAutoSharding(Boolean autoSharding);
 
+      abstract Builder<T> setPropagateSuccessful(Boolean propagateSuccessful);
+
       @Experimental
       abstract Builder<T> setDeterministicRecordIdFn(
           SerializableFunction<T, String> toUniqueIdFunction);
@@ -2200,7 +2205,7 @@
      * <p>If not specified, the TableSchema will automatically be converted to an avro schema.
      */
     public Write<T> withAvroSchemaFactory(
-        SerializableFunction<TableSchema, org.apache.avro.Schema> avroSchemaFactory) {
+        SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema> avroSchemaFactory) {
       return toBuilder().setAvroSchemaFactory(avroSchemaFactory).build();
     }
 
@@ -2502,13 +2507,24 @@
     }
 
     /**
+     * If true, enables propagation of the successfully inserted TableRows as part of the
+     * {@link WriteResult} object when using {@link Method#STREAMING_INSERTS}. By default this
+     * property is set to true. If a pipeline does not make use of the insert results, it can be
+     * set to false, allowing the pipeline to discard the inserted TableRows and reclaim worker
+     * resources.
+     */
+    public Write<T> withSuccessfulInsertsPropagation(boolean propagateSuccessful) {
+      return toBuilder().setPropagateSuccessful(propagateSuccessful).build();
+    }
+
+    /**
      * Provides a function which can serve as a source of deterministic unique ids for each record
      * to be written, replacing the unique ids generated with the default scheme. When used with
      * {@link Method#STREAMING_INSERTS} This also elides the re-shuffle from the BigQueryIO Write by
      * using the keys on which the data is grouped at the point at which BigQueryIO Write is
      * applied, since the reshuffle is necessary only for the checkpointing of the default-generated
      * ids for determinism. This may be beneficial as a performance optimization in the case when
-     * the current sharding is already sufficient for writing to BigQuery. Thi behavior takes
+     * the current sharding is already sufficient for writing to BigQuery. This behavior takes
      * precedence over {@link #withAutoSharding}.
      */
     @Experimental
@@ -2819,7 +2835,7 @@
               "Only one of withFormatFunction or withAvroFormatFunction/withAvroWriter maybe set,"
                   + " not both.");
 
-          SerializableFunction<TableSchema, org.apache.avro.Schema> avroSchemaFactory =
+          SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema> avroSchemaFactory =
               getAvroSchemaFactory();
           if (avroSchemaFactory == null) {
             checkArgument(
@@ -2911,6 +2927,7 @@
                 .withIgnoreUnknownValues(getIgnoreUnknownValues())
                 .withIgnoreInsertIds(getIgnoreInsertIds())
                 .withAutoSharding(getAutoSharding())
+                .withSuccessfulInsertsPropagation(getPropagateSuccessful())
                 .withDeterministicRecordIdFn(getDeterministicRecordIdFn())
                 .withKmsKey(getKmsKey());
         return input.apply(streamingInserts);
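The new withSuccessfulInsertsPropagation option only affects Method.STREAMING_INSERTS. A minimal usage sketch follows; the table spec and the rows PCollection<TableRow> are placeholders, not taken from this change:

  WriteResult result =
      rows.apply(
          "StreamToBigQuery",
          BigQueryIO.writeTableRows()
              .to("my-project:my_dataset.my_table")
              .withMethod(BigQueryIO.Write.Method.STREAMING_INSERTS)
              // Disable materialization of successfully inserted rows; by default it stays enabled.
              .withSuccessfulInsertsPropagation(false));

With propagation disabled, the successful-rows output of the WriteResult is presumably no longer populated, so a pipeline should only turn it off when it does not read those rows back.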
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/RowWriterFactory.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/RowWriterFactory.java
index ad85170..710e9d7 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/RowWriterFactory.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/RowWriterFactory.java
@@ -99,13 +99,13 @@
 
     private final SerializableFunction<AvroWriteRequest<ElementT>, AvroT> toAvro;
     private final SerializableFunction<Schema, DatumWriter<AvroT>> writerFactory;
-    private final @Nullable SerializableFunction<TableSchema, Schema> schemaFactory;
+    private final @Nullable SerializableFunction<@Nullable TableSchema, Schema> schemaFactory;
     private final @Nullable DynamicDestinations<?, DestinationT> dynamicDestinations;
 
     private AvroRowWriterFactory(
         SerializableFunction<AvroWriteRequest<ElementT>, AvroT> toAvro,
         SerializableFunction<Schema, DatumWriter<AvroT>> writerFactory,
-        @Nullable SerializableFunction<TableSchema, Schema> schemaFactory,
+        @Nullable SerializableFunction<@Nullable TableSchema, Schema> schemaFactory,
         @Nullable DynamicDestinations<?, DestinationT> dynamicDestinations) {
       this.toAvro = toAvro;
       this.writerFactory = writerFactory;
@@ -115,7 +115,7 @@
 
     AvroRowWriterFactory<ElementT, AvroT, DestinationT> prepare(
         DynamicDestinations<?, DestinationT> dynamicDestinations,
-        SerializableFunction<TableSchema, Schema> schemaFactory) {
+        SerializableFunction<@Nullable TableSchema, Schema> schemaFactory) {
       return new AvroRowWriterFactory<>(toAvro, writerFactory, schemaFactory, dynamicDestinations);
     }
 
@@ -135,10 +135,8 @@
         throw new IllegalStateException(
             "createRowWriter called when schemaFactory is null; forgot to call prepare()?");
       }
+
       TableSchema tableSchema = dynamicDestinations.getSchema(destination);
-      if (tableSchema == null) {
-        throw new IllegalStateException("dynamicDestinations.getSchema returned null");
-      }
       Schema avroSchema = schemaFactory.apply(tableSchema);
       return new AvroRowWriter<>(tempFilePrefix, avroSchema, toAvro, writerFactory);
     }
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java
index 1ccd527..afecc96 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java
@@ -553,6 +553,11 @@
         OutputReceiver<KV<String, Operation>> o,
         BoundedWindow window) {
       // Stream is idle - clear it.
+      // Note: this is best effort. We are explicitly emitting a timestamp that is before
+      // the default output timestamp, which means that in some cases (usually when draining
+      // a pipeline) this finalize element will be dropped as late. This is usually ok as
+      // BigQuery will eventually garbage collect the stream. We attempt to finalize idle streams
+      // merely to remove the pressure of large numbers of orphaned streams from BigQuery.
       finalizeStream(streamName, streamOffset, o, window.maxTimestamp());
       streamsIdle.inc();
     }
@@ -567,5 +572,10 @@
       // streams so that they are not leaked.
       finalizeStream(streamName, streamOffset, o, window.maxTimestamp());
     }
+
+    @Override
+    public Duration getAllowedTimestampSkew() {
+      return Duration.millis(BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis());
+    }
   }
 }
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingInserts.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingInserts.java
index c9cefdc..8c825b0 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingInserts.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingInserts.java
@@ -41,6 +41,7 @@
   private final boolean ignoreUnknownValues;
   private final boolean ignoreInsertIds;
   private final boolean autoSharding;
+  private final boolean propagateSuccessful;
   private final @Nullable String kmsKey;
   private final Coder<ElementT> elementCoder;
   private final SerializableFunction<ElementT, TableRow> toTableRow;
@@ -64,6 +65,7 @@
         false,
         false,
         false,
+        true,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -82,6 +84,7 @@
       boolean ignoreUnknownValues,
       boolean ignoreInsertIds,
       boolean autoSharding,
+      boolean propagateSuccessful,
       Coder<ElementT> elementCoder,
       SerializableFunction<ElementT, TableRow> toTableRow,
       SerializableFunction<ElementT, TableRow> toFailsafeTableRow,
@@ -96,6 +99,7 @@
     this.ignoreUnknownValues = ignoreUnknownValues;
     this.ignoreInsertIds = ignoreInsertIds;
     this.autoSharding = autoSharding;
+    this.propagateSuccessful = propagateSuccessful;
     this.elementCoder = elementCoder;
     this.toTableRow = toTableRow;
     this.toFailsafeTableRow = toFailsafeTableRow;
@@ -116,6 +120,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -135,6 +140,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -153,6 +159,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -171,6 +178,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -189,6 +197,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -207,6 +216,27 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
+        elementCoder,
+        toTableRow,
+        toFailsafeTableRow,
+        deterministicRecordIdFn,
+        kmsKey);
+  }
+
+  StreamingInserts<DestinationT, ElementT> withSuccessfulInsertsPropagation(
+      boolean propagateSuccessful) {
+    return new StreamingInserts<>(
+        createDisposition,
+        dynamicDestinations,
+        bigQueryServices,
+        retryPolicy,
+        extendedErrorInfo,
+        skipInvalidRows,
+        ignoreUnknownValues,
+        ignoreInsertIds,
+        autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -226,6 +256,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -244,6 +275,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -262,6 +294,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -287,6 +320,7 @@
             .withIgnoreUnknownValues(ignoreUnknownValues)
             .withIgnoreInsertIds(ignoreInsertIds)
             .withAutoSharding(autoSharding)
+            .withPropagateSuccessful(propagateSuccessful)
             .withElementCoder(elementCoder)
             .withToTableRow(toTableRow)
             .withToFailsafeTableRow(toFailsafeTableRow)
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingWriteTables.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingWriteTables.java
index 22dd041..23cda2f 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingWriteTables.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StreamingWriteTables.java
@@ -59,6 +59,7 @@
   private final boolean ignoreUnknownValues;
   private final boolean ignoreInsertIds;
   private final boolean autoSharding;
+  private final boolean propagateSuccessful;
   private final @Nullable Coder<ElementT> elementCoder;
   private final @Nullable SerializableFunction<ElementT, TableRow> toTableRow;
   private final @Nullable SerializableFunction<ElementT, TableRow> toFailsafeTableRow;
@@ -73,6 +74,7 @@
         false, // ignoreUnknownValues
         false, // ignoreInsertIds
         false, // autoSharding
+        false, // propagateSuccessful
         null, // elementCoder
         null, // toTableRow
         null, // toFailsafeTableRow
@@ -87,6 +89,7 @@
       boolean ignoreUnknownValues,
       boolean ignoreInsertIds,
       boolean autoSharding,
+      boolean propagateSuccessful,
       @Nullable Coder<ElementT> elementCoder,
       @Nullable SerializableFunction<ElementT, TableRow> toTableRow,
       @Nullable SerializableFunction<ElementT, TableRow> toFailsafeTableRow,
@@ -98,6 +101,7 @@
     this.ignoreUnknownValues = ignoreUnknownValues;
     this.ignoreInsertIds = ignoreInsertIds;
     this.autoSharding = autoSharding;
+    this.propagateSuccessful = propagateSuccessful;
     this.elementCoder = elementCoder;
     this.toTableRow = toTableRow;
     this.toFailsafeTableRow = toFailsafeTableRow;
@@ -113,6 +117,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -128,6 +133,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -143,6 +149,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -158,6 +165,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -173,6 +181,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -188,6 +197,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -203,6 +213,23 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
+        elementCoder,
+        toTableRow,
+        toFailsafeTableRow,
+        deterministicRecordIdFn);
+  }
+
+  StreamingWriteTables<ElementT> withPropagateSuccessful(boolean propagateSuccessful) {
+    return new StreamingWriteTables<>(
+        bigQueryServices,
+        retryPolicy,
+        extendedErrorInfo,
+        skipInvalidRows,
+        ignoreUnknownValues,
+        ignoreInsertIds,
+        autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -218,6 +245,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -234,6 +262,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -250,6 +279,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -266,6 +296,7 @@
         ignoreUnknownValues,
         ignoreInsertIds,
         autoSharding,
+        propagateSuccessful,
         elementCoder,
         toTableRow,
         toFailsafeTableRow,
@@ -288,7 +319,7 @@
           input.getPipeline(),
           failedInsertsTag,
           failedInserts,
-          result.get(BatchedStreamingWrite.SUCCESSFUL_ROWS_TAG));
+          propagateSuccessful ? result.get(BatchedStreamingWrite.SUCCESSFUL_ROWS_TAG) : null);
     } else {
       TupleTag<TableRow> failedInsertsTag = new TupleTag<>(FAILED_INSERTS_TAG_ID);
       PCollectionTuple result =
@@ -302,7 +333,7 @@
           input.getPipeline(),
           failedInsertsTag,
           failedInserts,
-          result.get(BatchedStreamingWrite.SUCCESSFUL_ROWS_TAG),
+          propagateSuccessful ? result.get(BatchedStreamingWrite.SUCCESSFUL_ROWS_TAG) : null,
           null,
           null,
           null,
@@ -363,6 +394,7 @@
                   skipInvalidRows,
                   ignoreUnknownValues,
                   ignoreInsertIds,
+                  propagateSuccessful,
                   toTableRow,
                   toFailsafeTableRow)
               .viaStateful());
@@ -422,6 +454,7 @@
                       skipInvalidRows,
                       ignoreUnknownValues,
                       ignoreInsertIds,
+                      propagateSuccessful,
                       toTableRow,
                       toFailsafeTableRow)
                   .viaDoFnFinalization());
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/WriteResult.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/WriteResult.java
index 820deb8..574f854 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/WriteResult.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/WriteResult.java
@@ -70,7 +70,7 @@
       Pipeline pipeline,
       TupleTag<BigQueryInsertError> failedInsertsTag,
       PCollection<BigQueryInsertError> failedInserts,
-      PCollection<TableRow> successfulInserts) {
+      @Nullable PCollection<TableRow> successfulInserts) {
     return new WriteResult(
         pipeline,
         null,
@@ -151,7 +151,9 @@
   public PCollection<TableRow> getSuccessfulInserts() {
     if (successfulInserts == null) {
       throw new IllegalStateException(
-          "Retrieving successful inserts is only supported for streaming inserts.");
+          "Retrieving successful inserts is only supported for streaming inserts. "
+              + "Make sure withSuccessfulInsertsPropagation is correctly configured for "
+              + "BigQueryIO.Write object.");
     }
     return successfulInserts;
   }
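
Taken together, the StreamingInserts, StreamingWriteTables, and WriteResult changes let a streaming pipeline opt in to getting the successfully inserted rows back. A hedged usage sketch, assuming a streaming pipeline with an existing PCollection<TableRow> named rows; the table reference is a placeholder:

    import com.google.api.services.bigquery.model.TableRow;
    import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
    import org.apache.beam.sdk.io.gcp.bigquery.WriteResult;
    import org.apache.beam.sdk.values.PCollection;

    // Opt in to successful-insert propagation (only meaningful for streaming inserts).
    WriteResult result =
        rows.apply(
            "WriteToBigQuery",
            BigQueryIO.writeTableRows()
                .to("my-project:my_dataset.my_table") // placeholder table reference
                .withMethod(BigQueryIO.Write.Method.STREAMING_INSERTS)
                .withSuccessfulInsertsPropagation(true));

    // Only available when streaming inserts are used and propagation is enabled;
    // otherwise getSuccessfulInserts() throws the IllegalStateException shown above.
    PCollection<TableRow> inserted = result.getSuccessfulInserts();
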
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
index 9137335..06b7c52 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
@@ -45,6 +45,7 @@
 import com.google.datastore.v1.PartitionId;
 import com.google.datastore.v1.Query;
 import com.google.datastore.v1.QueryResultBatch;
+import com.google.datastore.v1.ReadOptions;
 import com.google.datastore.v1.RunQueryRequest;
 import com.google.datastore.v1.RunQueryResponse;
 import com.google.datastore.v1.client.Datastore;
@@ -54,6 +55,8 @@
 import com.google.datastore.v1.client.DatastoreOptions;
 import com.google.datastore.v1.client.QuerySplitter;
 import com.google.protobuf.Int32Value;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
 import com.google.rpc.Code;
 import java.io.IOException;
 import java.io.Serializable;
@@ -320,6 +323,8 @@
 
     public abstract @Nullable String getLocalhost();
 
+    public abstract @Nullable Instant getReadTime();
+
     @Override
     public abstract String toString();
 
@@ -339,6 +344,8 @@
 
       abstract Builder setLocalhost(String localhost);
 
+      abstract Builder setReadTime(Instant readTime);
+
       abstract Read build();
     }
 
@@ -346,10 +353,11 @@
      * Computes the number of splits to be performed on the given query by querying the estimated
      * size from Cloud Datastore.
      */
-    static int getEstimatedNumSplits(Datastore datastore, Query query, @Nullable String namespace) {
+    static int getEstimatedNumSplits(
+        Datastore datastore, Query query, @Nullable String namespace, @Nullable Instant readTime) {
       int numSplits;
       try {
-        long estimatedSizeBytes = getEstimatedSizeBytes(datastore, query, namespace);
+        long estimatedSizeBytes = getEstimatedSizeBytes(datastore, query, namespace, readTime);
         LOG.info("Estimated size bytes for the query is: {}", estimatedSizeBytes);
         numSplits =
             (int)
@@ -370,7 +378,8 @@
      * table.
      */
     private static long queryLatestStatisticsTimestamp(
-        Datastore datastore, @Nullable String namespace) throws DatastoreException {
+        Datastore datastore, @Nullable String namespace, @Nullable Instant readTime)
+        throws DatastoreException {
       Query.Builder query = Query.newBuilder();
       // Note: namespace either being null or empty represents the default namespace, in which
       // case we treat it as not provided by the user.
@@ -381,7 +390,7 @@
       }
       query.addOrder(makeOrder("timestamp", DESCENDING));
       query.setLimit(Int32Value.newBuilder().setValue(1));
-      RunQueryRequest request = makeRequest(query.build(), namespace);
+      RunQueryRequest request = makeRequest(query.build(), namespace, readTime);
 
       RunQueryResponse response = datastore.runQuery(request);
       QueryResultBatch batch = response.getBatch();
@@ -392,10 +401,14 @@
       return entity.getProperties().get("timestamp").getTimestampValue().getSeconds() * 1000000;
     }
 
-    /** Retrieve latest table statistics for a given kind, namespace, and datastore. */
+    /**
+     * Retrieve latest table statistics for a given kind, namespace, and datastore. If the Read has
+     * readTime specified, the latest statistics at or before readTime are retrieved.
+     */
     private static Entity getLatestTableStats(
-        String ourKind, @Nullable String namespace, Datastore datastore) throws DatastoreException {
-      long latestTimestamp = queryLatestStatisticsTimestamp(datastore, namespace);
+        String ourKind, @Nullable String namespace, Datastore datastore, @Nullable Instant readTime)
+        throws DatastoreException {
+      long latestTimestamp = queryLatestStatisticsTimestamp(datastore, namespace, readTime);
       LOG.info("Latest stats timestamp for kind {} is {}", ourKind, latestTimestamp);
 
       Query.Builder queryBuilder = Query.newBuilder();
@@ -410,7 +423,7 @@
               makeFilter("kind_name", EQUAL, makeValue(ourKind).build()).build(),
               makeFilter("timestamp", EQUAL, makeValue(latestTimestamp).build()).build()));
 
-      RunQueryRequest request = makeRequest(queryBuilder.build(), namespace);
+      RunQueryRequest request = makeRequest(queryBuilder.build(), namespace, readTime);
 
       long now = System.currentTimeMillis();
       RunQueryResponse response = datastore.runQuery(request);
@@ -433,10 +446,11 @@
      *
      * <p>See https://cloud.google.com/datastore/docs/concepts/stats.
      */
-    static long getEstimatedSizeBytes(Datastore datastore, Query query, @Nullable String namespace)
+    static long getEstimatedSizeBytes(
+        Datastore datastore, Query query, @Nullable String namespace, @Nullable Instant readTime)
         throws DatastoreException {
       String ourKind = query.getKind(0).getName();
-      Entity entity = getLatestTableStats(ourKind, namespace, datastore);
+      Entity entity = getLatestTableStats(ourKind, namespace, datastore, readTime);
       return entity.getProperties().get("entity_bytes").getIntegerValue();
     }
 
@@ -451,21 +465,38 @@
       return partitionBuilder;
     }
 
-    /** Builds a {@link RunQueryRequest} from the {@code query} and {@code namespace}. */
-    static RunQueryRequest makeRequest(Query query, @Nullable String namespace) {
-      return RunQueryRequest.newBuilder()
-          .setQuery(query)
-          .setPartitionId(forNamespace(namespace))
-          .build();
+    /**
+     * Builds a {@link RunQueryRequest} from the {@code query} and {@code namespace}, optionally at
+     * the requested {@code readTime}.
+     */
+    static RunQueryRequest makeRequest(
+        Query query, @Nullable String namespace, @Nullable Instant readTime) {
+      RunQueryRequest.Builder request =
+          RunQueryRequest.newBuilder().setQuery(query).setPartitionId(forNamespace(namespace));
+      if (readTime != null) {
+        Timestamp readTimeProto = Timestamps.fromMillis(readTime.getMillis());
+        request.setReadOptions(ReadOptions.newBuilder().setReadTime(readTimeProto).build());
+      }
+      return request.build();
     }
 
     @VisibleForTesting
-    /** Builds a {@link RunQueryRequest} from the {@code GqlQuery} and {@code namespace}. */
-    static RunQueryRequest makeRequest(GqlQuery gqlQuery, @Nullable String namespace) {
-      return RunQueryRequest.newBuilder()
-          .setGqlQuery(gqlQuery)
-          .setPartitionId(forNamespace(namespace))
-          .build();
+    /**
+     * Builds a {@link RunQueryRequest} from the {@code GqlQuery} and {@code namespace}, optionally
+     * at the requested {@code readTime}.
+     */
+    static RunQueryRequest makeRequest(
+        GqlQuery gqlQuery, @Nullable String namespace, @Nullable Instant readTime) {
+      RunQueryRequest.Builder request =
+          RunQueryRequest.newBuilder()
+              .setGqlQuery(gqlQuery)
+              .setPartitionId(forNamespace(namespace));
+      if (readTime != null) {
+        Timestamp readTimeProto = Timestamps.fromMillis(readTime.getMillis());
+        request.setReadOptions(ReadOptions.newBuilder().setReadTime(readTimeProto).build());
+      }
+
+      return request.build();
     }
 
     /**
@@ -477,10 +508,16 @@
         @Nullable String namespace,
         Datastore datastore,
         QuerySplitter querySplitter,
-        int numSplits)
+        int numSplits,
+        @Nullable Instant readTime)
         throws DatastoreException {
       // If namespace is set, include it in the split request so splits are calculated accordingly.
-      return querySplitter.getSplits(query, forNamespace(namespace).build(), numSplits, datastore);
+      PartitionId partitionId = forNamespace(namespace).build();
+      if (readTime != null) {
+        Timestamp readTimeProto = Timestamps.fromMillis(readTime.getMillis());
+        return querySplitter.getSplits(query, partitionId, numSplits, datastore, readTimeProto);
+      }
+      return querySplitter.getSplits(query, partitionId, numSplits, datastore);
     }
 
     /**
@@ -497,11 +534,13 @@
      * problem in practice.
      */
     @VisibleForTesting
-    static Query translateGqlQueryWithLimitCheck(String gql, Datastore datastore, String namespace)
+    static Query translateGqlQueryWithLimitCheck(
+        String gql, Datastore datastore, String namespace, @Nullable Instant readTime)
         throws DatastoreException {
       String gqlQueryWithZeroLimit = gql + " LIMIT 0";
       try {
-        Query translatedQuery = translateGqlQuery(gqlQueryWithZeroLimit, datastore, namespace);
+        Query translatedQuery =
+            translateGqlQuery(gqlQueryWithZeroLimit, datastore, namespace, readTime);
         // Clear the limit that we set.
         return translatedQuery.toBuilder().clearLimit().build();
       } catch (DatastoreException e) {
@@ -512,7 +551,7 @@
           LOG.warn("Failed to translate Gql query '{}': {}", gqlQueryWithZeroLimit, e.getMessage());
           LOG.warn("User query might have a limit already set, so trying without zero limit");
           // Retry without the zero limit.
-          return translateGqlQuery(gql, datastore, namespace);
+          return translateGqlQuery(gql, datastore, namespace, readTime);
         } else {
           throw e;
         }
@@ -520,10 +559,11 @@
     }
 
     /** Translates a gql query string to {@link Query}. */
-    private static Query translateGqlQuery(String gql, Datastore datastore, String namespace)
+    private static Query translateGqlQuery(
+        String gql, Datastore datastore, String namespace, @Nullable Instant readTime)
         throws DatastoreException {
       GqlQuery gqlQuery = GqlQuery.newBuilder().setQueryString(gql).setAllowLiterals(true).build();
-      RunQueryRequest req = makeRequest(gqlQuery, namespace);
+      RunQueryRequest req = makeRequest(gqlQuery, namespace, readTime);
       return datastore.runQuery(req).getQuery();
     }
 
@@ -628,6 +668,11 @@
       return toBuilder().setLocalhost(localhost).build();
     }
 
+    /** Returns a new {@link DatastoreV1.Read} that reads at the specified {@code readTime}. */
+    public DatastoreV1.Read withReadTime(Instant readTime) {
+      return toBuilder().setReadTime(readTime).build();
+    }
+
     /** Returns Number of entities available for reading. */
     public long getNumEntities(
         PipelineOptions options, String ourKind, @Nullable String namespace) {
@@ -638,7 +683,7 @@
             datastoreFactory.getDatastore(
                 options, v1Options.getProjectId(), v1Options.getLocalhost());
 
-        Entity entity = getLatestTableStats(ourKind, namespace, datastore);
+        Entity entity = getLatestTableStats(ourKind, namespace, datastore, getReadTime());
         return entity.getProperties().get("count").getIntegerValue();
       } catch (Exception e) {
         return -1;
@@ -688,13 +733,13 @@
         inputQuery =
             input
                 .apply(Create.ofProvider(getLiteralGqlQuery(), StringUtf8Coder.of()))
-                .apply(ParDo.of(new GqlQueryTranslateFn(v1Options)));
+                .apply(ParDo.of(new GqlQueryTranslateFn(v1Options, getReadTime())));
       }
 
       return inputQuery
-          .apply("Split", ParDo.of(new SplitQueryFn(v1Options, getNumQuerySplits())))
+          .apply("Split", ParDo.of(new SplitQueryFn(v1Options, getNumQuerySplits(), getReadTime())))
           .apply("Reshuffle", Reshuffle.viaRandomKey())
-          .apply("Read", ParDo.of(new ReadFn(v1Options)));
+          .apply("Read", ParDo.of(new ReadFn(v1Options, getReadTime())));
     }
 
     @Override
@@ -705,7 +750,8 @@
           .addIfNotNull(DisplayData.item("projectId", getProjectId()).withLabel("ProjectId"))
           .addIfNotNull(DisplayData.item("namespace", getNamespace()).withLabel("Namespace"))
           .addIfNotNull(DisplayData.item("query", query).withLabel("Query"))
-          .addIfNotNull(DisplayData.item("gqlQuery", getLiteralGqlQuery()).withLabel("GqlQuery"));
+          .addIfNotNull(DisplayData.item("gqlQuery", getLiteralGqlQuery()).withLabel("GqlQuery"))
+          .addIfNotNull(DisplayData.item("readTime", getReadTime()).withLabel("ReadTime"));
     }
 
     @VisibleForTesting
@@ -764,15 +810,22 @@
     /** A DoFn that translates a Cloud Datastore gql query string to {@code Query}. */
     static class GqlQueryTranslateFn extends DoFn<String, Query> {
       private final V1Options v1Options;
+      private final @Nullable Instant readTime;
       private transient Datastore datastore;
       private final V1DatastoreFactory datastoreFactory;
 
       GqlQueryTranslateFn(V1Options options) {
-        this(options, new V1DatastoreFactory());
+        this(options, null, new V1DatastoreFactory());
       }
 
-      GqlQueryTranslateFn(V1Options options, V1DatastoreFactory datastoreFactory) {
+      GqlQueryTranslateFn(V1Options options, @Nullable Instant readTime) {
+        this(options, readTime, new V1DatastoreFactory());
+      }
+
+      GqlQueryTranslateFn(
+          V1Options options, @Nullable Instant readTime, V1DatastoreFactory datastoreFactory) {
         this.v1Options = options;
+        this.readTime = readTime;
         this.datastoreFactory = datastoreFactory;
       }
 
@@ -788,7 +841,8 @@
         String gqlQuery = c.element();
         LOG.info("User query: '{}'", gqlQuery);
         Query query =
-            translateGqlQueryWithLimitCheck(gqlQuery, datastore, v1Options.getNamespace());
+            translateGqlQueryWithLimitCheck(
+                gqlQuery, datastore, v1Options.getNamespace(), readTime);
         LOG.info("User gql query translated to Query({})", query);
         c.output(query);
       }
@@ -803,6 +857,8 @@
       private final V1Options options;
       // number of splits to make for a given query
       private final int numSplits;
+      // time from which to run the queries
+      private final @Nullable Instant readTime;
 
       private final V1DatastoreFactory datastoreFactory;
       // Datastore client
@@ -811,14 +867,23 @@
       private transient QuerySplitter querySplitter;
 
       public SplitQueryFn(V1Options options, int numSplits) {
-        this(options, numSplits, new V1DatastoreFactory());
+        this(options, numSplits, null, new V1DatastoreFactory());
+      }
+
+      public SplitQueryFn(V1Options options, int numSplits, @Nullable Instant readTime) {
+        this(options, numSplits, readTime, new V1DatastoreFactory());
       }
 
       @VisibleForTesting
-      SplitQueryFn(V1Options options, int numSplits, V1DatastoreFactory datastoreFactory) {
+      SplitQueryFn(
+          V1Options options,
+          int numSplits,
+          @Nullable Instant readTime,
+          V1DatastoreFactory datastoreFactory) {
         this.options = options;
         this.numSplits = numSplits;
         this.datastoreFactory = datastoreFactory;
+        this.readTime = readTime;
       }
 
       @StartBundle
@@ -842,7 +907,8 @@
         int estimatedNumSplits;
         // Compute the estimated numSplits if numSplits is not specified by the user.
         if (numSplits <= 0) {
-          estimatedNumSplits = getEstimatedNumSplits(datastore, query, options.getNamespace());
+          estimatedNumSplits =
+              getEstimatedNumSplits(datastore, query, options.getNamespace(), readTime);
         } else {
           estimatedNumSplits = numSplits;
         }
@@ -852,7 +918,12 @@
         try {
           querySplits =
               splitQuery(
-                  query, options.getNamespace(), datastore, querySplitter, estimatedNumSplits);
+                  query,
+                  options.getNamespace(),
+                  datastore,
+                  querySplitter,
+                  estimatedNumSplits,
+                  readTime);
         } catch (Exception e) {
           LOG.warn("Unable to parallelize the given query: {}", query, e);
           querySplits = ImmutableList.of(query);
@@ -873,6 +944,7 @@
               DisplayData.item("numQuerySplits", numSplits)
                   .withLabel("Requested number of Query splits"));
         }
+        builder.addIfNotNull(DisplayData.item("readTime", readTime).withLabel("ReadTime"));
       }
     }
 
@@ -880,6 +952,7 @@
     @VisibleForTesting
     static class ReadFn extends DoFn<Query, Entity> {
       private final V1Options options;
+      private final @Nullable Instant readTime;
       private final V1DatastoreFactory datastoreFactory;
       // Datastore client
       private transient Datastore datastore;
@@ -894,12 +967,17 @@
               .withInitialBackoff(Duration.standardSeconds(5));
 
       public ReadFn(V1Options options) {
-        this(options, new V1DatastoreFactory());
+        this(options, null, new V1DatastoreFactory());
+      }
+
+      public ReadFn(V1Options options, @Nullable Instant readTime) {
+        this(options, readTime, new V1DatastoreFactory());
       }
 
       @VisibleForTesting
-      ReadFn(V1Options options, V1DatastoreFactory datastoreFactory) {
+      ReadFn(V1Options options, @Nullable Instant readTime, V1DatastoreFactory datastoreFactory) {
         this.options = options;
+        this.readTime = readTime;
         this.datastoreFactory = datastoreFactory;
       }
 
@@ -967,7 +1045,7 @@
             queryBuilder.setStartCursor(currentBatch.getEndCursor());
           }
 
-          RunQueryRequest request = makeRequest(queryBuilder.build(), namespace);
+          RunQueryRequest request = makeRequest(queryBuilder.build(), namespace, readTime);
           RunQueryResponse response = runQueryWithRetries(request);
 
           currentBatch = response.getBatch();
@@ -1005,6 +1083,7 @@
       public void populateDisplayData(DisplayData.Builder builder) {
         super.populateDisplayData(builder);
         builder.include("options", options);
+        builder.addIfNotNull(DisplayData.item("readTime", readTime).withLabel("ReadTime"));
       }
     }
   }
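
The DatastoreV1 changes add point-in-time reads: withReadTime(Instant) is carried through split estimation, GQL translation, and each per-batch RunQueryRequest, and is reported in display data. A hedged usage sketch, assuming an existing Pipeline named pipeline; the project and kind names are placeholders:

    import com.google.datastore.v1.Entity;
    import com.google.datastore.v1.Query;
    import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
    import org.apache.beam.sdk.values.PCollection;
    import org.joda.time.Duration;
    import org.joda.time.Instant;

    // Query the "orders" kind as it existed one hour ago (values illustrative).
    Query.Builder queryBuilder = Query.newBuilder();
    queryBuilder.addKindBuilder().setName("orders");
    Instant readTime = Instant.now().minus(Duration.standardHours(1));

    PCollection<Entity> snapshot =
        pipeline.apply(
            DatastoreIO.v1()
                .read()
                .withProjectId("my-project") // placeholder
                .withQuery(queryBuilder.build())
                .withReadTime(readTime));
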
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/healthcare/FhirIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/healthcare/FhirIO.java
index 78d769f..149212d 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/healthcare/FhirIO.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/healthcare/FhirIO.java
@@ -1327,8 +1327,8 @@
               RESOURCES_IMPORTED_SUCCESS,
               RESOURCES_IMPORTED_ERRORS);
 
-          // Clean up temp files on GCS as they we successfully imported to FHIR store and no longer
-          // needed.
+          // Clean up temp files on GCS as they were successfully imported to FHIR store and no
+          // longer needed.
           FileSystems.delete(tempDestinations);
         } catch (IOException | InterruptedException e) {
           ResourceId deadLetterResourceId =
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImpl.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImpl.java
index 96980b5..80e3cbe 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImpl.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImpl.java
@@ -43,7 +43,7 @@
   public ComputeMessageStatsResponse computeMessageStats(Offset offset) throws ApiException {
     try {
       return client
-          .computeMessageStats(topicPath, partition, offset, Offset.of(Integer.MAX_VALUE))
+          .computeMessageStats(topicPath, partition, offset, Offset.of(Long.MAX_VALUE))
           .get(1, MINUTES);
     } catch (Throwable t) {
       throw toCanonical(t).underlying;
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/Uuid.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/Uuid.java
index 969233e..16e5b11 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/Uuid.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/Uuid.java
@@ -24,6 +24,7 @@
 import java.util.Base64;
 import java.util.UUID;
 import org.apache.beam.sdk.coders.DefaultCoder;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 
 /** A Uuid storable in a Pub/Sub Lite attribute. */
 @DefaultCoder(UuidCoder.class)
@@ -42,7 +43,7 @@
 
   public static Uuid random() {
     UUID uuid = UUID.randomUUID();
-    ByteString.Output output = ByteString.newOutput(16);
+    ByteStringOutputStream output = new ByteStringOutputStream(16);
     DataOutputStream stream = new DataOutputStream(output);
     try {
       stream.writeLong(uuid.getMostSignificantBits());
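
The Uuid change swaps the deprecated ByteString.Output for Beam's ByteStringOutputStream; both buffer bytes in memory behind an OutputStream and then yield an immutable ByteString. A minimal sketch of the full method after the change, assuming ByteStringOutputStream#toByteString() as provided by org.apache.beam.sdk.util:

    import com.google.protobuf.ByteString;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.UUID;
    import org.apache.beam.sdk.util.ByteStringOutputStream;

    final class UuidBytes {
      // Encode a random UUID as a 16-byte ByteString, mirroring Uuid.random() above.
      static ByteString random() {
        UUID uuid = UUID.randomUUID();
        ByteStringOutputStream output = new ByteStringOutputStream(16);
        DataOutputStream stream = new DataOutputStream(output);
        try {
          stream.writeLong(uuid.getMostSignificantBits());
          stream.writeLong(uuid.getLeastSignificantBits());
        } catch (IOException e) {
          throw new RuntimeException(e); // in-memory writes should never throw
        }
        return output.toByteString();
      }
    }
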
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java
index 2905002..1bf78ed 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java
@@ -2625,6 +2625,20 @@
               "Cannot use getFailedInserts as this WriteResult "
                   + "uses extended errors information. Use getFailedInsertsWithErr instead"));
     }
+
+    try {
+      p.apply("Create3", Create.<TableRow>of(row1))
+          .apply("Write3", bqIoWrite.withSuccessfulInsertsPropagation(false))
+          .getSuccessfulInserts();
+      fail();
+    } catch (IllegalStateException e) {
+      assertThat(
+          e.getMessage(),
+          is(
+              "Retrieving successful inserts is only supported for streaming inserts. "
+                  + "Make sure withSuccessfulInsertsPropagation is correctly configured for "
+                  + "BigQueryIO.Write object."));
+    }
   }
 
   void schemaUpdateOptionsTest(
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1Test.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1Test.java
index 0fc895b..4aed59c 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1Test.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1Test.java
@@ -67,8 +67,12 @@
 import com.google.datastore.v1.client.DatastoreException;
 import com.google.datastore.v1.client.QuerySplitter;
 import com.google.protobuf.Int32Value;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
 import com.google.rpc.Code;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -101,17 +105,23 @@
 import org.apache.beam.sdk.transforms.display.DisplayDataEvaluator;
 import org.apache.beam.sdk.values.PBegin;
 import org.apache.beam.sdk.values.PCollection;
+import org.joda.time.Instant;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.experimental.runners.Enclosed;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 
 /** Tests for {@link DatastoreV1}. */
-@RunWith(JUnit4.class)
+@RunWith(Enclosed.class)
 public class DatastoreV1Test {
   private static final String PROJECT_ID = "testProject";
   private static final String NAMESPACE = "testNamespace";
@@ -119,6 +129,8 @@
   private static final Query QUERY;
   private static final String LOCALHOST = "localhost:9955";
   private static final String GQL_QUERY = "SELECT * from " + KIND;
+  private static final Instant TIMESTAMP = Instant.now();
+  private static final Timestamp TIMESTAMP_PROTO = Timestamps.fromMillis(TIMESTAMP.getMillis());
   private static final V1Options V_1_OPTIONS;
 
   static {
@@ -128,9 +140,9 @@
     V_1_OPTIONS = V1Options.from(PROJECT_ID, NAMESPACE, null);
   }
 
-  @Mock private Datastore mockDatastore;
-  @Mock QuerySplitter mockQuerySplitter;
-  @Mock V1DatastoreFactory mockDatastoreFactory;
+  @Mock protected Datastore mockDatastore;
+  @Mock protected QuerySplitter mockQuerySplitter;
+  @Mock protected V1DatastoreFactory mockDatastoreFactory;
 
   @Rule public final ExpectedException thrown = ExpectedException.none();
 
@@ -146,782 +158,890 @@
     MetricsEnvironment.setProcessWideContainer(container);
   }
 
-  @Test
-  public void testBuildRead() throws Exception {
-    DatastoreV1.Read read =
-        DatastoreIO.v1().read().withProjectId(PROJECT_ID).withQuery(QUERY).withNamespace(NAMESPACE);
-    assertEquals(QUERY, read.getQuery());
-    assertEquals(PROJECT_ID, read.getProjectId().get());
-    assertEquals(NAMESPACE, read.getNamespace().get());
-  }
-
-  @Test
-  public void testBuildReadWithGqlQuery() throws Exception {
-    DatastoreV1.Read read =
-        DatastoreIO.v1()
-            .read()
-            .withProjectId(PROJECT_ID)
-            .withLiteralGqlQuery(GQL_QUERY)
-            .withNamespace(NAMESPACE);
-    assertEquals(GQL_QUERY, read.getLiteralGqlQuery().get());
-    assertEquals(PROJECT_ID, read.getProjectId().get());
-    assertEquals(NAMESPACE, read.getNamespace().get());
-  }
-
-  /** {@link #testBuildRead} but constructed in a different order. */
-  @Test
-  public void testBuildReadAlt() throws Exception {
-    DatastoreV1.Read read =
-        DatastoreIO.v1()
-            .read()
-            .withQuery(QUERY)
-            .withNamespace(NAMESPACE)
-            .withProjectId(PROJECT_ID)
-            .withLocalhost(LOCALHOST);
-    assertEquals(QUERY, read.getQuery());
-    assertEquals(PROJECT_ID, read.getProjectId().get());
-    assertEquals(NAMESPACE, read.getNamespace().get());
-    assertEquals(LOCALHOST, read.getLocalhost());
-  }
-
-  @Test
-  public void testReadValidationFailsQueryAndGqlQuery() throws Exception {
-    DatastoreV1.Read read =
-        DatastoreIO.v1()
-            .read()
-            .withProjectId(PROJECT_ID)
-            .withLiteralGqlQuery(GQL_QUERY)
-            .withQuery(QUERY);
-
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("withQuery() and withLiteralGqlQuery() are exclusive");
-    read.expand(null);
-  }
-
-  @Test
-  public void testReadValidationFailsQueryLimitZero() throws Exception {
-    Query invalidLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(0)).build();
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("Invalid query limit 0: must be positive");
-
-    DatastoreIO.v1().read().withQuery(invalidLimit);
-  }
-
-  @Test
-  public void testReadValidationFailsQueryLimitNegative() throws Exception {
-    Query invalidLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(-5)).build();
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("Invalid query limit -5: must be positive");
-
-    DatastoreIO.v1().read().withQuery(invalidLimit);
-  }
-
-  @Test
-  public void testReadDisplayData() {
-    DatastoreV1.Read read =
-        DatastoreIO.v1().read().withProjectId(PROJECT_ID).withQuery(QUERY).withNamespace(NAMESPACE);
-
-    DisplayData displayData = DisplayData.from(read);
-
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-    assertThat(displayData, hasDisplayItem("query", QUERY.toString()));
-    assertThat(displayData, hasDisplayItem("namespace", NAMESPACE));
-  }
-
-  @Test
-  public void testReadDisplayDataWithGqlQuery() {
-    DatastoreV1.Read read =
-        DatastoreIO.v1()
-            .read()
-            .withProjectId(PROJECT_ID)
-            .withLiteralGqlQuery(GQL_QUERY)
-            .withNamespace(NAMESPACE);
-
-    DisplayData displayData = DisplayData.from(read);
-
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-    assertThat(displayData, hasDisplayItem("gqlQuery", GQL_QUERY));
-    assertThat(displayData, hasDisplayItem("namespace", NAMESPACE));
-  }
-
-  @Test
-  public void testSourcePrimitiveDisplayData() {
-    DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
-    int numSplits = 98;
-    PTransform<PBegin, PCollection<Entity>> read =
-        DatastoreIO.v1()
-            .read()
-            .withProjectId(PROJECT_ID)
-            .withQuery(Query.newBuilder().build())
-            .withNumQuerySplits(numSplits);
-
-    String assertMessage = "DatastoreIO read should include the '%s' in its primitive display data";
-    Set<DisplayData> displayData = evaluator.displayDataForPrimitiveSourceTransforms(read);
-    assertThat(
-        String.format(assertMessage, "project id"),
-        displayData,
-        hasItem(hasDisplayItem("projectId", PROJECT_ID)));
-    assertThat(
-        String.format(assertMessage, "number of query splits"),
-        displayData,
-        hasItem(hasDisplayItem("numQuerySplits", numSplits)));
-  }
-
-  @Test
-  public void testWriteDisplayData() {
-    Write write = DatastoreIO.v1().write().withProjectId(PROJECT_ID);
-
-    DisplayData displayData = DisplayData.from(write);
-
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-  }
-
-  @Test
-  public void testDeleteEntityDisplayData() {
-    DeleteEntity deleteEntity = DatastoreIO.v1().deleteEntity().withProjectId(PROJECT_ID);
-
-    DisplayData displayData = DisplayData.from(deleteEntity);
-
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-  }
-
-  @Test
-  public void testDeleteKeyDisplayData() {
-    DeleteKey deleteKey = DatastoreIO.v1().deleteKey().withProjectId(PROJECT_ID);
-
-    DisplayData displayData = DisplayData.from(deleteKey);
-
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-  }
-
-  @Test
-  public void testWritePrimitiveDisplayData() {
-    int hintNumWorkers = 10;
-    DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
-    PTransform<PCollection<Entity>, ?> write =
-        DatastoreIO.v1().write().withProjectId("myProject").withHintNumWorkers(hintNumWorkers);
-
-    Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
-    assertThat(
-        "DatastoreIO write should include the project in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("projectId")));
-    assertThat(
-        "DatastoreIO write should include the upsertFn in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("upsertFn")));
-    assertThat(
-        "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
-        displayData,
-        hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
-  }
-
-  @Test
-  public void testWritePrimitiveDisplayDataDisabledThrottler() {
-    DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
-    PTransform<PCollection<Entity>, ?> write =
-        DatastoreIO.v1().write().withProjectId("myProject").withRampupThrottlingDisabled();
-
-    Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
-    assertThat(
-        "DatastoreIO write should include the project in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("projectId")));
-    assertThat(
-        "DatastoreIO write should include the upsertFn in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("upsertFn")));
-    assertThat(
-        "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
-        displayData,
-        not(hasItem(hasDisplayItem("hintNumWorkers"))));
-  }
-
-  @Test
-  public void testDeleteEntityPrimitiveDisplayData() {
-    int hintNumWorkers = 10;
-    DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
-    PTransform<PCollection<Entity>, ?> write =
-        DatastoreIO.v1()
-            .deleteEntity()
-            .withProjectId("myProject")
-            .withHintNumWorkers(hintNumWorkers);
-
-    Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
-    assertThat(
-        "DatastoreIO write should include the project in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("projectId")));
-    assertThat(
-        "DatastoreIO write should include the deleteEntityFn in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("deleteEntityFn")));
-    assertThat(
-        "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
-        displayData,
-        hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
-  }
-
-  @Test
-  public void testDeleteKeyPrimitiveDisplayData() {
-    int hintNumWorkers = 10;
-    DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
-    PTransform<PCollection<Key>, ?> write =
-        DatastoreIO.v1().deleteKey().withProjectId("myProject").withHintNumWorkers(hintNumWorkers);
-
-    Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
-    assertThat(
-        "DatastoreIO write should include the project in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("projectId")));
-    assertThat(
-        "DatastoreIO write should include the deleteKeyFn in its primitive display data",
-        displayData,
-        hasItem(hasDisplayItem("deleteKeyFn")));
-    assertThat(
-        "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
-        displayData,
-        hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
-  }
-
-  /** Test building a Write using builder methods. */
-  @Test
-  public void testBuildWrite() throws Exception {
-    DatastoreV1.Write write = DatastoreIO.v1().write().withProjectId(PROJECT_ID);
-    assertEquals(PROJECT_ID, write.getProjectId());
-  }
-
-  /** Test the detection of complete and incomplete keys. */
-  @Test
-  public void testHasNameOrId() {
-    Key key;
-    // Complete with name, no ancestor
-    key = makeKey("bird", "finch").build();
-    assertTrue(isValidKey(key));
-
-    // Complete with id, no ancestor
-    key = makeKey("bird", 123).build();
-    assertTrue(isValidKey(key));
-
-    // Incomplete, no ancestor
-    key = makeKey("bird").build();
-    assertFalse(isValidKey(key));
-
-    // Complete with name and ancestor
-    key = makeKey("bird", "owl").build();
-    key = makeKey(key, "bird", "horned").build();
-    assertTrue(isValidKey(key));
-
-    // Complete with id and ancestor
-    key = makeKey("bird", "owl").build();
-    key = makeKey(key, "bird", 123).build();
-    assertTrue(isValidKey(key));
-
-    // Incomplete with ancestor
-    key = makeKey("bird", "owl").build();
-    key = makeKey(key, "bird").build();
-    assertFalse(isValidKey(key));
-
-    key = makeKey().build();
-    assertFalse(isValidKey(key));
-  }
-
-  /** Test that entities with incomplete keys cannot be updated. */
-  @Test
-  public void testAddEntitiesWithIncompleteKeys() throws Exception {
-    Key key = makeKey("bird").build();
-    Entity entity = Entity.newBuilder().setKey(key).build();
-    UpsertFn upsertFn = new UpsertFn();
-
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("Entities to be written to the Cloud Datastore must have complete keys");
-
-    upsertFn.apply(entity);
-  }
-
-  @Test
-  /** Test that entities with valid keys are transformed to upsert mutations. */
-  public void testAddEntities() throws Exception {
-    Key key = makeKey("bird", "finch").build();
-    Entity entity = Entity.newBuilder().setKey(key).build();
-    UpsertFn upsertFn = new UpsertFn();
-
-    Mutation expectedMutation = makeUpsert(entity).build();
-    assertEquals(expectedMutation, upsertFn.apply(entity));
-  }
-
-  /** Test that entities with incomplete keys cannot be deleted. */
-  @Test
-  public void testDeleteEntitiesWithIncompleteKeys() throws Exception {
-    Key key = makeKey("bird").build();
-    Entity entity = Entity.newBuilder().setKey(key).build();
-    DeleteEntityFn deleteEntityFn = new DeleteEntityFn();
-
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("Entities to be deleted from the Cloud Datastore must have complete keys");
-
-    deleteEntityFn.apply(entity);
-  }
-
-  /** Test that entities with valid keys are transformed to delete mutations. */
-  @Test
-  public void testDeleteEntities() throws Exception {
-    Key key = makeKey("bird", "finch").build();
-    Entity entity = Entity.newBuilder().setKey(key).build();
-    DeleteEntityFn deleteEntityFn = new DeleteEntityFn();
-
-    Mutation expectedMutation = makeDelete(entity.getKey()).build();
-    assertEquals(expectedMutation, deleteEntityFn.apply(entity));
-  }
-
-  /** Test that incomplete keys cannot be deleted. */
-  @Test
-  public void testDeleteIncompleteKeys() throws Exception {
-    Key key = makeKey("bird").build();
-    DeleteKeyFn deleteKeyFn = new DeleteKeyFn();
-
-    thrown.expect(IllegalArgumentException.class);
-    thrown.expectMessage("Keys to be deleted from the Cloud Datastore must be complete");
-
-    deleteKeyFn.apply(key);
-  }
-
-  /** Test that valid keys are transformed to delete mutations. */
-  @Test
-  public void testDeleteKeys() {
-    Key key = makeKey("bird", "finch").build();
-    DeleteKeyFn deleteKeyFn = new DeleteKeyFn();
-
-    Mutation expectedMutation = makeDelete(key).build();
-    assertEquals(expectedMutation, deleteKeyFn.apply(key));
-  }
-
-  @Test
-  public void testDatastoreWriteFnDisplayData() {
-    DatastoreWriterFn datastoreWriter = new DatastoreWriterFn(PROJECT_ID, null);
-    DisplayData displayData = DisplayData.from(datastoreWriter);
-    assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
-  }
-
-  /** Tests {@link DatastoreWriterFn} with entities less than one batch. */
-  @Test
-  public void testDatatoreWriterFnWithOneBatch() throws Exception {
-    datastoreWriterFnTest(100);
-    verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
-  }
-
-  /** Tests {@link DatastoreWriterFn} with entities of more than one batches, but not a multiple. */
-  @Test
-  public void testDatatoreWriterFnWithMultipleBatches() throws Exception {
-    datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * 3 + 100);
-    verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 5);
-  }
-
-  /**
-   * Tests {@link DatastoreWriterFn} with entities of several batches, using an exact multiple of
-   * write batch size.
-   */
-  @Test
-  public void testDatatoreWriterFnWithBatchesExactMultiple() throws Exception {
-    datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * 2);
-    verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
-  }
-
-  // A helper method to test DatastoreWriterFn for various batch sizes.
-  private void datastoreWriterFnTest(int numMutations) throws Exception {
-    // Create the requested number of mutations.
-    List<Mutation> mutations = new ArrayList<>(numMutations);
-    for (int i = 0; i < numMutations; ++i) {
-      mutations.add(
-          makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
+  @RunWith(JUnit4.class)
+  public static class SingletonTests extends DatastoreV1Test {
+    @Test
+    public void testBuildRead() throws Exception {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withQuery(QUERY)
+              .withNamespace(NAMESPACE);
+      assertEquals(QUERY, read.getQuery());
+      assertEquals(PROJECT_ID, read.getProjectId().get());
+      assertEquals(NAMESPACE, read.getNamespace().get());
     }
 
-    DatastoreWriterFn datastoreWriter =
-        new DatastoreWriterFn(
-            StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
-    DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    doFnTester.processBundle(mutations);
-
-    int start = 0;
-    while (start < numMutations) {
-      int end = Math.min(numMutations, start + DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START);
-      CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
-      commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
-      commitRequest.addAllMutations(mutations.subList(start, end));
-      // Verify all the batch requests were made with the expected mutations.
-      verify(mockDatastore, times(1)).commit(commitRequest.build());
-      start = end;
-    }
-  }
-
-  /**
-   * Tests {@link DatastoreWriterFn} with large entities that need to be split into more batches.
-   */
-  @Test
-  public void testDatatoreWriterFnWithLargeEntities() throws Exception {
-    List<Mutation> mutations = new ArrayList<>();
-    int entitySize = 0;
-    for (int i = 0; i < 12; ++i) {
-      Entity entity =
-          Entity.newBuilder()
-              .setKey(makeKey("key" + i, i + 1))
-              .putProperties(
-                  "long",
-                  makeValue(new String(new char[900_000])).setExcludeFromIndexes(true).build())
-              .build();
-      entitySize = entity.getSerializedSize(); // Take the size of any one entity.
-      mutations.add(makeUpsert(entity).build());
+    @Test
+    public void testBuildReadWithReadTime() throws Exception {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withQuery(QUERY)
+              .withReadTime(TIMESTAMP);
+      assertEquals(TIMESTAMP, read.getReadTime());
+      assertEquals(QUERY, read.getQuery());
+      assertEquals(PROJECT_ID, read.getProjectId().get());
     }
 
-    DatastoreWriterFn datastoreWriter =
-        new DatastoreWriterFn(
-            StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
-    DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    doFnTester.processBundle(mutations);
-
-    // This test is over-specific currently; it requires that we split the 12 entity writes into 3
-    // requests, but we only need each CommitRequest to be less than 10MB in size.
-    int entitiesPerRpc = DATASTORE_BATCH_UPDATE_BYTES_LIMIT / entitySize;
-    int start = 0;
-    while (start < mutations.size()) {
-      int end = Math.min(mutations.size(), start + entitiesPerRpc);
-      CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
-      commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
-      commitRequest.addAllMutations(mutations.subList(start, end));
-      // Verify all the batch requests were made with the expected mutations.
-      verify(mockDatastore).commit(commitRequest.build());
-      start = end;
-    }
-  }
-
-  /** Tests {@link DatastoreWriterFn} with a failed request which is retried. */
-  @Test
-  public void testDatatoreWriterFnRetriesErrors() throws Exception {
-    List<Mutation> mutations = new ArrayList<>();
-    int numRpcs = 2;
-    for (int i = 0; i < DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * numRpcs; ++i) {
-      mutations.add(
-          makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
+    @Test
+    public void testBuildReadWithGqlQuery() throws Exception {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withLiteralGqlQuery(GQL_QUERY)
+              .withNamespace(NAMESPACE);
+      assertEquals(GQL_QUERY, read.getLiteralGqlQuery().get());
+      assertEquals(PROJECT_ID, read.getProjectId().get());
+      assertEquals(NAMESPACE, read.getNamespace().get());
     }
 
-    CommitResponse successfulCommit = CommitResponse.getDefaultInstance();
-    when(mockDatastore.commit(any(CommitRequest.class)))
-        .thenReturn(successfulCommit)
-        .thenThrow(new DatastoreException("commit", Code.DEADLINE_EXCEEDED, "", null))
-        .thenReturn(successfulCommit);
+    /** {@link #testBuildRead} but constructed in a different order. */
+    @Test
+    public void testBuildReadAlt() throws Exception {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withReadTime(TIMESTAMP)
+              .withQuery(QUERY)
+              .withNamespace(NAMESPACE)
+              .withProjectId(PROJECT_ID)
+              .withLocalhost(LOCALHOST);
+      assertEquals(TIMESTAMP, read.getReadTime());
+      assertEquals(QUERY, read.getQuery());
+      assertEquals(PROJECT_ID, read.getProjectId().get());
+      assertEquals(NAMESPACE, read.getNamespace().get());
+      assertEquals(LOCALHOST, read.getLocalhost());
+    }
 
-    DatastoreWriterFn datastoreWriter =
-        new DatastoreWriterFn(
-            StaticValueProvider.of(PROJECT_ID), null, mockDatastoreFactory, new FakeWriteBatcher());
-    DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    doFnTester.processBundle(mutations);
-    verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
-    verifyMetricWasSet("BatchDatastoreWrite", "unknown", "", 1);
-  }
+    @Test
+    public void testReadValidationFailsQueryAndGqlQuery() throws Exception {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withLiteralGqlQuery(GQL_QUERY)
+              .withQuery(QUERY);
 
-  /**
-   * Tests {@link DatastoreV1.Read#getEstimatedSizeBytes} to fetch and return estimated size for a
-   * query.
-   */
-  @Test
-  public void testEstimatedSizeBytes() throws Exception {
-    long entityBytes = 100L;
-    // In seconds
-    long timestamp = 1234L;
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage("withQuery() and withLiteralGqlQuery() are exclusive");
+      read.expand(null);
+    }
 
-    RunQueryRequest latestTimestampRequest =
-        makeRequest(makeLatestTimestampQuery(NAMESPACE), NAMESPACE);
-    RunQueryResponse latestTimestampResponse = makeLatestTimestampResponse(timestamp);
-    // Per Kind statistics request and response
-    RunQueryRequest statRequest = makeRequest(makeStatKindQuery(NAMESPACE, timestamp), NAMESPACE);
-    RunQueryResponse statResponse = makeStatKindResponse(entityBytes);
+    @Test
+    public void testReadValidationFailsQueryLimitZero() throws Exception {
+      Query invalidLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(0)).build();
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage("Invalid query limit 0: must be positive");
 
-    when(mockDatastore.runQuery(latestTimestampRequest)).thenReturn(latestTimestampResponse);
-    when(mockDatastore.runQuery(statRequest)).thenReturn(statResponse);
+      DatastoreIO.v1().read().withQuery(invalidLimit);
+    }
 
-    assertEquals(entityBytes, getEstimatedSizeBytes(mockDatastore, QUERY, NAMESPACE));
-    verify(mockDatastore, times(1)).runQuery(latestTimestampRequest);
-    verify(mockDatastore, times(1)).runQuery(statRequest);
-  }
+    @Test
+    public void testReadValidationFailsQueryLimitNegative() throws Exception {
+      Query invalidLimit =
+          Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(-5)).build();
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage("Invalid query limit -5: must be positive");
 
-  /** Tests {@link SplitQueryFn} when number of query splits is specified. */
-  @Test
-  public void testSplitQueryFnWithNumSplits() throws Exception {
-    int numSplits = 100;
-    when(mockQuerySplitter.getSplits(
-            eq(QUERY), any(PartitionId.class), eq(numSplits), any(Datastore.class)))
-        .thenReturn(splitQuery(QUERY, numSplits));
+      DatastoreIO.v1().read().withQuery(invalidLimit);
+    }
 
-    SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, numSplits, mockDatastoreFactory);
-    DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
+    @Test
+    public void testReadDisplayData() {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withQuery(QUERY)
+              .withNamespace(NAMESPACE)
+              .withReadTime(TIMESTAMP);
+
+      DisplayData displayData = DisplayData.from(read);
+
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+      assertThat(displayData, hasDisplayItem("query", QUERY.toString()));
+      assertThat(displayData, hasDisplayItem("namespace", NAMESPACE));
+      assertThat(displayData, hasDisplayItem("readTime", TIMESTAMP));
+    }
+
+    @Test
+    public void testReadDisplayDataWithGqlQuery() {
+      DatastoreV1.Read read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withLiteralGqlQuery(GQL_QUERY)
+              .withNamespace(NAMESPACE)
+              .withReadTime(TIMESTAMP);
+
+      DisplayData displayData = DisplayData.from(read);
+
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+      assertThat(displayData, hasDisplayItem("gqlQuery", GQL_QUERY));
+      assertThat(displayData, hasDisplayItem("namespace", NAMESPACE));
+      assertThat(displayData, hasDisplayItem("readTime", TIMESTAMP));
+    }
+
+    @Test
+    public void testSourcePrimitiveDisplayData() {
+      DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
+      int numSplits = 98;
+      PTransform<PBegin, PCollection<Entity>> read =
+          DatastoreIO.v1()
+              .read()
+              .withProjectId(PROJECT_ID)
+              .withQuery(Query.newBuilder().build())
+              .withNumQuerySplits(numSplits);
+
+      String assertMessage =
+          "DatastoreIO read should include the '%s' in its primitive display data";
+      Set<DisplayData> displayData = evaluator.displayDataForPrimitiveSourceTransforms(read);
+      assertThat(
+          String.format(assertMessage, "project id"),
+          displayData,
+          hasItem(hasDisplayItem("projectId", PROJECT_ID)));
+      assertThat(
+          String.format(assertMessage, "number of query splits"),
+          displayData,
+          hasItem(hasDisplayItem("numQuerySplits", numSplits)));
+    }
+
+    @Test
+    public void testWriteDisplayData() {
+      Write write = DatastoreIO.v1().write().withProjectId(PROJECT_ID);
+
+      DisplayData displayData = DisplayData.from(write);
+
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+    }
+
+    @Test
+    public void testDeleteEntityDisplayData() {
+      DeleteEntity deleteEntity = DatastoreIO.v1().deleteEntity().withProjectId(PROJECT_ID);
+
+      DisplayData displayData = DisplayData.from(deleteEntity);
+
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+    }
+
+    @Test
+    public void testDeleteKeyDisplayData() {
+      DeleteKey deleteKey = DatastoreIO.v1().deleteKey().withProjectId(PROJECT_ID);
+
+      DisplayData displayData = DisplayData.from(deleteKey);
+
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+    }
+
+    @Test
+    public void testWritePrimitiveDisplayData() {
+      int hintNumWorkers = 10;
+      DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
+      PTransform<PCollection<Entity>, ?> write =
+          DatastoreIO.v1().write().withProjectId("myProject").withHintNumWorkers(hintNumWorkers);
+
+      Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
+      assertThat(
+          "DatastoreIO write should include the project in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("projectId")));
+      assertThat(
+          "DatastoreIO write should include the upsertFn in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("upsertFn")));
+      assertThat(
+          "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
+          displayData,
+          hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
+    }
+
+    @Test
+    public void testWritePrimitiveDisplayDataDisabledThrottler() {
+      DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
+      PTransform<PCollection<Entity>, ?> write =
+          DatastoreIO.v1().write().withProjectId("myProject").withRampupThrottlingDisabled();
+
+      Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
+      assertThat(
+          "DatastoreIO write should include the project in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("projectId")));
+      assertThat(
+          "DatastoreIO write should include the upsertFn in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("upsertFn")));
+      assertThat(
+          "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
+          displayData,
+          not(hasItem(hasDisplayItem("hintNumWorkers"))));
+    }
+
+    @Test
+    public void testDeleteEntityPrimitiveDisplayData() {
+      int hintNumWorkers = 10;
+      DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
+      PTransform<PCollection<Entity>, ?> write =
+          DatastoreIO.v1()
+              .deleteEntity()
+              .withProjectId("myProject")
+              .withHintNumWorkers(hintNumWorkers);
+
+      Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
+      assertThat(
+          "DatastoreIO write should include the project in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("projectId")));
+      assertThat(
+          "DatastoreIO write should include the deleteEntityFn in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("deleteEntityFn")));
+      assertThat(
+          "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
+          displayData,
+          hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
+    }
+
+    @Test
+    public void testDeleteKeyPrimitiveDisplayData() {
+      int hintNumWorkers = 10;
+      DisplayDataEvaluator evaluator = DisplayDataEvaluator.create();
+      PTransform<PCollection<Key>, ?> write =
+          DatastoreIO.v1()
+              .deleteKey()
+              .withProjectId("myProject")
+              .withHintNumWorkers(hintNumWorkers);
+
+      Set<DisplayData> displayData = evaluator.displayDataForPrimitiveTransforms(write);
+      assertThat(
+          "DatastoreIO write should include the project in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("projectId")));
+      assertThat(
+          "DatastoreIO write should include the deleteKeyFn in its primitive display data",
+          displayData,
+          hasItem(hasDisplayItem("deleteKeyFn")));
+      assertThat(
+          "DatastoreIO write should include ramp-up throttling worker count hint if enabled",
+          displayData,
+          hasItem(hasDisplayItem("hintNumWorkers", hintNumWorkers)));
+    }
+
+    /** Test building a Write using builder methods. */
+    @Test
+    public void testBuildWrite() throws Exception {
+      DatastoreV1.Write write = DatastoreIO.v1().write().withProjectId(PROJECT_ID);
+      assertEquals(PROJECT_ID, write.getProjectId());
+    }
+
+    /** Test the detection of complete and incomplete keys. */
+    @Test
+    public void testHasNameOrId() {
+      Key key;
+      // Complete with name, no ancestor
+      key = makeKey("bird", "finch").build();
+      assertTrue(isValidKey(key));
+
+      // Complete with id, no ancestor
+      key = makeKey("bird", 123).build();
+      assertTrue(isValidKey(key));
+
+      // Incomplete, no ancestor
+      key = makeKey("bird").build();
+      assertFalse(isValidKey(key));
+
+      // Complete with name and ancestor
+      key = makeKey("bird", "owl").build();
+      key = makeKey(key, "bird", "horned").build();
+      assertTrue(isValidKey(key));
+
+      // Complete with id and ancestor
+      key = makeKey("bird", "owl").build();
+      key = makeKey(key, "bird", 123).build();
+      assertTrue(isValidKey(key));
+
+      // Incomplete with ancestor
+      key = makeKey("bird", "owl").build();
+      key = makeKey(key, "bird").build();
+      assertFalse(isValidKey(key));
+
+      key = makeKey().build();
+      assertFalse(isValidKey(key));
+    }
+
+    /** Test that entities with incomplete keys cannot be updated. */
+    @Test
+    public void testAddEntitiesWithIncompleteKeys() throws Exception {
+      Key key = makeKey("bird").build();
+      Entity entity = Entity.newBuilder().setKey(key).build();
+      UpsertFn upsertFn = new UpsertFn();
+
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage("Entities to be written to the Cloud Datastore must have complete keys");
+
+      upsertFn.apply(entity);
+    }
+
+    /** Test that entities with valid keys are transformed to upsert mutations. */
+    @Test
+    public void testAddEntities() throws Exception {
+      Key key = makeKey("bird", "finch").build();
+      Entity entity = Entity.newBuilder().setKey(key).build();
+      UpsertFn upsertFn = new UpsertFn();
+
+      Mutation expectedMutation = makeUpsert(entity).build();
+      assertEquals(expectedMutation, upsertFn.apply(entity));
+    }
+
+    /** Test that entities with incomplete keys cannot be deleted. */
+    @Test
+    public void testDeleteEntitiesWithIncompleteKeys() throws Exception {
+      Key key = makeKey("bird").build();
+      Entity entity = Entity.newBuilder().setKey(key).build();
+      DeleteEntityFn deleteEntityFn = new DeleteEntityFn();
+
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage(
+          "Entities to be deleted from the Cloud Datastore must have complete keys");
+
+      deleteEntityFn.apply(entity);
+    }
+
+    /** Test that entities with valid keys are transformed to delete mutations. */
+    @Test
+    public void testDeleteEntities() throws Exception {
+      Key key = makeKey("bird", "finch").build();
+      Entity entity = Entity.newBuilder().setKey(key).build();
+      DeleteEntityFn deleteEntityFn = new DeleteEntityFn();
+
+      Mutation expectedMutation = makeDelete(entity.getKey()).build();
+      assertEquals(expectedMutation, deleteEntityFn.apply(entity));
+    }
+
+    /** Test that incomplete keys cannot be deleted. */
+    @Test
+    public void testDeleteIncompleteKeys() throws Exception {
+      Key key = makeKey("bird").build();
+      DeleteKeyFn deleteKeyFn = new DeleteKeyFn();
+
+      thrown.expect(IllegalArgumentException.class);
+      thrown.expectMessage("Keys to be deleted from the Cloud Datastore must be complete");
+
+      deleteKeyFn.apply(key);
+    }
+
+    /** Test that valid keys are transformed to delete mutations. */
+    @Test
+    public void testDeleteKeys() {
+      Key key = makeKey("bird", "finch").build();
+      DeleteKeyFn deleteKeyFn = new DeleteKeyFn();
+
+      Mutation expectedMutation = makeDelete(key).build();
+      assertEquals(expectedMutation, deleteKeyFn.apply(key));
+    }
+
+    @Test
+    public void testDatastoreWriteFnDisplayData() {
+      DatastoreWriterFn datastoreWriter = new DatastoreWriterFn(PROJECT_ID, null);
+      DisplayData displayData = DisplayData.from(datastoreWriter);
+      assertThat(displayData, hasDisplayItem("projectId", PROJECT_ID));
+    }
+
+    /** Tests {@link DatastoreWriterFn} with fewer entities than a single write batch. */
+    @Test
+    public void testDatastoreWriterFnWithOneBatch() throws Exception {
+      datastoreWriterFnTest(100);
+      verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
+    }
+
     /**
-     * Although Datastore client is marked transient in {@link SplitQueryFn}, when injected through
-     * mock factory using a when clause for unit testing purposes, it is not serializable because it
-     * doesn't have a no-arg constructor. Thus disabling the cloning to prevent the doFn from being
-     * serialized.
+     * Tests {@link DatastoreWriterFn} with entities spanning more than one batch, but not an
+     * exact multiple of the batch size.
      */
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    List<Query> queries = doFnTester.processBundle(QUERY);
-
-    assertEquals(queries.size(), numSplits);
-
-    // Confirms that sub-queries are not equal to original when there is more than one split.
-    for (Query subQuery : queries) {
-      assertNotEquals(subQuery, QUERY);
+    @Test
+    public void testDatastoreWriterFnWithMultipleBatches() throws Exception {
+      datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * 3 + 100);
+      verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 5);
     }
 
-    verify(mockQuerySplitter, times(1))
-        .getSplits(eq(QUERY), any(PartitionId.class), eq(numSplits), any(Datastore.class));
-    verifyZeroInteractions(mockDatastore);
+    /**
+     * Tests {@link DatastoreWriterFn} with entities spanning several batches, using an exact
+     * multiple of the write batch size.
+     */
+    @Test
+    public void testDatastoreWriterFnWithBatchesExactMultiple() throws Exception {
+      datastoreWriterFnTest(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * 2);
+      verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
+    }
+
+    // A helper method to test DatastoreWriterFn for various batch sizes.
+    private void datastoreWriterFnTest(int numMutations) throws Exception {
+      // Create the requested number of mutations.
+      List<Mutation> mutations = new ArrayList<>(numMutations);
+      for (int i = 0; i < numMutations; ++i) {
+        mutations.add(
+            makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
+      }
+
+      DatastoreWriterFn datastoreWriter =
+          new DatastoreWriterFn(
+              StaticValueProvider.of(PROJECT_ID),
+              null,
+              mockDatastoreFactory,
+              new FakeWriteBatcher());
+      DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      doFnTester.processBundle(mutations);
+
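+      // Reconstruct the expected commit requests batch by batch.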
+      int start = 0;
+      while (start < numMutations) {
+        int end = Math.min(numMutations, start + DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START);
+        CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
+        commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
+        commitRequest.addAllMutations(mutations.subList(start, end));
+        // Verify all the batch requests were made with the expected mutations.
+        verify(mockDatastore, times(1)).commit(commitRequest.build());
+        start = end;
+      }
+    }
+
+    /**
+     * Tests {@link DatastoreWriterFn} with large entities that need to be split across multiple
+     * batches.
+     */
+    @Test
+    public void testDatastoreWriterFnWithLargeEntities() throws Exception {
+      List<Mutation> mutations = new ArrayList<>();
+      int entitySize = 0;
+      for (int i = 0; i < 12; ++i) {
+        Entity entity =
+            Entity.newBuilder()
+                .setKey(makeKey("key" + i, i + 1))
+                .putProperties(
+                    "long",
+                    makeValue(new String(new char[900_000])).setExcludeFromIndexes(true).build())
+                .build();
+        entitySize = entity.getSerializedSize(); // Take the size of any one entity.
+        mutations.add(makeUpsert(entity).build());
+      }
+
+      DatastoreWriterFn datastoreWriter =
+          new DatastoreWriterFn(
+              StaticValueProvider.of(PROJECT_ID),
+              null,
+              mockDatastoreFactory,
+              new FakeWriteBatcher());
+      DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      doFnTester.processBundle(mutations);
+
+      // This test is over-specific currently; it requires that we split the 12 entity writes into 3
+      // requests, but we only need each CommitRequest to be less than 10MB in size.
+      int entitiesPerRpc = DATASTORE_BATCH_UPDATE_BYTES_LIMIT / entitySize;
+      int start = 0;
+      while (start < mutations.size()) {
+        int end = Math.min(mutations.size(), start + entitiesPerRpc);
+        CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
+        commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
+        commitRequest.addAllMutations(mutations.subList(start, end));
+        // Verify all the batch requests were made with the expected mutations.
+        verify(mockDatastore).commit(commitRequest.build());
+        start = end;
+      }
+    }
+
+    /** Tests {@link DatastoreWriterFn} with a failed request that is retried. */
+    @Test
+    public void testDatastoreWriterFnRetriesErrors() throws Exception {
+      List<Mutation> mutations = new ArrayList<>();
+      int numRpcs = 2;
+      for (int i = 0; i < DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START * numRpcs; ++i) {
+        mutations.add(
+            makeUpsert(Entity.newBuilder().setKey(makeKey("key" + i, i + 1)).build()).build());
+      }
+
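+      // Stub the commit call: the first batch succeeds, the second fails with a retryable
+      // DEADLINE_EXCEEDED, and the retried batch then succeeds.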
+      CommitResponse successfulCommit = CommitResponse.getDefaultInstance();
+      when(mockDatastore.commit(any(CommitRequest.class)))
+          .thenReturn(successfulCommit)
+          .thenThrow(new DatastoreException("commit", Code.DEADLINE_EXCEEDED, "", null))
+          .thenReturn(successfulCommit);
+
+      DatastoreWriterFn datastoreWriter =
+          new DatastoreWriterFn(
+              StaticValueProvider.of(PROJECT_ID),
+              null,
+              mockDatastoreFactory,
+              new FakeWriteBatcher());
+      DoFnTester<Mutation, Void> doFnTester = DoFnTester.of(datastoreWriter);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      doFnTester.processBundle(mutations);
+      verifyMetricWasSet("BatchDatastoreWrite", "ok", "", 2);
+      verifyMetricWasSet("BatchDatastoreWrite", "unknown", "", 1);
+    }
+
+    /** Test options. */
+    public interface RuntimeTestOptions extends PipelineOptions {
+      ValueProvider<String> getDatastoreProject();
+
+      void setDatastoreProject(ValueProvider<String> value);
+
+      ValueProvider<String> getGqlQuery();
+
+      void setGqlQuery(ValueProvider<String> value);
+
+      ValueProvider<String> getNamespace();
+
+      void setNamespace(ValueProvider<String> value);
+    }
+
+    /**
+     * Test to ensure that {@link ValueProvider} values are not accessed at pipeline construction
+     * time when built with {@link DatastoreV1.Read#withQuery(Query)}.
+     */
+    @Test
+    public void testRuntimeOptionsNotCalledInApplyQuery() {
+      RuntimeTestOptions options = PipelineOptionsFactory.as(RuntimeTestOptions.class);
+      Pipeline pipeline = TestPipeline.create(options);
+      pipeline
+          .apply(
+              DatastoreIO.v1()
+                  .read()
+                  .withProjectId(options.getDatastoreProject())
+                  .withQuery(QUERY)
+                  .withNamespace(options.getNamespace()))
+          .apply(DatastoreIO.v1().write().withProjectId(options.getDatastoreProject()));
+    }
+
+    /**
+     * Test to ensure that {@link ValueProvider} values are not accessed at pipeline construction
+     * time when built with {@link DatastoreV1.Read#withLiteralGqlQuery(String)}.
+     */
+    @Test
+    public void testRuntimeOptionsNotCalledInApplyGqlQuery() {
+      RuntimeTestOptions options = PipelineOptionsFactory.as(RuntimeTestOptions.class);
+      Pipeline pipeline = TestPipeline.create(options);
+      pipeline
+          .apply(
+              DatastoreIO.v1()
+                  .read()
+                  .withProjectId(options.getDatastoreProject())
+                  .withLiteralGqlQuery(options.getGqlQuery()))
+          .apply(DatastoreIO.v1().write().withProjectId(options.getDatastoreProject()));
+    }
+
+    @Test
+    public void testWriteBatcherWithoutData() {
+      DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
+      writeBatcher.start();
+      assertEquals(
+          DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START, writeBatcher.nextBatchSize(0));
+    }
+
+    @Test
+    public void testWriteBatcherFastQueries() {
+      DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
+      writeBatcher.start();
+      writeBatcher.addRequestLatency(0, 1000, 200);
+      writeBatcher.addRequestLatency(0, 1000, 200);
+      assertEquals(
+          DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_LIMIT, writeBatcher.nextBatchSize(0));
+    }
+
+    @Test
+    public void testWriteBatcherSlowQueries() {
+      DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
+      writeBatcher.start();
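+      // Two slow samples (10 s of latency for 200 writes each) should shrink the next batch size
+      // well below the starting value.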
+      writeBatcher.addRequestLatency(0, 10000, 200);
+      writeBatcher.addRequestLatency(0, 10000, 200);
+      assertEquals(120, writeBatcher.nextBatchSize(0));
+    }
+
+    @Test
+    public void testWriteBatcherSizeNotBelowMinimum() {
+      DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
+      writeBatcher.start();
+      writeBatcher.addRequestLatency(0, 75000, 50);
+      writeBatcher.addRequestLatency(0, 75000, 50);
+      assertEquals(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_MIN, writeBatcher.nextBatchSize(0));
+    }
+
+    @Test
+    public void testWriteBatcherSlidingWindow() {
+      DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
+      writeBatcher.start();
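+      // By t=150 s the slow sample recorded at t=0 has aged out of the sliding window, so only
+      // the two faster samples determine the next batch size.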
+      writeBatcher.addRequestLatency(0, 30000, 50);
+      writeBatcher.addRequestLatency(50000, 8000, 200);
+      writeBatcher.addRequestLatency(100000, 8000, 200);
+      assertEquals(150, writeBatcher.nextBatchSize(150000));
+    }
   }
 
-  /** Tests {@link SplitQueryFn} when no query splits is specified. */
-  @Test
-  public void testSplitQueryFnWithoutNumSplits() throws Exception {
-    // Force SplitQueryFn to compute the number of query splits
-    int numSplits = 0;
-    int expectedNumSplits = 20;
-    long entityBytes = expectedNumSplits * DEFAULT_BUNDLE_SIZE_BYTES;
-    // In seconds
-    long timestamp = 1234L;
+  @RunWith(Parameterized.class)
+  public static class ParameterizedTests extends DatastoreV1Test {
+    @Parameter(0)
+    public Instant readTime;
 
-    RunQueryRequest latestTimestampRequest =
-        makeRequest(makeLatestTimestampQuery(NAMESPACE), NAMESPACE);
-    RunQueryResponse latestTimestampResponse = makeLatestTimestampResponse(timestamp);
+    @Parameter(1)
+    public Timestamp readTimeProto;
 
-    // Per Kind statistics request and response
-    RunQueryRequest statRequest = makeRequest(makeStatKindQuery(NAMESPACE, timestamp), NAMESPACE);
-    RunQueryResponse statResponse = makeStatKindResponse(entityBytes);
+    @Parameters(name = "readTime = {0}, readTimeProto = {1}")
+    public static Collection<Object[]> data() {
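+      // Every test in this class runs twice: once with no read time and once with the fixed
+      // TIMESTAMP / TIMESTAMP_PROTO pair.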
+      return Arrays.asList(new Object[] {null, null}, new Object[] {TIMESTAMP, TIMESTAMP_PROTO});
+    }
 
-    when(mockDatastore.runQuery(latestTimestampRequest)).thenReturn(latestTimestampResponse);
-    when(mockDatastore.runQuery(statRequest)).thenReturn(statResponse);
-    when(mockQuerySplitter.getSplits(
-            eq(QUERY), any(PartitionId.class), eq(expectedNumSplits), any(Datastore.class)))
-        .thenReturn(splitQuery(QUERY, expectedNumSplits));
+    /**
+     * Tests that {@link DatastoreV1.Read#getEstimatedSizeBytes} fetches and returns the estimated
+     * size for a query.
+     */
+    @Test
+    public void testEstimatedSizeBytes() throws Exception {
+      long entityBytes = 100L;
+      // In seconds
+      long timestamp = 1234L;
 
-    SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, numSplits, mockDatastoreFactory);
-    DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    List<Query> queries = doFnTester.processBundle(QUERY);
+      RunQueryRequest latestTimestampRequest =
+          makeRequest(makeLatestTimestampQuery(NAMESPACE), NAMESPACE, readTime);
+      RunQueryResponse latestTimestampResponse = makeLatestTimestampResponse(timestamp);
+      // Per Kind statistics request and response
+      RunQueryRequest statRequest =
+          makeRequest(makeStatKindQuery(NAMESPACE, timestamp), NAMESPACE, readTime);
+      RunQueryResponse statResponse = makeStatKindResponse(entityBytes);
 
-    assertEquals(expectedNumSplits, queries.size());
-    verify(mockQuerySplitter, times(1))
-        .getSplits(eq(QUERY), any(PartitionId.class), eq(expectedNumSplits), any(Datastore.class));
-    verify(mockDatastore, times(1)).runQuery(latestTimestampRequest);
-    verify(mockDatastore, times(1)).runQuery(statRequest);
-  }
+      when(mockDatastore.runQuery(latestTimestampRequest)).thenReturn(latestTimestampResponse);
+      when(mockDatastore.runQuery(statRequest)).thenReturn(statResponse);
 
-  /** Tests {@link DatastoreV1.Read.SplitQueryFn} when the query has a user specified limit. */
-  @Test
-  public void testSplitQueryFnWithQueryLimit() throws Exception {
-    Query queryWithLimit = QUERY.toBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();
+      assertEquals(entityBytes, getEstimatedSizeBytes(mockDatastore, QUERY, NAMESPACE, readTime));
+      verify(mockDatastore, times(1)).runQuery(latestTimestampRequest);
+      verify(mockDatastore, times(1)).runQuery(statRequest);
+    }
 
-    SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, 10, mockDatastoreFactory);
-    DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    List<Query> queries = doFnTester.processBundle(queryWithLimit);
+    /** Tests {@link SplitQueryFn} when the number of query splits is specified. */
+    @Test
+    public void testSplitQueryFnWithNumSplits() throws Exception {
+      int numSplits = 100;
 
-    assertEquals(1, queries.size());
-    verifyNoMoreInteractions(mockDatastore);
-    verifyNoMoreInteractions(mockQuerySplitter);
-  }
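+      // Stub both query-splitter overloads: the one without a read time and the one that also
+      // takes the read-time proto; which overload is used depends on whether readTime is set.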
+      when(mockQuerySplitter.getSplits(
+              eq(QUERY), any(PartitionId.class), eq(numSplits), any(Datastore.class)))
+          .thenReturn(splitQuery(QUERY, numSplits));
+      when(mockQuerySplitter.getSplits(
+              eq(QUERY),
+              any(PartitionId.class),
+              eq(numSplits),
+              any(Datastore.class),
+              eq(readTimeProto)))
+          .thenReturn(splitQuery(QUERY, numSplits));
 
-  /** Tests {@link ReadFn} with a query limit less than one batch. */
-  @Test
-  public void testReadFnWithOneBatch() throws Exception {
-    readFnTest(5);
-    verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 1);
-  }
+      SplitQueryFn splitQueryFn =
+          new SplitQueryFn(V_1_OPTIONS, numSplits, readTime, mockDatastoreFactory);
+      DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
+      /**
+       * Although Datastore client is marked transient in {@link SplitQueryFn}, when injected
+       * through mock factory using a when clause for unit testing purposes, it is not serializable
+       * because it doesn't have a no-arg constructor. Thus disabling the cloning to prevent the
+       * doFn from being serialized.
+       */
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      List<Query> queries = doFnTester.processBundle(QUERY);
 
-  /** Tests {@link ReadFn} with a query limit more than one batch, and not a multiple. */
-  @Test
-  public void testReadFnWithMultipleBatches() throws Exception {
-    readFnTest(QUERY_BATCH_LIMIT + 5);
-    verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 2);
-  }
+      assertEquals(queries.size(), numSplits);
 
-  /** Tests {@link ReadFn} for several batches, using an exact multiple of batch size results. */
-  @Test
-  public void testReadFnWithBatchesExactMultiple() throws Exception {
-    readFnTest(5 * QUERY_BATCH_LIMIT);
-    verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 5);
-  }
+      // Confirms that sub-queries are not equal to original when there is more than one split.
+      for (Query subQuery : queries) {
+        assertNotEquals(subQuery, QUERY);
+      }
 
-  /** Tests that {@link ReadFn} retries after an error. */
-  @Test
-  public void testReadFnRetriesErrors() throws Exception {
-    // An empty query to read entities.
-    Query query = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();
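+      // Verify the splitter overload used matches whether a read time was supplied.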
+      if (readTime == null) {
+        verify(mockQuerySplitter, times(1))
+            .getSplits(eq(QUERY), any(PartitionId.class), eq(numSplits), any(Datastore.class));
+      } else {
+        verify(mockQuerySplitter, times(1))
+            .getSplits(
+                eq(QUERY),
+                any(PartitionId.class),
+                eq(numSplits),
+                any(Datastore.class),
+                eq(readTimeProto));
+      }
+      verifyZeroInteractions(mockDatastore);
+    }
 
-    // Use mockResponseForQuery to generate results.
-    when(mockDatastore.runQuery(any(RunQueryRequest.class)))
-        .thenThrow(new DatastoreException("RunQuery", Code.DEADLINE_EXCEEDED, "", null))
-        .thenAnswer(
-            invocationOnMock -> {
-              Query q = ((RunQueryRequest) invocationOnMock.getArguments()[0]).getQuery();
-              return mockResponseForQuery(q);
-            });
+    /** Tests {@link SplitQueryFn} when the number of query splits is not specified. */
+    @Test
+    public void testSplitQueryFnWithoutNumSplits() throws Exception {
+      // Force SplitQueryFn to compute the number of query splits
+      int numSplits = 0;
+      int expectedNumSplits = 20;
+      long entityBytes = expectedNumSplits * DEFAULT_BUNDLE_SIZE_BYTES;
+      // In seconds
+      long timestamp = 1234L;
 
-    ReadFn readFn = new ReadFn(V_1_OPTIONS, mockDatastoreFactory);
-    DoFnTester<Query, Entity> doFnTester = DoFnTester.of(readFn);
-    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
-    doFnTester.processBundle(query);
-    verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 1);
-    verifyMetricWasSet("BatchDatastoreRead", "unknown", NAMESPACE, 1);
-  }
+      RunQueryRequest latestTimestampRequest =
+          makeRequest(makeLatestTimestampQuery(NAMESPACE), NAMESPACE, readTime);
+      RunQueryResponse latestTimestampResponse = makeLatestTimestampResponse(timestamp);
 
-  @Test
-  public void testTranslateGqlQueryWithLimit() throws Exception {
-    String gql = "SELECT * from DummyKind LIMIT 10";
-    String gqlWithZeroLimit = gql + " LIMIT 0";
-    GqlQuery gqlQuery = GqlQuery.newBuilder().setQueryString(gql).setAllowLiterals(true).build();
-    GqlQuery gqlQueryWithZeroLimit =
-        GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
-    RunQueryRequest gqlRequest = makeRequest(gqlQuery, V_1_OPTIONS.getNamespace());
-    RunQueryRequest gqlRequestWithZeroLimit =
-        makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace());
-    when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
-        .thenThrow(
-            new DatastoreException(
-                "runQuery",
-                Code.INVALID_ARGUMENT,
-                "invalid query",
-                // dummy
-                new RuntimeException()));
-    when(mockDatastore.runQuery(gqlRequest))
-        .thenReturn(RunQueryResponse.newBuilder().setQuery(QUERY).build());
-    assertEquals(
-        translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace()), QUERY);
-    verify(mockDatastore, times(1)).runQuery(gqlRequest);
-    verify(mockDatastore, times(1)).runQuery(gqlRequestWithZeroLimit);
-  }
+      // Per Kind statistics request and response
+      RunQueryRequest statRequest =
+          makeRequest(makeStatKindQuery(NAMESPACE, timestamp), NAMESPACE, readTime);
+      RunQueryResponse statResponse = makeStatKindResponse(entityBytes);
 
-  @Test
-  public void testTranslateGqlQueryWithNoLimit() throws Exception {
-    String gql = "SELECT * from DummyKind";
-    String gqlWithZeroLimit = gql + " LIMIT 0";
-    GqlQuery gqlQueryWithZeroLimit =
-        GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
-    RunQueryRequest gqlRequestWithZeroLimit =
-        makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace());
-    when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
-        .thenReturn(RunQueryResponse.newBuilder().setQuery(QUERY).build());
-    assertEquals(
-        translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace()), QUERY);
-    verify(mockDatastore, times(1)).runQuery(gqlRequestWithZeroLimit);
-  }
+      when(mockDatastore.runQuery(latestTimestampRequest)).thenReturn(latestTimestampResponse);
+      when(mockDatastore.runQuery(statRequest)).thenReturn(statResponse);
+      when(mockQuerySplitter.getSplits(
+              eq(QUERY), any(PartitionId.class), eq(expectedNumSplits), any(Datastore.class)))
+          .thenReturn(splitQuery(QUERY, expectedNumSplits));
+      when(mockQuerySplitter.getSplits(
+              eq(QUERY),
+              any(PartitionId.class),
+              eq(expectedNumSplits),
+              any(Datastore.class),
+              eq(readTimeProto)))
+          .thenReturn(splitQuery(QUERY, expectedNumSplits));
 
-  @Test
-  public void testTranslateGqlQueryWithException() throws Exception {
-    String gql = "SELECT * from DummyKind";
-    String gqlWithZeroLimit = gql + " LIMIT 0";
-    GqlQuery gqlQueryWithZeroLimit =
-        GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
-    RunQueryRequest gqlRequestWithZeroLimit =
-        makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace());
-    when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
-        .thenThrow(new RuntimeException("TestException"));
+      SplitQueryFn splitQueryFn =
+          new SplitQueryFn(V_1_OPTIONS, numSplits, readTime, mockDatastoreFactory);
+      DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      List<Query> queries = doFnTester.processBundle(QUERY);
 
-    thrown.expect(RuntimeException.class);
-    thrown.expectMessage("TestException");
-    translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace());
-  }
+      assertEquals(expectedNumSplits, queries.size());
+      if (readTime == null) {
+        verify(mockQuerySplitter, times(1))
+            .getSplits(
+                eq(QUERY), any(PartitionId.class), eq(expectedNumSplits), any(Datastore.class));
+      } else {
+        verify(mockQuerySplitter, times(1))
+            .getSplits(
+                eq(QUERY),
+                any(PartitionId.class),
+                eq(expectedNumSplits),
+                any(Datastore.class),
+                eq(readTimeProto));
+      }
+      verify(mockDatastore, times(1)).runQuery(latestTimestampRequest);
+      verify(mockDatastore, times(1)).runQuery(statRequest);
+    }
 
-  /** Test options. * */
-  public interface RuntimeTestOptions extends PipelineOptions {
-    ValueProvider<String> getDatastoreProject();
+    /** Tests {@link DatastoreV1.Read.SplitQueryFn} when the query has a user-specified limit. */
+    @Test
+    public void testSplitQueryFnWithQueryLimit() throws Exception {
+      Query queryWithLimit =
+          QUERY.toBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();
 
-    void setDatastoreProject(ValueProvider<String> value);
+      SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, 10, readTime, mockDatastoreFactory);
+      DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      List<Query> queries = doFnTester.processBundle(queryWithLimit);
 
-    ValueProvider<String> getGqlQuery();
+      assertEquals(1, queries.size());
+      verifyNoMoreInteractions(mockDatastore);
+      verifyNoMoreInteractions(mockQuerySplitter);
+    }
 
-    void setGqlQuery(ValueProvider<String> value);
+    /** Tests {@link ReadFn} with a query limit smaller than one read batch. */
+    @Test
+    public void testReadFnWithOneBatch() throws Exception {
+      readFnTest(5, readTime);
+      verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 1);
+    }
 
-    ValueProvider<String> getNamespace();
+    /** Tests {@link ReadFn} with a query limit larger than one batch, not an exact multiple. */
+    @Test
+    public void testReadFnWithMultipleBatches() throws Exception {
+      readFnTest(QUERY_BATCH_LIMIT + 5, readTime);
+      verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 2);
+    }
 
-    void setNamespace(ValueProvider<String> value);
-  }
+    /** Tests {@link ReadFn} with a result count that is an exact multiple of the batch size. */
+    @Test
+    public void testReadFnWithBatchesExactMultiple() throws Exception {
+      readFnTest(5 * QUERY_BATCH_LIMIT, readTime);
+      verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 5);
+    }
 
-  /**
-   * Test to ensure that {@link ValueProvider} values are not accessed at pipeline construction time
-   * when built with {@link DatastoreV1.Read#withQuery(Query)}.
-   */
-  @Test
-  public void testRuntimeOptionsNotCalledInApplyQuery() {
-    RuntimeTestOptions options = PipelineOptionsFactory.as(RuntimeTestOptions.class);
-    Pipeline pipeline = TestPipeline.create(options);
-    pipeline
-        .apply(
-            DatastoreIO.v1()
-                .read()
-                .withProjectId(options.getDatastoreProject())
-                .withQuery(QUERY)
-                .withNamespace(options.getNamespace()))
-        .apply(DatastoreIO.v1().write().withProjectId(options.getDatastoreProject()));
-  }
+    /** Tests that {@link ReadFn} retries after an error. */
+    @Test
+    public void testReadFnRetriesErrors() throws Exception {
+      // An empty query to read entities.
+      Query query = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();
 
-  /**
-   * Test to ensure that {@link ValueProvider} values are not accessed at pipeline construction time
-   * when built with {@link DatastoreV1.Read#withLiteralGqlQuery(String)}.
-   */
-  @Test
-  public void testRuntimeOptionsNotCalledInApplyGqlQuery() {
-    RuntimeTestOptions options = PipelineOptionsFactory.as(RuntimeTestOptions.class);
-    Pipeline pipeline = TestPipeline.create(options);
-    pipeline
-        .apply(
-            DatastoreIO.v1()
-                .read()
-                .withProjectId(options.getDatastoreProject())
-                .withLiteralGqlQuery(options.getGqlQuery()))
-        .apply(DatastoreIO.v1().write().withProjectId(options.getDatastoreProject()));
-  }
+      // Use mockResponseForQuery to generate results.
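+      // The first runQuery call fails with a retryable DEADLINE_EXCEEDED; later calls succeed.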
+      when(mockDatastore.runQuery(any(RunQueryRequest.class)))
+          .thenThrow(new DatastoreException("RunQuery", Code.DEADLINE_EXCEEDED, "", null))
+          .thenAnswer(
+              invocationOnMock -> {
+                Query q = ((RunQueryRequest) invocationOnMock.getArguments()[0]).getQuery();
+                return mockResponseForQuery(q);
+              });
 
-  @Test
-  public void testWriteBatcherWithoutData() {
-    DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
-    writeBatcher.start();
-    assertEquals(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_START, writeBatcher.nextBatchSize(0));
-  }
+      ReadFn readFn = new ReadFn(V_1_OPTIONS, readTime, mockDatastoreFactory);
+      DoFnTester<Query, Entity> doFnTester = DoFnTester.of(readFn);
+      doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
+      doFnTester.processBundle(query);
+      verifyMetricWasSet("BatchDatastoreRead", "ok", NAMESPACE, 1);
+      verifyMetricWasSet("BatchDatastoreRead", "unknown", NAMESPACE, 1);
+    }
 
-  @Test
-  public void testWriteBatcherFastQueries() {
-    DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
-    writeBatcher.start();
-    writeBatcher.addRequestLatency(0, 1000, 200);
-    writeBatcher.addRequestLatency(0, 1000, 200);
-    assertEquals(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_LIMIT, writeBatcher.nextBatchSize(0));
-  }
+    @Test
+    public void testTranslateGqlQueryWithLimit() throws Exception {
+      String gql = "SELECT * from DummyKind LIMIT 10";
+      String gqlWithZeroLimit = gql + " LIMIT 0";
+      GqlQuery gqlQuery = GqlQuery.newBuilder().setQueryString(gql).setAllowLiterals(true).build();
+      GqlQuery gqlQueryWithZeroLimit =
+          GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
 
-  @Test
-  public void testWriteBatcherSlowQueries() {
-    DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
-    writeBatcher.start();
-    writeBatcher.addRequestLatency(0, 10000, 200);
-    writeBatcher.addRequestLatency(0, 10000, 200);
-    assertEquals(120, writeBatcher.nextBatchSize(0));
-  }
+      RunQueryRequest gqlRequest = makeRequest(gqlQuery, V_1_OPTIONS.getNamespace(), readTime);
+      RunQueryRequest gqlRequestWithZeroLimit =
+          makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace(), readTime);
+      when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
+          .thenThrow(
+              new DatastoreException(
+                  "runQuery",
+                  Code.INVALID_ARGUMENT,
+                  "invalid query",
+                  // dummy
+                  new RuntimeException()));
+      when(mockDatastore.runQuery(gqlRequest))
+          .thenReturn(RunQueryResponse.newBuilder().setQuery(QUERY).build());
+      assertEquals(
+          translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace(), readTime),
+          QUERY);
+      verify(mockDatastore, times(1)).runQuery(gqlRequest);
+      verify(mockDatastore, times(1)).runQuery(gqlRequestWithZeroLimit);
+    }
 
-  @Test
-  public void testWriteBatcherSizeNotBelowMinimum() {
-    DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
-    writeBatcher.start();
-    writeBatcher.addRequestLatency(0, 75000, 50);
-    writeBatcher.addRequestLatency(0, 75000, 50);
-    assertEquals(DatastoreV1.DATASTORE_BATCH_UPDATE_ENTITIES_MIN, writeBatcher.nextBatchSize(0));
-  }
+    @Test
+    public void testTranslateGqlQueryWithNoLimit() throws Exception {
+      String gql = "SELECT * from DummyKind";
+      String gqlWithZeroLimit = gql + " LIMIT 0";
+      GqlQuery gqlQueryWithZeroLimit =
+          GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
 
-  @Test
-  public void testWriteBatcherSlidingWindow() {
-    DatastoreV1.WriteBatcher writeBatcher = new DatastoreV1.WriteBatcherImpl();
-    writeBatcher.start();
-    writeBatcher.addRequestLatency(0, 30000, 50);
-    writeBatcher.addRequestLatency(50000, 8000, 200);
-    writeBatcher.addRequestLatency(100000, 8000, 200);
-    assertEquals(150, writeBatcher.nextBatchSize(150000));
+      RunQueryRequest gqlRequestWithZeroLimit =
+          makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace(), readTime);
+      when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
+          .thenReturn(RunQueryResponse.newBuilder().setQuery(QUERY).build());
+      assertEquals(
+          translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace(), readTime),
+          QUERY);
+      verify(mockDatastore, times(1)).runQuery(gqlRequestWithZeroLimit);
+    }
+
+    @Test
+    public void testTranslateGqlQueryWithException() throws Exception {
+      String gql = "SELECT * from DummyKind";
+      String gqlWithZeroLimit = gql + " LIMIT 0";
+      GqlQuery gqlQueryWithZeroLimit =
+          GqlQuery.newBuilder().setQueryString(gqlWithZeroLimit).setAllowLiterals(true).build();
+      RunQueryRequest gqlRequestWithZeroLimit =
+          makeRequest(gqlQueryWithZeroLimit, V_1_OPTIONS.getNamespace(), readTime);
+      when(mockDatastore.runQuery(gqlRequestWithZeroLimit))
+          .thenThrow(new RuntimeException("TestException"));
+
+      thrown.expect(RuntimeException.class);
+      thrown.expectMessage("TestException");
+      translateGqlQueryWithLimitCheck(gql, mockDatastore, V_1_OPTIONS.getNamespace(), readTime);
+    }
   }
 
   /** Helper Methods */
@@ -963,7 +1083,7 @@
   }
 
   /** Helper function to run a test reading from a {@link ReadFn}. */
-  private void readFnTest(int numEntities) throws Exception {
+  protected void readFnTest(int numEntities, Instant readTime) throws Exception {
     // An empty query to read entities.
     Query query =
         Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(numEntities)).build();
@@ -976,7 +1096,7 @@
               return mockResponseForQuery(q);
             });
 
-    ReadFn readFn = new ReadFn(V_1_OPTIONS, mockDatastoreFactory);
+    ReadFn readFn = new ReadFn(V_1_OPTIONS, readTime, mockDatastoreFactory);
     DoFnTester<Query, Entity> doFnTester = DoFnTester.of(readFn);
     /**
      * Although Datastore client is marked transient in {@link ReadFn}, when injected through mock
@@ -987,10 +1107,20 @@
     doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
     List<Entity> entities = doFnTester.processBundle(query);
 
+    ArgumentCaptor<RunQueryRequest> requestCaptor = ArgumentCaptor.forClass(RunQueryRequest.class);
+
     int expectedNumCallsToRunQuery = (int) Math.ceil((double) numEntities / QUERY_BATCH_LIMIT);
-    verify(mockDatastore, times(expectedNumCallsToRunQuery)).runQuery(any(RunQueryRequest.class));
+    verify(mockDatastore, times(expectedNumCallsToRunQuery)).runQuery(requestCaptor.capture());
     // Validate the number of results.
     assertEquals(numEntities, entities.size());
+    // Validate the read time.
+    RunQueryRequest request = requestCaptor.getValue();
+    if (readTime != null) {
+      assertEquals(
+          readTime.getMillis(), Timestamps.toMillis(request.getReadOptions().getReadTime()));
+    } else {
+      assertFalse(request.hasReadOptions());
+    }
   }
 
   /** Builds a per-kind statistics response with the given entity size. */
@@ -1050,7 +1180,7 @@
   }
 
   /** Generate dummy query splits. */
-  private List<Query> splitQuery(Query query, int numSplits) {
+  private static List<Query> splitQuery(Query query, int numSplits) {
     List<Query> queries = new ArrayList<>();
     int offsetOfOriginal = query.getOffset();
     for (int i = 0; i < numSplits; i++) {
@@ -1082,7 +1212,8 @@
     }
   }
 
-  private void verifyMetricWasSet(String method, String status, String namespace, long count) {
+  private static void verifyMetricWasSet(
+      String method, String status, String namespace, long count) {
     // Verify the metric as reported.
     HashMap<String, String> labels = new HashMap<>();
     labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "");
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
index 6d0bd52..ea00821 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
@@ -26,6 +26,8 @@
 import org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read.V1Options;
 import org.apache.beam.sdk.transforms.DoFnTester;
 import org.checkerframework.checker.nullness.qual.Nullable;
+import org.joda.time.Duration;
+import org.joda.time.Instant;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
@@ -50,6 +52,8 @@
  */
 @RunWith(JUnit4.class)
 public class SplitQueryFnIT {
+  private Instant readTime = Instant.now().minus(Duration.standardSeconds(10));
+
   /** Tests {@link SplitQueryFn} to generate expected number of splits for a large dataset. */
   @Test
   public void testSplitQueryFnWithLargeDataset() throws Exception {
@@ -59,7 +63,8 @@
     // Num splits is computed based on the entity_bytes size of the input_sort_1G kind reported by
     // Datastore stats.
     int expectedNumSplits = 32;
-    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits);
+    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits, null);
+    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits, readTime);
   }
 
   /** Tests {@link SplitQueryFn} to fallback to NUM_QUERY_SPLITS_MIN for a small dataset. */
@@ -69,17 +74,23 @@
     String kind = "shakespeare";
     String namespace = null;
     int expectedNumSplits = NUM_QUERY_SPLITS_MIN;
-    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits);
+    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits, null);
+    testSplitQueryFn(projectId, kind, namespace, expectedNumSplits, readTime);
   }
 
   /** A helper method to test {@link SplitQueryFn} to generate the expected number of splits. */
   private void testSplitQueryFn(
-      String projectId, String kind, @Nullable String namespace, int expectedNumSplits)
+      String projectId,
+      String kind,
+      @Nullable String namespace,
+      int expectedNumSplits,
+      @Nullable Instant readTime)
       throws Exception {
     Query.Builder query = Query.newBuilder();
     query.addKindBuilder().setName(kind);
 
-    SplitQueryFn splitQueryFn = new SplitQueryFn(V1Options.from(projectId, namespace, null), 0);
+    SplitQueryFn splitQueryFn =
+        new SplitQueryFn(V1Options.from(projectId, namespace, null), 0, readTime);
     DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
 
     List<Query> queries = doFnTester.processBundle(query.build());
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
index 55b53b3..249cadd 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
@@ -36,6 +36,7 @@
 import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.transforms.Count;
 import org.apache.beam.sdk.values.PCollection;
+import org.joda.time.Instant;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -48,7 +49,9 @@
   private V1TestOptions options;
   private String project;
   private String ancestor;
-  private final long numEntities = 1000;
+  private final long numEntitiesBeforeReadTime = 600;
+  private final long totalNumEntities = 1000;
+  private Instant readTime;
 
   @Before
   public void setup() throws Exception {
@@ -57,7 +60,15 @@
     project = TestPipeline.testingPipelineOptions().as(GcpOptions.class).getProject();
     ancestor = UUID.randomUUID().toString();
     // Create entities and write them to datastore
-    writeEntitiesToDatastore(options, project, ancestor, numEntities);
+    writeEntitiesToDatastore(options, project, ancestor, 0, numEntitiesBeforeReadTime);
+
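+    // Pause briefly on both sides of capturing readTime so the snapshot timestamp cleanly
+    // separates the first batch of writes from the second.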
+    Thread.sleep(1000);
+    readTime = Instant.now();
+    Thread.sleep(1000);
+
+    long moreEntitiesToWrite = totalNumEntities - numEntitiesBeforeReadTime;
+    writeEntitiesToDatastore(
+        options, project, ancestor, numEntitiesBeforeReadTime, moreEntitiesToWrite);
   }
 
   @After
@@ -77,6 +88,7 @@
     Query query =
         V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
 
+    // Read entities without readTime.
     DatastoreV1.Read read =
         DatastoreIO.v1()
             .read()
@@ -88,8 +100,23 @@
     Pipeline p = Pipeline.create(options);
     PCollection<Long> count = p.apply(read).apply(Count.globally());
 
-    PAssert.thatSingleton(count).isEqualTo(numEntities);
+    PAssert.thatSingleton(count).isEqualTo(totalNumEntities);
     p.run();
+
+    // Read entities with readTime.
+    DatastoreV1.Read snapshotRead =
+        DatastoreIO.v1()
+            .read()
+            .withProjectId(project)
+            .withQuery(query)
+            .withNamespace(options.getNamespace())
+            .withReadTime(readTime);
+
+    Pipeline p2 = Pipeline.create(options);
+    PCollection<Long> count2 = p2.apply(snapshotRead).apply(Count.globally());
+
+    PAssert.thatSingleton(count2).isEqualTo(numEntitiesBeforeReadTime);
+    p2.run();
   }
 
   @Test
@@ -114,12 +141,13 @@
             "SELECT * from %s WHERE __key__ HAS ANCESTOR KEY(%s, '%s')",
             options.getKind(), options.getKind(), ancestor);
 
-    long expectedNumEntities = numEntities;
+    long expectedNumEntities = totalNumEntities;
     if (limit > 0) {
       gqlQuery = String.format("%s LIMIT %d", gqlQuery, limit);
       expectedNumEntities = limit;
     }
 
+    // Read entities without readTime.
     DatastoreV1.Read read =
         DatastoreIO.v1()
             .read()
@@ -133,18 +161,36 @@
 
     PAssert.thatSingleton(count).isEqualTo(expectedNumEntities);
     p.run();
+
+    // Read entities with readTime.
+    DatastoreV1.Read snapshotRead =
+        DatastoreIO.v1()
+            .read()
+            .withProjectId(project)
+            .withLiteralGqlQuery(gqlQuery)
+            .withNamespace(options.getNamespace())
+            .withReadTime(readTime);
+
+    Pipeline p2 = Pipeline.create(options);
+    PCollection<Long> count2 = p2.apply(snapshotRead).apply(Count.globally());
+
+    long expectedNumEntities2 = limit > 0 ? limit : numEntitiesBeforeReadTime;
+    PAssert.thatSingleton(count2).isEqualTo(expectedNumEntities2);
+    p2.run();
   }
 
   // Creates entities and write them to datastore
   private static void writeEntitiesToDatastore(
-      V1TestOptions options, String project, String ancestor, long numEntities) throws Exception {
+      V1TestOptions options, String project, String ancestor, long valueOffset, long numEntities)
+      throws Exception {
     Datastore datastore = getDatastore(options, project);
     // Write test entities to datastore
     V1TestWriter writer = new V1TestWriter(datastore, new UpsertMutationBuilder());
     Key ancestorKey = makeAncestorKey(options.getNamespace(), options.getKind(), ancestor);
 
     for (long i = 0; i < numEntities; i++) {
-      Entity entity = makeEntity(i, ancestorKey, options.getKind(), options.getNamespace(), 0);
+      Entity entity =
+          makeEntity(valueOffset + i, ancestorKey, options.getKind(), options.getNamespace(), 0);
       writer.write(entity);
     }
     writer.close();
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsub/PubsubIOExternalTest.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsub/PubsubIOExternalTest.java
index bb8ddc4..39750f2 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsub/PubsubIOExternalTest.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsub/PubsubIOExternalTest.java
@@ -35,9 +35,9 @@
 import org.apache.beam.sdk.schemas.SchemaTranslation;
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.Impulse;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.Row;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables;
 import org.hamcrest.Matchers;
@@ -205,7 +205,7 @@
   }
 
   private static ExternalTransforms.ExternalConfigurationPayload encodeRow(Row row) {
-    ByteString.Output outputStream = ByteString.newOutput();
+    ByteStringOutputStream outputStream = new ByteStringOutputStream();
     try {
       SchemaCoder.of(row.getSchema()).encode(row, outputStream);
     } catch (IOException e) {
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImplTest.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImplTest.java
index 42c30aa..e60e8c0 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImplTest.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/pubsublite/internal/TopicBacklogReaderImplTest.java
@@ -66,7 +66,7 @@
             example(TopicPath.class),
             example(Partition.class),
             example(Offset.class),
-            Offset.of(Integer.MAX_VALUE)))
+            Offset.of(Long.MAX_VALUE)))
         .thenReturn(
             ApiFutures.immediateFailedFuture(new CheckedApiException(Code.UNAVAILABLE).underlying));
 
@@ -91,7 +91,7 @@
             example(TopicPath.class),
             example(Partition.class),
             example(Offset.class),
-            Offset.of(Integer.MAX_VALUE)))
+            Offset.of(Long.MAX_VALUE)))
         .thenReturn(ApiFutures.immediateFuture(response));
 
     assertEquals(reader.computeMessageStats(example(Offset.class)), response);
diff --git a/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java b/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
index 1e48af9..72d0ef5 100644
--- a/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
+++ b/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
@@ -44,8 +44,6 @@
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import javax.sql.DataSource;
-import org.apache.beam.sdk.annotations.Experimental;
-import org.apache.beam.sdk.annotations.Experimental.Kind;
 import org.apache.beam.sdk.coders.CannotProvideCoderException;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.CoderRegistry;
@@ -305,7 +303,6 @@
  * Consider using <a href="https://en.wikipedia.org/wiki/Merge_(SQL)">MERGE ("upsert")
  * statements</a> supported by your database instead.
  */
-@Experimental(Kind.SOURCE_SINK)
 @SuppressWarnings({
   "rawtypes", // TODO(https://github.com/apache/beam/issues/20447)
   "nullness" // TODO(https://github.com/apache/beam/issues/20497)
@@ -327,7 +324,6 @@
   }
 
   /** Read Beam {@link Row}s from a JDBC data source. */
-  @Experimental(Kind.SCHEMAS)
   public static ReadRows readRows() {
     return new AutoValue_JdbcIO_ReadRows.Builder()
         .setFetchSize(DEFAULT_FETCH_SIZE)
@@ -594,7 +590,6 @@
 
   /** Implementation of {@link #readRows()}. */
   @AutoValue
-  @Experimental(Kind.SCHEMAS)
   public abstract static class ReadRows extends PTransform<PBegin, PCollection<Row>> {
 
     abstract @Nullable SerializableFunction<Void, DataSource> getDataSourceProviderFn();
diff --git a/sdks/java/io/jdbc/src/test/java/org/apache/beam/sdk/io/jdbc/JdbcIOIT.java b/sdks/java/io/jdbc/src/test/java/org/apache/beam/sdk/io/jdbc/JdbcIOIT.java
index 97efd4b..e08f7be 100644
--- a/sdks/java/io/jdbc/src/test/java/org/apache/beam/sdk/io/jdbc/JdbcIOIT.java
+++ b/sdks/java/io/jdbc/src/test/java/org/apache/beam/sdk/io/jdbc/JdbcIOIT.java
@@ -19,8 +19,8 @@
 
 import static org.apache.beam.sdk.io.common.DatabaseTestHelper.assertRowCount;
 import static org.apache.beam.sdk.io.common.DatabaseTestHelper.getTestDataToWrite;
-import static org.apache.beam.sdk.io.common.IOITHelper.executeWithRetry;
 import static org.apache.beam.sdk.io.common.IOITHelper.readIOTestPipelineOptions;
+import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.MoreObjects.firstNonNull;
 
 import com.google.cloud.Timestamp;
 import java.sql.SQLException;
@@ -32,17 +32,17 @@
 import java.util.UUID;
 import java.util.function.Function;
 import org.apache.beam.sdk.PipelineResult;
-import org.apache.beam.sdk.coders.KvCoder;
-import org.apache.beam.sdk.coders.StringUtf8Coder;
 import org.apache.beam.sdk.coders.VarIntCoder;
 import org.apache.beam.sdk.io.GenerateSequence;
 import org.apache.beam.sdk.io.common.DatabaseTestHelper;
 import org.apache.beam.sdk.io.common.HashingFn;
 import org.apache.beam.sdk.io.common.PostgresIOTestPipelineOptions;
 import org.apache.beam.sdk.io.common.TestRow;
+import org.apache.beam.sdk.state.StateSpec;
+import org.apache.beam.sdk.state.StateSpecs;
+import org.apache.beam.sdk.state.ValueState;
 import org.apache.beam.sdk.testing.PAssert;
 import org.apache.beam.sdk.testing.TestPipeline;
-import org.apache.beam.sdk.testing.TestStream;
 import org.apache.beam.sdk.testutils.NamedTestResult;
 import org.apache.beam.sdk.testutils.metrics.IOITMetrics;
 import org.apache.beam.sdk.testutils.metrics.MetricsReader;
@@ -51,12 +51,19 @@
 import org.apache.beam.sdk.transforms.Combine;
 import org.apache.beam.sdk.transforms.Count;
 import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.Impulse;
+import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.PeriodicSequence;
+import org.apache.beam.sdk.transforms.PeriodicSequence.SequenceDefinition;
 import org.apache.beam.sdk.transforms.Top;
 import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PBegin;
 import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Lists;
+import org.joda.time.Duration;
 import org.joda.time.Instant;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -88,18 +95,19 @@
 @RunWith(JUnit4.class)
 public class JdbcIOIT {
 
+  // the number of rows written to the table in normal integration tests (not the performance test).
   private static final int EXPECTED_ROW_COUNT = 1000;
   private static final String NAMESPACE = JdbcIOIT.class.getName();
+  // the number of rows written to the table in the performance test.
   private static int numberOfRows;
   private static PGSimpleDataSource dataSource;
   private static String tableName;
-  private static Long tableSize;
   private static InfluxDBSettings settings;
   @Rule public TestPipeline pipelineWrite = TestPipeline.create();
   @Rule public TestPipeline pipelineRead = TestPipeline.create();
 
   @BeforeClass
-  public static void setup() throws Exception {
+  public static void setup() {
     PostgresIOTestPipelineOptions options;
     try {
       options = readIOTestPipelineOptions(PostgresIOTestPipelineOptions.class);
@@ -107,12 +115,9 @@
       options = null;
     }
     org.junit.Assume.assumeNotNull(options);
-
     numberOfRows = options.getNumberOfRecords();
     dataSource = DatabaseTestHelper.getPostgresDataSource(options);
     tableName = DatabaseTestHelper.getTestTableName("IT");
-    executeWithRetry(JdbcIOIT::createTable);
-    tableSize = DatabaseTestHelper.getPostgresTableSize(dataSource, tableName).orElse(0L);
     settings =
         InfluxDBSettings.builder()
             .withHost(options.getInfluxHost())
@@ -121,27 +126,22 @@
             .get();
   }
 
-  private static void createTable() throws SQLException {
-    DatabaseTestHelper.createTable(dataSource, tableName);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    executeWithRetry(JdbcIOIT::deleteTable);
-  }
-
-  private static void deleteTable() throws SQLException {
-    DatabaseTestHelper.deleteTable(dataSource, tableName);
-  }
-
-  /** Tests writing then reading data for a postgres database. */
+  /**
+   * Tests writing then reading data for a postgres database. Also used as a performance test of
+   * JdbcIO.
+   */
   @Test
-  public void testWriteThenRead() {
-    PipelineResult writeResult = runWrite();
-    writeResult.waitUntilFinish();
-    PipelineResult readResult = runRead();
-    readResult.waitUntilFinish();
-    gatherAndPublishMetrics(writeResult, readResult);
+  public void testWriteThenRead() throws SQLException {
+    DatabaseTestHelper.createTable(dataSource, tableName);
+    try {
+      PipelineResult writeResult = runWrite();
+      writeResult.waitUntilFinish();
+      PipelineResult readResult = runRead();
+      readResult.waitUntilFinish();
+      gatherAndPublishMetrics(writeResult, readResult);
+    } finally {
+      DatabaseTestHelper.deleteTable(dataSource, tableName);
+    }
   }
 
   private void gatherAndPublishMetrics(PipelineResult writeResult, PipelineResult readResult) {
@@ -177,9 +177,7 @@
     postgresTableSize.ifPresent(
         tableFinalSize ->
             suppliers.add(
-                ignore ->
-                    NamedTestResult.create(
-                        uuid, timestamp, "total_size", tableFinalSize - tableSize)));
+                ignore -> NamedTestResult.create(uuid, timestamp, "total_size", tableFinalSize)));
     return suppliers;
   }
 
@@ -264,43 +262,120 @@
     return pipelineRead.run();
   }
 
+  /** An integration test of the auto-sharding functionality using a generated stream of records. */
   @Test
   public void testWriteWithAutosharding() throws Exception {
-    String firstTableName = DatabaseTestHelper.getTestTableName("UT_WRITE");
+    String firstTableName = DatabaseTestHelper.getTestTableName("JDBCIT_AUTOSHARD");
     DatabaseTestHelper.createTable(dataSource, firstTableName);
     try {
-      List<KV<Integer, String>> data = getTestDataToWrite(EXPECTED_ROW_COUNT);
-      TestStream.Builder<KV<Integer, String>> ts =
-          TestStream.create(KvCoder.of(VarIntCoder.of(), StringUtf8Coder.of()))
-              .advanceWatermarkTo(Instant.now());
-      for (KV<Integer, String> elm : data) {
-        ts.addElements(elm);
+      PCollection<TestRow> dataCollection =
+          pipelineWrite.apply(
+              // emit 50_000 elements per second.
+              new GenerateRecordsStream(numberOfRows, 50_000, Duration.standardSeconds(1)));
+      dataCollection
+          .apply(ParDo.of(new TimeMonitor<>(NAMESPACE, "write_time")))
+          .apply(
+              JdbcIO.<TestRow>write()
+                  .withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
+                  .withStatement(String.format("insert into %s values(?, ?)", firstTableName))
+                  .withAutoSharding()
+                  .withPreparedStatementSetter(new JdbcTestHelper.PrepareStatementFromTestRow()));
+
+      List<String> additionalArgs = Lists.newArrayList("--streaming");
+      if (pipelineWrite
+          .getOptions()
+          .getRunner()
+          .getCanonicalName()
+          .startsWith("org.apache.beam.runners.dataflow")) {
+        // enableStreamingEngine is a GCP-specific option.
+        additionalArgs.add("--enableStreamingEngine");
       }
+      pipelineWrite.runWithAdditionalOptionArgs(additionalArgs).waitUntilFinish();
 
-      PCollection<KV<Integer, String>> dataCollection =
-          pipelineWrite.apply(ts.advanceWatermarkToInfinity());
-      dataCollection.apply(
-          JdbcIO.<KV<Integer, String>>write()
-              .withDataSourceProviderFn(voidInput -> dataSource)
-              .withStatement(String.format("insert into %s values(?, ?) returning *", tableName))
-              .withAutoSharding()
-              .withPreparedStatementSetter(
-                  (element, statement) -> {
-                    statement.setInt(1, element.getKey());
-                    statement.setString(2, element.getValue());
-                  }));
-
-      pipelineWrite.run().waitUntilFinish();
-
-      runRead();
+      assertRowCount(dataSource, firstTableName, numberOfRows);
     } finally {
       DatabaseTestHelper.deleteTable(dataSource, firstTableName);
     }
   }
 
+  /** Generate a stream of records for testing. */
+  private static class GenerateRecordsStream extends PTransform<PBegin, PCollection<TestRow>> {
+    private final long numRecords;
+    private final long numPerPeriod;
+
+    public GenerateRecordsStream(long numRecords, long numPerPeriod, Duration periodLength) {
+      this.numRecords = numRecords;
+      this.numPerPeriod = numPerPeriod;
+    }
+
+    @Override
+    public PCollection<TestRow> expand(PBegin pBegin) {
+      PCollection<TestRow> pcoll =
+          pBegin
+              .apply(Impulse.create())
+              .apply(ParDo.of(new GenerateSequenceDefinitionFn(numRecords / numPerPeriod)))
+              .apply(PeriodicSequence.create())
+              .apply(
+                  "Add dumb key",
+                  ParDo.of(
+                      new DoFn<Instant, KV<Integer, Instant>>() {
+                        @ProcessElement
+                        public void processElement(ProcessContext c) {
+                          c.output(KV.of(0, c.element()));
+                        }
+                      }))
+              .apply(ParDo.of(new EmitSequenceFn(numRecords, numPerPeriod)))
+              .apply(ParDo.of(new TestRow.DeterministicallyConstructTestRowFn()));
+      return pcoll;
+    }
+  }
+
+  /** Sets the PeriodicSequence starting time when pipeline execution begins. */
+  private static class GenerateSequenceDefinitionFn extends DoFn<byte[], SequenceDefinition> {
+    private final long numPulses;
+
+    @ProcessElement
+    public void processElement(ProcessContext c) {
+      Instant now = Instant.now();
+      c.output(
+          new SequenceDefinition(
+              now, now.plus(Duration.standardSeconds(numPulses)), Duration.standardSeconds(1)));
+    }
+
+    public GenerateSequenceDefinitionFn(long numPulses) {
+      this.numPulses = numPulses;
+    }
+  }
+
+  private static class EmitSequenceFn extends DoFn<KV<Integer, Instant>, Long> {
+    private final long numRecords;
+    private final long numPerPeriod;
+
+    public EmitSequenceFn(long numRecords, long numPerPeriod) {
+      this.numRecords = numRecords;
+      this.numPerPeriod = numPerPeriod;
+    }
+
+    @StateId("count")
+    @SuppressWarnings("unused")
+    private final StateSpec<ValueState<Integer>> countSpec = StateSpecs.value(VarIntCoder.of());
+
+    @ProcessElement
+    public void processElement(ProcessContext c, @StateId("count") ValueState<Integer> count) {
+      int current = firstNonNull(count.read(), 0);
+      count.write(current + 1);
+      long startId = current * numPerPeriod;
+      long endId = Math.min((current + 1) * numPerPeriod, numRecords);
+      for (long id = startId; id < endId; ++id) {
+        c.output(id);
+      }
+    }
+  }
+
+  /** An integration test of the write results functionality. */
   @Test
   public void testWriteWithWriteResults() throws Exception {
-    String firstTableName = DatabaseTestHelper.getTestTableName("UT_WRITE");
+    String firstTableName = DatabaseTestHelper.getTestTableName("JDBCIT_WRITE");
     DatabaseTestHelper.createTable(dataSource, firstTableName);
     try {
       ArrayList<KV<Integer, String>> data = getTestDataToWrite(EXPECTED_ROW_COUNT);
@@ -325,7 +400,7 @@
 
       PAssert.that(resultSetCollection).containsInAnyOrder(expectedResult);
 
-      pipelineWrite.run();
+      pipelineWrite.run().waitUntilFinish();
 
       assertRowCount(dataSource, firstTableName, EXPECTED_ROW_COUNT);
     } finally {
@@ -339,7 +414,7 @@
    */
   private static JdbcIO.Write<KV<Integer, String>> getJdbcWriteWithReturning(String tableName) {
     return JdbcIO.<KV<Integer, String>>write()
-        .withDataSourceProviderFn(voidInput -> dataSource)
+        .withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
         .withStatement(String.format("insert into %s values(?, ?) returning *", tableName))
         .withPreparedStatementSetter(
             (element, statement) -> {
diff --git a/sdks/java/io/kafka/build.gradle b/sdks/java/io/kafka/build.gradle
index 889b6a4..ec645db 100644
--- a/sdks/java/io/kafka/build.gradle
+++ b/sdks/java/io/kafka/build.gradle
@@ -146,6 +146,7 @@
     filter {
       excludeTestsMatching "*InStreaming"
       if (!(kv.key in sdfKafkaVersions)) excludeTestsMatching "*DynamicPartitions" //admin client create partitions does not exist in kafka 0.11.0.3 and kafka sdf does not appear to work for kafka versions <2.0.1
+      if (!(kv.key in sdfKafkaVersions)) excludeTestsMatching "*SDFResumesCorrectly" //Kafka SDF does not work for kafka versions <2.0.1
     }
   }
 }
diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/ReadFromKafkaDoFn.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/ReadFromKafkaDoFn.java
index 7c674a6..28b3c4c 100644
--- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/ReadFromKafkaDoFn.java
+++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/ReadFromKafkaDoFn.java
@@ -252,10 +252,7 @@
   public OffsetRange initialRestriction(@Element KafkaSourceDescriptor kafkaSourceDescriptor) {
     Map<String, Object> updatedConsumerConfig =
         overrideBootstrapServersConfig(consumerConfig, kafkaSourceDescriptor);
-    try (Consumer<byte[], byte[]> offsetConsumer =
-        consumerFactoryFn.apply(
-            KafkaIOUtils.getOffsetConsumerConfig(
-                "initialOffset", offsetConsumerConfig, updatedConsumerConfig))) {
+    try (Consumer<byte[], byte[]> offsetConsumer = consumerFactoryFn.apply(updatedConsumerConfig)) {
       ConsumerSpEL.evaluateAssign(
           offsetConsumer, ImmutableList.of(kafkaSourceDescriptor.getTopicPartition()));
       long startOffset;
diff --git a/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOExternalTest.java b/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOExternalTest.java
index d9489a8..b62da98 100644
--- a/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOExternalTest.java
+++ b/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOExternalTest.java
@@ -43,8 +43,8 @@
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.Impulse;
 import org.apache.beam.sdk.transforms.WithKeys;
+import org.apache.beam.sdk.util.ByteStringOutputStream;
 import org.apache.beam.sdk.values.Row;
-import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString;
 import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
@@ -375,7 +375,7 @@
   }
 
   private static ExternalConfigurationPayload encodeRow(Row row) {
-    ByteString.Output outputStream = ByteString.newOutput();
+    ByteStringOutputStream outputStream = new ByteStringOutputStream();
     try {
       SchemaCoder.of(row.getSchema()).encode(row, outputStream);
     } catch (IOException e) {
diff --git a/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOIT.java b/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOIT.java
index d385606..0d91504 100644
--- a/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOIT.java
+++ b/sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOIT.java
@@ -24,7 +24,9 @@
 import com.google.cloud.Timestamp;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
@@ -47,6 +49,7 @@
 import org.apache.beam.sdk.options.PipelineOptionsFactory;
 import org.apache.beam.sdk.options.StreamingOptions;
 import org.apache.beam.sdk.options.Validation;
+import org.apache.beam.sdk.testing.ExpectedLogs;
 import org.apache.beam.sdk.testing.PAssert;
 import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.testing.TestPipelineOptions;
@@ -63,6 +66,7 @@
 import org.apache.beam.sdk.transforms.MapElements;
 import org.apache.beam.sdk.transforms.ParDo;
 import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.Values;
 import org.apache.beam.sdk.transforms.windowing.FixedWindows;
 import org.apache.beam.sdk.transforms.windowing.Window;
 import org.apache.beam.sdk.values.KV;
@@ -87,6 +91,8 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.testcontainers.containers.KafkaContainer;
 import org.testcontainers.utility.DockerImageName;
 
@@ -116,6 +122,8 @@
 
   private static final String TIMESTAMP = Timestamp.now().toString();
 
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaIOIT.class);
+
   private static String expectedHashcode;
 
   private static SyntheticSourceOptions sourceOptions;
@@ -124,8 +132,12 @@
 
   private static InfluxDBSettings settings;
 
+  @Rule public ExpectedLogs kafkaIOITExpectedLogs = ExpectedLogs.none(KafkaIOIT.class);
+
   @Rule public TestPipeline writePipeline = TestPipeline.create();
 
+  @Rule public TestPipeline writePipeline2 = TestPipeline.create();
+
   @Rule public TestPipeline readPipeline = TestPipeline.create();
 
   private static ExperimentalOptions sdfPipelineOptions;
@@ -138,6 +150,7 @@
   }
 
   @Rule public TestPipeline sdfReadPipeline = TestPipeline.fromOptions(sdfPipelineOptions);
+  @Rule public TestPipeline sdfReadPipeline2 = TestPipeline.fromOptions(sdfPipelineOptions);
 
   private static KafkaContainer kafkaContainer;
 
@@ -239,6 +252,94 @@
     }
   }
 
+  // Because of existing limitations in streaming testing, this is verified via a combination of
+  // DoFns.  CrashOnExtra will throw an exception if we see any extra records beyond those we
+  // expect, and LogFn acts as a sink we can inspect using ExpectedLogs to verify that we got all
+  // those we expect.
+  @Test
+  public void testKafkaIOSDFResumesCorrectly() throws IOException {
+    roundtripElements("first-pass", 4, writePipeline, sdfReadPipeline);
+    roundtripElements("second-pass", 3, writePipeline2, sdfReadPipeline2);
+  }
+
+  private void roundtripElements(
+      String recordPrefix, Integer recordCount, TestPipeline wPipeline, TestPipeline rPipeline)
+      throws IOException {
+    AdminClient client =
+        AdminClient.create(
+            ImmutableMap.of("bootstrap.servers", options.getKafkaBootstrapServerAddresses()));
+    client.listTopics();
+    Map<Integer, String> records = new HashMap<>();
+    for (int i = 0; i < recordCount; i++) {
+      records.put(i, recordPrefix + "-" + i);
+    }
+
+    wPipeline
+        .apply("Generate Write Elements", Create.of(records))
+        .apply(
+            "Write to Kafka",
+            KafkaIO.<Integer, String>write()
+                .withBootstrapServers(options.getKafkaBootstrapServerAddresses())
+                .withTopic(options.getKafkaTopic() + "-resuming")
+                .withKeySerializer(IntegerSerializer.class)
+                .withValueSerializer(StringSerializer.class));
+
+    wPipeline.run().waitUntilFinish(Duration.standardSeconds(10));
+
+    rPipeline
+        .apply(
+            "Read from Kafka",
+            KafkaIO.<Integer, String>read()
+                .withBootstrapServers(options.getKafkaBootstrapServerAddresses())
+                .withConsumerConfigUpdates(
+                    ImmutableMap.of(
+                        "group.id",
+                        "resuming-group",
+                        "auto.offset.reset",
+                        "earliest",
+                        "enable.auto.commit",
+                        "true"))
+                .withTopic(options.getKafkaTopic() + "-resuming")
+                .withKeyDeserializer(IntegerDeserializer.class)
+                .withValueDeserializer(StringDeserializer.class)
+                .withoutMetadata())
+        .apply("Get Values", Values.create())
+        .apply(ParDo.of(new CrashOnExtra(records.values())))
+        .apply(ParDo.of(new LogFn()));
+
+    rPipeline.run().waitUntilFinish(Duration.standardSeconds(options.getReadTimeout()));
+
+    for (String value : records.values()) {
+      kafkaIOITExpectedLogs.verifyError(value);
+    }
+  }
+
+  public static class CrashOnExtra extends DoFn<String, String> {
+    final Set<String> expected;
+
+    public CrashOnExtra(Collection<String> records) {
+      expected = new HashSet<>(records);
+    }
+
+    @ProcessElement
+    public void processElement(@Element String element, OutputReceiver<String> outputReceiver) {
+      if (!expected.contains(element)) {
+        throw new RuntimeException("Received unexpected element: " + element);
+      } else {
+        expected.remove(element);
+        outputReceiver.output(element);
+      }
+    }
+  }
+
+  public static class LogFn extends DoFn<String, String> {
+    @ProcessElement
+    public void processElement(@Element String element, OutputReceiver<String> outputReceiver) {
+      LOG.error(element);
+      outputReceiver.output(element);
+    }
+  }
+
   // This test roundtrips a single KV<Null,Null> to verify that externalWithMetadata
   // can handle null keys and values correctly.
   @Test
diff --git a/sdks/java/io/sparkreceiver/build.gradle b/sdks/java/io/sparkreceiver/build.gradle
index 09f98db..f226435 100644
--- a/sdks/java/io/sparkreceiver/build.gradle
+++ b/sdks/java/io/sparkreceiver/build.gradle
@@ -41,4 +41,6 @@
     compileOnly "org.scala-lang:scala-library:2.11.12"
     testImplementation project(path: ":sdks:java:io:cdap", configuration: "testRuntimeMigration")
     testImplementation library.java.junit
+    testImplementation project(path: ":runners:direct-java", configuration: "shadow")
+    testImplementation project(path: ":examples:java", configuration: "testRuntimeMigration")
 }
diff --git a/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/HasOffset.java b/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/HasOffset.java
new file mode 100644
index 0000000..b3c9e11
--- /dev/null
+++ b/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/HasOffset.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+/**
+ * Interface for any Spark {@link org.apache.spark.streaming.receiver.Receiver} that supports
+ * reading records from a start offset up to an end offset.
+ */
+public interface HasOffset {
+
+  /** @param offset inclusive start offset from which reading should start. */
+  void setStartOffset(Long offset);
+
+  /** @return exclusive end offset up to which reading from the current page will occur. */
+  Long getEndOffset();
+}
diff --git a/sdks/python/apache_beam/coders/coders_test.py b/sdks/python/apache_beam/coders/coders_test.py
index 0a30a32..1d73c59 100644
--- a/sdks/python/apache_beam/coders/coders_test.py
+++ b/sdks/python/apache_beam/coders/coders_test.py
@@ -59,18 +59,18 @@
 # The test proto message file was generated by running the following:
 #
 # `cd <beam repo>`
-# `cp sdks/java/core/src/proto/proto2_coder_test_message.proto
-#    sdks/python/apache_beam/coders`
+# `cp sdks/java/extensions/protobuf/src/test/proto/\
+#    proto2_coder_test_messages.proto sdks/python/apache_beam/coders/`
 # `cd sdks/python`
 # `protoc apache_beam/coders/proto2_coder_test_messages.proto
-#    --python_out=apache_beam/coders
+#    --python_out=.
 # `rm apache_beam/coders/proto2_coder_test_message.proto`
 #
 # Note: The protoc version should match the protobuf library version specified
 # in setup.py.
 #
-# TODO(vikasrk): The proto file should be placed in a common directory
-# that can be shared between java and python.
+# TODO(https://github.com/apache/beam/issues/22319): The proto file should be
+# placed in a common directory that can be shared between java and python.
 class ProtoCoderTest(unittest.TestCase):
   def test_proto_coder(self):
     ma = test_message.MessageA()
diff --git a/sdks/python/apache_beam/coders/proto2_coder_test_messages_pb2.py b/sdks/python/apache_beam/coders/proto2_coder_test_messages_pb2.py
index 433d33f..97ae8be 100644
--- a/sdks/python/apache_beam/coders/proto2_coder_test_messages_pb2.py
+++ b/sdks/python/apache_beam/coders/proto2_coder_test_messages_pb2.py
@@ -15,30 +15,29 @@
 # limitations under the License.
 #
 
+# -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: sdks/java/core/src/main/proto/proto2_coder_test_messages.proto
-
-import sys
+# source: apache_beam/coders/proto2_coder_test_messages.proto
 
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 # @@protoc_insertion_point(imports)
 
 _sym_db = _symbol_database.Default()
 
 
+
+
 DESCRIPTOR = _descriptor.FileDescriptor(
   name='apache_beam/coders/proto2_coder_test_messages.proto',
   package='proto2_coder_test_messages',
   syntax='proto2',
-  serialized_pb=_b('\n3apache_beam/coders/proto2_coder_test_messages.proto\x12\x1aproto2_coder_test_messages\"P\n\x08MessageA\x12\x0e\n\x06\x66ield1\x18\x01 \x01(\t\x12\x34\n\x06\x66ield2\x18\x02 \x03(\x0b\x32$.proto2_coder_test_messages.MessageB\"\x1a\n\x08MessageB\x12\x0e\n\x06\x66ield1\x18\x01 \x01(\x08\"\x10\n\x08MessageC*\x04\x08\x64\x10j\"\xad\x01\n\x0eMessageWithMap\x12\x46\n\x06\x66ield1\x18\x01 \x03(\x0b\x32\x36.proto2_coder_test_messages.MessageWithMap.Field1Entry\x1aS\n\x0b\x46ield1Entry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.proto2_coder_test_messages.MessageA:\x02\x38\x01\"V\n\x18ReferencesMessageWithMap\x12:\n\x06\x66ield1\x18\x01 \x03(\x0b\x32*.proto2_coder_test_messages.MessageWithMap:Z\n\x06\x66ield1\x12$.proto2_coder_test_messages.MessageC\x18\x65 \x01(\x0b\x32$.proto2_coder_test_messages.MessageA:Z\n\x06\x66ield2\x12$.proto2_coder_test_messages.MessageC\x18\x66 \x01(\x0b\x32$.proto2_coder_test_messages.MessageBB\x1c\n\x1aorg.apache.beam.sdk.coders')
+  serialized_options=b'\n\'org.apache.beam.sdk.extensions.protobuf',
+  create_key=_descriptor._internal_create_key,
+  serialized_pb=b'\n3apache_beam/coders/proto2_coder_test_messages.proto\x12\x1aproto2_coder_test_messages\"P\n\x08MessageA\x12\x0e\n\x06\x66ield1\x18\x01 \x01(\t\x12\x34\n\x06\x66ield2\x18\x02 \x03(\x0b\x32$.proto2_coder_test_messages.MessageB\"\x1a\n\x08MessageB\x12\x0e\n\x06\x66ield1\x18\x01 \x01(\x08\"\x10\n\x08MessageC*\x04\x08\x64\x10j\"\xad\x01\n\x0eMessageWithMap\x12\x46\n\x06\x66ield1\x18\x01 \x03(\x0b\x32\x36.proto2_coder_test_messages.MessageWithMap.Field1Entry\x1aS\n\x0b\x46ield1Entry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.proto2_coder_test_messages.MessageA:\x02\x38\x01\"V\n\x18ReferencesMessageWithMap\x12:\n\x06\x66ield1\x18\x01 \x03(\x0b\x32*.proto2_coder_test_messages.MessageWithMap:Z\n\x06\x66ield1\x12$.proto2_coder_test_messages.MessageC\x18\x65 \x01(\x0b\x32$.proto2_coder_test_messages.MessageA:Z\n\x06\x66ield2\x12$.proto2_coder_test_messages.MessageC\x18\x66 \x01(\x0b\x32$.proto2_coder_test_messages.MessageBB)\n\'org.apache.beam.sdk.extensions.protobuf'
 )
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
 
 FIELD1_FIELD_NUMBER = 101
@@ -48,7 +47,7 @@
   has_default_value=False, default_value=None,
   message_type=None, enum_type=None, containing_type=None,
   is_extension=True, extension_scope=None,
-  options=None)
+  serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key)
 FIELD2_FIELD_NUMBER = 102
 field2 = _descriptor.FieldDescriptor(
   name='field2', full_name='proto2_coder_test_messages.field2', index=1,
@@ -56,7 +55,7 @@
   has_default_value=False, default_value=None,
   message_type=None, enum_type=None, containing_type=None,
   is_extension=True, extension_scope=None,
-  options=None)
+  serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key)
 
 
 _MESSAGEA = _descriptor.Descriptor(
@@ -65,28 +64,29 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='field1', full_name='proto2_coder_test_messages.MessageA.field1', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='field2', full_name='proto2_coder_test_messages.MessageA.field2', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto2',
   extension_ranges=[],
@@ -103,6 +103,7 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='field1', full_name='proto2_coder_test_messages.MessageB.field1', index=0,
@@ -110,14 +111,14 @@
       has_default_value=False, default_value=False,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto2',
   extension_ranges=[],
@@ -134,6 +135,7 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
   ],
   extensions=[
@@ -141,7 +143,7 @@
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=True,
   syntax='proto2',
   extension_ranges=[(100, 106), ],
@@ -158,28 +160,29 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='key', full_name='proto2_coder_test_messages.MessageWithMap.Field1Entry.key', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='value', full_name='proto2_coder_test_messages.MessageWithMap.Field1Entry.value', index=1,
       number=2, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+  serialized_options=b'8\001',
   is_extendable=False,
   syntax='proto2',
   extension_ranges=[],
@@ -195,6 +198,7 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='field1', full_name='proto2_coder_test_messages.MessageWithMap.field1', index=0,
@@ -202,14 +206,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[_MESSAGEWITHMAP_FIELD1ENTRY, ],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto2',
   extension_ranges=[],
@@ -226,6 +230,7 @@
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='field1', full_name='proto2_coder_test_messages.ReferencesMessageWithMap.field1', index=0,
@@ -233,14 +238,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto2',
   extension_ranges=[],
@@ -262,48 +267,49 @@
 DESCRIPTOR.message_types_by_name['ReferencesMessageWithMap'] = _REFERENCESMESSAGEWITHMAP
 DESCRIPTOR.extensions_by_name['field1'] = field1
 DESCRIPTOR.extensions_by_name['field2'] = field2
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
-MessageA = _reflection.GeneratedProtocolMessageType('MessageA', (_message.Message,), dict(
-  DESCRIPTOR = _MESSAGEA,
-  __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+MessageA = _reflection.GeneratedProtocolMessageType('MessageA', (_message.Message,), {
+  'DESCRIPTOR' : _MESSAGEA,
+  '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
   # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.MessageA)
-  ))
+  })
 _sym_db.RegisterMessage(MessageA)
 
-MessageB = _reflection.GeneratedProtocolMessageType('MessageB', (_message.Message,), dict(
-  DESCRIPTOR = _MESSAGEB,
-  __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+MessageB = _reflection.GeneratedProtocolMessageType('MessageB', (_message.Message,), {
+  'DESCRIPTOR' : _MESSAGEB,
+  '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
   # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.MessageB)
-  ))
+  })
 _sym_db.RegisterMessage(MessageB)
 
-MessageC = _reflection.GeneratedProtocolMessageType('MessageC', (_message.Message,), dict(
-  DESCRIPTOR = _MESSAGEC,
-  __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+MessageC = _reflection.GeneratedProtocolMessageType('MessageC', (_message.Message,), {
+  'DESCRIPTOR' : _MESSAGEC,
+  '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
   # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.MessageC)
-  ))
+  })
 _sym_db.RegisterMessage(MessageC)
 
-MessageWithMap = _reflection.GeneratedProtocolMessageType('MessageWithMap', (_message.Message,), dict(
+MessageWithMap = _reflection.GeneratedProtocolMessageType('MessageWithMap', (_message.Message,), {
 
-  Field1Entry = _reflection.GeneratedProtocolMessageType('Field1Entry', (_message.Message,), dict(
-    DESCRIPTOR = _MESSAGEWITHMAP_FIELD1ENTRY,
-    __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+  'Field1Entry' : _reflection.GeneratedProtocolMessageType('Field1Entry', (_message.Message,), {
+    'DESCRIPTOR' : _MESSAGEWITHMAP_FIELD1ENTRY,
+    '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
     # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.MessageWithMap.Field1Entry)
-    ))
+    })
   ,
-  DESCRIPTOR = _MESSAGEWITHMAP,
-  __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+  'DESCRIPTOR' : _MESSAGEWITHMAP,
+  '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
   # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.MessageWithMap)
-  ))
+  })
 _sym_db.RegisterMessage(MessageWithMap)
 _sym_db.RegisterMessage(MessageWithMap.Field1Entry)
 
-ReferencesMessageWithMap = _reflection.GeneratedProtocolMessageType('ReferencesMessageWithMap', (_message.Message,), dict(
-  DESCRIPTOR = _REFERENCESMESSAGEWITHMAP,
-  __module__ = 'apache_beam.coders.proto2_coder_test_messages_pb2'
+ReferencesMessageWithMap = _reflection.GeneratedProtocolMessageType('ReferencesMessageWithMap', (_message.Message,), {
+  'DESCRIPTOR' : _REFERENCESMESSAGEWITHMAP,
+  '__module__' : 'apache_beam.coders.proto2_coder_test_messages_pb2'
   # @@protoc_insertion_point(class_scope:proto2_coder_test_messages.ReferencesMessageWithMap)
-  ))
+  })
 _sym_db.RegisterMessage(ReferencesMessageWithMap)
 
 field1.message_type = _MESSAGEA
@@ -311,8 +317,6 @@
 field2.message_type = _MESSAGEB
 MessageC.RegisterExtension(field2)
 
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032org.apache.beam.sdk.coders'))
-_MESSAGEWITHMAP_FIELD1ENTRY.has_options = True
-_MESSAGEWITHMAP_FIELD1ENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+DESCRIPTOR._options = None
+_MESSAGEWITHMAP_FIELD1ENTRY._options = None
 # @@protoc_insertion_point(module_scope)
diff --git a/sdks/python/apache_beam/examples/complete/juliaset/setup.py b/sdks/python/apache_beam/examples/complete/juliaset/setup.py
index c4dcbe1..c3a9fe0 100644
--- a/sdks/python/apache_beam/examples/complete/juliaset/setup.py
+++ b/sdks/python/apache_beam/examples/complete/juliaset/setup.py
@@ -28,10 +28,14 @@
 # pytype: skip-file
 
 import subprocess
-from distutils.command.build import build as _build  # type: ignore
 
 import setuptools
 
+# It is recommended to import setuptools prior to importing distutils to avoid
+# using legacy behavior from distutils.
+# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0
+from distutils.command.build import build as _build  # isort:skip
+
 
 # This class handles the pip install mechanism.
 class build(_build):  # pylint: disable=invalid-name
diff --git a/sdks/python/apache_beam/examples/inference/pytorch_image_segmentation.py b/sdks/python/apache_beam/examples/inference/pytorch_image_segmentation.py
index e0e2e67..8e4a682 100644
--- a/sdks/python/apache_beam/examples/inference/pytorch_image_segmentation.py
+++ b/sdks/python/apache_beam/examples/inference/pytorch_image_segmentation.py
@@ -145,14 +145,9 @@
 
 def preprocess_image(data: Image.Image) -> torch.Tensor:
   image_size = (224, 224)
-  # Pre-trained PyTorch models expect input images normalized with the
-  # below values (see: https://pytorch.org/vision/stable/models.html)
-  normalize = transforms.Normalize(
-      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
   transform = transforms.Compose([
       transforms.Resize(image_size),
       transforms.ToTensor(),
-      normalize,
   ])
   return transform(data)
 
diff --git a/sdks/python/apache_beam/examples/kafkataxi/README.md b/sdks/python/apache_beam/examples/kafkataxi/README.md
index dc086e3..c4e808c 100644
--- a/sdks/python/apache_beam/examples/kafkataxi/README.md
+++ b/sdks/python/apache_beam/examples/kafkataxi/README.md
@@ -104,8 +104,7 @@
   --region $REGION \
   --num_workers $NUM_WORKERS \
   --job_name $JOB_NAME \
-  --bootstrap_servers $BOOTSTRAP_SERVER \
-  --experiments=use_runner_v2
+  --bootstrap_servers $BOOTSTRAP_SERVER
 ```
 
 ## *(Optional)*  Running the Example from a Beam Git Clone
@@ -185,6 +184,5 @@
   --num_workers $NUM_WORKERS \
   --job_name $JOB_NAME \
   --bootstrap_servers $BOOTSTRAP_SERVER \
-  --sdk_harness_container_image_overrides ".*java.*,${DOCKER_ROOT}/beam_java8_sdk:latest" \
-  --experiments=use_runner_v2
+  --sdk_harness_container_image_overrides ".*java.*,${DOCKER_ROOT}/beam_java8_sdk:latest"
 ```
diff --git a/sdks/python/apache_beam/examples/kafkataxi/kafka_taxi.py b/sdks/python/apache_beam/examples/kafkataxi/kafka_taxi.py
index 96fee05..1cdd266 100644
--- a/sdks/python/apache_beam/examples/kafkataxi/kafka_taxi.py
+++ b/sdks/python/apache_beam/examples/kafkataxi/kafka_taxi.py
@@ -49,8 +49,7 @@
   #                  '--runner', 'DataflowRunner',
   #                  '--temp_location', 'my-temp-location',
   #                  '--region', 'my-region',
-  #                  '--num_workers', 'my-num-workers',
-  #                  '--experiments', 'use_runner_v2']
+  #                  '--num_workers', 'my-num-workers']
 
   window_size = 15  # size of the Window in seconds.
 
diff --git a/sdks/python/apache_beam/examples/sql_taxi.py b/sdks/python/apache_beam/examples/sql_taxi.py
index 165bbc9..e8a2980 100644
--- a/sdks/python/apache_beam/examples/sql_taxi.py
+++ b/sdks/python/apache_beam/examples/sql_taxi.py
@@ -22,9 +22,8 @@
 the data in 15s windows using SqlTransform, and writes the output to
 a user-defined PubSub topic.
 
-Java 8 must be available to run this pipeline, and the
---experiments=use_runner_v2 flag must be passed when running on Dataflow.
-Docker must also be available to run this pipeline locally.
+A Java version supported by Beam must be installed locally to run this pipeline.
+Additionally, Docker must be available to run this pipeline locally.
 """
 
 # pytype: skip-file
diff --git a/sdks/python/apache_beam/examples/wordcount_xlang_sql.py b/sdks/python/apache_beam/examples/wordcount_xlang_sql.py
index 97a43d3..9d7d756 100644
--- a/sdks/python/apache_beam/examples/wordcount_xlang_sql.py
+++ b/sdks/python/apache_beam/examples/wordcount_xlang_sql.py
@@ -17,9 +17,8 @@
 
 """A word-counting workflow that uses the SQL transform.
 
-Java 8 must be available to run this pipeline, and the
---experiments=use_runner_v2 flag must be passed when running on Dataflow.
-Docker must also be available to run this pipeline locally.
+A Java version supported by Beam must be installed locally to run this pipeline.
+Additionally, Docker must be available to run this pipeline locally.
 """
 
 import argparse
diff --git a/sdks/python/apache_beam/io/aws/s3io.py b/sdks/python/apache_beam/io/aws/s3io.py
index d8bbfe1..66bdaa5 100644
--- a/sdks/python/apache_beam/io/aws/s3io.py
+++ b/sdks/python/apache_beam/io/aws/s3io.py
@@ -120,9 +120,9 @@
     start_time = time.time()
 
     if with_metadata:
-      logging.info("Starting the file information of the input")
+      logging.debug("Starting the file information of the input")
     else:
-      logging.info("Starting the size estimation of the input")
+      logging.debug("Starting the size estimation of the input")
 
     while True:
       #The list operation will raise an exception
@@ -157,7 +157,9 @@
       else:
         break
 
-    logging.info(
+    logging.log(
+        # do not spam logs when list_prefix is likely used to check an empty folder
+        logging.INFO if counter > 0 else logging.DEBUG,
         "Finished listing %s files in %s seconds.",
         counter,
         time.time() - start_time)
diff --git a/sdks/python/apache_beam/io/azure/blobstorageio.py b/sdks/python/apache_beam/io/azure/blobstorageio.py
index 290b522..ae0a494 100644
--- a/sdks/python/apache_beam/io/azure/blobstorageio.py
+++ b/sdks/python/apache_beam/io/azure/blobstorageio.py
@@ -588,9 +588,9 @@
     start_time = time.time()
 
     if with_metadata:
-      logging.info("Starting the file information of the input")
+      logging.debug("Starting the file information of the input")
     else:
-      logging.info("Starting the size estimation of the input")
+      logging.debug("Starting the size estimation of the input")
     container_client = self.client.get_container_client(container)
 
     while True:
@@ -612,7 +612,9 @@
             logging.info("Finished computing size of: %s files", len(file_info))
       break
 
-    logging.info(
+    logging.log(
+        # do not spam logs when list_prefix is likely used to check an empty folder
+        logging.INFO if counter > 0 else logging.DEBUG,
         "Finished listing %s files in %s seconds.",
         counter,
         time.time() - start_time)
diff --git a/sdks/python/apache_beam/io/filebasedsource.py b/sdks/python/apache_beam/io/filebasedsource.py
index 6592a0c..e2cd268 100644
--- a/sdks/python/apache_beam/io/filebasedsource.py
+++ b/sdks/python/apache_beam/io/filebasedsource.py
@@ -29,6 +29,7 @@
 # pytype: skip-file
 
 from typing import Callable
+from typing import Iterable
 from typing import Tuple
 from typing import Union
 
@@ -347,7 +348,7 @@
     self._compression_type = compression_type
 
   def process(self, element: Union[str, FileMetadata], *args,
-              **kwargs) -> Tuple[FileMetadata, OffsetRange]:
+              **kwargs) -> Iterable[Tuple[FileMetadata, OffsetRange]]:
     if isinstance(element, FileMetadata):
       metadata_list = [element]
     else:
diff --git a/sdks/python/apache_beam/io/fileio.py b/sdks/python/apache_beam/io/fileio.py
index 38881e8..e635280 100644
--- a/sdks/python/apache_beam/io/fileio.py
+++ b/sdks/python/apache_beam/io/fileio.py
@@ -689,9 +689,8 @@
       yield FileResult(
           final_file_name, i, len(file_results), r.window, r.pane, destination)
 
-    _LOGGER.info(
-        'Checking orphaned temporary files for'
-        ' destination %s and window %s',
+    _LOGGER.debug(
+        'Checking orphaned temporary files for destination %s and window %s',
         destination,
         w)
     writer_key = (destination, w)
@@ -704,9 +703,10 @@
       match_result = filesystems.FileSystems.match(['%s*' % prefix])
       orphaned_files = [m.path for m in match_result[0].metadata_list]
 
-      _LOGGER.info(
-          'Some files may be left orphaned in the temporary folder: %s',
-          orphaned_files)
+      if len(orphaned_files) > 0:
+        _LOGGER.info(
+            'Some files may be left orphaned in the temporary folder: %s',
+            orphaned_files)
     except BeamIOError as e:
       _LOGGER.info('Exceptions when checking orphaned files: %s', e)
 
diff --git a/sdks/python/apache_beam/io/filesystem.py b/sdks/python/apache_beam/io/filesystem.py
index a833eef..fa1f67a 100644
--- a/sdks/python/apache_beam/io/filesystem.py
+++ b/sdks/python/apache_beam/io/filesystem.py
@@ -39,6 +39,8 @@
 from typing import Optional
 from typing import Tuple
 
+import zstandard
+
 from apache_beam.utils.plugin import BeamPlugin
 
 logger = logging.getLogger(__name__)
@@ -72,6 +74,9 @@
   # DEFLATE compression
   DEFLATE = 'deflate'
 
+  # ZSTD compression
+  ZSTD = 'zstd'
+
   # GZIP compression (deflate with GZIP headers).
   GZIP = 'gzip'
 
@@ -86,6 +91,7 @@
         CompressionTypes.BZIP2,
         CompressionTypes.DEFLATE,
         CompressionTypes.GZIP,
+        CompressionTypes.ZSTD,
         CompressionTypes.UNCOMPRESSED
     ])
     return compression_type in types
@@ -96,6 +102,7 @@
         cls.BZIP2: 'application/x-bz2',
         cls.DEFLATE: 'application/x-deflate',
         cls.GZIP: 'application/x-gzip',
+        cls.ZSTD: 'application/zstd',
     }
     return mime_types_by_compression_type.get(compression_type, default)
 
@@ -103,7 +110,11 @@
   def detect_compression_type(cls, file_path):
     """Returns the compression type of a file (based on its suffix)."""
     compression_types_by_suffix = {
-        '.bz2': cls.BZIP2, '.deflate': cls.DEFLATE, '.gz': cls.GZIP
+        '.bz2': cls.BZIP2,
+        '.deflate': cls.DEFLATE,
+        '.gz': cls.GZIP,
+        '.zst': cls.ZSTD,
+        '.zstd': cls.ZSTD
     }
     lowercased_path = file_path.lower()
     for suffix, compression_type in compression_types_by_suffix.items():
@@ -166,6 +177,13 @@
       self._decompressor = bz2.BZ2Decompressor()
     elif self._compression_type == CompressionTypes.DEFLATE:
       self._decompressor = zlib.decompressobj()
+    elif self._compression_type == CompressionTypes.ZSTD:
+      # max_window_size is hardcoded to avoid out-of-memory
+      # errors when reading big files; please refer
+      # to the following issue for further explanation:
+      # https://github.com/indygreg/python-zstandard/issues/157
+      self._decompressor = zstandard.ZstdDecompressor(
+          max_window_size=2147483648).decompressobj()
     else:
       assert self._compression_type == CompressionTypes.GZIP
       self._decompressor = zlib.decompressobj(self._gzip_mask)
@@ -176,6 +194,8 @@
     elif self._compression_type == CompressionTypes.DEFLATE:
       self._compressor = zlib.compressobj(
           zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED)
+    elif self._compression_type == CompressionTypes.ZSTD:
+      self._compressor = zstandard.ZstdCompressor().compressobj()
     else:
       assert self._compression_type == CompressionTypes.GZIP
       self._compressor = zlib.compressobj(
@@ -236,6 +256,7 @@
         # EOF of current stream reached.
         if (self._compression_type == CompressionTypes.BZIP2 or
             self._compression_type == CompressionTypes.DEFLATE or
+            self._compression_type == CompressionTypes.ZSTD or
             self._compression_type == CompressionTypes.GZIP):
           pass
         else:
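
As a rough illustration of the ZSTD support added above: a minimal sketch of reading a zstd-compressed text file, assuming this change is applied and the `zstandard` dependency is installed. The bucket path below is a placeholder.

```
# A minimal sketch, not part of this patch. The path is a placeholder.
import apache_beam as beam
from apache_beam.io.filesystem import CompressionTypes

with beam.Pipeline() as p:
  _ = (
      p
      # Files ending in '.zst' or '.zstd' are also picked up automatically
      # by CompressionTypes.AUTO via detect_compression_type.
      | 'ReadZstd' >> beam.io.ReadFromText(
          'gs://my-bucket/input/data.txt.zst',
          compression_type=CompressionTypes.ZSTD)
      | 'Print' >> beam.Map(print))
```
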
diff --git a/sdks/python/apache_beam/io/filesystem_test.py b/sdks/python/apache_beam/io/filesystem_test.py
index 4f57fd5..0b1827f 100644
--- a/sdks/python/apache_beam/io/filesystem_test.py
+++ b/sdks/python/apache_beam/io/filesystem_test.py
@@ -31,6 +31,7 @@
 import zlib
 from io import BytesIO
 
+import zstandard
 from parameterized import param
 from parameterized import parameterized
 
@@ -311,6 +312,10 @@
           else gzip.open
       with compress_open(file_name, 'wb') as f:
         f.write(content)
+    elif compression_type == CompressionTypes.ZSTD:
+      compress_open = zstandard.open
+      with compress_open(file_name, 'wb') as f:
+        f.write(content)
     else:
       assert False, "Invalid compression type: %s" % compression_type
 
@@ -334,7 +339,8 @@
   def test_seek_set(self):
     for compression_type in [CompressionTypes.BZIP2,
                              CompressionTypes.DEFLATE,
-                             CompressionTypes.GZIP]:
+                             CompressionTypes.GZIP,
+                             CompressionTypes.ZSTD]:
       file_name = self._create_compressed_file(compression_type, self.content)
       with open(file_name, 'rb') as f:
         compressed_fd = CompressedFile(
@@ -368,7 +374,8 @@
   def test_seek_cur(self):
     for compression_type in [CompressionTypes.BZIP2,
                              CompressionTypes.DEFLATE,
-                             CompressionTypes.GZIP]:
+                             CompressionTypes.GZIP,
+                             CompressionTypes.ZSTD]:
       file_name = self._create_compressed_file(compression_type, self.content)
       with open(file_name, 'rb') as f:
         compressed_fd = CompressedFile(
@@ -402,7 +409,8 @@
   def test_read_from_end_returns_no_data(self):
     for compression_type in [CompressionTypes.BZIP2,
                              CompressionTypes.DEFLATE,
-                             CompressionTypes.GZIP]:
+                             CompressionTypes.GZIP,
+                             CompressionTypes.ZSTD]:
       file_name = self._create_compressed_file(compression_type, self.content)
       with open(file_name, 'rb') as f:
         compressed_fd = CompressedFile(
@@ -419,7 +427,8 @@
   def test_seek_outside(self):
     for compression_type in [CompressionTypes.BZIP2,
                              CompressionTypes.DEFLATE,
-                             CompressionTypes.GZIP]:
+                             CompressionTypes.GZIP,
+                             CompressionTypes.ZSTD]:
       file_name = self._create_compressed_file(compression_type, self.content)
       with open(file_name, 'rb') as f:
         compressed_fd = CompressedFile(
@@ -443,7 +452,8 @@
   def test_read_and_seek_back_to_beginning(self):
     for compression_type in [CompressionTypes.BZIP2,
                              CompressionTypes.DEFLATE,
-                             CompressionTypes.GZIP]:
+                             CompressionTypes.GZIP,
+                             CompressionTypes.ZSTD]:
       file_name = self._create_compressed_file(compression_type, self.content)
       with open(file_name, 'rb') as f:
         compressed_fd = CompressedFile(
@@ -508,6 +518,8 @@
         compress_factory = bz2.BZ2File
       elif compression_type == CompressionTypes.GZIP:
         compress_factory = gzip.open
+      elif compression_type == CompressionTypes.ZSTD:
+        compress_factory = zstandard.open
       else:
         assert False, "Invalid compression type: %s" % compression_type
       for line in lines:
@@ -533,7 +545,9 @@
     timer = threading.Timer(timeout, timeout_handler)
     try:
       test_lines = tuple(generate_random_line() for i in range(num_test_lines))
-      for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
+      for compression_type in [CompressionTypes.BZIP2,
+                               CompressionTypes.GZIP,
+                               CompressionTypes.ZSTD]:
         file_name = create_test_file(compression_type, test_lines)
         timer.start()
         with open(file_name, 'rb') as f:
diff --git a/sdks/python/apache_beam/io/gcp/bigquery.py b/sdks/python/apache_beam/io/gcp/bigquery.py
index fa28819..5ef4a1d 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery.py
@@ -2683,7 +2683,13 @@
       to run queries with INTERACTIVE priority. This option is ignored when
       reading from a table rather than a query. To learn more about query
       priority, see: https://cloud.google.com/bigquery/docs/running-queries
-   """
+    output_type (str): By default, this source yields Python dictionaries
+      (`PYTHON_DICT`). There is experimental support for producing a
+      PCollection with a schema and yielding Beam Rows via the option
+      `BEAM_ROW`. For more information on schemas, see
+      https://beam.apache.org/documentation/programming-guide/\
+      #what-is-a-schema
+      """
   class Method(object):
     EXPORT = 'EXPORT'  #  This is currently the default.
     DIRECT_READ = 'DIRECT_READ'
@@ -2695,10 +2701,14 @@
       gcs_location=None,
       method=None,
       use_native_datetime=False,
+      output_type=None,
       *args,
       **kwargs):
     self.method = method or ReadFromBigQuery.Method.EXPORT
     self.use_native_datetime = use_native_datetime
+    self.output_type = output_type
+    self._args = args
+    self._kwargs = kwargs
 
     if self.method is ReadFromBigQuery.Method.EXPORT \
         and self.use_native_datetime is True:
@@ -2716,22 +2726,51 @@
       if isinstance(gcs_location, str):
         gcs_location = StaticValueProvider(str, gcs_location)
 
+    if self.output_type == 'BEAM_ROW' and self._kwargs.get('query',
+                                                           None) is not None:
+      raise ValueError(
+          "Both a query and an output type of 'BEAM_ROW' were specified. "
+          "'BEAM_ROW' is not currently supported with queries.")
+
     self.gcs_location = gcs_location
     self.bigquery_dataset_labels = {
         'type': 'bq_direct_read_' + str(uuid.uuid4())[0:10]
     }
-    self._args = args
-    self._kwargs = kwargs
 
   def expand(self, pcoll):
     if self.method is ReadFromBigQuery.Method.EXPORT:
-      return self._expand_export(pcoll)
+      output_pcollection = self._expand_export(pcoll)
     elif self.method is ReadFromBigQuery.Method.DIRECT_READ:
-      return self._expand_direct_read(pcoll)
+      output_pcollection = self._expand_direct_read(pcoll)
+
     else:
       raise ValueError(
           'The method to read from BigQuery must be either EXPORT'
           'or DIRECT_READ.')
+    return self._expand_output_type(output_pcollection)
+
+  def _expand_output_type(self, output_pcollection):
+    if self.output_type == 'PYTHON_DICT' or self.output_type is None:
+      return output_pcollection
+    elif self.output_type == 'BEAM_ROW':
+      table_details = bigquery_tools.parse_table_reference(
+          table=self._kwargs.get("table", None),
+          dataset=self._kwargs.get("dataset", None),
+          project=self._kwargs.get("project", None))
+      if isinstance(self._kwargs['table'], ValueProvider):
+        raise TypeError(
+            '%s: table must be of type string'
+            '; got ValueProvider instead' % self.__class__.__name__)
+      elif callable(self._kwargs['table']):
+        raise TypeError(
+            '%s: table must be of type string'
+            '; got a callable instead' % self.__class__.__name__)
+      return output_pcollection | beam.io.gcp.bigquery_schema_tools.\
+            convert_to_usertype(
+            beam.io.gcp.bigquery.bigquery_tools.BigQueryWrapper().get_table(
+                project_id=table_details.projectId,
+                dataset_id=table_details.datasetId,
+                table_id=table_details.tableId).schema)
 
   def _expand_export(self, pcoll):
     # TODO(https://github.com/apache/beam/issues/20683): Make ReadFromBQ rely
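
A hedged usage sketch of the new output_type option added above; the project, dataset, table, and bucket names are placeholders, not values from this change:

import apache_beam as beam

with beam.Pipeline() as p:
  rows = (
      p | beam.io.ReadFromBigQuery(
          table='my-project:my_dataset.my_table',
          gcs_location='gs://my-bucket/tmp',
          output_type='BEAM_ROW'))
  # With output_type='BEAM_ROW' the elements carry a schema, so fields can be
  # accessed as attributes instead of dictionary keys.
  _ = rows | beam.Map(lambda row: row.name) | beam.Map(print)
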
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py b/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
index babc6f1..b9a414e 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_read_it_test.py
@@ -33,6 +33,8 @@
 import pytest
 
 import apache_beam as beam
+import apache_beam.io.gcp.bigquery
+from apache_beam.io.gcp import bigquery_schema_tools
 from apache_beam.io.gcp import bigquery_tools
 from apache_beam.io.gcp.bigquery_tools import BigQueryWrapper
 from apache_beam.io.gcp.internal.clients import bigquery
@@ -178,6 +180,84 @@
               query=query, use_standard_sql=True, project=self.project))
       assert_that(result, equal_to(self.TABLE_DATA))
 
+  @pytest.mark.it_postcommit
+  def test_table_schema_retrieve(self):
+    the_table = bigquery_tools.BigQueryWrapper().get_table(
+        project_id="apache-beam-testing",
+        dataset_id="beam_bigquery_io_test",
+        table_id="dfsqltable_3c7d6fd5_16e0460dfd0")
+    table = the_table.schema
+    utype = bigquery_schema_tools.\
+        generate_user_type_from_bq_schema(table)
+    with beam.Pipeline(argv=self.args) as p:
+      result = (
+          p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+              gcs_location="gs://bqio_schema_test",
+              dataset="beam_bigquery_io_test",
+              table="dfsqltable_3c7d6fd5_16e0460dfd0",
+              project="apache-beam-testing",
+              output_type='BEAM_ROW'))
+      assert_that(
+          result,
+          equal_to([
+              utype(id=3, name='customer1', type='test'),
+              utype(id=1, name='customer1', type='test'),
+              utype(id=2, name='customer2', type='test'),
+              utype(id=4, name='customer2', type='test')
+          ]))
+
+  @pytest.mark.it_postcommit
+  def test_table_schema_retrieve_specifying_only_table(self):
+    the_table = bigquery_tools.BigQueryWrapper().get_table(
+        project_id="apache-beam-testing",
+        dataset_id="beam_bigquery_io_test",
+        table_id="dfsqltable_3c7d6fd5_16e0460dfd0")
+    table = the_table.schema
+    utype = bigquery_schema_tools.\
+        generate_user_type_from_bq_schema(table)
+    with beam.Pipeline(argv=self.args) as p:
+      result = (
+          p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+              gcs_location="gs://bqio_schema_test",
+              table="apache-beam-testing:"
+              "beam_bigquery_io_test."
+              "dfsqltable_3c7d6fd5_16e0460dfd0",
+              output_type='BEAM_ROW'))
+      assert_that(
+          result,
+          equal_to([
+              utype(id=3, name='customer1', type='test'),
+              utype(id=1, name='customer1', type='test'),
+              utype(id=2, name='customer2', type='test'),
+              utype(id=4, name='customer2', type='test')
+          ]))
+
+  @pytest.mark.it_postcommit
+  def test_table_schema_retrieve_with_direct_read(self):
+    the_table = bigquery_tools.BigQueryWrapper().get_table(
+        project_id="apache-beam-testing",
+        dataset_id="beam_bigquery_io_test",
+        table_id="dfsqltable_3c7d6fd5_16e0460dfd0")
+    table = the_table.schema
+    utype = bigquery_schema_tools.\
+        generate_user_type_from_bq_schema(table)
+    with beam.Pipeline(argv=self.args) as p:
+      result = (
+          p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+              method=beam.io.ReadFromBigQuery.Method.DIRECT_READ,
+              table="apache-beam-testing:"
+              "beam_bigquery_io_test."
+              "dfsqltable_3c7d6fd5_16e0460dfd0",
+              output_type='BEAM_ROW'))
+      assert_that(
+          result,
+          equal_to([
+              utype(id=3, name='customer1', type='test'),
+              utype(id=1, name='customer1', type='test'),
+              utype(id=2, name='customer2', type='test'),
+              utype(id=4, name='customer2', type='test')
+          ]))
+
 
 class ReadUsingStorageApiTests(BigQueryReadIntegrationTests):
   TABLE_DATA = [{
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py
new file mode 100644
index 0000000..a36c389
--- /dev/null
+++ b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py
@@ -0,0 +1,119 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Tools used tool work with Schema types in the context of BigQuery.
+Classes, constants and functions in this file are experimental and have no
+backwards compatibility guarantees.
+NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
+"""
+
+from typing import Optional
+from typing import Sequence
+
+import numpy as np
+
+import apache_beam as beam
+from apache_beam.io.gcp.internal.clients import bigquery
+from apache_beam.portability.api import schema_pb2
+
+# BigQuery types as listed in
+# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
+# with aliases (RECORD, BOOLEAN, FLOAT, INTEGER) as defined in
+# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/java/latest/com/google/api/services/bigquery/model/TableFieldSchema.html#setType-java.lang.String-
+BIG_QUERY_TO_PYTHON_TYPES = {
+    "STRING": str,
+    "INTEGER": np.int64,
+    "FLOAT64": np.float64,
+    "BOOLEAN": bool,
+    "BYTES": bytes,
+    # TODO(https://github.com/apache/beam/issues/20810):
+    # Finish mappings for all BQ types
+}
+
+
+def generate_user_type_from_bq_schema(the_table_schema):
+  #type: (bigquery.TableSchema) -> type
+
+  """Convert a schema of type TableSchema into a pcollection element.
+      Args:
+        the_table_schema: A BQ schema of type TableSchema
+      Returns:
+        type: type that can be used to work with pCollections.
+  """
+
+  the_schema = beam.io.gcp.bigquery_tools.get_dict_table_schema(
+      the_table_schema)
+  if the_schema == {}:
+    raise ValueError("Encountered an empty schema")
+  dict_of_tuples = []
+  for i in range(len(the_schema['fields'])):
+    if the_schema['fields'][i]['type'] in BIG_QUERY_TO_PYTHON_TYPES:
+      typ = bq_field_to_type(
+          the_schema['fields'][i]['type'], the_schema['fields'][i]['mode'])
+    else:
+      raise ValueError(
+          f"Encountered "
+          f"an unsupported type: {the_schema['fields'][i]['type']!r}")
+    # TODO svetaksundhar@: Map remaining BQ types
+    dict_of_tuples.append((the_schema['fields'][i]['name'], typ))
+  sample_schema = beam.typehints.schemas.named_fields_to_schema(dict_of_tuples)
+  usertype = beam.typehints.schemas.named_tuple_from_schema(sample_schema)
+  return usertype
+
+
+def bq_field_to_type(field, mode):
+  if mode == 'NULLABLE':
+    return Optional[BIG_QUERY_TO_PYTHON_TYPES[field]]
+  elif mode == 'REPEATED':
+    return Sequence[BIG_QUERY_TO_PYTHON_TYPES[field]]
+  elif mode is None or mode == '':
+    return BIG_QUERY_TO_PYTHON_TYPES[field]
+  else:
+    raise ValueError(f"Encountered an unsupported mode: {mode!r}")
+
+
+def convert_to_usertype(table_schema):
+  usertype = beam.io.gcp.bigquery_schema_tools. \
+        generate_user_type_from_bq_schema(table_schema)
+  return beam.ParDo(
+      beam.io.gcp.bigquery_schema_tools.BeamSchemaConversionDoFn(usertype))
+
+
+class BeamSchemaConversionDoFn(beam.DoFn):
+  # Converts a dictionary element to an instance of the user type.
+  def __init__(self, pcoll_val_ctor):
+    self._pcoll_val_ctor = pcoll_val_ctor
+
+  def process(self, dict_of_tuples):
+    yield self._pcoll_val_ctor(**dict_of_tuples)
+
+  def infer_output_type(self, input_type):
+    return self._pcoll_val_ctor
+
+  @classmethod
+  def _from_serialized_schema(cls, schema_str):
+    return cls(
+        beam.typehints.schemas.named_tuple_from_schema(
+            beam.utils.proto_utils.parse_Bytes(schema_str, schema_pb2.Schema)))
+
+  def __reduce__(self):
+    # when pickling, use bytes representation of the schema.
+    return (
+        self._from_serialized_schema,
+        (
+            beam.typehints.schemas.named_tuple_to_schema(
+                self._pcoll_val_ctor).SerializeToString(), ))
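
A short sketch combining the helpers defined above; the field names and values are illustrative:

from apache_beam.io.gcp import bigquery_schema_tools
from apache_beam.io.gcp.internal.clients import bigquery

schema = bigquery.TableSchema(fields=[
    bigquery.TableFieldSchema(name='name', type='STRING', mode='NULLABLE'),
    bigquery.TableFieldSchema(name='id', type='INTEGER', mode=None),
])
# Build a user type from the BigQuery schema and construct one element from a
# row dictionary, mirroring what BeamSchemaConversionDoFn.process does.
usertype = bigquery_schema_tools.generate_user_type_from_bq_schema(schema)
row = usertype(**{'name': 'customer1', 'id': 1})
print(row.name, row.id)
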
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_schema_tools_test.py b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools_test.py
new file mode 100644
index 0000000..9187ec2
--- /dev/null
+++ b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools_test.py
@@ -0,0 +1,186 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import typing
+import unittest.mock
+
+import mock
+import numpy as np
+
+import apache_beam.io.gcp.bigquery
+from apache_beam.io.gcp import bigquery_schema_tools
+from apache_beam.io.gcp.bigquery_tools import BigQueryWrapper
+from apache_beam.io.gcp.internal.clients import bigquery
+from apache_beam.options import value_provider
+
+try:
+  from apitools.base.py.exceptions import HttpError
+except ImportError:
+  HttpError = None
+
+
+@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
+class TestBigQueryToSchema(unittest.TestCase):
+  def test_check_schema_conversions(self):
+    fields = [
+        bigquery.TableFieldSchema(name='stn', type='STRING', mode="NULLABLE"),
+        bigquery.TableFieldSchema(name='temp', type='FLOAT64', mode="REPEATED"),
+        bigquery.TableFieldSchema(name='count', type='INTEGER', mode=None)
+    ]
+    schema = bigquery.TableSchema(fields=fields)
+
+    usertype = bigquery_schema_tools.generate_user_type_from_bq_schema(
+        the_table_schema=schema)
+    self.assertEqual(
+        usertype.__annotations__,
+        {
+            'stn': typing.Optional[str],
+            'temp': typing.Sequence[np.float64],
+            'count': np.int64
+        })
+
+  def test_check_conversion_with_empty_schema(self):
+    fields = []
+    schema = bigquery.TableSchema(fields=fields)
+
+    usertype = bigquery_schema_tools.generate_user_type_from_bq_schema(
+        the_table_schema=schema)
+    self.assertEqual(usertype.__annotations__, {})
+
+  def test_unsupported_type(self):
+    fields = [
+        bigquery.TableFieldSchema(
+            name='number', type='DOUBLE', mode="NULLABLE"),
+        bigquery.TableFieldSchema(name='temp', type='FLOAT64', mode="REPEATED"),
+        bigquery.TableFieldSchema(name='count', type='INTEGER', mode=None)
+    ]
+    schema = bigquery.TableSchema(fields=fields)
+    with self.assertRaisesRegex(ValueError,
+                                "Encountered an unsupported type: 'DOUBLE'"):
+      bigquery_schema_tools.generate_user_type_from_bq_schema(
+          the_table_schema=schema)
+
+  def test_unsupported_mode(self):
+    fields = [
+        bigquery.TableFieldSchema(name='number', type='INTEGER', mode="NESTED"),
+        bigquery.TableFieldSchema(name='temp', type='FLOAT64', mode="REPEATED"),
+        bigquery.TableFieldSchema(name='count', type='INTEGER', mode=None)
+    ]
+    schema = bigquery.TableSchema(fields=fields)
+    with self.assertRaisesRegex(ValueError,
+                                "Encountered an unsupported mode: 'NESTED'"):
+      bigquery_schema_tools.generate_user_type_from_bq_schema(
+          the_table_schema=schema)
+
+  @mock.patch.object(BigQueryWrapper, 'get_table')
+  def test_bad_schema_public_api_export(self, get_table):
+    fields = [
+        bigquery.TableFieldSchema(name='stn', type='DOUBLE', mode="NULLABLE"),
+        bigquery.TableFieldSchema(name='temp', type='FLOAT64', mode="REPEATED"),
+        bigquery.TableFieldSchema(name='count', type='INTEGER', mode=None)
+    ]
+    schema = bigquery.TableSchema(fields=fields)
+    table = apache_beam.io.gcp.internal.clients.bigquery.\
+        bigquery_v2_messages.Table(
+        schema=schema)
+    get_table.return_value = table
+
+    with self.assertRaisesRegex(ValueError,
+                                "Encountered an unsupported type: 'DOUBLE'"):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table="dataset.sample_table",
+          method="EXPORT",
+          project="project",
+          output_type='BEAM_ROW')
+      pipeline
+
+  @mock.patch.object(BigQueryWrapper, 'get_table')
+  def test_bad_schema_public_api_direct_read(self, get_table):
+    fields = [
+        bigquery.TableFieldSchema(name='stn', type='DOUBLE', mode="NULLABLE"),
+        bigquery.TableFieldSchema(name='temp', type='FLOAT64', mode="REPEATED"),
+        bigquery.TableFieldSchema(name='count', type='INTEGER', mode=None)
+    ]
+    schema = bigquery.TableSchema(fields=fields)
+    table = apache_beam.io.gcp.internal.clients.bigquery. \
+        bigquery_v2_messages.Table(
+        schema=schema)
+    get_table.return_value = table
+
+    with self.assertRaisesRegex(ValueError,
+                                "Encountered an unsupported type: 'DOUBLE'"):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table="dataset.sample_table",
+          method="DIRECT_READ",
+          project="project",
+          output_type='BEAM_ROW')
+      pipeline
+
+  def test_unsupported_value_provider(self):
+    with self.assertRaisesRegex(TypeError,
+                                'ReadFromBigQuery: table must be of type string'
+                                '; got ValueProvider instead'):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table=value_provider.ValueProvider(), output_type='BEAM_ROW')
+      pipeline
+
+  def test_unsupported_callable(self):
+    def filterTable(table):
+      if table is not None:
+        return table
+
+    res = filterTable
+    with self.assertRaisesRegex(TypeError,
+                                'ReadFromBigQuery: table must be of type string'
+                                '; got a callable instead'):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table=res, output_type='BEAM_ROW')
+      pipeline
+
+  def test_unsupported_query_export(self):
+    with self.assertRaisesRegex(
+        ValueError,
+        "Both a query and an output type of 'BEAM_ROW' were specified. "
+        "'BEAM_ROW' is not currently supported with queries."):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table="project:dataset.sample_table",
+          method="EXPORT",
+          query='SELECT name FROM dataset.sample_table',
+          output_type='BEAM_ROW')
+      pipeline
+
+  def test_unsupported_query_direct_read(self):
+    with self.assertRaisesRegex(
+        ValueError,
+        "Both a query and an output type of 'BEAM_ROW' were specified. "
+        "'BEAM_ROW' is not currently supported with queries."):
+      p = apache_beam.Pipeline()
+      pipeline = p | apache_beam.io.gcp.bigquery.ReadFromBigQuery(
+          table="project:dataset.sample_table",
+          method="DIRECT_READ",
+          query='SELECT name FROM dataset.sample_table',
+          output_type='BEAM_ROW')
+      pipeline
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.INFO)
+  unittest.main()
diff --git a/sdks/python/apache_beam/io/gcp/gcsio.py b/sdks/python/apache_beam/io/gcp/gcsio.py
index bf41ae6..d4ceeda 100644
--- a/sdks/python/apache_beam/io/gcp/gcsio.py
+++ b/sdks/python/apache_beam/io/gcp/gcsio.py
@@ -580,9 +580,9 @@
     counter = 0
     start_time = time.time()
     if with_metadata:
-      _LOGGER.info("Starting the file information of the input")
+      _LOGGER.debug("Starting the file information of the input")
     else:
-      _LOGGER.info("Starting the size estimation of the input")
+      _LOGGER.debug("Starting the size estimation of the input")
     while True:
       response = self.client.objects.List(request)
       for item in response.items:
@@ -604,7 +604,9 @@
         request.pageToken = response.nextPageToken
       else:
         break
-    _LOGGER.info(
+    _LOGGER.log(
+        # do not spam logs when list_prefix is likely used to check empty folder
+        logging.INFO if counter > 0 else logging.DEBUG,
         "Finished listing %s files in %s seconds.",
         counter,
         time.time() - start_time)
diff --git a/sdks/python/apache_beam/ml/inference/base.py b/sdks/python/apache_beam/ml/inference/base.py
index 5bb45d7..075260e 100644
--- a/sdks/python/apache_beam/ml/inference/base.py
+++ b/sdks/python/apache_beam/ml/inference/base.py
@@ -134,6 +134,17 @@
     """
     return {}
 
+  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
+    """Validates inference_args passed in the inference call.
+
+    Most frameworks do not need extra arguments in their predict() call, so
+    the default behavior is to error out if inference_args are present.
+    """
+    if inference_args:
+      raise ValueError(
+          'inference_args were provided, but should be None because this '
+          'framework does not expect extra arguments on inferences.')
+
 
 class KeyedModelHandler(Generic[KeyT, ExampleT, PredictionT, ModelT],
                         ModelHandler[Tuple[KeyT, ExampleT],
@@ -178,6 +189,9 @@
   def batch_elements_kwargs(self):
     return self._unkeyed.batch_elements_kwargs()
 
+  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
+    return self._unkeyed.validate_inference_args(inference_args)
+
 
 class MaybeKeyedModelHandler(Generic[KeyT, ExampleT, PredictionT, ModelT],
                              ModelHandler[Union[ExampleT, Tuple[KeyT,
@@ -248,6 +262,9 @@
   def batch_elements_kwargs(self):
     return self._unkeyed.batch_elements_kwargs()
 
+  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
+    return self._unkeyed.validate_inference_args(inference_args)
+
 
 class RunInference(beam.PTransform[beam.PCollection[ExampleT],
                                    beam.PCollection[PredictionT]]):
@@ -297,6 +314,7 @@
   # handled.
   def expand(
       self, pcoll: beam.PCollection[ExampleT]) -> beam.PCollection[PredictionT]:
+    self._model_handler.validate_inference_args(self._inference_args)
     resource_hints = self._model_handler.get_resource_hints()
     return (
         pcoll
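
A hedged sketch of a handler that deliberately accepts extra prediction arguments and therefore overrides the new default check; the handler class and its model's predict() signature are hypothetical:

from typing import Any, Dict, Optional

from apache_beam.ml.inference.base import ModelHandler


class ArgAwareModelHandler(ModelHandler):
  """Hypothetical handler whose run_inference consumes inference_args."""
  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
    # Accept extra arguments instead of raising; run_inference forwards them.
    pass

  def run_inference(self, batch, model, inference_args=None):
    inference_args = inference_args or {}
    return [model.predict(example, **inference_args) for example in batch]
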
diff --git a/sdks/python/apache_beam/ml/inference/base_test.py b/sdks/python/apache_beam/ml/inference/base_test.py
index 98fc252..ca79a3c 100644
--- a/sdks/python/apache_beam/ml/inference/base_test.py
+++ b/sdks/python/apache_beam/ml/inference/base_test.py
@@ -79,12 +79,22 @@
     return {'min_batch_size': 9999}
 
 
-class FakeModelHandlerExtraInferenceArgs(FakeModelHandler):
+class FakeModelHandlerFailsOnInferenceArgs(FakeModelHandler):
+  def run_inference(self, batch, unused_model, inference_args=None):
+    raise ValueError(
+        'run_inference should not be called because error should already be '
+        'thrown from the validate_inference_args check.')
+
+
+class FakeModelHandlerExpectedInferenceArgs(FakeModelHandler):
   def run_inference(self, batch, unused_model, inference_args=None):
     if not inference_args:
       raise ValueError('inference_args should exist')
     return batch
 
+  def validate_inference_args(self, inference_args):
+    pass
+
 
 class RunInferenceBaseTest(unittest.TestCase):
   def test_run_inference_impl_simple_examples(self):
@@ -128,9 +138,20 @@
       pcoll = pipeline | 'start' >> beam.Create(examples)
       inference_args = {'key': True}
       actual = pcoll | base.RunInference(
-          FakeModelHandlerExtraInferenceArgs(), inference_args=inference_args)
+          FakeModelHandlerExpectedInferenceArgs(),
+          inference_args=inference_args)
       assert_that(actual, equal_to(examples), label='assert:inferences')
 
+  def test_unexpected_inference_args_passed(self):
+    with self.assertRaisesRegex(ValueError, r'inference_args were provided'):
+      with TestPipeline() as pipeline:
+        examples = [1, 5, 3, 10]
+        pcoll = pipeline | 'start' >> beam.Create(examples)
+        inference_args = {'key': True}
+        _ = pcoll | base.RunInference(
+            FakeModelHandlerFailsOnInferenceArgs(),
+            inference_args=inference_args)
+
   def test_counted_metrics(self):
     pipeline = TestPipeline()
     examples = [1, 5, 3, 10]
diff --git a/sdks/python/apache_beam/ml/inference/pytorch_inference.py b/sdks/python/apache_beam/ml/inference/pytorch_inference.py
index d32ed50..945ad7e 100644
--- a/sdks/python/apache_beam/ml/inference/pytorch_inference.py
+++ b/sdks/python/apache_beam/ml/inference/pytorch_inference.py
@@ -29,6 +29,12 @@
 from apache_beam.io.filesystems import FileSystems
 from apache_beam.ml.inference.base import ModelHandler
 from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.utils.annotations import experimental
+
+__all__ = [
+    'PytorchModelHandlerTensor',
+    'PytorchModelHandlerKeyedTensor',
+]
 
 
 def _load_model(
@@ -144,7 +150,11 @@
     """
     return 'RunInferencePytorch'
 
+  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
+    pass
 
+
+@experimental(extra_message="No backwards-compatibility guarantees.")
 class PytorchModelHandlerKeyedTensor(ModelHandler[Dict[str, torch.Tensor],
                                                   PredictionResult,
                                                   torch.nn.Module]):
@@ -250,3 +260,6 @@
        A namespace for metrics collected by the RunInference transform.
     """
     return 'RunInferencePytorch'
+
+  def validate_inference_args(self, inference_args: Optional[Dict[str, Any]]):
+    pass
diff --git a/sdks/python/apache_beam/ml/inference/pytorch_inference_it_test.py b/sdks/python/apache_beam/ml/inference/pytorch_inference_it_test.py
index 784182e..5e37772 100644
--- a/sdks/python/apache_beam/ml/inference/pytorch_inference_it_test.py
+++ b/sdks/python/apache_beam/ml/inference/pytorch_inference_it_test.py
@@ -15,8 +15,6 @@
 # limitations under the License.
 #
 
-# pylint: skip-file
-
 """End-to-End test for Pytorch Inference"""
 
 import logging
@@ -29,6 +27,7 @@
 from apache_beam.io.filesystems import FileSystems
 from apache_beam.testing.test_pipeline import TestPipeline
 
+# pylint: disable=ungrouped-imports
 try:
   import torch
   from apache_beam.examples.inference import pytorch_image_classification
@@ -37,6 +36,7 @@
 except ImportError as e:
   torch = None
 
+# pylint: disable=line-too-long
 _EXPECTED_OUTPUTS = {
     'gs://apache-beam-ml/datasets/imagenet/raw-data/validation/ILSVRC2012_val_00005001.JPEG': '681',
     'gs://apache-beam-ml/datasets/imagenet/raw-data/validation/ILSVRC2012_val_00005002.JPEG': '333',
@@ -64,19 +64,17 @@
     os.getenv('FORCE_TORCH_IT') is None and torch is None,
     'Missing dependencies. '
     'Test depends on torch, torchvision, pillow, and transformers')
-# TODO: https://github.com/apache/beam/issues/21859
-@pytest.mark.skip
 class PyTorchInference(unittest.TestCase):
   @pytest.mark.uses_pytorch
   @pytest.mark.it_postcommit
   def test_torch_run_inference_imagenet_mobilenetv2(self):
     test_pipeline = TestPipeline(is_integration_test=True)
     # text files containing absolute path to the imagenet validation data on GCS
-    file_of_image_names = 'gs://apache-beam-ml/testing/inputs/it_mobilenetv2_imagenet_validation_inputs.txt'  # disable: line-too-long
+    file_of_image_names = 'gs://apache-beam-ml/testing/inputs/it_mobilenetv2_imagenet_validation_inputs.txt'  # pylint: disable=line-too-long
     output_file_dir = 'gs://apache-beam-ml/testing/predictions'
     output_file = '/'.join([output_file_dir, str(uuid.uuid4()), 'result.txt'])
 
-    model_state_dict_path = 'gs://apache-beam-ml/models/imagenet_classification_mobilenet_v2.pt'
+    model_state_dict_path = 'gs://apache-beam-ml/models/imagenet_classification_mobilenet_v2.pt'  # pylint: disable=line-too-long
     extra_opts = {
         'input': file_of_image_names,
         'output': output_file,
@@ -98,11 +96,11 @@
   def test_torch_run_inference_coco_maskrcnn_resnet50_fpn(self):
     test_pipeline = TestPipeline(is_integration_test=True)
     # text files containing absolute path to the coco validation data on GCS
-    file_of_image_names = 'gs://apache-beam-ml/testing/inputs/it_coco_validation_inputs.txt'  # disable: line-too-long
+    file_of_image_names = 'gs://apache-beam-ml/testing/inputs/it_coco_validation_inputs.txt'  # pylint: disable=line-too-long
     output_file_dir = 'gs://apache-beam-ml/testing/predictions'
     output_file = '/'.join([output_file_dir, str(uuid.uuid4()), 'result.txt'])
 
-    model_state_dict_path = 'gs://apache-beam-ml/models/torchvision.models.detection.maskrcnn_resnet50_fpn.pth'
+    model_state_dict_path = 'gs://apache-beam-ml/models/torchvision.models.detection.maskrcnn_resnet50_fpn.pth'  # pylint: disable=line-too-long
     images_dir = 'gs://apache-beam-ml/datasets/coco/raw-data/val2017'
     extra_opts = {
         'input': file_of_image_names,
@@ -116,7 +114,7 @@
 
     self.assertEqual(FileSystems().exists(output_file), True)
     predictions = process_outputs(filepath=output_file)
-    actuals_file = 'gs://apache-beam-ml/testing/expected_outputs/test_torch_run_inference_coco_maskrcnn_resnet50_fpn_actuals.txt'
+    actuals_file = 'gs://apache-beam-ml/testing/expected_outputs/test_torch_run_inference_coco_maskrcnn_resnet50_fpn_actuals.txt'  # pylint: disable=line-too-long
     actuals = process_outputs(filepath=actuals_file)
 
     predictions_dict = {}
@@ -134,11 +132,11 @@
   def test_torch_run_inference_bert_for_masked_lm(self):
     test_pipeline = TestPipeline(is_integration_test=True)
     # Path to text file containing some sentences
-    file_of_sentences = 'gs://apache-beam-ml/datasets/custom/sentences.txt'  # disable: line-too-long
+    file_of_sentences = 'gs://apache-beam-ml/datasets/custom/sentences.txt'  # pylint: disable=line-too-long
     output_file_dir = 'gs://apache-beam-ml/testing/predictions'
     output_file = '/'.join([output_file_dir, str(uuid.uuid4()), 'result.txt'])
 
-    model_state_dict_path = 'gs://apache-beam-ml/models/huggingface.BertForMaskedLM.bert-base-uncased.pth'
+    model_state_dict_path = 'gs://apache-beam-ml/models/huggingface.BertForMaskedLM.bert-base-uncased.pth'  # pylint: disable=line-too-long
     extra_opts = {
         'input': file_of_sentences,
         'output': output_file,
@@ -150,7 +148,7 @@
 
     self.assertEqual(FileSystems().exists(output_file), True)
     predictions = process_outputs(filepath=output_file)
-    actuals_file = 'gs://apache-beam-ml/testing/expected_outputs/test_torch_run_inference_bert_for_masked_lm_actuals.txt'
+    actuals_file = 'gs://apache-beam-ml/testing/expected_outputs/test_torch_run_inference_bert_for_masked_lm_actuals.txt'  # pylint: disable=line-too-long
     actuals = process_outputs(filepath=actuals_file)
 
     predictions_dict = {}
diff --git a/sdks/python/apache_beam/ml/inference/sklearn_inference.py b/sdks/python/apache_beam/ml/inference/sklearn_inference.py
index e9d8aa6..1338a5a 100644
--- a/sdks/python/apache_beam/ml/inference/sklearn_inference.py
+++ b/sdks/python/apache_beam/ml/inference/sklearn_inference.py
@@ -31,6 +31,7 @@
 from apache_beam.io.filesystems import FileSystems
 from apache_beam.ml.inference.base import ModelHandler
 from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.utils.annotations import experimental
 
 try:
   import joblib
@@ -38,6 +39,11 @@
   # joblib is an optional dependency.
   pass
 
+__all__ = [
+    'SklearnModelHandlerNumpy',
+    'SklearnModelHandlerPandas',
+]
+
 
 class ModelFileType(enum.Enum):
   """Defines how a model file is serialized. Options are pickle or joblib."""
@@ -60,20 +66,6 @@
   raise AssertionError('Unsupported serialization type.')
 
 
-def _validate_inference_args(inference_args):
-  """Confirms that inference_args is None.
-
-  scikit-learn models do not need extra arguments in their predict() call.
-  However, since inference_args is an argument in the RunInference interface,
-  we want to make sure it is not passed here in Sklearn's implementation of
-  RunInference.
-  """
-  if inference_args:
-    raise ValueError(
-        'inference_args were provided, but should be None because scikit-learn '
-        'models do not need extra arguments in their predict() call.')
-
-
 class SklearnModelHandlerNumpy(ModelHandler[numpy.ndarray,
                                             PredictionResult,
                                             BaseEstimator]):
@@ -118,7 +110,6 @@
     Returns:
       An Iterable of type PredictionResult.
     """
-    _validate_inference_args(inference_args)
     # vectorize data for better performance
     vectorized_batch = numpy.stack(batch, axis=0)
     predictions = model.predict(vectorized_batch)
@@ -132,6 +123,7 @@
     return sum(sys.getsizeof(element) for element in batch)
 
 
+@experimental(extra_message="No backwards-compatibility guarantees.")
 class SklearnModelHandlerPandas(ModelHandler[pandas.DataFrame,
                                              PredictionResult,
                                              BaseEstimator]):
@@ -180,7 +172,6 @@
     Returns:
       An Iterable of type PredictionResult.
     """
-    _validate_inference_args(inference_args)
     # sklearn_inference currently only supports single rowed dataframes.
     for dataframe in iter(batch):
       if dataframe.shape[0] != 1:
diff --git a/sdks/python/apache_beam/ml/inference/sklearn_inference_it_test.py b/sdks/python/apache_beam/ml/inference/sklearn_inference_it_test.py
index be5da3f..4ce0282 100644
--- a/sdks/python/apache_beam/ml/inference/sklearn_inference_it_test.py
+++ b/sdks/python/apache_beam/ml/inference/sklearn_inference_it_test.py
@@ -27,6 +27,12 @@
 from apache_beam.io.filesystems import FileSystems
 from apache_beam.testing.test_pipeline import TestPipeline
 
+# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports, unused-import
+try:
+  from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem
+except ImportError:
+  raise unittest.SkipTest('GCP dependencies are not installed')
+
 
 def process_outputs(filepath):
   with FileSystems().open(filepath) as f:
@@ -35,12 +41,11 @@
   return lines
 
 
-@pytest.mark.skip
 @pytest.mark.uses_sklearn
 @pytest.mark.it_postcommit
 class SklearnInference(unittest.TestCase):
   def test_sklearn_mnist_classification(self):
-    test_pipeline = TestPipeline(is_integration_test=False)
+    test_pipeline = TestPipeline(is_integration_test=True)
     input_file = 'gs://apache-beam-ml/testing/inputs/it_mnist_data.csv'
     output_file_dir = 'gs://temp-storage-for-end-to-end-tests'
     output_file = '/'.join([output_file_dir, str(uuid.uuid4()), 'result.txt'])
diff --git a/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py b/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
index 978c3a8..72c5130 100644
--- a/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
@@ -317,15 +317,6 @@
       inference_runner = SklearnModelHandlerPandas(model_uri='unused')
       inference_runner.run_inference([data_frame_too_many_rows], fake_model)
 
-  def test_inference_args_passed(self):
-    with self.assertRaisesRegex(ValueError, r'inference_args were provided'):
-      data_frame = pandas_dataframe()
-      fake_model = FakeModel()
-      inference_runner = SklearnModelHandlerPandas(model_uri='unused')
-      inference_runner.run_inference([data_frame],
-                                     fake_model,
-                                     inference_args={'key1': 'value1'})
-
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/sdks/python/apache_beam/pipeline.py b/sdks/python/apache_beam/pipeline.py
index 6b9d211..12bab33 100644
--- a/sdks/python/apache_beam/pipeline.py
+++ b/sdks/python/apache_beam/pipeline.py
@@ -82,6 +82,7 @@
 from apache_beam.options.pipeline_options import TypeOptions
 from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
 from apache_beam.portability import common_urns
+from apache_beam.portability.api import beam_runner_api_pb2
 from apache_beam.runners import PipelineRunner
 from apache_beam.runners import create_runner
 from apache_beam.transforms import ParDo
@@ -100,7 +101,6 @@
 
 if TYPE_CHECKING:
   from types import TracebackType
-  from apache_beam.portability.api import beam_runner_api_pb2
   from apache_beam.runners.pipeline_context import PipelineContext
   from apache_beam.runners.runner import PipelineResult
   from apache_beam.transforms import environments
@@ -851,7 +851,6 @@
 
     """For internal use only; no backwards-compatibility guarantees."""
     from apache_beam.runners import pipeline_context
-    from apache_beam.portability.api import beam_runner_api_pb2
     if context is None:
       context = pipeline_context.PipelineContext(
           use_fake_coders=use_fake_coders,
@@ -919,12 +918,50 @@
         requirements=context.requirements())
     proto.components.transforms[root_transform_id].unique_name = (
         root_transform_id)
+    self.merge_compatible_environments(proto)
     if return_context:
       return proto, context  # type: ignore  # too complicated for now
     else:
       return proto
 
   @staticmethod
+  def merge_compatible_environments(proto):
+    """Tries to minimize the number of distinct environments by merging
+    those that are compatible (currently defined as identical).
+
+    Mutates proto as contexts may have references to proto.components.
+    """
+    env_map = {}
+    canonical_env = {}
+    files_by_hash = {}
+    for env_id, env in proto.components.environments.items():
+      # First deduplicate any file dependencies by their hash.
+      for dep in env.dependencies:
+        if dep.type_urn == common_urns.artifact_types.FILE.urn:
+          file_payload = beam_runner_api_pb2.ArtifactFilePayload.FromString(
+              dep.type_payload)
+          if file_payload.sha256:
+            if file_payload.sha256 in files_by_hash:
+              file_payload.path = files_by_hash[file_payload.sha256]
+              dep.type_payload = file_payload.SerializeToString()
+            else:
+              files_by_hash[file_payload.sha256] = file_payload.path
+      # Next check if we've ever seen this environment before.
+      normalized = env.SerializeToString(deterministic=True)
+      if normalized in canonical_env:
+        env_map[env_id] = canonical_env[normalized]
+      else:
+        canonical_env[normalized] = env_id
+    for old_env, new_env in env_map.items():
+      for transform in proto.components.transforms.values():
+        if transform.environment_id == old_env:
+          transform.environment_id = new_env
+      for windowing_strategy in proto.components.windowing_strategies.values():
+        if windowing_strategy.environment_id == old_env:
+          windowing_strategy.environment_id = new_env
+      del proto.components.environments[old_env]
+
+  @staticmethod
   def from_runner_api(
       proto,  # type: beam_runner_api_pb2.Pipeline
       runner,  # type: PipelineRunner
@@ -1270,8 +1307,6 @@
       # are properly propagated.
       return self.transform.to_runner_api_transform(context, self.full_label)
 
-    from apache_beam.portability.api import beam_runner_api_pb2
-
     def transform_to_runner_api(
         transform,  # type: Optional[ptransform.PTransform]
         context  # type: PipelineContext
@@ -1331,7 +1366,6 @@
 
     if common_urns.primitives.PAR_DO.urn == proto.spec.urn:
       # Preserving side input tags.
-      from apache_beam.portability.api import beam_runner_api_pb2
       pardo_payload = (
           proto_utils.parse_Bytes(
               proto.spec.payload, beam_runner_api_pb2.ParDoPayload))
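
A small standalone sketch of the deduplication behavior; the environment ids and urn below are illustrative:

from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2

env = beam_runner_api_pb2.Environment(urn='beam:env:docker:v1')
proto = beam_runner_api_pb2.Pipeline(
    components=beam_runner_api_pb2.Components(
        transforms={
            't1': beam_runner_api_pb2.PTransform(environment_id='e1'),
            't2': beam_runner_api_pb2.PTransform(environment_id='e2'),
        },
        environments={'e1': env, 'e2': env}))
# Identical environments collapse into one and transforms are repointed.
Pipeline.merge_compatible_environments(proto)
print(len(proto.components.environments))  # 1
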
diff --git a/sdks/python/apache_beam/pipeline_test.py b/sdks/python/apache_beam/pipeline_test.py
index 8f8f44b..98d5828 100644
--- a/sdks/python/apache_beam/pipeline_test.py
+++ b/sdks/python/apache_beam/pipeline_test.py
@@ -36,6 +36,7 @@
 from apache_beam.pipeline import PipelineVisitor
 from apache_beam.pipeline import PTransformOverride
 from apache_beam.portability import common_urns
+from apache_beam.portability.api import beam_runner_api_pb2
 from apache_beam.pvalue import AsSingleton
 from apache_beam.pvalue import TaggedOutput
 from apache_beam.runners.dataflow.native_io.iobase import NativeSource
@@ -1021,7 +1022,6 @@
 
     p = beam.Pipeline()
     p | MyPTransform()  # pylint: disable=expression-not-assigned
-    from apache_beam.portability.api import beam_runner_api_pb2
 
     proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)
     my_transform, = [
@@ -1291,6 +1291,85 @@
         count += 1
     assert count == 2
 
+  def test_environments_are_deduplicated(self):
+    def file_artifact(path, hash, staged_name):
+      return beam_runner_api_pb2.ArtifactInformation(
+          type_urn=common_urns.artifact_types.FILE.urn,
+          type_payload=beam_runner_api_pb2.ArtifactFilePayload(
+              path=path, sha256=hash).SerializeToString(),
+          role_urn=common_urns.artifact_roles.STAGING_TO.urn,
+          role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
+              staged_name=staged_name).SerializeToString(),
+      )
+
+    proto = beam_runner_api_pb2.Pipeline(
+        components=beam_runner_api_pb2.Components(
+            transforms={
+                f'transform{ix}': beam_runner_api_pb2.PTransform(
+                    environment_id=f'e{ix}')
+                for ix in range(8)
+            },
+            environments={
+                # Same hash and destination.
+                'e1': beam_runner_api_pb2.Environment(
+                    dependencies=[file_artifact('a1', 'x', 'dest')]),
+                'e2': beam_runner_api_pb2.Environment(
+                    dependencies=[file_artifact('a2', 'x', 'dest')]),
+                # Different hash.
+                'e3': beam_runner_api_pb2.Environment(
+                    dependencies=[file_artifact('a3', 'y', 'dest')]),
+                # Different destination.
+                'e4': beam_runner_api_pb2.Environment(
+                    dependencies=[file_artifact('a4', 'y', 'dest2')]),
+                # Multiple files with same hash and destinations.
+                'e5': beam_runner_api_pb2.Environment(
+                    dependencies=[
+                        file_artifact('a1', 'x', 'dest'),
+                        file_artifact('b1', 'xb', 'destB')
+                    ]),
+                'e6': beam_runner_api_pb2.Environment(
+                    dependencies=[
+                        file_artifact('a2', 'x', 'dest'),
+                        file_artifact('b2', 'xb', 'destB')
+                    ]),
+                # Overlapping, but not identical, files.
+                'e7': beam_runner_api_pb2.Environment(
+                    dependencies=[
+                        file_artifact('a1', 'x', 'dest'),
+                        file_artifact('b2', 'y', 'destB')
+                    ]),
+                # Same files as first, but differing other properties.
+                'e0': beam_runner_api_pb2.Environment(
+                    resource_hints={'hint': b'value'},
+                    dependencies=[file_artifact('a1', 'x', 'dest')]),
+            }))
+    Pipeline.merge_compatible_environments(proto)
+
+    # These environments are equivalent.
+    self.assertEqual(
+        proto.components.transforms['transform1'].environment_id,
+        proto.components.transforms['transform2'].environment_id)
+
+    self.assertEqual(
+        proto.components.transforms['transform5'].environment_id,
+        proto.components.transforms['transform6'].environment_id)
+
+    # These are not.
+    self.assertNotEqual(
+        proto.components.transforms['transform1'].environment_id,
+        proto.components.transforms['transform3'].environment_id)
+    self.assertNotEqual(
+        proto.components.transforms['transform4'].environment_id,
+        proto.components.transforms['transform3'].environment_id)
+    self.assertNotEqual(
+        proto.components.transforms['transform6'].environment_id,
+        proto.components.transforms['transform7'].environment_id)
+    self.assertNotEqual(
+        proto.components.transforms['transform1'].environment_id,
+        proto.components.transforms['transform0'].environment_id)
+
+    self.assertEqual(len(proto.components.environments), 6)
+
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py b/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
index fcaf889..81ce4c0 100644
--- a/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
+++ b/sdks/python/apache_beam/runners/dataflow/internal/apiclient.py
@@ -298,7 +298,7 @@
       container_image = dataflow.SdkHarnessContainerImage()
       container_image.containerImage = container_image_url
       container_image.useSingleCorePerContainer = (
-          common_urns.protocols.MULTI_CORE_BUNDLE_PROCESSING not in
+          common_urns.protocols.MULTI_CORE_BUNDLE_PROCESSING.urn not in
           environment.capabilities)
       container_image.environmentId = id
       for capability in environment.capabilities:
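
This small fix matters because environment.capabilities holds urn strings, so comparing against the enum-like wrapper object previously always evaluated as "not in". A standalone sketch of the corrected membership check; the capability list is illustrative:

from apache_beam.portability import common_urns

capabilities = [common_urns.protocols.MULTI_CORE_BUNDLE_PROCESSING.urn]
# Only the .urn string can match entries of the capability list.
use_single_core = (
    common_urns.protocols.MULTI_CORE_BUNDLE_PROCESSING.urn not in capabilities)
print(use_single_core)  # False
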
diff --git a/sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py b/sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py
index 0433892..1532001 100644
--- a/sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py
+++ b/sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py
@@ -187,6 +187,8 @@
         payload=(
             beam_runner_api_pb2.DockerPayload(
                 container_image='dummy_image')).SerializeToString())
+    dummy_env.capabilities.append(
+        common_urns.protocols.MULTI_CORE_BUNDLE_PROCESSING.urn)
     proto_pipeline.components.environments['dummy_env_id'].CopyFrom(dummy_env)
 
     dummy_transform = beam_runner_api_pb2.PTransform(
@@ -203,6 +205,12 @@
     worker_pool = env.proto.workerPools[0]
 
     self.assertEqual(2, len(worker_pool.sdkHarnessContainerImages))
+    # Only one of the environments is missing MULTI_CORE_BUNDLE_PROCESSING.
+    self.assertEqual(
+        1,
+        sum(
+            c.useSingleCorePerContainer
+            for c in worker_pool.sdkHarnessContainerImages))
 
     env_and_image = [(item.environmentId, item.containerImage)
                      for item in worker_pool.sdkHarnessContainerImages]
diff --git a/sdks/python/apache_beam/runners/dataflow/internal/names.py b/sdks/python/apache_beam/runners/dataflow/internal/names.py
index 9e34962..40f8342 100644
--- a/sdks/python/apache_beam/runners/dataflow/internal/names.py
+++ b/sdks/python/apache_beam/runners/dataflow/internal/names.py
@@ -36,10 +36,10 @@
 
 # Update this version to the next version whenever there is a change that will
 # require changes to legacy Dataflow worker execution environment.
-BEAM_CONTAINER_VERSION = 'beam-master-20220617'
+BEAM_CONTAINER_VERSION = 'beam-master-20220803'
 # Update this version to the next version whenever there is a change that
 # requires changes to SDK harness container or SDK harness launcher.
-BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20220617'
+BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20220803'
 
 DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'gcr.io/cloud-dataflow/v1beta3'
 
diff --git a/sdks/python/apache_beam/runners/interactive/extensions/apache-beam-jupyterlab-sidepanel/yarn.lock b/sdks/python/apache_beam/runners/interactive/extensions/apache-beam-jupyterlab-sidepanel/yarn.lock
index f31608c..0dd2a3a 100644
--- a/sdks/python/apache_beam/runners/interactive/extensions/apache-beam-jupyterlab-sidepanel/yarn.lock
+++ b/sdks/python/apache_beam/runners/interactive/extensions/apache-beam-jupyterlab-sidepanel/yarn.lock
@@ -561,6 +561,46 @@
     "@types/yargs" "^15.0.0"
     chalk "^4.0.0"
 
+"@jridgewell/gen-mapping@^0.3.0":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
+  integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
+  dependencies:
+    "@jridgewell/set-array" "^1.0.1"
+    "@jridgewell/sourcemap-codec" "^1.4.10"
+    "@jridgewell/trace-mapping" "^0.3.9"
+
+"@jridgewell/resolve-uri@^3.0.3":
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
+  integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
+
+"@jridgewell/set-array@^1.0.1":
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
+  integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
+
+"@jridgewell/source-map@^0.3.2":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb"
+  integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==
+  dependencies:
+    "@jridgewell/gen-mapping" "^0.3.0"
+    "@jridgewell/trace-mapping" "^0.3.9"
+
+"@jridgewell/sourcemap-codec@^1.4.10":
+  version "1.4.14"
+  resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
+  integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
+
+"@jridgewell/trace-mapping@^0.3.9":
+  version "0.3.14"
+  resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz#b231a081d8f66796e475ad588a1ef473112701ed"
+  integrity sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ==
+  dependencies:
+    "@jridgewell/resolve-uri" "^3.0.3"
+    "@jridgewell/sourcemap-codec" "^1.4.10"
+
 "@jupyterlab/application@^3.1.17":
   version "3.1.17"
   resolved "https://registry.yarnpkg.com/@jupyterlab/application/-/application-3.1.17.tgz#137ae63fe1af4b0e6fa27c71421de4e396e9a33c"
@@ -2461,20 +2501,15 @@
   resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6"
   integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==
 
-acorn@^7.1.0, acorn@^7.4.0:
+acorn@^7.1.0, acorn@^7.1.1, acorn@^7.4.0:
   version "7.4.1"
   resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa"
   integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==
 
-acorn@^7.1.1:
-  version "7.3.1"
-  resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.3.1.tgz#85010754db53c3fbaf3b9ea3e083aa5c5d147ffd"
-  integrity sha512-tLc0wSnatxAQHVHUapaHdz72pi9KUyHjq5KyHjGg9Y8Ifdc79pTh2XvI6I1/chZbnM7QtNKzh66ooDogPZSleA==
-
-acorn@^8.4.1:
-  version "8.5.0"
-  resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.5.0.tgz#4512ccb99b3698c752591e9bb4472e38ad43cee2"
-  integrity sha512-yXbYeFy+jUuYd3/CDcg2NkIYE991XYX/bje7LmjJigUciaeO1JR4XxXgCIV1/Zc/dRuFEyw1L0pbA+qynJkW5Q==
+acorn@^8.4.1, acorn@^8.5.0:
+  version "8.7.1"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.1.tgz#0197122c843d1bf6d0a5e83220a788f278f63c30"
+  integrity sha512-Xx54uLJQZ19lKygFXOWsscKUbsBZW0CPykPhVQdhIeIwrbPmJzqeASDInc8nKBnp/JT6igTs82qPXz069H8I/A==
 
 add-dom-event-listener@^1.1.0:
   version "1.1.0"
@@ -2908,11 +2943,16 @@
   resolved "https://registry.yarnpkg.com/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz#f8e71132f7ffe6e01a5c9697a4c6f3e48d5cc819"
   integrity sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=
 
-buffer-from@1.x, buffer-from@^1.0.0:
+buffer-from@1.x:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef"
   integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==
 
+buffer-from@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
+  integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
+
 buffer@^5.5.0, buffer@^5.6.0:
   version "5.7.1"
   resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0"
@@ -8305,18 +8345,10 @@
     source-map-url "^0.4.0"
     urix "^0.1.0"
 
-source-map-support@^0.5.6:
-  version "0.5.19"
-  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61"
-  integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==
-  dependencies:
-    buffer-from "^1.0.0"
-    source-map "^0.6.0"
-
-source-map-support@~0.5.20:
-  version "0.5.20"
-  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.20.tgz#12166089f8f5e5e8c56926b377633392dd2cb6c9"
-  integrity sha512-n1lZZ8Ve4ksRqizaBQgxXDgKwttHDhyfQjA6YZZn8+AroHbsIz+JjwxQDxbp+7y5OYCI8t1Yk7etjD9CRd2hIw==
+source-map-support@^0.5.6, source-map-support@~0.5.20:
+  version "0.5.21"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f"
+  integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
   dependencies:
     buffer-from "^1.0.0"
     source-map "^0.6.0"
@@ -8336,7 +8368,7 @@
   resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
   integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
 
-source-map@^0.7.3, source-map@~0.7.2:
+source-map@^0.7.3:
   version "0.7.3"
   resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383"
   integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==
@@ -8685,12 +8717,13 @@
     terser "^5.7.2"
 
 terser@^5.3.4, terser@^5.7.2:
-  version "5.9.0"
-  resolved "https://registry.yarnpkg.com/terser/-/terser-5.9.0.tgz#47d6e629a522963240f2b55fcaa3c99083d2c351"
-  integrity sha512-h5hxa23sCdpzcye/7b8YqbE5OwKca/ni0RQz1uRX3tGh8haaGHqcuSqbGRybuAKNdntZ0mDgFNXPJ48xQ2RXKQ==
+  version "5.14.2"
+  resolved "https://registry.yarnpkg.com/terser/-/terser-5.14.2.tgz#9ac9f22b06994d736174f4091aa368db896f1c10"
+  integrity sha512-oL0rGeM/WFQCUd0y2QrWxYnq7tfSuKBiqTjRPWrRgB46WD/kiwHwF8T23z78H6Q6kGCuuHcPB+KULHRdxvVGQA==
   dependencies:
+    "@jridgewell/source-map" "^0.3.2"
+    acorn "^8.5.0"
     commander "^2.20.0"
-    source-map "~0.7.2"
     source-map-support "~0.5.20"
 
 test-exclude@^6.0.0:
diff --git a/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py b/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
index 6f94f3e..5aaadbd 100644
--- a/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
+++ b/sdks/python/apache_beam/runners/portability/fn_api_runner/worker_handlers.py
@@ -65,6 +65,7 @@
 from apache_beam.runners.worker import data_plane
 from apache_beam.runners.worker import sdk_worker
 from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
+from apache_beam.runners.worker.log_handler import LOGENTRY_TO_LOG_LEVEL_MAP
 from apache_beam.runners.worker.sdk_worker import _Future
 from apache_beam.runners.worker.statecache import StateCache
 from apache_beam.utils import proto_utils
@@ -407,24 +408,12 @@
 
 
 class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
-
-  LOG_LEVEL_MAP = {
-      beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
-      beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
-      beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
-      beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
-      beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
-      beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
-      beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
-      beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
-  }
-
   def Logging(self, log_messages, context=None):
     # type: (Iterable[beam_fn_api_pb2.LogEntry.List], Any) -> Iterator[beam_fn_api_pb2.LogControl]
     yield beam_fn_api_pb2.LogControl()
     for log_message in log_messages:
       for log in log_message.log_entries:
-        logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
+        logging.log(LOGENTRY_TO_LOG_LEVEL_MAP[log.severity], str(log))
 
 
 class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
diff --git a/sdks/python/apache_beam/runners/portability/local_job_service.py b/sdks/python/apache_beam/runners/portability/local_job_service.py
index d0d52a2..84d1ded 100644
--- a/sdks/python/apache_beam/runners/portability/local_job_service.py
+++ b/sdks/python/apache_beam/runners/portability/local_job_service.py
@@ -35,6 +35,7 @@
 from google.protobuf import json_format
 from google.protobuf import text_format  # type: ignore # not in typeshed
 
+from apache_beam import pipeline
 from apache_beam.metrics import monitoring_infos
 from apache_beam.options import pipeline_options
 from apache_beam.portability.api import beam_artifact_api_pb2_grpc
@@ -49,6 +50,7 @@
 from apache_beam.runners.portability import portable_runner
 from apache_beam.runners.portability.fn_api_runner import fn_runner
 from apache_beam.runners.portability.fn_api_runner import worker_handlers
+from apache_beam.runners.worker.log_handler import LOGENTRY_TO_LOG_LEVEL_MAP
 from apache_beam.utils import thread_pool_executor
 
 if TYPE_CHECKING:
@@ -285,6 +287,7 @@
   def _run_job(self):
     with JobLogHandler(self._log_queues) as log_handler:
       self._update_dependencies()
+      pipeline.Pipeline.merge_compatible_environments(self._pipeline_proto)
       try:
         start = time.time()
         self.result = self._invoke_runner()
@@ -365,7 +368,10 @@
   def Logging(self, log_bundles, context=None):
     for log_bundle in log_bundles:
       for log_entry in log_bundle.log_entries:
-        _LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
+        _LOGGER.log(
+            LOGENTRY_TO_LOG_LEVEL_MAP[log_entry.severity],
+            'Worker: %s',
+            str(log_entry).replace('\n', ' '))
     return iter([])
 
 
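Note on the merge_compatible_environments call added above: it deduplicates environments in the pipeline proto before the local job service hands it to the runner. A minimal sketch of calling it directly, using only the Pipeline APIs referenced in this diff (the sample pipeline itself is illustrative):

import apache_beam as beam

p = beam.Pipeline()
_ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x + 1)

# Serialize to the portable proto, then merge environments that are
# compatible with each other (e.g. identical environments registered more
# than once), so the runner has fewer distinct environments to provision.
proto = p.to_runner_api()
beam.Pipeline.merge_compatible_environments(proto)
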
diff --git a/sdks/python/apache_beam/runners/portability/stager.py b/sdks/python/apache_beam/runners/portability/stager.py
index 743beb4..e06c71c 100644
--- a/sdks/python/apache_beam/runners/portability/stager.py
+++ b/sdks/python/apache_beam/runners/portability/stager.py
@@ -54,7 +54,6 @@
 import shutil
 import sys
 import tempfile
-from distutils.version import StrictVersion
 from typing import Callable
 from typing import List
 from typing import Optional
@@ -62,6 +61,7 @@
 from urllib.parse import urlparse
 
 import pkg_resources
+from pkg_resources import parse_version
 
 from apache_beam.internal import pickler
 from apache_beam.internal.http_client import get_new_http
@@ -698,7 +698,7 @@
     # addressed, download wheel based on glibc version in Beam's Python
     # Base image
     pip_version = pkg_resources.get_distribution('pip').version
-    if StrictVersion(pip_version) >= StrictVersion('19.3'):
+    if parse_version(pip_version) >= parse_version('19.3'):
       return 'manylinux2014_x86_64'
     else:
       return 'manylinux2010_x86_64'
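
For context on the StrictVersion to parse_version swap above: StrictVersion only understands plain X.Y[.Z] releases (plus a/b pre-releases) and raises ValueError on many real-world version strings, whereas pkg_resources.parse_version accepts PEP 440 versions and still compares them correctly. A small illustrative check (the version strings are made up):

from distutils.version import StrictVersion
from pkg_resources import parse_version

assert parse_version('19.3') < parse_version('21.1.1')
assert parse_version('22.0.dev0') < parse_version('22.0')

try:
  StrictVersion('22.0.dev0')  # dev releases are not valid StrictVersions
except ValueError as exc:
  print('StrictVersion rejected it:', exc)
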
diff --git a/sdks/python/apache_beam/runners/worker/log_handler.py b/sdks/python/apache_beam/runners/worker/log_handler.py
index 75cdcf5..f5347d3 100644
--- a/sdks/python/apache_beam/runners/worker/log_handler.py
+++ b/sdks/python/apache_beam/runners/worker/log_handler.py
@@ -46,6 +46,29 @@
 if TYPE_CHECKING:
   from apache_beam.portability.api import endpoints_pb2
 
+# Mapping from logging levels to LogEntry levels.
+LOG_LEVEL_TO_LOGENTRY_MAP = {
+    logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
+    logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
+    logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
+    logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
+    logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
+    logging.NOTSET: beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED,
+    -float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
+}
+
+# Mapping from LogEntry levels to logging levels
+LOGENTRY_TO_LOG_LEVEL_MAP = {
+    beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
+    beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
+    beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
+    beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
+    beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
+    beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
+    beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
+    beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
+}
+
 # This module is experimental. No backwards-compatibility guarantees.
 
 
@@ -60,16 +83,6 @@
   # dropped. If the average log size is 1KB this may use up to 10MB of memory.
   _QUEUE_SIZE = 10000
 
-  # Mapping from logging levels to LogEntry levels.
-  LOG_LEVEL_MAP = {
-      logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
-      logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
-      logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
-      logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
-      logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-      -float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-  }
-
   def __init__(self, log_service_descriptor):
     # type: (endpoints_pb2.ApiServiceDescriptor) -> None
     super().__init__()
@@ -101,11 +114,12 @@
   def map_log_level(self, level):
     # type: (int) -> beam_fn_api_pb2.LogEntry.Severity.Enum
     try:
-      return self.LOG_LEVEL_MAP[level]
+      return LOG_LEVEL_TO_LOGENTRY_MAP[level]
     except KeyError:
       return max(
           beam_level for python_level,
-          beam_level in self.LOG_LEVEL_MAP.items() if python_level <= level)
+          beam_level in LOG_LEVEL_TO_LOGENTRY_MAP.items()
+          if python_level <= level)
 
   def emit(self, record):
     # type: (logging.LogRecord) -> None
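
The fallback in map_log_level above handles Python levels with no exact entry by choosing the highest Beam severity whose Python level does not exceed the requested one. A standalone sketch of that behaviour, assuming the module-level map introduced in this diff and that the LogEntry severity enum values increase with severity:

import logging

from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.runners.worker.log_handler import LOG_LEVEL_TO_LOGENTRY_MAP


def map_log_level(level):
  try:
    return LOG_LEVEL_TO_LOGENTRY_MAP[level]
  except KeyError:
    # Largest Beam severity mapped from a Python level that is <= level.
    return max(
        beam_level
        for python_level, beam_level in LOG_LEVEL_TO_LOGENTRY_MAP.items()
        if python_level <= level)


# A custom level between INFO (20) and WARNING (30) falls back to INFO.
assert map_log_level(logging.INFO + 5) == beam_fn_api_pb2.LogEntry.Severity.INFO
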
diff --git a/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py b/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
index 89b6dce..e4babe5 100644
--- a/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
+++ b/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
@@ -69,6 +69,7 @@
 from apache_beam.options.pipeline_options import PipelineOptions
 from apache_beam.options.pipeline_options import SetupOptions
 from apache_beam.options.pipeline_options import StandardOptions
+from apache_beam.options.pipeline_options import TypeOptions
 from apache_beam.runners import PipelineState
 from apache_beam.testing.benchmarks.nexmark import nexmark_util
 from apache_beam.testing.benchmarks.nexmark.monitor import Monitor
@@ -180,6 +181,7 @@
     # Usage with Dataflow requires a project to be supplied.
     self.project = self.pipeline_options.view_as(GoogleCloudOptions).project
     self.streaming = self.pipeline_options.view_as(StandardOptions).streaming
+    self.pipeline_options.view_as(TypeOptions).allow_unsafe_triggers = True
 
     if self.streaming:
       if self.args.subscription_name is None or self.project is None:
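
The allow_unsafe_triggers line added above opts the Nexmark pipelines into trigger configurations that Beam's safety checks would otherwise reject. The same switch can be flipped on any options object; a minimal sketch (the option can also be supplied as the --allow_unsafe_triggers flag):

from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import TypeOptions

options = PipelineOptions(['--streaming'])
options.view_as(TypeOptions).allow_unsafe_triggers = True
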
diff --git a/sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py b/sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py
index 0099d46..86c5005 100644
--- a/sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py
+++ b/sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py
@@ -54,7 +54,7 @@
 except ImportError:
   bigquery = None
   SchemaField = None
-  NotFound = None
+  NotFound = None  # type: ignore
 
 RUNTIME_METRIC = 'runtime'
 COUNTER_LABEL = 'total_bytes_count'
@@ -92,7 +92,12 @@
   Returns:
     lower case step name without namespace and step label
   """
-  return step_name.lower().replace(' ', '_').strip('step:_')
+  prefix = 'step'
+  step_name = step_name.lower().replace(' ', '_')
+  step_name = (
+      step_name[len(prefix):]
+      if prefix and step_name.startswith(prefix) else step_name)
+  return step_name.strip(':_')
 
 
 def split_metrics_by_namespace_and_name(metrics, namespace, name):
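
The helper rewrite above fixes a classic str.strip() pitfall: strip('step:_') removes any of the characters s, t, e, p, :, _ from both ends rather than the literal 'step' prefix, which can eat into the step name itself. A worked example with a made-up step label:

name = 'Step: Persist'.lower().replace(' ', '_')  # 'step:_persist'

# Old behaviour: strips characters from the set {'s','t','e','p',':','_'}
# off both ends and mangles the name.
print(name.strip('step:_'))  # 'rsi'

# New behaviour: drop the literal 'step' prefix, then trim the separators.
prefix = 'step'
trimmed = name[len(prefix):] if name.startswith(prefix) else name
print(trimmed.strip(':_'))  # 'persist'
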
diff --git a/sdks/python/apache_beam/transforms/batch_dofn_test.py b/sdks/python/apache_beam/transforms/batch_dofn_test.py
index eb4e6ff..b75f447 100644
--- a/sdks/python/apache_beam/transforms/batch_dofn_test.py
+++ b/sdks/python/apache_beam/transforms/batch_dofn_test.py
@@ -71,6 +71,9 @@
   def process(self, element: int, *args, **kwargs) -> Iterator[List[int]]:
     yield [element] * element
 
+  def infer_output_type(self, input_element_type):
+    return input_element_type
+
 
 class BatchToElementDoFn(beam.DoFn):
   @beam.DoFn.yields_elements
@@ -170,6 +173,31 @@
     yield [element * 2 for element in batch]
 
 
+class MismatchedBatchProducingDoFn(beam.DoFn):
+  """A DoFn that produces batches from both process and process_batch, with
+  mismatched return types (one yields floats, the other ints). Should raise
+  a construction-time error when applied."""
+  @beam.DoFn.yields_batches
+  def process(self, element: int, *args, **kwargs) -> Iterator[List[int]]:
+    yield [element]
+
+  def process_batch(self, batch: List[int], *args,
+                    **kwargs) -> Iterator[List[float]]:
+    yield [element / 2 for element in batch]
+
+
+class MismatchedElementProducingDoFn(beam.DoFn):
+  """A DoFn that produces elements from both process and process_batch, with
+  mismatched return types (one yields floats, the other ints). Should raise
+  a construction-time error when applied."""
+  def process(self, element: int, *args, **kwargs) -> Iterator[float]:
+    yield element / 2
+
+  @beam.DoFn.yields_elements
+  def process_batch(self, batch: List[int], *args, **kwargs) -> Iterator[int]:
+    yield batch[0]
+
+
 class BatchDoFnTest(unittest.TestCase):
   def test_map_pardo(self):
     # verify batch dofn accessors work well with beam.Map generated DoFn
@@ -199,9 +227,52 @@
     pc = p | beam.Create([1, 2, 3])
 
     with self.assertRaisesRegex(NotImplementedError,
-                                r'.*BatchDoFnBadParam.*KeyParam'):
+                                r'BatchDoFnBadParam.*KeyParam'):
       _ = pc | beam.ParDo(BatchDoFnBadParam())
 
+  def test_mismatched_batch_producer_raises(self):
+    p = beam.Pipeline()
+    pc = p | beam.Create([1, 2, 3])
+
+    # Note (?ms) makes this a multiline regex, where . matches newlines.
+    # See (?aiLmsux) at
+    # https://docs.python.org/3.4/library/re.html#regular-expression-syntax
+    with self.assertRaisesRegex(
+        TypeError,
+        (r'(?ms)MismatchedBatchProducingDoFn.*'
+         r'process: List\[int\].*process_batch: List\[float\]')):
+      _ = pc | beam.ParDo(MismatchedBatchProducingDoFn())
+
+  def test_mismatched_element_producer_raises(self):
+    p = beam.Pipeline()
+    pc = p | beam.Create([1, 2, 3])
+
+    # Note (?ms) makes this a multiline regex, where . matches newlines.
+    # See (?aiLmsux) at
+    # https://docs.python.org/3.4/library/re.html#regular-expression-syntax
+    with self.assertRaisesRegex(
+        TypeError,
+        r'(?ms)MismatchedElementProducingDoFn.*process:.*process_batch:'):
+      _ = pc | beam.ParDo(MismatchedElementProducingDoFn())
+
+  def test_element_to_batch_dofn_typehint(self):
+    # Verify that element to batch DoFn sets the correct typehint on the output
+    # PCollection.
+
+    p = beam.Pipeline()
+    pc = (p | beam.Create([1, 2, 3]) | beam.ParDo(ElementToBatchDoFn()))
+
+    self.assertEqual(pc.element_type, int)
+
+  def test_batch_to_element_dofn_typehint(self):
+    # Verify that batch to element DoFn sets the correct typehint on the output
+    # PCollection.
+
+    p = beam.Pipeline()
+    pc = (p | beam.Create([1, 2, 3]) | beam.ParDo(BatchToElementDoFn()))
+
+    self.assertEqual(pc.element_type, beam.typehints.Tuple[int, int])
+
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/sdks/python/apache_beam/transforms/core.py b/sdks/python/apache_beam/transforms/core.py
index dbf683a..5169289 100644
--- a/sdks/python/apache_beam/transforms/core.py
+++ b/sdks/python/apache_beam/transforms/core.py
@@ -708,14 +708,44 @@
     return get_function_arguments(self, func)
 
   def default_type_hints(self):
-    fn_type_hints = typehints.decorators.IOTypeHints.from_callable(self.process)
-    if fn_type_hints is not None:
-      try:
-        fn_type_hints = fn_type_hints.strip_iterable()
-      except ValueError as e:
-        raise ValueError('Return value not iterable: %s: %s' % (self, e))
+    process_type_hints = typehints.decorators.IOTypeHints.from_callable(
+        self.process) or typehints.decorators.IOTypeHints.empty()
+
+    if self._process_yields_batches:
+      # process() produces batches, don't use its output typehint
+      process_type_hints = process_type_hints.with_output_types_from(
+          typehints.decorators.IOTypeHints.empty())
+
+    if self._process_batch_yields_elements:
+      # process_batch() produces elements, *do* use its output typehint
+
+      # First access the typehint
+      process_batch_type_hints = typehints.decorators.IOTypeHints.from_callable(
+          self.process_batch) or typehints.decorators.IOTypeHints.empty()
+
+      # Then we deconflict with the typehint from process, if it exists
+      if (process_batch_type_hints.output_types !=
+          typehints.decorators.IOTypeHints.empty().output_types):
+        if (process_type_hints.output_types !=
+            typehints.decorators.IOTypeHints.empty().output_types and
+            process_batch_type_hints.output_types !=
+            process_type_hints.output_types):
+          raise TypeError(
+              f"DoFn {self!r} yields elements from both process and "
+              "process_batch, but they have mismatched output typehints:\n"
+              f" process: {process_type_hints.output_types}\n"
+              f" process_batch: {process_batch_type_hints.output_types}")
+
+        process_type_hints = process_type_hints.with_output_types_from(
+            process_batch_type_hints)
+
+    try:
+      process_type_hints = process_type_hints.strip_iterable()
+    except ValueError as e:
+      raise ValueError('Return value not iterable: %s: %s' % (self, e))
+
     # Prefer class decorator type hints for backwards compatibility.
-    return get_type_hints(self.__class__).with_defaults(fn_type_hints)
+    return get_type_hints(self.__class__).with_defaults(process_type_hints)
 
   # TODO(sourabhbajaj): Do we want to remove the responsibility of these from
   # the DoFn or maybe the runner
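
To make the new default_type_hints behaviour concrete: when process_batch is decorated with @beam.DoFn.yields_elements its output hint is honoured, and if process also declares one the two must agree, otherwise the TypeError above is raised. A minimal sketch of a DoFn whose two paths declare the same element type (the class and logic are illustrative only):

from typing import Iterator, List

import apache_beam as beam


class UpperCaseDoFn(beam.DoFn):
  def process(self, element: str) -> Iterator[str]:
    yield element.upper()

  @beam.DoFn.yields_elements
  def process_batch(self, batch: List[str]) -> Iterator[str]:
    for element in batch:
      yield element.upper()

# Both paths declare Iterator[str], so the output PCollection is typed str.
# Declaring Iterator[int] on one of them would trigger the mismatch
# TypeError at pipeline construction time.
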
diff --git a/sdks/python/apache_beam/typehints/decorators.py b/sdks/python/apache_beam/typehints/decorators.py
index 1b6fe70..c24f2ed 100644
--- a/sdks/python/apache_beam/typehints/decorators.py
+++ b/sdks/python/apache_beam/typehints/decorators.py
@@ -302,6 +302,16 @@
     return self._replace(
         output_types=(args, kwargs), origin=self._make_origin([self]))
 
+  def with_input_types_from(self, other):
+    # type: (IOTypeHints) -> IOTypeHints
+    return self._replace(
+        input_types=other.input_types, origin=self._make_origin([self]))
+
+  def with_output_types_from(self, other):
+    # type: (IOTypeHints) -> IOTypeHints
+    return self._replace(
+        output_types=other.output_types, origin=self._make_origin([self]))
+
   def simple_output_type(self, context):
     if self._has_output_types():
       args, kwargs = self.output_types
diff --git a/sdks/python/apache_beam/typehints/decorators_test.py b/sdks/python/apache_beam/typehints/decorators_test.py
index 9b10996..ba46038 100644
--- a/sdks/python/apache_beam/typehints/decorators_test.py
+++ b/sdks/python/apache_beam/typehints/decorators_test.py
@@ -97,6 +97,46 @@
     after = th.strip_iterable()
     self.assertEqual(((expected_after, ), {}), after.output_types)
 
+  def test_with_output_types_from(self):
+    th = decorators.IOTypeHints(
+        input_types=((int), {
+            'foo': str
+        }),
+        output_types=((int, str), {}),
+        origin=[])
+
+    self.assertEqual(
+        th.with_output_types_from(decorators.IOTypeHints.empty()),
+        decorators.IOTypeHints(
+            input_types=((int), {
+                'foo': str
+            }), output_types=None, origin=[]))
+
+    self.assertEqual(
+        decorators.IOTypeHints.empty().with_output_types_from(th),
+        decorators.IOTypeHints(
+            input_types=None, output_types=((int, str), {}), origin=[]))
+
+  def test_with_input_types_from(self):
+    th = decorators.IOTypeHints(
+        input_types=((int), {
+            'foo': str
+        }),
+        output_types=((int, str), {}),
+        origin=[])
+
+    self.assertEqual(
+        th.with_input_types_from(decorators.IOTypeHints.empty()),
+        decorators.IOTypeHints(
+            input_types=None, output_types=((int, str), {}), origin=[]))
+
+    self.assertEqual(
+        decorators.IOTypeHints.empty().with_input_types_from(th),
+        decorators.IOTypeHints(
+            input_types=((int), {
+                'foo': str
+            }), output_types=None, origin=[]))
+
   def _test_strip_iterable_fail(self, before):
     with self.assertRaisesRegex(ValueError, r'not iterable'):
       self._test_strip_iterable(before, None)
diff --git a/sdks/python/apache_beam/utils/retry.py b/sdks/python/apache_beam/utils/retry.py
index a6cde4a..6eed290 100644
--- a/sdks/python/apache_beam/utils/retry.py
+++ b/sdks/python/apache_beam/utils/retry.py
@@ -44,7 +44,7 @@
   from google.api_core.exceptions import GoogleAPICallError
 except ImportError as e:
   HttpError = None
-  GoogleAPICallError = None
+  GoogleAPICallError = None  # type: ignore
 
 # Protect against environments where aws tools are not available.
 # pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
diff --git a/sdks/python/apache_beam/utils/subprocess_server.py b/sdks/python/apache_beam/utils/subprocess_server.py
index 5341f96..769a5f4 100644
--- a/sdks/python/apache_beam/utils/subprocess_server.py
+++ b/sdks/python/apache_beam/utils/subprocess_server.py
@@ -121,8 +121,9 @@
       def log_stdout():
         line = self._process.stdout.readline()
         while line:
-          # Remove newline via rstrip() to not print an empty line
-          _LOGGER.info(line.rstrip())
+          # The line read from stdout is bytes; decode it into a string.
+          # Remove newline via rstrip() to not print an empty line.
+          _LOGGER.info(line.decode(errors='backslashreplace').rstrip())
           line = self._process.stdout.readline()
 
       t = threading.Thread(target=log_stdout)
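
The decode added above matters because readline() on a subprocess pipe returns bytes, and logging bytes records their b'...' repr rather than the text. A quick illustration (the sample line is invented):

line = b'INFO: expansion service started\n'

print(line.rstrip())  # b'INFO: expansion service started'
print(line.decode(errors='backslashreplace').rstrip())  # INFO: expansion service started
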
diff --git a/sdks/python/apache_beam/version.py b/sdks/python/apache_beam/version.py
index 473481e..fd9eeee 100644
--- a/sdks/python/apache_beam/version.py
+++ b/sdks/python/apache_beam/version.py
@@ -17,4 +17,4 @@
 
 """Apache Beam SDK version information and utilities."""
 
-__version__ = '2.41.0.dev'
+__version__ = '2.42.0.dev'
diff --git a/sdks/python/container/Dockerfile b/sdks/python/container/Dockerfile
index b40eac6..a301db7 100644
--- a/sdks/python/container/Dockerfile
+++ b/sdks/python/container/Dockerfile
@@ -48,6 +48,8 @@
     rm -rf /root/.cache/pip && \
     rm -rf /tmp/base_image_requirements.txt
 
+RUN pip install --upgrade pip setuptools
+
 # Install Google Cloud SDK.
 ENV CLOUDSDK_CORE_DISABLE_PROMPTS yes
 ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin
diff --git a/sdks/python/container/py37/base_image_requirements.txt b/sdks/python/container/py37/base_image_requirements.txt
index 2e92ee7..392a06f 100644
--- a/sdks/python/container/py37/base_image_requirements.txt
+++ b/sdks/python/container/py37/base_image_requirements.txt
@@ -21,89 +21,89 @@
 # https://s.apache.org/beam-python-dev-wiki
 # Reach out to a committer if you need help.
 
-absl-py==1.1.0
+absl-py==1.2.0
 astunparse==1.6.3
-atomicwrites==1.4.1
-attrs==21.4.0
+attrs==22.1.0
 beautifulsoup4==4.11.1
 bs4==0.0.1
 cachetools==4.2.4
 certifi==2022.6.15
-cffi==1.15.0
-charset-normalizer==2.0.12
+cffi==1.15.1
+charset-normalizer==2.1.0
 click==8.1.3
 cloudpickle==2.1.0
 crcmod==1.7
-cryptography==37.0.2
-Cython==0.29.30
+cryptography==37.0.4
+Cython==0.29.32
 deprecation==2.1.0
 dill==0.3.1.1
 docker==5.0.3
 docopt==0.6.2
 execnet==1.9.0
-fastavro==1.5.1
+fastavro==1.5.4
 fasteners==0.17.3
 flatbuffers==1.12
 freezegun==1.2.1
 future==0.18.2
 gast==0.4.0
-google-api-core==1.31.6
-google-api-python-client==2.51.0
+google-api-core==2.8.1
+google-api-python-client==2.55.0
 google-apitools==0.5.31
-google-auth==1.35.0
+google-auth==2.9.1
 google-auth-httplib2==0.1.0
 google-auth-oauthlib==0.4.6
 google-cloud-bigquery==2.34.4
 google-cloud-bigquery-storage==2.13.2
 google-cloud-bigtable==1.7.2
-google-cloud-core==1.7.2
+google-cloud-core==2.3.2
 google-cloud-datastore==1.15.5
-google-cloud-dlp==3.7.1
+google-cloud-dlp==3.8.0
 google-cloud-language==1.3.2
-google-cloud-profiler==3.1.0
-google-cloud-pubsub==2.13.0
+google-cloud-profiler==4.0.0
+google-cloud-pubsub==2.13.4
 google-cloud-pubsublite==1.4.2
-google-cloud-recommendations-ai==0.2.0
+google-cloud-recommendations-ai==0.7.0
 google-cloud-spanner==1.19.3
 google-cloud-videointelligence==1.16.3
 google-cloud-vision==1.0.2
 google-crc32c==1.3.0
 google-pasta==0.2.0
-google-python-cloud-debugger==2.18
+google-python-cloud-debugger==2.19
 google-resumable-media==2.3.3
-googleapis-common-protos==1.56.2
+googleapis-common-protos==1.56.4
 greenlet==1.1.2
 grpc-google-iam-v1==0.12.4
-grpcio==1.46.3
+grpcio==1.47.0
 grpcio-gcp==0.2.2
-grpcio-status==1.46.3
+grpcio-status==1.47.0
 guppy3==3.1.2
 h5py==3.7.0
 hdfs==2.7.0
 httplib2==0.20.4
 idna==3.3
-importlib-metadata==4.11.4
+importlib-metadata==4.12.0
+iniconfig==1.1.1
 joblib==1.1.0
 keras==2.9.0
 Keras-Preprocessing==1.1.2
-libclang==14.0.1
-Markdown==3.3.7
+libclang==14.0.6
+Markdown==3.4.1
+MarkupSafe==2.1.1
 mmh3==3.0.0
 mock==2.0.0
-more-itertools==8.13.0
 nltk==3.7
 nose==1.3.7
 numpy==1.21.6
 oauth2client==4.1.3
 oauthlib==3.2.0
 opt-einsum==3.3.0
-orjson==3.7.2
+orjson==3.7.11
 overrides==6.1.0
 packaging==21.3
 pandas==1.3.5
-parameterized==0.7.5
+parameterized==0.8.1
 pbr==5.9.0
-pluggy==0.13.1
+pluggy==1.0.0
 proto-plus==1.20.6
 protobuf==3.19.4
 psycopg2-binary==2.9.3
@@ -117,24 +117,24 @@
 pymongo==3.12.3
 PyMySQL==1.0.2
 pyparsing==3.0.9
-pytest==4.6.11
+pytest==7.1.2
 pytest-forked==1.4.0
-pytest-timeout==1.4.2
-pytest-xdist==1.34.0
+pytest-timeout==2.1.0
+pytest-xdist==2.5.0
 python-dateutil==2.8.2
 python-snappy==0.6.1
 pytz==2022.1
 PyYAML==6.0
-regex==2022.6.2
-requests==2.28.0
+regex==2022.7.25
+requests==2.28.1
 requests-mock==1.9.3
 requests-oauthlib==1.3.1
-rsa==4.8
+rsa==4.9
 scikit-learn==1.0.2
 scipy==1.7.3
 six==1.16.0
 soupsieve==2.3.2.post1
-SQLAlchemy==1.4.37
+SQLAlchemy==1.4.39
 tenacity==5.1.5
 tensorboard==2.9.1
 tensorboard-data-server==0.6.1
@@ -145,13 +145,14 @@
 termcolor==1.1.0
 testcontainers==3.6.0
 threadpoolctl==3.1.0
+tomli==2.0.1
 tqdm==4.64.0
 typing-utils==0.1.0
-typing_extensions==4.2.0
+typing_extensions==4.3.0
 uritemplate==4.1.1
-urllib3==1.26.9
-wcwidth==0.2.5
-websocket-client==1.3.2
-Werkzeug==2.1.2
+urllib3==1.26.11
+websocket-client==1.3.3
+Werkzeug==2.2.1
 wrapt==1.14.1
-zipp==3.8.0
+zipp==3.8.1
+zstandard==0.18.0
diff --git a/sdks/python/container/py38/base_image_requirements.txt b/sdks/python/container/py38/base_image_requirements.txt
index f7c4b1f..24a81d1 100644
--- a/sdks/python/container/py38/base_image_requirements.txt
+++ b/sdks/python/container/py38/base_image_requirements.txt
@@ -21,89 +21,89 @@
 # https://s.apache.org/beam-python-dev-wiki
 # Reach out to a committer if you need help.
 
-absl-py==1.1.0
+absl-py==1.2.0
 astunparse==1.6.3
-atomicwrites==1.4.1
-attrs==21.4.0
+attrs==22.1.0
 beautifulsoup4==4.11.1
 bs4==0.0.1
 cachetools==4.2.4
 certifi==2022.6.15
-cffi==1.15.0
-charset-normalizer==2.0.12
+cffi==1.15.1
+charset-normalizer==2.1.0
 click==8.1.3
 cloudpickle==2.1.0
 crcmod==1.7
-cryptography==37.0.2
-Cython==0.29.30
+cryptography==37.0.4
+Cython==0.29.32
 deprecation==2.1.0
 dill==0.3.1.1
 docker==5.0.3
 docopt==0.6.2
 execnet==1.9.0
-fastavro==1.5.1
+fastavro==1.5.4
 fasteners==0.17.3
 flatbuffers==1.12
 freezegun==1.2.1
 future==0.18.2
 gast==0.4.0
-google-api-core==1.31.6
-google-api-python-client==2.51.0
+google-api-core==2.8.1
+google-api-python-client==2.55.0
 google-apitools==0.5.31
-google-auth==1.35.0
+google-auth==2.9.1
 google-auth-httplib2==0.1.0
 google-auth-oauthlib==0.4.6
 google-cloud-bigquery==2.34.4
 google-cloud-bigquery-storage==2.13.2
 google-cloud-bigtable==1.7.2
-google-cloud-core==1.7.2
+google-cloud-core==2.3.2
 google-cloud-datastore==1.15.5
-google-cloud-dlp==3.7.1
+google-cloud-dlp==3.8.0
 google-cloud-language==1.3.2
-google-cloud-profiler==3.1.0
-google-cloud-pubsub==2.13.0
+google-cloud-profiler==4.0.0
+google-cloud-pubsub==2.13.4
 google-cloud-pubsublite==1.4.2
-google-cloud-recommendations-ai==0.2.0
+google-cloud-recommendations-ai==0.7.0
 google-cloud-spanner==1.19.3
 google-cloud-videointelligence==1.16.3
 google-cloud-vision==1.0.2
 google-crc32c==1.3.0
 google-pasta==0.2.0
-google-python-cloud-debugger==2.18
+google-python-cloud-debugger==2.19
 google-resumable-media==2.3.3
-googleapis-common-protos==1.56.2
+googleapis-common-protos==1.56.4
 greenlet==1.1.2
 grpc-google-iam-v1==0.12.4
-grpcio==1.46.3
+grpcio==1.47.0
 grpcio-gcp==0.2.2
-grpcio-status==1.46.3
+grpcio-status==1.47.0
 guppy3==3.1.2
 h5py==3.7.0
 hdfs==2.7.0
 httplib2==0.20.4
 idna==3.3
-importlib-metadata==4.11.4
+importlib-metadata==4.12.0
+iniconfig==1.1.1
 joblib==1.1.0
 keras==2.9.0
 Keras-Preprocessing==1.1.2
-libclang==14.0.1
-Markdown==3.3.7
+libclang==14.0.6
+Markdown==3.4.1
+MarkupSafe==2.1.1
 mmh3==3.0.0
 mock==2.0.0
-more-itertools==8.13.0
 nltk==3.7
 nose==1.3.7
 numpy==1.22.4
 oauth2client==4.1.3
 oauthlib==3.2.0
 opt-einsum==3.3.0
-orjson==3.7.2
+orjson==3.7.11
 overrides==6.1.0
 packaging==21.3
-pandas==1.4.2
-parameterized==0.7.5
+pandas==1.4.3
+parameterized==0.8.1
 pbr==5.9.0
-pluggy==0.13.1
+pluggy==1.0.0
 proto-plus==1.20.6
 protobuf==3.19.4
 psycopg2-binary==2.9.3
@@ -117,24 +117,24 @@
 pymongo==3.12.3
 PyMySQL==1.0.2
 pyparsing==3.0.9
-pytest==4.6.11
+pytest==7.1.2
 pytest-forked==1.4.0
-pytest-timeout==1.4.2
-pytest-xdist==1.34.0
+pytest-timeout==2.1.0
+pytest-xdist==2.5.0
 python-dateutil==2.8.2
 python-snappy==0.6.1
 pytz==2022.1
 PyYAML==6.0
-regex==2022.6.2
-requests==2.28.0
+regex==2022.7.25
+requests==2.28.1
 requests-mock==1.9.3
 requests-oauthlib==1.3.1
-rsa==4.8
+rsa==4.9
 scikit-learn==1.1.1
-scipy==1.8.1
+scipy==1.9.0
 six==1.16.0
 soupsieve==2.3.2.post1
-SQLAlchemy==1.4.37
+SQLAlchemy==1.4.39
 tenacity==5.1.5
 tensorboard==2.9.1
 tensorboard-data-server==0.6.1
@@ -145,13 +145,14 @@
 termcolor==1.1.0
 testcontainers==3.6.0
 threadpoolctl==3.1.0
+tomli==2.0.1
 tqdm==4.64.0
 typing-utils==0.1.0
-typing_extensions==4.2.0
+typing_extensions==4.3.0
 uritemplate==4.1.1
-urllib3==1.26.9
-wcwidth==0.2.5
-websocket-client==1.3.2
-Werkzeug==2.1.2
+urllib3==1.26.11
+websocket-client==1.3.3
+Werkzeug==2.2.1
 wrapt==1.14.1
-zipp==3.8.0
+zipp==3.8.1
+zstandard==0.18.0
diff --git a/sdks/python/container/py39/base_image_requirements.txt b/sdks/python/container/py39/base_image_requirements.txt
index e3e556f..67de8d2 100644
--- a/sdks/python/container/py39/base_image_requirements.txt
+++ b/sdks/python/container/py39/base_image_requirements.txt
@@ -21,89 +21,89 @@
 # https://s.apache.org/beam-python-dev-wiki
 # Reach out to a committer if you need help.
 
-absl-py==1.1.0
+absl-py==1.2.0
 astunparse==1.6.3
-atomicwrites==1.4.1
-attrs==21.4.0
+attrs==22.1.0
 beautifulsoup4==4.11.1
 bs4==0.0.1
 cachetools==4.2.4
 certifi==2022.6.15
-cffi==1.15.0
-charset-normalizer==2.0.12
+cffi==1.15.1
+charset-normalizer==2.1.0
 click==8.1.3
 cloudpickle==2.1.0
 crcmod==1.7
-cryptography==37.0.2
-Cython==0.29.30
+cryptography==37.0.4
+Cython==0.29.32
 deprecation==2.1.0
 dill==0.3.1.1
 docker==5.0.3
 docopt==0.6.2
 execnet==1.9.0
-fastavro==1.5.1
+fastavro==1.5.4
 fasteners==0.17.3
 flatbuffers==1.12
 freezegun==1.2.1
 future==0.18.2
 gast==0.4.0
-google-api-core==1.31.6
-google-api-python-client==2.51.0
+google-api-core==2.8.1
+google-api-python-client==2.55.0
 google-apitools==0.5.31
-google-auth==1.35.0
+google-auth==2.9.1
 google-auth-httplib2==0.1.0
 google-auth-oauthlib==0.4.6
 google-cloud-bigquery==2.34.4
 google-cloud-bigquery-storage==2.13.2
 google-cloud-bigtable==1.7.2
-google-cloud-core==1.7.2
+google-cloud-core==2.3.2
 google-cloud-datastore==1.15.5
-google-cloud-dlp==3.7.1
+google-cloud-dlp==3.8.0
 google-cloud-language==1.3.2
-google-cloud-profiler==3.1.0
-google-cloud-pubsub==2.13.0
+google-cloud-profiler==4.0.0
+google-cloud-pubsub==2.13.4
 google-cloud-pubsublite==1.4.2
-google-cloud-recommendations-ai==0.2.0
+google-cloud-recommendations-ai==0.7.0
 google-cloud-spanner==1.19.3
 google-cloud-videointelligence==1.16.3
 google-cloud-vision==1.0.2
 google-crc32c==1.3.0
 google-pasta==0.2.0
-google-python-cloud-debugger==2.18
+google-python-cloud-debugger==2.19
 google-resumable-media==2.3.3
-googleapis-common-protos==1.56.2
+googleapis-common-protos==1.56.4
 greenlet==1.1.2
 grpc-google-iam-v1==0.12.4
-grpcio==1.46.3
+grpcio==1.47.0
 grpcio-gcp==0.2.2
-grpcio-status==1.46.3
+grpcio-status==1.47.0
 guppy3==3.1.2
 h5py==3.7.0
 hdfs==2.7.0
 httplib2==0.20.4
 idna==3.3
-importlib-metadata==4.11.4
+importlib-metadata==4.12.0
+iniconfig==1.1.1
 joblib==1.1.0
 keras==2.9.0
 Keras-Preprocessing==1.1.2
-libclang==14.0.1
-Markdown==3.3.7
+libclang==14.0.6
+Markdown==3.4.1
+MarkupSafe==2.1.1
 mmh3==3.0.0
 mock==2.0.0
-more-itertools==8.13.0
 nltk==3.7
 nose==1.3.7
 numpy==1.22.4
 oauth2client==4.1.3
 oauthlib==3.2.0
 opt-einsum==3.3.0
-orjson==3.7.2
+orjson==3.7.11
 overrides==6.1.0
 packaging==21.3
-pandas==1.4.2
-parameterized==0.7.5
+pandas==1.4.3
+parameterized==0.8.1
 pbr==5.9.0
-pluggy==0.13.1
+pluggy==1.0.0
 proto-plus==1.20.6
 protobuf==3.19.4
 psycopg2-binary==2.9.3
@@ -117,24 +117,24 @@
 pymongo==3.12.3
 PyMySQL==1.0.2
 pyparsing==3.0.9
-pytest==4.6.11
+pytest==7.1.2
 pytest-forked==1.4.0
-pytest-timeout==1.4.2
-pytest-xdist==1.34.0
+pytest-timeout==2.1.0
+pytest-xdist==2.5.0
 python-dateutil==2.8.2
 python-snappy==0.6.1
 pytz==2022.1
 PyYAML==6.0
-regex==2022.6.2
-requests==2.28.0
+regex==2022.7.25
+requests==2.28.1
 requests-mock==1.9.3
 requests-oauthlib==1.3.1
-rsa==4.8
+rsa==4.9
 scikit-learn==1.1.1
-scipy==1.8.1
+scipy==1.9.0
 six==1.16.0
 soupsieve==2.3.2.post1
-SQLAlchemy==1.4.37
+SQLAlchemy==1.4.39
 tenacity==5.1.5
 tensorboard==2.9.1
 tensorboard-data-server==0.6.1
@@ -145,13 +145,14 @@
 termcolor==1.1.0
 testcontainers==3.6.0
 threadpoolctl==3.1.0
+tomli==2.0.1
 tqdm==4.64.0
 typing-utils==0.1.0
-typing_extensions==4.2.0
+typing_extensions==4.3.0
 uritemplate==4.1.1
-urllib3==1.26.9
-wcwidth==0.2.5
-websocket-client==1.3.2
-Werkzeug==2.1.2
+urllib3==1.26.11
+websocket-client==1.3.3
+Werkzeug==2.2.1
 wrapt==1.14.1
-zipp==3.8.0
+zipp==3.8.1
+zstandard==0.18.0
diff --git a/sdks/python/setup.py b/sdks/python/setup.py
index a743ff3..250d4c5 100644
--- a/sdks/python/setup.py
+++ b/sdks/python/setup.py
@@ -20,8 +20,6 @@
 import os
 import sys
 import warnings
-from distutils.errors import DistutilsError
-from distutils.version import StrictVersion
 from pathlib import Path
 
 # Pylint and isort disagree here.
@@ -30,9 +28,16 @@
 from pkg_resources import DistributionNotFound
 from pkg_resources import get_distribution
 from pkg_resources import normalize_path
+from pkg_resources import parse_version
 from pkg_resources import to_filename
 from setuptools import Command
 
+# pylint: disable=wrong-import-order
+# It is recommended to import setuptools prior to importing distutils to avoid
+# using legacy behavior from distutils.
+# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0
+from distutils.errors import DistutilsError # isort:skip
+
 
 class mypy(Command):
   user_options = []
@@ -92,7 +97,7 @@
 
 REQUIRED_PIP_VERSION = '7.0.0'
 _PIP_VERSION = get_distribution('pip').version
-if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
+if parse_version(_PIP_VERSION) < parse_version(REQUIRED_PIP_VERSION):
   warnings.warn(
       "You are using version {0} of pip. " \
       "However, version {1} is recommended.".format(
@@ -103,7 +108,7 @@
 REQUIRED_CYTHON_VERSION = '0.28.1'
 try:
   _CYTHON_VERSION = get_distribution('cython').version
-  if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
+  if parse_version(_CYTHON_VERSION) < parse_version(REQUIRED_CYTHON_VERSION):
     warnings.warn(
         "You are using version {0} of cython. " \
         "However, version {1} is recommended.".format(
@@ -214,7 +219,7 @@
         'dill>=0.3.1.1,<0.3.2',
         'cloudpickle>=2.1.0,<3',
         'fastavro>=0.23.6,<2',
-        'grpcio>=1.33.1,<2',
+        'grpcio>=1.33.1,!=1.48.0,<2',
         'hdfs>=2.1.0,<3.0.0',
         'httplib2>=0.8,<0.21.0',
         'numpy>=1.14.3,<1.23.0',
@@ -226,6 +231,7 @@
         'pytz>=2018.3',
         'requests>=2.24.0,<3.0.0',
         'typing-extensions>=3.7.0',
+        'zstandard>=0.18.0,<1',
       # Dynamic dependencies must be specified in a separate list, otherwise
       # Dependabot won't be able to parse the main list. Any dynamic
       # dependencies will not receive updates from Dependabot.
@@ -249,9 +255,9 @@
             'pyyaml>=3.12,<7.0.0',
             'requests_mock>=1.7,<2.0',
             'tenacity>=5.0.2,<6.0',
-            'pytest>=4.4.0,<5.0',
-            'pytest-xdist>=1.29.0,<2',
-            'pytest-timeout>=1.3.3,<2',
+            'pytest>=7.1.2,<8.0',
+            'pytest-xdist>=2.5.0,<3',
+            'pytest-timeout>=2.1.0,<3',
             'scikit-learn>=0.20.0',
             'sqlalchemy>=1.3,<2.0',
             'psycopg2-binary>=2.8.5,<3.0.0',
@@ -261,6 +267,9 @@
           'gcp': [
             'cachetools>=3.1.0,<5',
             'google-apitools>=0.5.31,<0.5.32',
+            # Transitive dep. Required for google-cloud-spanner v1.
+            # See: https://github.com/apache/beam/issues/22454
+            'google-api-core!=2.8.2,<3',
             # NOTE: Maintainers, please do not require google-auth>=2.x.x
             # Until this issue is closed
             # https://github.com/googleapis/google-cloud-python/issues/10566
@@ -281,7 +290,7 @@
             'google-cloud-language>=1.3.0,<2',
             'google-cloud-videointelligence>=1.8.0,<2',
             'google-cloud-vision>=0.38.0,<2',
-            'google-cloud-recommendations-ai>=0.1.0,<=0.2.0'
+            'google-cloud-recommendations-ai>=0.1.0,<0.8.0'
           ],
           'interactive': [
             'facets-overview>=1.0.0,<2',
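
The tightened grpcio requirement above keeps the open range but excludes one specific release. How a specifier like that evaluates can be checked with the packaging library (already pinned in the container requirements); a small illustrative check:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet('>=1.33.1,!=1.48.0,<2')
print('1.47.0' in spec)  # True
print('1.48.0' in spec)  # False - explicitly excluded
print('2.0.0' in spec)   # False - outside the upper bound
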
diff --git a/sdks/python/tox.ini b/sdks/python/tox.ini
index 1b8da25..75e6757 100644
--- a/sdks/python/tox.ini
+++ b/sdks/python/tox.ini
@@ -93,10 +93,9 @@
   {toxinidir}/scripts/run_pytest.sh {envname} "{posargs}"
 
 [testenv:py38-cloudcoverage]
-# More recent versions of pytest-cov do not support pytest 4.4.0
 deps =
   codecov
-  pytest-cov==2.9.0
+  pytest-cov==3.0.0
 passenv = GIT_* BUILD_* ghprb* CHANGE_ID BRANCH_NAME JENKINS_* CODECOV_*
 extras = test,gcp,interactive,dataframe,aws
 commands =
diff --git a/sdks/typescript/src/apache_beam/coders/js_coders.ts b/sdks/typescript/src/apache_beam/coders/js_coders.ts
index 6ffe02c..2f22a3a 100644
--- a/sdks/typescript/src/apache_beam/coders/js_coders.ts
+++ b/sdks/typescript/src/apache_beam/coders/js_coders.ts
@@ -25,6 +25,7 @@
   StrUtf8Coder,
   VarIntCoder,
 } from "./standard_coders";
+import { IterableCoder } from "./required_coders";
 import * as runnerApi from "../proto/beam_runner_api";
 
 export class BsonObjectCoder<T> implements Coder<T> {
@@ -94,6 +95,7 @@
     number: new NumberOrFloatCoder(),
     object: new BsonObjectCoder(),
     boolean: new BoolCoder(),
+    array: new IterableCoder(this),
   };
 
   // This is a map of type names to type markers. It maps a type name to its
@@ -103,6 +105,7 @@
     number: "N",
     object: "O",
     boolean: "B",
+    array: "A",
   };
 
   // This is a map of type markers to type names. It maps a type marker to its
@@ -112,6 +115,7 @@
     N: "number",
     O: "object",
     B: "boolean",
+    A: "array",
   };
 
   encode(element: T, writer: Writer, context: Context) {
@@ -119,7 +123,7 @@
       // typeof is "object" but BSON can't handle it.
       writer.string("Z");
     } else {
-      const type = typeof element;
+      const type = Array.isArray(element) ? "array" : typeof element;
       // TODO: Perf. Write a single byte (no need for the length prefix).
       writer.string(this.typeMarkers[type]);
       this.codersByType[type].encode(element, writer, context);
diff --git a/sdks/typescript/src/apache_beam/coders/standard_coders.ts b/sdks/typescript/src/apache_beam/coders/standard_coders.ts
index a521d40..a971424 100644
--- a/sdks/typescript/src/apache_beam/coders/standard_coders.ts
+++ b/sdks/typescript/src/apache_beam/coders/standard_coders.ts
@@ -28,15 +28,7 @@
 } from "./coders";
 import { BytesCoder, InstantCoder } from "./required_coders";
 import Long from "long";
-import {
-  Window,
-  Instant,
-  IntervalWindow,
-  KV,
-  PaneInfo,
-  Timing,
-  WindowedValue,
-} from "../values";
+import { IntervalWindow } from "../values";
 
 // Historical
 export * from "./required_coders";
@@ -44,36 +36,36 @@
 /**
  * @fileoverview Defines all of the Apache Beam standard coders.
  *
- * Beyond required coders, standard coders provide a efficient ways of encode
+ * Beyond required coders, standard coders provide an efficient way to encode
  * data for communication between the runner and various Beam workers for
  * types that commonly cross process boundaries. Though none of these coders
- * are strictly necessary, if encodings are given for these types it is highly
+ * is strictly necessary, if encodings are given for these types it is highly
  * advised to use these definitions that are interoperable with runners and
  * other SDKs.
  *
- * For schema-aware transforms RowCoder, which is a coder for rows of data
- * with a predetermined schema, is also advised.
+ * For schema-aware transforms, RowCoder, which is a coder for rows of data
+ * with a predetermined schema, is also advised.
  *
  * The formal specifications for these coders can be found in
  * model/pipeline/src/main/proto/beam_runner_api.proto
  */
 
-export class StrUtf8Coder implements Coder<String> {
-  static URN: string = "beam:coder:string_utf8:v1";
-  type: string = "stringutf8coder";
+export class StrUtf8Coder implements Coder<string> {
+  static URN = "beam:coder:string_utf8:v1";
+  type = "stringutf8coder";
   encoder = new TextEncoder();
   decoder = new TextDecoder();
 
-  encode(element: String, writer: Writer, context: Context) {
-    const encodedElement = this.encoder.encode(element as string);
+  encode(element: string, writer: Writer, context: Context) {
+    const encodedElement = this.encoder.encode(element);
     BytesCoder.INSTANCE.encode(encodedElement, writer, context);
   }
 
-  decode(reader: Reader, context: Context): String {
+  decode(reader: Reader, context: Context): string {
     return this.decoder.decode(BytesCoder.INSTANCE.decode(reader, context));
   }
 
-  toProto(pipelineContext: ProtoContext): runnerApi.Coder {
+  toProto(): runnerApi.Coder {
     return {
       spec: {
         urn: StrUtf8Coder.URN,
@@ -86,22 +78,20 @@
 globalRegistry().register(StrUtf8Coder.URN, StrUtf8Coder);
 
 export class VarIntCoder implements Coder<number> {
-  static URN: string = "beam:coder:varint:v1";
+  static URN = "beam:coder:varint:v1";
   static INSTANCE = new VarIntCoder();
 
-  type: string = "varintcoder";
+  type = "varintcoder";
 
-  encode(element: Number | Long | BigInt, writer: Writer, context: Context) {
-    var numEl = element as number;
-    writer.int32(numEl);
-    return;
+  encode(element: number, writer: Writer) {
+    writer.int32(element);
   }
 
-  decode(reader: Reader, context: Context): number {
+  decode(reader: Reader): number {
     return reader.int32();
   }
 
-  toProto(pipelineContext: ProtoContext): runnerApi.Coder {
+  toProto(): runnerApi.Coder {
     return {
       spec: {
         urn: VarIntCoder.URN,
@@ -114,22 +104,22 @@
 globalRegistry().register(VarIntCoder.URN, VarIntCoder);
 
 export class DoubleCoder implements Coder<number> {
-  static URN: string = "beam:coder:double:v1";
+  static URN = "beam:coder:double:v1";
 
-  encode(element: number, writer: Writer, context: Context) {
+  encode(element: number, writer: Writer) {
     const farr = new Float64Array([element]);
     const barr = new Uint8Array(farr.buffer).reverse();
     writeRawBytes(barr, writer);
   }
 
-  decode(reader: Reader, context: Context): number {
+  decode(reader: Reader): number {
     const barr = new Uint8Array(reader.buf);
     const dView = new DataView(barr.buffer.slice(reader.pos, reader.pos + 8));
     reader.double();
     return dView.getFloat64(0, false);
   }
 
-  toProto(pipelineContext: ProtoContext): runnerApi.Coder {
+  toProto(): runnerApi.Coder {
     return {
       spec: {
         urn: DoubleCoder.URN,
@@ -141,19 +131,19 @@
 }
 globalRegistry().register(DoubleCoder.URN, DoubleCoder);
 
-export class BoolCoder implements Coder<Boolean> {
-  static URN: string = "beam:coder:bool:v1";
-  type: string = "boolcoder";
+export class BoolCoder implements Coder<boolean> {
+  static URN = "beam:coder:bool:v1";
+  type = "boolcoder";
 
-  encode(element: Boolean, writer: Writer, context: Context) {
-    writer.bool(element as boolean);
+  encode(element: boolean, writer: Writer) {
+    writer.bool(element);
   }
 
-  decode(reader: Reader, context: Context): Boolean {
+  decode(reader: Reader): boolean {
     return reader.bool();
   }
 
-  toProto(pipelineContext: ProtoContext): runnerApi.Coder {
+  toProto(): runnerApi.Coder {
     return {
       spec: {
         urn: BoolCoder.URN,
@@ -166,8 +156,8 @@
 globalRegistry().register(BoolCoder.URN, BoolCoder);
 
 export class NullableCoder<T> implements Coder<T | undefined> {
-  static URN: string = "beam:coder:nullable:v1";
-  type: string = "nullablecoder";
+  static URN = "beam:coder:nullable:v1";
+  type = "nullablecoder";
 
   elementCoder: Coder<T>;
 
@@ -205,7 +195,7 @@
 globalRegistry().register(NullableCoder.URN, NullableCoder);
 
 export class IntervalWindowCoder implements Coder<IntervalWindow> {
-  static URN: string = "beam:coder:interval_window:v1";
+  static URN = "beam:coder:interval_window:v1";
   static INSTANCE: IntervalWindowCoder = new IntervalWindowCoder();
 
   encode(value: IntervalWindow, writer: Writer, context: Context) {
@@ -214,12 +204,12 @@
   }
 
   decode(reader: Reader, context: Context) {
-    var end = InstantCoder.INSTANCE.decode(reader, context);
-    var duration = <Long>reader.int64();
+    const end = InstantCoder.INSTANCE.decode(reader, context);
+    const duration = <Long>reader.int64();
     return new IntervalWindow(end.sub(duration), end);
   }
 
-  toProto(pipelineContext: ProtoContext): runnerApi.Coder {
+  toProto(): runnerApi.Coder {
     return {
       spec: {
         urn: IntervalWindowCoder.URN,
@@ -233,4 +223,7 @@
 globalRegistry().register(IntervalWindowCoder.URN, IntervalWindowCoder);
 
 import { requireForSerialization } from "../serialization";
-requireForSerialization("apache-beam/coders/standard_coders", exports);
+requireForSerialization(
+  "apache-beam/coders/standard_coders",
+  exports as Record<string, unknown>
+);
diff --git a/sdks/typescript/src/apache_beam/internal/pipeline.ts b/sdks/typescript/src/apache_beam/internal/pipeline.ts
index fe57f12..0dcd803 100644
--- a/sdks/typescript/src/apache_beam/internal/pipeline.ts
+++ b/sdks/typescript/src/apache_beam/internal/pipeline.ts
@@ -114,6 +114,7 @@
   context: PipelineContext;
   transformStack: string[] = [];
   defaultEnvironment: string;
+  usedStageNames: Set<string> = new Set();
 
   private proto: runnerApi.Pipeline;
   private globalWindowing: string;
@@ -147,9 +148,17 @@
     } else {
       this.proto.rootTransformIds.push(transformId);
     }
+    const uniqueName =
+      (parent ? parent.uniqueName + "/" : "") + extractName(transform);
+    if (this.usedStageNames.has(uniqueName)) {
+      throw new Error(
+        `Duplicate stage name: "${uniqueName}". ` +
+          "Use beam.withName(...) to give your transform a unique name."
+      );
+    }
+    this.usedStageNames.add(uniqueName);
     const transformProto: runnerApi.PTransform = {
-      uniqueName:
-        (parent ? parent.uniqueName + "/" : "") + extractName(transform),
+      uniqueName,
       subtransforms: [],
       inputs: objectMap(pvalue.flattenPValue(input), (pc) => pc.getId()),
       outputs: {},
diff --git a/sdks/typescript/src/apache_beam/internal/urns.ts b/sdks/typescript/src/apache_beam/internal/urns.ts
index e08ac58..37200b2 100644
--- a/sdks/typescript/src/apache_beam/internal/urns.ts
+++ b/sdks/typescript/src/apache_beam/internal/urns.ts
@@ -27,6 +27,8 @@
 export const JS_WINDOW_INTO_DOFN_URN = "beam:dofn:js_window_into:v1";
 export const JS_ASSIGN_TIMESTAMPS_DOFN_URN =
   "beam:dofn:js_assign_timestamps:v1";
+export const SERIALIZED_JS_COMBINEFN_INFO =
+  "beam:dofn:serialized_js_combinefn_info:v1";
 
 // Everything maps to the global window.
 export const GLOBAL_WINDOW_MAPPING_FN_URN = "beam:window_mapping_fn:global:v1";
diff --git a/sdks/typescript/src/apache_beam/runners/direct_runner.ts b/sdks/typescript/src/apache_beam/runners/direct_runner.ts
index 21070cc..38b2c65 100644
--- a/sdks/typescript/src/apache_beam/runners/direct_runner.ts
+++ b/sdks/typescript/src/apache_beam/runners/direct_runner.ts
@@ -96,6 +96,9 @@
       ) {
         yield "MergeStatus=" + windowing.mergeStatus;
       }
+      if (windowing.outputTime !== runnerApi.OutputTime_Enum.END_OF_WINDOW) {
+        yield "OutputTime=" + windowing.outputTime;
+      }
     }
   }
 
@@ -194,11 +197,17 @@
     );
     const windowingStrategy =
       context.descriptor.windowingStrategies[inputPc.windowingStrategyId];
-    // TODO: (Cleanup) Check or implement triggers, etc.
     if (
       windowingStrategy.mergeStatus !== runnerApi.MergeStatus_Enum.NON_MERGING
     ) {
-      throw new Error("Non-merging WindowFn: " + windowingStrategy);
+      throw new Error("Unsupported merging WindowFn: " + windowingStrategy);
+    }
+    if (
+      windowingStrategy.outputTime !== runnerApi.OutputTime_Enum.END_OF_WINDOW
+    ) {
+      throw new Error(
+        "Unsupported windowing output time: " + windowingStrategy
+      );
     }
     this.windowCoder = context.pipelineContext.getCoder(
       windowingStrategy.windowCoderId
@@ -206,12 +215,11 @@
   }
 
   process(wvalue: WindowedValue<any>) {
-    // TODO: (Cleanup) Assert non-merging, EOW timestamp, etc.
     for (const window of wvalue.windows) {
       const wkey =
-        encodeToBase64(window, this.windowCoder) +
+        operators.encodeToBase64(window, this.windowCoder) +
         " " +
-        encodeToBase64(wvalue.value.key, this.keyCoder);
+        operators.encodeToBase64(wvalue.value.key, this.keyCoder);
       if (!this.groups.has(wkey)) {
         this.groups.set(wkey, []);
       }
@@ -226,14 +234,16 @@
 
   async finishBundle() {
     for (const [wkey, values] of this.groups) {
-      // const [encodedWindow, encodedKey] = wkey.split(" ");
       const parts = wkey.split(" ");
       const encodedWindow = parts[0];
       const encodedKey = parts[1];
-      const window = decodeFromBase64(encodedWindow, this.windowCoder);
+      const window = operators.decodeFromBase64(
+        encodedWindow,
+        this.windowCoder
+      );
       const maybePromise = this.receiver.receive({
         value: {
-          key: decodeFromBase64(encodedKey, this.keyCoder),
+          key: operators.decodeFromBase64(encodedKey, this.keyCoder),
           value: values,
         },
         windows: [window],
@@ -507,19 +517,6 @@
 
 /////
 
-export function encodeToBase64<T>(element: T, coder: Coder<T>): string {
-  const writer = new protobufjs.Writer();
-  coder.encode(element, writer, CoderContext.wholeStream);
-  return Buffer.from(writer.finish()).toString("base64");
-}
-
-export function decodeFromBase64<T>(s: string, coder: Coder<T>): T {
-  return coder.decode(
-    new protobufjs.Reader(Buffer.from(s, "base64")),
-    CoderContext.wholeStream
-  );
-}
-
 function onlyElement<T>(arg: T[]): T {
   if (arg.length > 1) {
     Error("Expecting exactly one element.");
diff --git a/sdks/typescript/src/apache_beam/testing/assert.ts b/sdks/typescript/src/apache_beam/testing/assert.ts
index fd654b5..193e05d 100644
--- a/sdks/typescript/src/apache_beam/testing/assert.ts
+++ b/sdks/typescript/src/apache_beam/testing/assert.ts
@@ -31,20 +31,23 @@
 export function assertDeepEqual<T>(
   expected: T[]
 ): beam.PTransform<beam.PCollection<T>, void> {
-  return function assertDeepEqual(pcoll: beam.PCollection<T>) {
-    pcoll.apply(
-      assertContentsSatisfies((actual: T[]) => {
-        const actualArray: T[] = [...actual];
-        expected.sort((a, b) =>
-          JSON.stringify(a) < JSON.stringify(b) ? -1 : 1
-        );
-        actualArray.sort((a, b) =>
-          JSON.stringify(a) < JSON.stringify(b) ? -1 : 1
-        );
-        callAssertDeepEqual(actualArray, expected);
-      })
-    );
-  };
+  return beam.withName(
+    `assertDeepEqual(${JSON.stringify(expected).substring(0, 100)})`,
+    function assertDeepEqual(pcoll: beam.PCollection<T>) {
+      pcoll.apply(
+        assertContentsSatisfies((actual: T[]) => {
+          const actualArray: T[] = [...actual];
+          expected.sort((a, b) =>
+            JSON.stringify(a) < JSON.stringify(b) ? -1 : 1
+          );
+          actualArray.sort((a, b) =>
+            JSON.stringify(a) < JSON.stringify(b) ? -1 : 1
+          );
+          callAssertDeepEqual(actualArray, expected);
+        })
+      );
+    }
+  );
 }
 
 export function assertContentsSatisfies<T>(
diff --git a/sdks/typescript/src/apache_beam/transforms/combiners.ts b/sdks/typescript/src/apache_beam/transforms/combiners.ts
index fc7c643..bf4155b 100644
--- a/sdks/typescript/src/apache_beam/transforms/combiners.ts
+++ b/sdks/typescript/src/apache_beam/transforms/combiners.ts
@@ -17,6 +17,8 @@
  */
 
 import { CombineFn } from "./group_and_combine";
+import { Coder } from "../coders/coders";
+import { VarIntCoder } from "../coders/standard_coders";
 
 // TODO(cleanup): These reductions only work on Arrays, not Iterables.
 
@@ -26,6 +28,7 @@
   mergeAccumulators: (accumulators: number[]) =>
     accumulators.reduce((prev, current) => prev + current),
   extractOutput: (acc) => acc,
+  accumulatorCoder: () => new VarIntCoder(),
 };
 
 export const sum: CombineFn<number, number, number> = {
@@ -34,6 +37,7 @@
   mergeAccumulators: (accumulators: number[]) =>
     accumulators.reduce((prev, current) => prev + current),
   extractOutput: (acc: number) => acc,
+  accumulatorCoder: (inputCoder: Coder<number>) => inputCoder,
 };
 
 export const max: CombineFn<any, any, any> = {
diff --git a/sdks/typescript/src/apache_beam/transforms/group_and_combine.ts b/sdks/typescript/src/apache_beam/transforms/group_and_combine.ts
index 739a3a6..f6735b5 100644
--- a/sdks/typescript/src/apache_beam/transforms/group_and_combine.ts
+++ b/sdks/typescript/src/apache_beam/transforms/group_and_combine.ts
@@ -17,10 +17,16 @@
  */
 
 import { KV } from "../values";
-import { PTransform, PTransformClass, withName } from "./transform";
+import {
+  PTransform,
+  PTransformClass,
+  withName,
+  extractName,
+} from "./transform";
 import { flatten } from "./flatten";
 import { PCollection } from "../pvalue";
 import { PValue, P } from "../pvalue";
+import { Coder } from "../coders/coders";
 import * as internal from "./internal";
 import { count } from "./combiners";
 
@@ -36,6 +42,7 @@
   addInput: (A, I) => A;
   mergeAccumulators: (accumulators: Iterable<A>) => A;
   extractOutput: (A) => O;
+  accumulatorCoder?(inputCoder: Coder<I>): Coder<A>;
 }
 
 // TODO: (Typescript) When typing this as ((a: I, b: I) => I), types are not inferred well.
@@ -83,10 +90,13 @@
     combiner: Combiner<I>,
     resultName: string
   ) {
-    return new GroupByAndCombine(this.keyFn, this.keyNames, []).combining(
-      expr,
-      combiner,
-      resultName
+    return withName(
+      extractName(this),
+      new GroupByAndCombine(this.keyFn, this.keyNames, []).combining(
+        expr,
+        combiner,
+        resultName
+      )
     );
   }
 }
@@ -95,7 +105,10 @@
   key: string | string[] | ((element: T) => K),
   keyName: string | undefined = undefined
 ): GroupBy<T, K> {
-  return new GroupBy<T, K>(key, keyName);
+  return withName(
+    `groupBy(${extractName(key)})`,
+    new GroupBy<T, K>(key, keyName)
+  );
 }
 
 /**
@@ -122,10 +135,13 @@
     combiner: Combiner<I>,
     resultName: string
   ) {
-    return new GroupByAndCombine((_) => null, undefined, []).combining(
-      expr,
-      combiner,
-      resultName
+    return withName(
+      extractName(this),
+      new GroupByAndCombine((_) => null, undefined, []).combining(
+        expr,
+        combiner,
+        resultName
+      )
     );
   }
 }
@@ -158,16 +174,19 @@
     combiner: Combiner<I>,
     resultName: string // TODO: (Unique names) Optionally derive from expr and combineFn?
   ) {
-    return new GroupByAndCombine(
-      this.keyFn,
-      this.keyNames,
-      this.combiners.concat([
-        {
-          expr: extractFn(expr),
-          combineFn: toCombineFn(combiner),
-          resultName: resultName,
-        },
-      ])
+    return withName(
+      extractName(this),
+      new GroupByAndCombine(
+        this.keyFn,
+        this.keyNames,
+        this.combiners.concat([
+          {
+            expr: extractFn(expr),
+            combineFn: toCombineFn(combiner),
+            resultName: resultName,
+          },
+        ])
+      )
     );
   }
 
@@ -246,9 +265,7 @@
     createAccumulator: () => undefined,
     addInput: (a, b) => (a === undefined ? b : combiner(a, b)),
     mergeAccumulators: (accs) =>
-      [...accs]
-        .filter((a) => a !== null && a !== undefined)
-        .reduce(combiner, undefined),
+      [...accs].filter((a) => a !== null && a !== undefined).reduce(combiner),
     extractOutput: (a) => a,
   };
 }
@@ -307,30 +324,35 @@
   { [key: string]: PCollection<any> },
   PCollection<{ key: K; values: { [key: string]: Iterable<any> } }>
 > {
-  return function coGroupBy(inputs: { [key: string]: PCollection<any> }) {
-    const [keyFn, keyNames] = extractFnAndName(key, keyName || "key");
-    keyName = typeof keyNames === "string" ? keyNames : "key";
-    const tags = [...Object.keys(inputs)];
-    const tagged = [...Object.entries(inputs)].map(([tag, pcoll]) =>
-      pcoll.map((element) => ({
-        key: keyFn(element),
-        tag,
-        element,
-      }))
-    );
-    return P(tagged)
-      .apply(flatten())
-      .apply(groupBy("key"))
-      .map(function groupValues({ key, value }) {
-        const groupedValues: { [key: string]: any[] } = Object.fromEntries(
-          tags.map((tag) => [tag, []])
-        );
-        for (const { tag, element } of value) {
-          groupedValues[tag].push(element);
-        }
-        return { key, values: groupedValues };
-      });
-  };
+  return withName(
+    `coGroupBy(${extractName(key)})`,
+    function coGroupBy(inputs: { [key: string]: PCollection<any> }) {
+      const [keyFn, keyNames] = extractFnAndName(key, keyName || "key");
+      keyName = typeof keyNames === "string" ? keyNames : "key";
+      const tags = [...Object.keys(inputs)];
+      const tagged = [...Object.entries(inputs)].map(([tag, pcoll]) =>
+        pcoll.map(
+          withName(`map[${tag}]`, (element) => ({
+            key: keyFn(element),
+            tag,
+            element,
+          }))
+        )
+      );
+      return P(tagged)
+        .apply(flatten())
+        .apply(groupBy("key"))
+        .map(function groupValues({ key, value }) {
+          const groupedValues: { [key: string]: any[] } = Object.fromEntries(
+            tags.map((tag) => [tag, []])
+          );
+          for (const { tag, element } of value) {
+            groupedValues[tag].push(element);
+          }
+          return { key, values: groupedValues };
+        });
+    }
+  );
 }
 
 // TODO: (Typescript) Can I type T as "something that has this key" and/or,
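
The theme of the `group_and_combine.ts` changes is that transforms now wrap themselves in `withName(...)`, so auto-derived labels such as `groupBy(key)` appear in the pipeline graph instead of anonymous defaults, while user callables can still be named explicitly. A rough usage sketch under the same API as the tests later in this diff (package and import paths are assumptions and may differ in your layout):

```typescript
import * as beam from "apache-beam"; // assumed package import
import { groupBy } from "apache-beam/transforms/group_and_combine"; // assumed path

async function main() {
  await beam.createRunner().run((root) => {
    root
      // Explicit name for a user-supplied transform.
      .apply(beam.withName("createScores", beam.create([{ key: "a", score: 1 }])))
      // After this change, groupBy derives its own label, e.g. "groupBy(key)".
      .apply(groupBy("key"))
      // Callables passed to map can also be named to keep graph labels unique.
      .map(beam.withName("formatResult", (kv) => JSON.stringify(kv)));
  });
}

main().catch(console.error);
```
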
diff --git a/sdks/typescript/src/apache_beam/transforms/internal.ts b/sdks/typescript/src/apache_beam/transforms/internal.ts
index 5eedf3d..8a78a9a 100644
--- a/sdks/typescript/src/apache_beam/transforms/internal.ts
+++ b/sdks/typescript/src/apache_beam/transforms/internal.ts
@@ -34,6 +34,8 @@
 import { RowCoder } from "../coders/row_coder";
 import { KV } from "../values";
 import { CombineFn } from "./group_and_combine";
+import { serializeFn } from "../internal/serialize";
+import { CombinePerKeyPrecombineOperator } from "../worker/operators";
 
 /**
  * `Impulse` is the basic *source* primitive `PTransformClass`. It receives a Beam
@@ -170,21 +172,84 @@
 export function combinePerKey<K, InputT, AccT, OutputT>(
   combineFn: CombineFn<InputT, AccT, OutputT>
 ): PTransform<PCollection<KV<K, InputT>>, PCollection<KV<K, OutputT>>> {
-  function expandInternal(input: PCollection<KV<any, InputT>>) {
+  function expandInternal(
+    input: PCollection<KV<any, InputT>>,
+    pipeline: Pipeline,
+    transformProto: runnerApi.PTransform
+  ) {
+    const pipelineComponents: runnerApi.Components =
+      pipeline.getProto().components!;
+    const inputProto = pipelineComponents.pcollections[input.getId()];
+
+    try {
+      // If this fails, we cannot lift, so we skip setting the liftable URN.
+      CombinePerKeyPrecombineOperator.checkSupportsWindowing(
+        pipelineComponents.windowingStrategies[inputProto.windowingStrategyId]
+      );
+
+      // Ensure the input is using the KV coder.
+      const inputCoderProto = pipelineComponents.coders[inputProto.coderId];
+      if (inputCoderProto.spec!.urn !== KVCoder.URN) {
+        return input
+          .apply(
+            withCoderInternal(
+              new KVCoder(new GeneralObjectCoder(), new GeneralObjectCoder())
+            )
+          )
+          .apply(combinePerKey(combineFn));
+      }
+
+      const inputValueCoder = pipeline.context.getCoder<InputT>(
+        inputCoderProto.componentCoderIds[1]
+      );
+
+      transformProto.spec = runnerApi.FunctionSpec.create({
+        urn: combinePerKey.urn,
+        payload: runnerApi.CombinePayload.toBinary({
+          combineFn: {
+            urn: urns.SERIALIZED_JS_COMBINEFN_INFO,
+            payload: serializeFn({ combineFn }),
+          },
+          accumulatorCoderId: pipeline.context.getCoderId(
+            combineFn.accumulatorCoder
+              ? combineFn.accumulatorCoder(inputValueCoder)
+              : new GeneralObjectCoder()
+          ),
+        }),
+      });
+    } catch (err) {
+      // Execute this as an unlifted combine.
+    }
+
     return input //
       .apply(groupByKey())
       .map(
-        withName("applyCombine", (kv) => ({
-          key: kv.key,
-          value: combineFn.extractOutput(
-            kv.value.reduce(
-              combineFn.addInput.bind(combineFn),
-              combineFn.createAccumulator()
-            )
-          ),
-        }))
+        withName("applyCombine", (kv) => {
+          // Artificially use multiple accumulators to emulate what would
+          // happen in a distributed combine for better testing.
+          const accumulators = [
+            combineFn.createAccumulator(),
+            combineFn.createAccumulator(),
+            combineFn.createAccumulator(),
+          ];
+          let ix = 0;
+          for (const value of kv.value) {
+            accumulators[ix % 3] = combineFn.addInput(
+              accumulators[ix % 3],
+              value
+            );
+            ix++;
+          }
+          return {
+            key: kv.key,
+            value: combineFn.extractOutput(
+              combineFn.mergeAccumulators(accumulators)
+            ),
+          };
+        })
       );
   }
 
   return withName(`combinePerKey(${extractName(combineFn)})`, expandInternal);
 }
+
+combinePerKey.urn = "beam:transform:combine_per_key:v1";
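
The rewritten `expandInternal` first tries to annotate the transform proto with `beam:transform:combine_per_key:v1` and a serialized CombineFn so a runner can lift the combine; if the windowing strategy or input coder rules that out, it falls back to the plain `groupByKey()` + `applyCombine` path. A sketch of a CombineFn compatible with the interface exercised here (import paths are assumptions):

```typescript
import { combinePerKey } from "apache-beam/transforms/internal"; // assumed path
import { CombineFn } from "apache-beam/transforms/group_and_combine"; // assumed path

// Count/sum accumulator, merged associatively, mean extracted at the end.
const meanFn: CombineFn<number, { sum: number; count: number }, number> = {
  createAccumulator: () => ({ sum: 0, count: 0 }),
  addInput: (acc, x) => ({ sum: acc.sum + x, count: acc.count + 1 }),
  mergeAccumulators: (accs) => {
    let sum = 0;
    let count = 0;
    for (const a of accs) {
      sum += a.sum;
      count += a.count;
    }
    return { sum, count };
  },
  extractOutput: (acc) => (acc.count === 0 ? NaN : acc.sum / acc.count),
};

// Given keyedNumbers: PCollection<KV<string, number>> (hypothetical), apply it with:
// const means = keyedNumbers.apply(combinePerKey(meanFn));
```
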
diff --git a/sdks/typescript/src/apache_beam/worker/operators.ts b/sdks/typescript/src/apache_beam/worker/operators.ts
index e9a0718..a70c56d 100644
--- a/sdks/typescript/src/apache_beam/worker/operators.ts
+++ b/sdks/typescript/src/apache_beam/worker/operators.ts
@@ -29,8 +29,10 @@
 import { PipelineContext } from "../internal/pipeline";
 import { deserializeFn } from "../internal/serialize";
 import { Coder, Context as CoderContext } from "../coders/coders";
-import { Window, Instant, WindowedValue } from "../values";
+import { PaneInfo, Window, Instant, WindowedValue } from "../values";
+import { PaneInfoCoder } from "../coders/standard_coders";
 import { parDo, DoFn, SplitOptions } from "../transforms/pardo";
+import { CombineFn } from "../transforms/group_and_combine";
 import { WindowFn } from "../transforms/window";
 
 import {
@@ -305,6 +307,255 @@
 
 registerOperator("beam:transform:flatten:v1", FlattenOperator);
 
+// CombinePerKey operators.
+
+abstract class CombineOperator<I, A, O> {
+  receiver: Receiver;
+  combineFn: CombineFn<I, A, O>;
+
+  constructor(
+    transformId: string,
+    transform: PTransform,
+    context: OperatorContext
+  ) {
+    this.receiver = context.getReceiver(
+      onlyElement(Object.values(transform.outputs))
+    );
+    const spec = runnerApi.CombinePayload.fromBinary(transform.spec!.payload);
+    this.combineFn = deserializeFn(spec.combineFn!.payload).combineFn;
+  }
+}
+
+export class CombinePerKeyPrecombineOperator<I, A, O>
+  extends CombineOperator<I, A, O>
+  implements IOperator
+{
+  keyCoder: Coder<unknown>;
+  windowCoder: Coder<Window>;
+
+  groups: Map<string, A>;
+  maxKeys: number = 10000;
+
+  static checkSupportsWindowing(
+    windowingStrategy: runnerApi.WindowingStrategy
+  ) {
+    if (
+      windowingStrategy.mergeStatus !== runnerApi.MergeStatus_Enum.NON_MERGING
+    ) {
+      throw new Error("Unsupported non-merging WindowFn: " + windowingStrategy);
+    }
+    if (
+      windowingStrategy.outputTime !== runnerApi.OutputTime_Enum.END_OF_WINDOW
+    ) {
+      throw new Error(
+        "Unsupported windowing output time: " + windowingStrategy
+      );
+    }
+  }
+
+  constructor(
+    transformId: string,
+    transform: PTransform,
+    context: OperatorContext
+  ) {
+    super(transformId, transform, context);
+    const inputPc =
+      context.descriptor.pcollections[
+        onlyElement(Object.values(transform.inputs))
+      ];
+    this.keyCoder = context.pipelineContext.getCoder(
+      context.descriptor.coders[inputPc.coderId].componentCoderIds[0]
+    );
+    const windowingStrategy =
+      context.descriptor.windowingStrategies[inputPc.windowingStrategyId];
+    CombinePerKeyPrecombineOperator.checkSupportsWindowing(windowingStrategy);
+    this.windowCoder = context.pipelineContext.getCoder(
+      windowingStrategy.windowCoderId
+    );
+  }
+
+  process(wvalue: WindowedValue<any>) {
+    for (const window of wvalue.windows) {
+      const wkey =
+        encodeToBase64(window, this.windowCoder) +
+        " " +
+        encodeToBase64(wvalue.value.key, this.keyCoder);
+      if (!this.groups.has(wkey)) {
+        this.groups.set(wkey, this.combineFn.createAccumulator());
+      }
+      this.groups.set(
+        wkey,
+        this.combineFn.addInput(this.groups.get(wkey), wvalue.value.value)
+      );
+    }
+    if (this.groups.size > this.maxKeys) {
+      // Flush roughly 10% of the entries (in insertion order) to make more room.
+      // TODO: Tune this, or better use LRU or ARC for this cache.
+      return this.flush(this.maxKeys * 0.9);
+    } else {
+      return NonPromise;
+    }
+  }
+
+  async startBundle() {
+    this.groups = new Map();
+  }
+
+  flush(target: number): ProcessResult {
+    const result = new ProcessResultBuilder();
+    const toDelete: string[] = [];
+    for (const [wkey, values] of this.groups) {
+      const parts = wkey.split(" ");
+      const encodedWindow = parts[0];
+      const encodedKey = parts[1];
+      const window = decodeFromBase64(encodedWindow, this.windowCoder);
+      result.add(
+        this.receiver.receive({
+          value: {
+            key: decodeFromBase64(encodedKey, this.keyCoder),
+            value: values,
+          },
+          windows: [window],
+          timestamp: window.maxTimestamp(),
+          pane: PaneInfoCoder.ONE_AND_ONLY_FIRING,
+        })
+      );
+      toDelete.push(wkey);
+      if (this.groups.size - toDelete.length <= target) {
+        break;
+      }
+    }
+    for (const wkey of toDelete) {
+      this.groups.delete(wkey);
+    }
+    return result.build();
+  }
+
+  async finishBundle() {
+    const maybePromise = this.flush(0);
+    if (maybePromise !== NonPromise) {
+      await maybePromise;
+    }
+    this.groups = null!;
+  }
+}
+
+registerOperator(
+  "beam:transform:combine_per_key_precombine:v1",
+  CombinePerKeyPrecombineOperator
+);
+
+class CombinePerKeyMergeAccumulatorsOperator<I, A, O>
+  extends CombineOperator<I, A, O>
+  implements IOperator
+{
+  async startBundle() {}
+
+  process(wvalue: WindowedValue<any>) {
+    const { key, value } = wvalue.value as { key: any; value: Iterable<A> };
+    return this.receiver.receive({
+      value: { key, value: this.combineFn.mergeAccumulators(value) },
+      windows: wvalue.windows,
+      timestamp: wvalue.timestamp,
+      pane: wvalue.pane,
+    });
+  }
+
+  async finishBundle() {}
+}
+
+registerOperator(
+  "beam:transform:combine_per_key_merge_accumulators:v1",
+  CombinePerKeyMergeAccumulatorsOperator
+);
+
+class CombinePerKeyExtractOutputsOperator<I, A, O>
+  extends CombineOperator<I, A, O>
+  implements IOperator
+{
+  async startBundle() {}
+
+  process(wvalue: WindowedValue<any>) {
+    const { key, value } = wvalue.value as { key: any; value: A };
+    return this.receiver.receive({
+      value: { key, value: this.combineFn.extractOutput(value) },
+      windows: wvalue.windows,
+      timestamp: wvalue.timestamp,
+      pane: wvalue.pane,
+    });
+  }
+
+  async finishBundle() {}
+}
+
+registerOperator(
+  "beam:transform:combine_per_key_extract_outputs:v1",
+  CombinePerKeyExtractOutputsOperator
+);
+
+class CombinePerKeyConvertToAccumulatorsOperator<I, A, O>
+  extends CombineOperator<I, A, O>
+  implements IOperator
+{
+  async startBundle() {}
+
+  process(wvalue: WindowedValue<any>) {
+    const { key, value } = wvalue.value as { key: any; value: I };
+    return this.receiver.receive({
+      value: {
+        key,
+        value: this.combineFn.addInput(
+          this.combineFn.createAccumulator(),
+          value
+        ),
+      },
+      windows: wvalue.windows,
+      timestamp: wvalue.timestamp,
+      pane: wvalue.pane,
+    });
+  }
+
+  async finishBundle() {}
+}
+
+registerOperator(
+  "beam:transform:combine_per_key_convert_to_accumulators:v1",
+  CombinePerKeyConvertToAccumulatorsOperator
+);
+
+class CombinePerKeyCombineGroupedValuesOperator<I, A, O>
+  extends CombineOperator<I, A, O>
+  implements IOperator
+{
+  async startBundle() {}
+
+  process(wvalue: WindowedValue<any>) {
+    const { key, value } = wvalue.value as { key: any; value: Iterable<I> };
+    let accumulator = this.combineFn.createAccumulator();
+    for (const input of value) {
+      accumulator = this.combineFn.addInput(accumulator, input);
+    }
+    return this.receiver.receive({
+      value: {
+        key,
+        value: this.combineFn.extractOutput(accumulator),
+      },
+      windows: wvalue.windows,
+      timestamp: wvalue.timestamp,
+      pane: wvalue.pane,
+    });
+  }
+
+  async finishBundle() {}
+}
+
+registerOperator(
+  "beam:transform:combine_grouped_values:v1",
+  CombinePerKeyCombineGroupedValuesOperator
+);
+
+// ParDo operators.
+
 class GenericParDoOperator implements IOperator {
   private doFn: DoFn<unknown, unknown, unknown>;
   private getStateProvider: () => StateProvider;
@@ -589,6 +840,21 @@
   }
 );
 
+///
+
+export function encodeToBase64<T>(element: T, coder: Coder<T>): string {
+  const writer = new protobufjs.Writer();
+  coder.encode(element, writer, CoderContext.wholeStream);
+  return Buffer.from(writer.finish()).toString("base64");
+}
+
+export function decodeFromBase64<T>(s: string, coder: Coder<T>): T {
+  return coder.decode(
+    new protobufjs.Reader(Buffer.from(s, "base64")),
+    CoderContext.wholeStream
+  );
+}
+
 function onlyElement<Type>(arg: Type[]): Type {
   if (arg.length > 1) {
     Error("Expecting exactly one element.");
diff --git a/sdks/typescript/test/combine_test.ts b/sdks/typescript/test/combine_test.ts
index f54f0a9..cee922d 100644
--- a/sdks/typescript/test/combine_test.ts
+++ b/sdks/typescript/test/combine_test.ts
@@ -17,11 +17,10 @@
  */
 
 import * as beam from "../src/apache_beam";
-import { DirectRunner } from "../src/apache_beam/runners/direct_runner";
+import { createRunner } from "../src/apache_beam/runners/runner";
 import * as testing from "../src/apache_beam/testing/assert";
 import { KV } from "../src/apache_beam/values";
 
-import { PortableRunner } from "../src/apache_beam/runners/portable_runner/runner";
 import * as combiners from "../src/apache_beam/transforms/combiners";
 import {
   CombineFn,
@@ -33,7 +32,7 @@
 
 describe("Apache Beam combiners", function () {
   it("runs wordcount with a countPerKey transform and asserts the result", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const lines = root.apply(
         beam.create([
           "In the beginning God created the heaven and the earth.",
@@ -84,7 +83,7 @@
   });
 
   it("runs wordcount with a countGlobally transform and asserts the result", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const lines = root.apply(
         beam.create(["And God said, Let there be light: and there was light"])
       );
@@ -134,7 +133,7 @@
       };
     }
 
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const lines = root.apply(
         beam.create([
           "In the beginning God created the heaven and the earth.",
@@ -164,7 +163,7 @@
   });
 
   it("test GroupBy with combining", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const inputs = root.apply(
         beam.create([
           { k: "k1", a: 1, b: 100 },
@@ -190,7 +189,7 @@
   });
 
   it("test GroupBy list with combining", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const inputs = root.apply(
         beam.create([
           { a: 1, b: 10, c: 100 },
@@ -220,7 +219,7 @@
   });
 
   it("test GroupBy expr with combining", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const inputs = root.apply(
         beam.create([
           { a: 1, b: 10 },
@@ -247,7 +246,7 @@
   });
 
   it("test GroupBy with binary combinefn", async function () {
-    await new DirectRunner().run((root) => {
+    await createRunner().run((root) => {
       const inputs = root.apply(
         beam.create([
           { key: 0, value: 10 },
diff --git a/sdks/typescript/test/docs/programming_guide.ts b/sdks/typescript/test/docs/programming_guide.ts
index fc038b4..698fd4b 100644
--- a/sdks/typescript/test/docs/programming_guide.ts
+++ b/sdks/typescript/test/docs/programming_guide.ts
@@ -163,7 +163,7 @@
           );
 
         by_word_length
-          .map(sortValues)
+          .map(beam.withName("sortLenValues", sortValues))
           .apply(
             assertDeepEqual([{ key: 3, value: cats.concat(dogs).sort() }])
           );
@@ -186,8 +186,12 @@
           { name: "carl", phone: "444-555-6666" },
         ];
 
-        const emails = root.apply(beam.create(emails_list));
-        const phones = root.apply(beam.create(phones_list));
+        const emails = root.apply(
+          beam.withName("createEmails", beam.create(emails_list))
+        );
+        const phones = root.apply(
+          beam.withName("createPhones", beam.create(phones_list))
+        );
         // [END cogroupbykey_inputs]
 
         // [START cogroupbykey_raw_outputs]
@@ -328,8 +332,12 @@
     it("model_multiple_pcollections_flatten", async function () {
       await beam.createRunner().run((root: beam.Root) => {
         // [START model_multiple_pcollections_flatten]
-        const fib = root.apply(beam.create([1, 1, 2, 3, 5, 8]));
-        const pow = root.apply(beam.create([1, 2, 4, 8, 16, 32]));
+        const fib = root.apply(
+          beam.withName("createFib", beam.create([1, 1, 2, 3, 5, 8]))
+        );
+        const pow = root.apply(
+          beam.withName("createPow", beam.create([1, 2, 4, 8, 16, 32]))
+        );
         const result = beam.P([fib, pow]).apply(beam.flatten());
         // [END model_multiple_pcollections_flatten]
         result.apply(assertDeepEqual([1, 1, 1, 2, 2, 3, 4, 5, 8, 8, 16, 32]));
@@ -434,14 +442,15 @@
         }
 
         {
-          function processFn(element, context) {}
+          function processWithWindow(element, context) {}
+          function processWithPaneInfo(element, context) {}
 
           // [START window_param]
-          pcoll.map(processFn, { timestamp: pardo.windowParam() });
+          pcoll.map(processWithWindow, { timestamp: pardo.windowParam() });
           // [END window_param]
 
           // [START pane_info_param]
-          pcoll.map(processFn, { timestamp: pardo.paneInfoParam() });
+          pcoll.map(processWithPaneInfo, { timestamp: pardo.paneInfoParam() });
           // [END pane_info_param]
         }
       });
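
The guide-snippet edits above give each `create` its own `withName` label, presumably so that two root-level applications of the same auto-named transform do not produce duplicate labels in the pipeline graph. A minimal sketch of the pattern (package name assumed):

```typescript
import * as beam from "apache-beam"; // assumed package import

async function main() {
  await beam.createRunner().run((root) => {
    // Two root-level creates: naming each one keeps their transform labels distinct.
    const evens = root.apply(beam.withName("createEvens", beam.create([2, 4, 6])));
    const odds = root.apply(beam.withName("createOdds", beam.create([1, 3, 5])));
    beam.P([evens, odds]).apply(beam.flatten());
  });
}

main().catch(console.error);
```
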
diff --git a/sdks/typescript/test/js_coders_test.ts b/sdks/typescript/test/js_coders_test.ts
index 308bd48..6ab6f86 100644
--- a/sdks/typescript/test/js_coders_test.ts
+++ b/sdks/typescript/test/js_coders_test.ts
@@ -73,6 +73,7 @@
         obj: { any: "any" },
         null: null,
         bool: true,
+        array: [1, 2, 3],
         // 'undef': undefined,  // TODO(pabloem): Figure out how to support undefined encoding/decoding.
         bigint: Number.MAX_SAFE_INTEGER + 100,
       };
diff --git a/sdks/typescript/test/primitives_test.ts b/sdks/typescript/test/primitives_test.ts
index c95c2a2..5e486db 100644
--- a/sdks/typescript/test/primitives_test.ts
+++ b/sdks/typescript/test/primitives_test.ts
@@ -78,7 +78,9 @@
     it("runs a map with singleton side input", async function () {
       await new DirectRunner().run((root) => {
         const input = root.apply(beam.create([1, 2, 1]));
-        const sideInput = root.apply(beam.create([4]));
+        const sideInput = root.apply(
+          beam.withName("createSide", beam.create([4]))
+        );
         input
           .map((e, context) => e / context.side.lookup(), {
             side: pardo.singletonSideInput(sideInput),
diff --git a/settings.gradle.kts b/settings.gradle.kts
index 56173ca..eb8ea76 100644
--- a/settings.gradle.kts
+++ b/settings.gradle.kts
@@ -119,6 +119,7 @@
 include(":sdks:java:container:java11")
 include(":sdks:java:container:java17")
 include(":sdks:java:core")
+include(":sdks:java:core:jmh")
 include(":sdks:java:expansion-service")
 include(":sdks:java:expansion-service:app")
 include(":sdks:java:extensions:arrow")
@@ -193,6 +194,7 @@
 include(":sdks:java:io:rabbitmq")
 include(":sdks:java:io:redis")
 include(":sdks:java:io:solr")
+include(":sdks:java:io:sparkreceiver")
 include(":sdks:java:io:snowflake")
 include(":sdks:java:io:snowflake:expansion-service")
 include(":sdks:java:io:splunk")
@@ -236,7 +238,7 @@
 include(":sdks:python:test-suites:tox:py38")
 include(":sdks:python:test-suites:tox:py39")
 include(":vendor:grpc-1_43_2")
-include(":vendor:bytebuddy-1_12_8")
+include(":vendor:grpc-1_48_1")
 include(":vendor:calcite-1_28_0")
 include(":vendor:guava-26_0-jre")
 include(":website")
diff --git a/vendor/README.md b/vendor/README.md
index 9a12e31..f7e7559 100644
--- a/vendor/README.md
+++ b/vendor/README.md
@@ -32,12 +32,12 @@
 The [linkage tool](https://lists.apache.org/thread.html/eb5d95b9a33d7e32dc9bcd0f7d48ba8711d42bd7ed03b9cf0f1103f1%40%3Cdev.beam.apache.org%3E)
 is useful for the vendored dependency upgrades. It reports the linkage errors across multiple Apache Beam artifact ids.
 
-For example, when we upgrade the version of gRPC to 1.43.2 and the version of the vendored gRPC is 0.1-SNAPSHOT,
+For example, when we upgrade the version of gRPC to 1.48.1 and the version of the vendored gRPC is 0.1-SNAPSHOT,
 we could run the linkage tool as follows:
 
 ```
-$ ./gradlew -p vendor/grpc-1_43_2 publishMavenJavaPublicationToMavenLocal -Ppublishing -PvendoredDependenciesOnly
-$ ./gradlew -PvendoredDependenciesOnly -Ppublishing -PjavaLinkageArtifactIds=beam-vendor-grpc-1_43_2:0.1-SNAPSHOT :checkJavaLinkage
+$ ./gradlew -p vendor/grpc-1_48_1 publishMavenJavaPublicationToMavenLocal -Ppublishing -PvendoredDependenciesOnly
+$ ./gradlew -PvendoredDependenciesOnly -Ppublishing -PjavaLinkageArtifactIds=beam-vendor-grpc-1_48_1:0.1-SNAPSHOT :checkJavaLinkage
 ```
 
 ### Known Linkage Errors in the Vendored gRPC Dependencies
@@ -62,7 +62,7 @@
   The `io.netty.handler.ssl` package has classes that have references to missing classes in other
   unused optional SSL implementations.
 - References from `io.netty.handler.codec.compression`: Beam does not use the optional dependencies
-  for compression algorithms (jzlib, lzma, and lzf) through Netty's features.
+  for compression algorithms (brotli, jzlib, lzma, lzf, and zstd) through Netty's features.
 - References to `com.google.protobuf.nano` and `org.jboss.marshalling`: Beam does not use the
   optional serialization algorithms.
 - References from `io.netty.util.internal.logging`: Netty's logging framework can choose available
@@ -77,19 +77,22 @@
 
 Example PRs:
 - Updating gRPC version (large) https://github.com/apache/beam/pull/16460
+- Testing updated gRPC version (large) https://github.com/apache/beam/pull/22595
 - Updating protobuf for calcite (minor version update): https://github.com/apache/beam/pull/16476
 
 Steps:
 
 1. Generate new artifact files with `publishMavenJavaPublicationToMavenLocal` and
-   copy to a folder in Beam (e.g. `tempLib`):
+   copy to the `tempLib` folder in Beam:
 
 ```
-./gradlew -p vendor/grpc-1_43_2 publishMavenJavaPublicationToMavenLocal -Ppublishing -PvendoredDependenciesOnly
+./gradlew -p vendor/grpc-1_48_1 publishMavenJavaPublicationToMavenLocal -Ppublishing -PvendoredDependenciesOnly
+
+mkdir -p tempLib/org/apache/beam
 
 # Copy files (jar/poms/metadata) to your beam repository
-cp -R ~/.m2/repository/org/apache/beam/beam-vendor-grpc-1_43_2/ \
-      $BEAMDIR/tempLib/org/apache/beam/beam-vendor-grpc-1_43_2
+cp -R ~/.m2/repository/org/apache/beam/beam-vendor-grpc-1_48_1/ \
+      tempLib/org/apache/beam
 ```
 
 2. Add the folder to the expected project repositories:
diff --git a/vendor/bytebuddy-1_12_8/build.gradle.kts b/vendor/bytebuddy-1_12_8/build.gradle.kts
deleted file mode 100644
index 5f04db9..0000000
--- a/vendor/bytebuddy-1_12_8/build.gradle.kts
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-plugins { id("org.apache.beam.vendor-java") }
-
-description = "Apache Beam :: Vendored Dependencies :: ByteBuddy :: 1.12.0"
-
-group = "org.apache.beam"
-version = "0.1"
-
-val vendorJava = project.extensions.extraProperties.get("vendorJava") as groovy.lang.Closure<*>
-vendorJava(
-  mapOf(
-    "dependencies" to listOf("net.bytebuddy:byte-buddy:1.12.0"),
-    "relocations" to mapOf(
-            "net.bytebuddy" to "org.apache.beam.vendor.bytebuddy.v1_12_8.net.bytebuddy"),
-    "exclusions" to listOf(
-            "**/module-info.class"
-    ),
-    "groupId" to group,
-    "artifactId" to "beam-vendor-bytebuddy-1_12_8",
-    "version" to version
-  )
-)
diff --git a/vendor/grpc-1_48_1/build.gradle b/vendor/grpc-1_48_1/build.gradle
new file mode 100644
index 0000000..ad5dcbe
--- /dev/null
+++ b/vendor/grpc-1_48_1/build.gradle
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.beam.gradle.GrpcVendoring_1_48_1
+
+plugins { id 'org.apache.beam.vendor-java' }
+
+description = "Apache Beam :: Vendored Dependencies :: gRPC :: 1.48.1"
+
+group = "org.apache.beam"
+version = "0.1"
+
+vendorJava(
+  dependencies: GrpcVendoring_1_48_1.dependencies(),
+  runtimeDependencies: GrpcVendoring_1_48_1.runtimeDependencies(),
+  testDependencies: GrpcVendoring_1_48_1.testDependencies(),
+  relocations: GrpcVendoring_1_48_1.relocations(),
+  exclusions: GrpcVendoring_1_48_1.exclusions(),
+  artifactId: "beam-vendor-grpc-1_48_1",
+  groupId: group,
+  version: version,
+)
diff --git a/website/www/site/assets/js/copy-to-clipboard.js b/website/www/site/assets/js/copy-to-clipboard.js
index 4555f7e..8f58a75 100644
--- a/website/www/site/assets/js/copy-to-clipboard.js
+++ b/website/www/site/assets/js/copy-to-clipboard.js
@@ -29,7 +29,7 @@
     }
     let code = document.querySelectorAll('pre'),
     copyIcon = document.createElement('span');
-    copyIcon.innerHTML = '<a class="just-copy" type="button" data-bs-toggle="tooltip" data-bs-placement="bottom" title="Copy to clipboard"><img src="/images/copy-icon.svg"/></a>';
+    copyIcon.innerHTML = '<a class="just-copy" type="button" data-bs-toggle="tooltip" data-bs-placement="bottom" title="Copy to clipboard"><img src="{{ "images/copy-icon.svg" | absURL }}"/></a>';
 
     code.forEach((hl) => {
         if( !hl.parentElement.classList.contains('code-snippet') && !hl.parentElement.classList.contains('highlight')) {
diff --git a/website/www/site/assets/js/page-nav.js b/website/www/site/assets/js/page-nav.js
index 8d53fc3..3487421 100644
--- a/website/www/site/assets/js/page-nav.js
+++ b/website/www/site/assets/js/page-nav.js
@@ -82,7 +82,7 @@
           offset: $("." + idMainContainer).offset()
         };
 
-        if($(window).width() > CONST.DESKTOP_BREAKPOINT) {
+        if(window.innerWidth > CONST.DESKTOP_BREAKPOINT) {
           $("." + idPageNav).css({
             left: mainContainerData.offset.left +  mainContainerData.width - CONST.PAGENAV_WIDTH
           });
@@ -105,7 +105,7 @@
           var items = $(".page-nav > #TableOfContents li");
           var itemTags = $('ul', items).siblings('a');
           var img = document.createElement("img");
-          img.src = "/images/arrow-expandable.svg";
+          img.src = "{{ "images/arrow-expandable.svg" | absURL }}";
           img.classList="rotate";
 
           $(itemTags).prepend(img);
diff --git a/website/www/site/assets/scss/_banner.sass b/website/www/site/assets/scss/_banner.sass
deleted file mode 100644
index a908e9e..0000000
--- a/website/www/site/assets/scss/_banner.sass
+++ /dev/null
@@ -1,43 +0,0 @@
-/*!
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-.banner-container
-  height: 96px
-  background-color: #000954
-  margin-top: 0
-  padding-top: 5px
-  display: flex
-  justify-content: center
-  align-items: center
-  @media (max-width: $ak-breakpoint-lg)
-    height: 176px
-    margin-top: 64px
-    padding-top: 0
-
-  .banner-img-desktop
-    display: block
-    width: 100%
-    @media (max-width: $ak-breakpoint-lg)
-      display: none
-
-  .banner-img-mobile
-    display: none
-    width: auto
-    @media (max-width: $ak-breakpoint-lg)
-      display: block
-    @media (max-width: $ak-breakpoint-sm)
-      width: 100%
diff --git a/website/www/site/assets/scss/_case_study.scss b/website/www/site/assets/scss/_case_study.scss
index 6458927..69d40f1 100644
--- a/website/www/site/assets/scss/_case_study.scss
+++ b/website/www/site/assets/scss/_case_study.scss
@@ -215,7 +215,7 @@
     &:before {
       content: ' ';
       top: 16px;
-      background-image: url("/images/open-quote.svg");
+      background-image: url('{{ "images/open-quote.svg" | absURL }}');
       height: 37px;
       width: 46px;
       position: absolute;
@@ -225,7 +225,7 @@
 
     &:after {
       content: ' ';
-      background-image: url("/images/close-quote.svg");
+      background-image: url('{{ "images/close-quote.svg" | absURL }}');
       height: 37px;
       width: 46px;
       right: 12px;
diff --git a/website/www/site/assets/scss/_hero.scss b/website/www/site/assets/scss/_hero.scss
index d71d924..a8f6d1a 100644
--- a/website/www/site/assets/scss/_hero.scss
+++ b/website/www/site/assets/scss/_hero.scss
@@ -24,8 +24,7 @@
    width: 100%;
    height: 100%;
    display: inherit;
-   //usually -30px, change for 0 until summit is over and banner is removed
-   margin-top: 0;
+   margin-top: -30px;
    min-height: 361px;
    .hero-content {
      position: absolute;
diff --git a/website/www/site/assets/scss/_navbar-desktop.scss b/website/www/site/assets/scss/_navbar-desktop.scss
index ce21061..4df67c9 100644
--- a/website/www/site/assets/scss/_navbar-desktop.scss
+++ b/website/www/site/assets/scss/_navbar-desktop.scss
@@ -33,8 +33,7 @@
    height: 96px;
    width: 100%;
    align-items: center;
-   //usually 30px, change for 0 until summit is over and banner is removed
-   margin-bottom: 0;
+   margin-bottom: 30px;
    box-shadow: 0 4px 16px 0 rgba(0, 0, 0, 0.06);
    background-color: $color-white;
    z-index: 10000; // just to make sure that navbar always on top of other elements
diff --git a/website/www/site/assets/scss/main.scss b/website/www/site/assets/scss/main.scss
index e3510b5..d14ab45 100644
--- a/website/www/site/assets/scss/main.scss
+++ b/website/www/site/assets/scss/main.scss
@@ -54,6 +54,5 @@
 @import "_search.scss";
 @import "_powered_by.scss";
 @import "_case_study.scss";
-@import "_banner.sass";
 @import "_pipelines.scss";
 @import "_about.sass";
diff --git a/website/www/site/content/en/documentation/resources/learning-resources.md b/website/www/site/content/en/documentation/resources/learning-resources.md
index 4be5b50..6c681ce 100644
--- a/website/www/site/content/en/documentation/resources/learning-resources.md
+++ b/website/www/site/content/en/documentation/resources/learning-resources.md
@@ -66,6 +66,7 @@
 
 ### Machine Learning
 
+*   **[Machine Learning with Python using the RunInference API](/documentation/sdks/python-machine-learning/)** - Use Apache Beam with the RunInference API to run local and remote inference with machine learning (ML) models in batch and streaming pipelines. Follow the [RunInference API pipeline examples](https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples/inference) to do image classification, image segmentation, language modeling, and MNIST digit classification. See examples of [RunInference transforms](/documentation/transforms/python/elementwise/runinference/).
 *   **[Machine Learning Preprocessing and Prediction](https://cloud.google.com/dataflow/examples/molecules-walkthrough)** - Predict the molecular energy from data stored in the [Spatial Data File](https://en.wikipedia.org/wiki/Spatial_Data_File) (SDF) format. Train a [TensorFlow](https://www.tensorflow.org/) model with [tf.Transform](https://github.com/tensorflow/transform) for preprocessing in Python. This also shows how to create batch and streaming prediction pipelines in Apache Beam.
 *   **[Machine Learning Preprocessing](https://cloud.google.com/blog/products/ai-machine-learning/pre-processing-tensorflow-pipelines-tftransform-google-cloud)** - Find the optimal parameter settings for simulated physical machines like a bottle filler or cookie machine. The goal of each simulated machine is to have the same input/output of the actual machine, making it a "digital twin". This uses [tf.Transform](https://github.com/tensorflow/transform) for preprocessing.
 
diff --git a/website/www/site/content/en/documentation/runners/spark.md b/website/www/site/content/en/documentation/runners/spark.md
index abc1031..b5caeac 100644
--- a/website/www/site/content/en/documentation/runners/spark.md
+++ b/website/www/site/content/en/documentation/runners/spark.md
@@ -443,7 +443,11 @@
 Spark also has a history server to [view after the fact](https://spark.apache.org/docs/latest/monitoring.html#viewing-after-the-fact).
 {{< paragraph class="language-java" >}}
 Metrics are also available via [REST API](https://spark.apache.org/docs/latest/monitoring.html#rest-api).
-Spark provides a [metrics system](https://spark.apache.org/docs/latest/monitoring.html#metrics) that allows reporting Spark metrics to a variety of Sinks. The Spark runner reports user-defined Beam Aggregators using this same metrics system and currently supports <code>GraphiteSink</code> and <code>CSVSink</code>, and providing support for additional Sinks supported by Spark is easy and straight-forward.
+Spark provides a [metrics system](https://spark.apache.org/docs/latest/monitoring.html#metrics) that allows reporting Spark metrics to a variety of Sinks.
+The Spark runner reports user-defined Beam Aggregators using this same metrics system and currently supports
+[GraphiteSink](https://beam.apache.org/releases/javadoc/{{< param release_latest >}}/org/apache/beam/runners/spark/metrics/sink/GraphiteSink.html)
+and [CSVSink](https://beam.apache.org/releases/javadoc/{{< param release_latest >}}/org/apache/beam/runners/spark/metrics/sink/CsvSink.html).
+Providing support for additional Sinks supported by Spark is straightforward.
 {{< /paragraph >}}
 {{< paragraph class="language-py" >}}Spark metrics are not yet supported on the portable runner.{{< /paragraph >}}
 
diff --git a/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md b/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md
index 93243be..f96fece 100644
--- a/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md
+++ b/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md
@@ -149,8 +149,7 @@
 ./gradlew :examples:multi-language:pythonDataframeWordCount --args=" \
 --runner=DataflowRunner \
 --output=gs://${OUTPUT_BUCKET}/count \
---region=${GCP_REGION} \
---experiments=use_runner_v2"
+--region=${GCP_REGION}"
 ```
 
 The pipeline outputs a file with the results to
diff --git a/website/www/site/content/en/documentation/sdks/python-machine-learning.md b/website/www/site/content/en/documentation/sdks/python-machine-learning.md
index 1235a71..6b88482 100644
--- a/website/www/site/content/en/documentation/sdks/python-machine-learning.md
+++ b/website/www/site/content/en/documentation/sdks/python-machine-learning.md
@@ -18,6 +18,8 @@
 
 # Machine Learning
 
+{{< button-pydoc path="apache_beam.ml.inference" class="RunInference" >}}
+
 You can use Apache Beam with the RunInference API to use machine learning (ML) models to do local and remote inference with batch and streaming pipelines. Starting with Apache Beam 2.40.0, PyTorch and Scikit-learn frameworks are supported. You can create multiple types of transforms using the RunInference API: the API takes multiple types of setup parameters from model handlers, and the parameter type determines the model implementation.
 
 ## Why use the RunInference API?
@@ -161,6 +163,10 @@
 For detailed instructions explaining how to build and run a pipeline that uses ML models, see the
 [Example RunInference API pipelines](https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples/inference) on GitHub.
 
+## Beam Java SDK support
+
+The RunInference API is available in the Beam Java SDK versions 2.41.0 and later through the Apache Beam [Multi-language Pipelines framework](https://beam.apache.org/documentation/programming-guide/#multi-language-pipelines). See the [Java wrapper transform](https://github.com/apache/beam/blob/master/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/transforms/RunInference.java) and the [example pipelines](https://github.com/apache/beam/blob/master/sdks/java/extensions/python/src/test/java/org/apache/beam/sdk/extensions/python/transforms/RunInferenceTransformTest.java).
+
 ## Troubleshooting
 
 If you run into problems with your pipeline or job, this section lists issues that you might encounter and provides suggestions for how to fix them.
@@ -198,4 +204,5 @@
 
 * [RunInference transforms](/documentation/transforms/python/elementwise/runinference)
 * [RunInference API pipeline examples](https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples/inference)
-* [apache_beam.ml.inference package](/releases/pydoc/current/apache_beam.ml.inference.html#apache_beam.ml.inference.RunInference)
\ No newline at end of file
+
+{{< button-pydoc path="apache_beam.ml.inference" class="RunInference" >}}
\ No newline at end of file
diff --git a/website/www/site/content/en/documentation/sdks/python-multi-language-pipelines.md b/website/www/site/content/en/documentation/sdks/python-multi-language-pipelines.md
index 668dcd8..332a629 100644
--- a/website/www/site/content/en/documentation/sdks/python-multi-language-pipelines.md
+++ b/website/www/site/content/en/documentation/sdks/python-multi-language-pipelines.md
@@ -205,7 +205,6 @@
 
 gsutil rm gs://$GCS_BUCKET/javaprefix/*
 
-# --experiments=use_runner_v2 is optional for Beam 2.32.0 and later
 python addprefix.py \
     --runner DataflowRunner \
     --temp_location $TEMP_LOCATION \
@@ -213,7 +212,6 @@
     --region $GCP_REGION \
     --job_name $JOB_NAME \
     --num_workers $NUM_WORKERS \
-    --experiments=use_runner_v2 \
     --input "gs://dataflow-samples/shakespeare/kinglear.txt" \
     --output "gs://$GCS_BUCKET/javaprefix/output"
 ```
diff --git a/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md b/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md
index ca91194..bf2e44e 100644
--- a/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md
+++ b/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md
@@ -77,7 +77,7 @@
 
         python setup.py sdist
 
-   See the [sdist documentation](https://docs.python.org/2/distutils/sourcedist.html) for more details on this command.
+   See the [sdist documentation](https://docs.python.org/3/distutils/sourcedist.html) for more details on this command.
 
 ## Multiple File Dependencies
 
diff --git a/website/www/site/layouts/partials/head.html b/website/www/site/layouts/partials/head.html
index 66fc62a..f9687e2 100644
--- a/website/www/site/layouts/partials/head.html
+++ b/website/www/site/layouts/partials/head.html
@@ -21,10 +21,10 @@
 
 {{ $scssMain := "scss/main.scss"}}
 {{ if .Site.IsServer }}
-    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" true) | postCSS }}
+    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" true) | resources.ExecuteAsTemplate "scss/main.css" . | postCSS }}
     <link href="{{ $css.RelPermalink }}" rel="stylesheet">
 {{ else }}
-    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" false) | postCSS | minify | fingerprint }}
+    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" false) | resources.ExecuteAsTemplate "scss/main.css" . | postCSS | minify | fingerprint }}
     <link rel="preload" href="{{ $css.RelPermalink }}" as="style">
     <link href="{{ $css.RelPermalink }}" rel="stylesheet" integrity="{{ $css.Data.integrity }}">
 {{ end }}
@@ -48,13 +48,13 @@
 {{ $sectionNav := resources.Get "js/section-nav.js" | minify | fingerprint }}
 <script type="text/javascript" src="{{ $sectionNav.RelPermalink }}" defer></script>
 
-{{ $pageNav := resources.Get "js/page-nav.js" | minify | fingerprint }}
+{{ $pageNav := resources.Get "js/page-nav.js" | resources.ExecuteAsTemplate "js/page-nav.js" . | minify | fingerprint }}
 <script type="text/javascript" src="{{ $pageNav.RelPermalink }}" defer></script>
 
 {{ $expandableList := resources.Get "js/expandable-list.js" | minify | fingerprint }}
 <script type="text/javascript" src="{{ $expandableList.RelPermalink }}" defer></script>
 
-{{ $copyToClipboard := resources.Get "js/copy-to-clipboard.js" | minify | fingerprint }}
+{{ $copyToClipboard := resources.Get "js/copy-to-clipboard.js" |  resources.ExecuteAsTemplate "js/copy-to-clipboard.js" . |minify | fingerprint }}
 <script type="text/javascript" src="{{ $copyToClipboard.RelPermalink }}" defer></script>
 
 {{ $calendar := resources.Get "js/calendar.js" | minify | fingerprint }}
diff --git a/website/www/site/layouts/partials/head_homepage.html b/website/www/site/layouts/partials/head_homepage.html
index edfecc4..8b991ed 100644
--- a/website/www/site/layouts/partials/head_homepage.html
+++ b/website/www/site/layouts/partials/head_homepage.html
@@ -21,10 +21,10 @@
 
 {{ $scssMain := "scss/main.scss"}}
 {{ if .Site.IsServer }}
-    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" true) | postCSS }}
+    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" true) | resources.ExecuteAsTemplate "scss/main.css" . | postCSS }}
     <link href="{{ $css.RelPermalink }}" rel="stylesheet">
 {{ else }}
-    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" false) | postCSS | minify | fingerprint }}
+    {{ $css := resources.Get $scssMain | toCSS (dict "enableSourceMap" false) | resources.ExecuteAsTemplate "scss/main.css" . | postCSS | minify | fingerprint }}
     <link rel="preload" href="{{ $css.RelPermalink }}" as="style">
     <link href="{{ $css.RelPermalink }}" rel="stylesheet" integrity="{{ $css.Data.integrity }}">
 {{ end }}
@@ -48,7 +48,7 @@
 {{ $expandableList := resources.Get "js/expandable-list.js" | minify | fingerprint }}
 <script type="text/javascript" src="{{ $expandableList.RelPermalink }}" defer></script>
 
-{{ $copyToClipboard := resources.Get "js/copy-to-clipboard.js" | minify | fingerprint }}
+{{ $copyToClipboard := resources.Get "js/copy-to-clipboard.js" |  resources.ExecuteAsTemplate "js/copy-to-clipboard.js" . |minify | fingerprint }}
 <script type="text/javascript" src="{{ $copyToClipboard.RelPermalink }}" defer></script>
 
 {{ $calendar := resources.Get "js/calendar.js" | minify | fingerprint }}
diff --git a/website/www/site/layouts/partials/header.html b/website/www/site/layouts/partials/header.html
index 6c11c42..a900431 100644
--- a/website/www/site/layouts/partials/header.html
+++ b/website/www/site/layouts/partials/header.html
@@ -202,12 +202,6 @@
 </div>
 </div>
 </nav>
-<div class="banner-container">
-  <a href="https://2022.beamsummit.org/">
-    <img class="banner-img-desktop" src="/images/banner_desktop.png" alt="banner-desktop">
-    <img class="banner-img-mobile" src="/images/banner_mobile.png" alt="banner-mobile">
-  </a>
-</div>
 <script>
  function showSearch() {
    addPlaceholder();
diff --git a/website/www/site/static/images/banner_desktop.png b/website/www/site/static/images/banner_desktop.png
deleted file mode 100644
index 61e6e55..0000000
--- a/website/www/site/static/images/banner_desktop.png
+++ /dev/null
Binary files differ
diff --git a/website/www/site/static/images/banner_mobile.png b/website/www/site/static/images/banner_mobile.png
deleted file mode 100644
index db097bb..0000000
--- a/website/www/site/static/images/banner_mobile.png
+++ /dev/null
Binary files differ