Add versioned documentation (#3321)

* Updating Heron UI docs

* Added versioning in docs

* Adding support for Docs Versioning

Signed-off-by: ChethanUK <chethanuk@outlook.com>

* Adding build-site script

Signed-off-by: ChethanUK <chethanuk@outlook.com>

* Update Broken link

* Adding build site in Makefile

* Update versions.json

* Adding tab

* Updating file permission to make it executable
diff --git a/website2/docs/getting-started-local-single-node.md b/website2/docs/getting-started-local-single-node.md
index 6196931..9a23a3e 100644
--- a/website2/docs/getting-started-local-single-node.md
+++ b/website2/docs/getting-started-local-single-node.md
@@ -20,7 +20,7 @@
     under the License.
 -->
 
-> The current version of Heron is **{{% heronVersion %}}**
+> The current version of Heron is {{heron:version}}
 
 
 The easiest way to get started learning Heron is to install the Heron client tools, which are currently available for:
diff --git a/website2/website/Makefile b/website2/website/Makefile
index a4c6ce8..ef7e5ed 100644
--- a/website2/website/Makefile
+++ b/website2/website/Makefile
@@ -6,3 +6,6 @@
 
 python-docs:
 	@scripts/python-doc-gen.sh 0.0.0
+
+buildsite:
+	@scripts/build-site.sh
diff --git a/website2/website/languages.js b/website2/website/languages.js
new file mode 100644
index 0000000..5a3a0cb
--- /dev/null
+++ b/website2/website/languages.js
@@ -0,0 +1,9 @@
+
+const languages = [
+    {
+      enabled: true,
+      name: 'English',
+      tag: 'en',
+    }
+];
+module.exports = languages;
\ No newline at end of file
diff --git a/website2/website/package.json b/website2/website/package.json
index c6a521c..ad11a11 100644
--- a/website2/website/package.json
+++ b/website2/website/package.json
@@ -9,7 +9,10 @@
     "rename-version": "docusaurus-rename-version"
   },
   "devDependencies": {
-    "docusaurus": "^1.10.0"
+    "docusaurus": "^1.10.0",
+    "remarkable-embed": "^0.4.1",
+    "replace-in-file": "^3.4.0",
+    "redoc": "^2.0.0-alpha.30"
   },
   "dependencies": {}
 }
diff --git a/website2/website/pages/en/index.js b/website2/website/pages/en/index.js
index 26c2839..8e164f9 100755
--- a/website2/website/pages/en/index.js
+++ b/website2/website/pages/en/index.js
@@ -67,7 +67,7 @@
         <div className="inner">
           <ProjectTitle siteConfig={siteConfig} />
           <PromoSection>
-            <Button href="/docs/getting-started-local-single-node">Documentation</Button>
+            <Button href="/docs/en/getting-started-local-single-node">Documentation</Button>
             {/* keep as reference for now <Button href={docUrl('doc1.html')}>Github</Button> */}
             <Button href="https://github.com/apache/incubator-heron" target="_blank">Github</Button>
           </PromoSection>
diff --git a/website2/website/pages/en/versions.js b/website2/website/pages/en/versions.js
index 8c4c3c8..b25ac50 100755
--- a/website2/website/pages/en/versions.js
+++ b/website2/website/pages/en/versions.js
@@ -13,11 +13,14 @@
 
 const CWD = process.cwd();
 
+const siteConfig = require(`${CWD}/siteConfig.js`);
+// versions
 const versions = require(`${CWD}/versions.json`);
 
 function Versions(props) {
-  const {config: siteConfig} = props;
-  const latestVersion = versions[0];
+  // const {config: siteConfig} = props;
+  const latestStableVersion = versions[0];
+
   const repoUrl = `https://github.com/${siteConfig.organizationName}/${
     siteConfig.projectName
   }`;
@@ -33,14 +36,16 @@
           <table className="versions">
             <tbody>
               <tr>
-                <th>{latestVersion}</th>
+                <th>{latestStableVersion}</th>
                 <td>
                   {/* You are supposed to change this href where appropriate
                         Example: href="<baseUrl>/docs(/:language)/:id" */}
-                  <a
+                  {/* <a
                     href={`${siteConfig.baseUrl}${siteConfig.docsUrl}/${
                       props.language ? props.language + '/' : ''
-                    }doc1`}>
+                    }doc1`}> */}
+                   <a
+                    href={`${siteConfig.baseUrl}docs/${props.language}/getting-started-local-single-node`}>
                     Documentation
                   </a>
                 </td>
@@ -63,9 +68,7 @@
                   {/* You are supposed to change this href where appropriate
                         Example: href="<baseUrl>/docs(/:language)/next/:id" */}
                   <a
-                    href={`${siteConfig.baseUrl}${siteConfig.docsUrl}/${
-                      props.language ? props.language + '/' : ''
-                    }next/doc1`}>
+                    href={`${siteConfig.baseUrl}docs/${props.language}/next/getting-started-local-single-node`}>
                     Documentation
                   </a>
                 </td>
@@ -81,17 +84,17 @@
           <table className="versions">
             <tbody>
               {versions.map(
+                
+                
                 version =>
-                  version !== latestVersion && (
+                  version !== latestStableVersion && (                  
                     <tr>
                       <th>{version}</th>
                       <td>
                         {/* You are supposed to change this href where appropriate
                         Example: href="<baseUrl>/docs(/:language)/:version/:id" */}
                         <a
-                          href={`${siteConfig.baseUrl}${siteConfig.docsUrl}/${
-                            props.language ? props.language + '/' : ''
-                          }${version}/doc1`}>
+                          href={`${siteConfig.baseUrl}docs/${props.language}/${version}/getting-started-local-single-node`}>
                           Documentation
                         </a>
                       </td>
@@ -115,4 +118,6 @@
   );
 }
 
+Versions.title = 'Versions';
+
 module.exports = Versions;
diff --git a/website2/website/releases.json b/website2/website/releases.json
new file mode 100644
index 0000000..bd3d303
--- /dev/null
+++ b/website2/website/releases.json
@@ -0,0 +1,5 @@
+[
+    "0.20.0",
+    "0.19.0.16",
+    "0.19.0.12"   
+]
\ No newline at end of file
diff --git a/website2/website/scripts/build-site.sh b/website2/website/scripts/build-site.sh
new file mode 100644
index 0000000..f3bb168
--- /dev/null
+++ b/website2/website/scripts/build-site.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+ROOT_DIR=$(git rev-parse --show-toplevel)
+# @TODO
+VERSION=0.20
+
+set -x -e
+
+cd ${ROOT_DIR}/website2/website
+
+yarn
+yarn build
+
+node ./scripts/replace.js
+
+rm -rf ${ROOT_DIR}/generated-site/content
+mkdir -p ${ROOT_DIR}/generated-site/content
+cp -R build/incubator-heron/* ${ROOT_DIR}/generated-site/content
diff --git a/website2/website/scripts/replace.js b/website2/website/scripts/replace.js
new file mode 100644
index 0000000..71ed7ef
--- /dev/null
+++ b/website2/website/scripts/replace.js
@@ -0,0 +1,77 @@
+const replace = require('replace-in-file');
+
+const fs = require('fs')
+
+const CWD = process.cwd()
+const siteConfig = require(`${CWD}/siteConfig.js`);
+const docsDir = `${CWD}/build/${siteConfig.projectName}/docs`
+
+function getVersions() {
+    try {
+      console.log(JSON.parse(require('fs').readFileSync(`${CWD}/versions.json`, 'utf8')))
+      return JSON.parse(require('fs').readFileSync(`${CWD}/versions.json`, 'utf8'));
+    } catch (error) {
+      //console.error(error)
+      console.error('no versions found defaulting to 0.20.0')
+    }
+    return ['0.20.0']
+  }
+
+function doReplace(options) {
+replace(options)
+    .then(changes => {
+    if (options.dry) {
+        console.log('Modified files:');
+        console.log(changes.join('\n'))
+    }
+    })
+    .catch(error => {
+    console.error('Error occurred:', error);
+    });
+}
+
+const versions = getVersions();
+
+const latestVersion = versions[0];
+
+console.log(latestVersion)
+const from = [
+    /{{heron:version_latest}}/g,
+    /{{heron:version}}/g,
+];
+
+const options = {
+    files: [
+      `${docsDir}/*.html`,
+      `${docsDir}/**/*.html`
+    ],
+    ignore: versions.map(v => `${docsDir}/${v}/**/*`), // TODO add next and assets
+    from: from,
+    to: [
+      `${latestVersion}`,
+      `${latestVersion}`,
+    ],
+    dry: false
+  };
+  
+doReplace(options);
+
+// replaces versions
+for (v of versions) {
+    if (v === latestVersion) {
+      continue
+    }
+    const opts = {
+      files: [
+        `${docsDir}/${v}/*.html`,
+        `${docsDir}/${v}/**/*.html`
+      ],
+      from: from,
+      to: [
+        `${latestVersion}`,
+        `${v}`,
+      ],
+      dry: false
+    };
+    doReplace(opts);
+}  
\ No newline at end of file
diff --git a/website2/website/siteConfig.js b/website2/website/siteConfig.js
index 9352be4..daaf172 100644
--- a/website2/website/siteConfig.js
+++ b/website2/website/siteConfig.js
@@ -8,6 +8,52 @@
 // See https://docusaurus.io/docs/site-config for all the possible
 // site configuration options.
 
+// To support embed [variable injection's..]
+// https://www.npmjs.com/package/remarkable-embed
+const {Plugin: Embed} = require('remarkable-embed');
+
+// Our custom remarkable plugin factory.
+const createVariableInjectionPlugin = variables => {
+  // `let` binding used to initialize the `Embed` plugin only once for efficiency.
+  // See `if` statement below.
+  let initializedPlugin;
+
+  const embed = new Embed();
+  embed.register({
+    // Call the render method to process the corresponding variable with
+    // the passed Remarkable instance.
+    // -> the Markdown markup in the variable will be converted to HTML.
+    inject: (key) => {
+      // keyparts must be split BEFORE it is read (was a ReferenceError).
+      // NOTE(review): renderUrl/javadocUrl/renderEndpoint/restApiUrl are not defined in this file — confirm before enabling these branches.
+      const keyparts = key.split("|");
+      // javadoc api: javadoc|<key>
+      if (keyparts[0] == 'javadoc') {
+        return renderUrl(initializedPlugin, javadocUrl, keyparts);
+      }
+      // endpoint api: endpoint|<op>
+      if (keyparts[0] == 'endpoint') {
+          return renderEndpoint(initializedPlugin, restApiUrl + "#", keyparts);
+      }
+
+      return initializedPlugin.render(variables[key])
+    }
+    // inject: key => initializedPlugin.render(variables[key])
+  });
+
+  return (md, options) => {
+    if (!initializedPlugin) {
+      initializedPlugin = {
+        render: md.render.bind(md),
+        hook: embed.hook(md, options)
+      };
+    }
+
+    return initializedPlugin.hook;
+  };
+};
+
+const url = 'https://heronstreaming.io';
 const baseUrl = '/';
 
 // List of projects/orgs using your project for the users page.
@@ -22,18 +68,21 @@
   },
 ];
 
+const siteVariables = {
+};
+
 const siteConfig = {
-  title: '', // Title for your website.
+  title: 'Apache Heron', // Title for your website.
   tagline: 'A realtime, distributed, fault-tolerant stream processing engine',
-  url: 'https://your-docusaurus-test-site.com', // Your website URL
-  baseUrl: '/', // Base URL for your project */
   // For github.io type URLs, you would set the url and baseUrl like:
+  url: url,
+  baseUrl: '/', // Base URL for your project */
   //   url: 'https://facebook.github.io',
   //   baseUrl: '/test-site/',
 
   // Used for publishing and more
-  projectName: '',
-  organizationName: '',
+  projectName: 'incubator-heron',
+  organizationName: 'apache',
   // For top-level user or org sites, the organization is still the same.
   // e.g., for the https://JoelMarcey.github.io site, it would be set like...
   //   organizationName: 'JoelMarcey'
@@ -44,6 +93,9 @@
     {href: '#community', label: 'Community'},
     {blog: true, label: 'Blog'},
     {href: '#apache', label: 'Apache'},
+    // {page: 'download', label: 'Download'},
+    // Drop down for languages
+    // { languages: true }
   ],
 
   // If you have users set above, you add it here:
@@ -94,8 +146,13 @@
   onPageNav: 'separate',
   // No .html extensions for paths.
   cleanUrl: true,
+  scrollToTopOptions: {
+    zIndex: 100,
+  },
 
   // Open Graph and Twitter card images.
+  twitter: true,
+  twitterUsername: 'heronstreaming',
   ogImage: 'img/undraw_online.svg',
   twitterImage: 'img/undraw_tweetstorm.svg',
 
@@ -108,6 +165,10 @@
   // You may provide arbitrary config keys to be used as needed by your
   // template. For example, if you need your repo's URL...
   //   repoUrl: 'https://github.com/facebook/test-site',
+
+  markdownPlugins: [
+    createVariableInjectionPlugin(siteVariables)
+  ],
 };
 
 module.exports = siteConfig;
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-instance.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-instance.md
new file mode 100644
index 0000000..e228bf3
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-instance.md
@@ -0,0 +1,105 @@
+---
+id: version-0.20.0-cluster-config-instance
+title: Heron Instance
+sidebar_label: Heron Instance
+original_id: cluster-config-instance
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can configure the behavior of the [Heron
+Instances](heron-architecture#heron-instance) (HIs) in a
+topology using the parameters below.
+
+## Internal Configuration
+
+These parameters deal with the TCP write and read queue for each instance.
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.internal.bolt.read.queue.capacity` | The queue capacity (number of items) in bolt for buffer packets to read from stream manager | 128
+`heron.instance.internal.bolt.write.queue.capacity` | The queue capacity (number of items) in bolt for buffer packets to write to stream manager | 128
+`heron.instance.internal.spout.read.queue.capacity` | The queue capacity (number of items) in spout for buffer packets to read from stream manager | 1024
+`heron.instance.internal.spout.write.queue.capacity` | The queue capacity (number of items) in spout for buffer packets to write to stream manager | 128
+`heron.instance.internal.metrics.write.queue.capacity` | The queue capacity (number of items) for metrics packets to write to metrics manager | 128
+
+## Network Configuration
+
+You can configure how HIs collect and transmit data in one (but only one) of
+two ways: **time based** or **size based**. If you choose time based, you can
+specify the maximum batch time (in milliseconds) for reading from and writing
+to the HI's socket; if you choose size based, you can specify maximum batch
+sizes (in bytes) instead.
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.network.read.batch.time.ms` | Time based, the maximum batch time in ms for instance to read from stream manager per attempt | 16
+`heron.instance.network.read.batch.size.bytes` | Size based, the maximum batch size in bytes to read from stream manager | 32768
+`heron.instance.network.write.batch.time.ms` | Time based, the maximum batch time in ms for instance to write to stream manager per attempt | 16
+`heron.instance.network.write.batch.size.bytes` | Size based, the maximum batch size in bytes to write to stream manager | 32768
+
+### Other Network Parameters
+
+The following parameters do not need to be set in accordance with a time- or
+size-based system.
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.network.options.socket.send.buffer.size.bytes` | The maximum socket's send buffer size in bytes | 6553600
+`heron.instance.network.options.socket.received.buffer.size.bytes` | The maximum socket's received buffer size in bytes of instance's network options | 8738000
+`heron.instance.reconnect.streammgr.interval.sec` | Interval in seconds to reconnect to the stream manager, including the request timeout in connecting | 5
+`heron.instance.reconnect.streammgr.times` | The maximum number of connection attempts made to the SM before the SM is forcibly restarted | 60
+
+## Metrics Manager Configuration
+
+These parameters deal with how each HI interacts with the topology's [Stream
+Manager](heron-architecture#stream-manager).
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.metrics.system.sample.interval.sec` | The interval, in seconds, at which an instance samples its system metrics, e.g. CPU load. | 10
+`heron.instance.reconnect.metricsmgr.interval.sec` | Interval in seconds to reconnect to the metrics manager, including the request timeout in connecting | 5
+`heron.instance.reconnect.metricsmgr.times` | The maximum number of connection attempts to the MM before the MM is forcibly restarted | 60
+
+## Tuning
+
+These parameters are used to dynamically tune the available sizes in read and
+write queues to maintain high performance while avoiding garbage collection
+issues.
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.tuning.expected.bolt.read.queue.size` | The expected size on read queue in bolt | 5
+`heron.instance.tuning.expected.bolt.write.queue.size` | The expected size on write queue in bolt | 5
+`heron.instance.tuning.expected.spout.read.queue.size` | The expected size on read queue in spout | 512
+`heron.instance.tuning.expected.spout.write.queue.size` | The expected size on write queue in spout | 5
+`heron.instance.tuning.expected.metrics.write.queue.size` | The expected size on metrics write queue | 5
+`heron.instance.tuning.current.sample.weight` | TODO | 0.8
+
+## Other Parameters
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.instance.set.data.tuple.capacity` | The maximum number of data tuples to batch in a `HeronDataTupleSet` protobuf message | 256
+`heron.instance.set.control.tuple.capacity` | The maximum number of control tuples to batch in a `HeronControlTupleSet` protobuf message | 256
+`heron.instance.ack.batch.time.ms` | The maximum time in ms for an spout to do acknowledgement per attempt, the ack batch could also break if there are no more ack tuples to process |128
+`heron.instance.emit.batch.time.ms` | The maximum time in ms for an spout instance to emit tuples per attempt | 16
+`heron.instance.emit.batch.size.bytes` | The maximum batch size in bytes for an spout to emit tuples per attempt | 32768
+`heron.instance.execute.batch.time.ms` | The maximum time in ms for an bolt instance to execute tuples per attempt | 16
+`heron.instance.execute.batch.size.bytes` | The maximum batch size in bytes for an bolt instance to execute tuples per attempt | 32768
+`heron.instance.state.check.interval.sec` | The time interval for an instance to check the state change, for instance, the interval a spout using to check whether activate/deactivate is invoked | 5
+`heron.instance.acknowledgement.nbuckets` | TODO | 10
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-metrics.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-metrics.md
new file mode 100644
index 0000000..3c94a83
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-metrics.md
@@ -0,0 +1,58 @@
+---
+id: version-0.20.0-cluster-config-metrics
+title: Metrics Manager
+sidebar_label: Metrics Manager
+original_id: cluster-config-metrics
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can configure all of the [Metrics
+Managers](heron-architecture#metrics-manager) (MMs) in a topology
+using the parameters below.
+
+## Network Configuration
+
+You can configure how the MM collects and transmits data in one (but only one)
+of two ways: **time based** or **size based**. If you choose time based, you can
+specify the maximum batch time (in milliseconds) for reading from and writing to
+the MM's socket; if you choose size based, you can specify maximum batch sizes
+(in bytes) instead.
+
+### Time-based Configuration
+
+Config | Meaning | Default
+:----- |:------- |:-------
+`heron.metricsmgr.network.read.batch.time.ms` | The maximum batch time in milliseconds for the MM to read from the socket | 16
+`heron.metricsmgr.network.write.batch.time.ms` | The maximum batch time in milliseconds for the MM to write to the socket | 16
+
+### Size-based Configuration
+
+Config | Meaning | Default
+:----- |:------- |:-------
+`heron.metricsmgr.network.read.batch.size.bytes` | Size based, the maximum batch size in bytes to read from socket | 32768
+`heron.metricsmgr.network.write.batch.size.bytes` | Size based, the maximum batch size in bytes to write to socket | 32768
+
+## Buffer Configuration
+
+Each MM instance has a socket buffer for reading and writing metrics data. You
+can set maximum buffer sizes for both send and receive buffers.
+
+Config | Meaning | Default
+:----- |:------- |:-------
+`heron.metricsmgr.network.options.socket.send.buffer.size.bytes` | The maximum socket's send buffer size in bytes | 6553600
+`heron.metricsmgr.network.options.socket.received.buffer.size.bytes` | The maximum socket's received buffer size in bytes of the metrics manager's network options | 8738000
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-overview.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-overview.md
new file mode 100644
index 0000000..a7dfa9d
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-overview.md
@@ -0,0 +1,65 @@
+---
+id: version-0.20.0-cluster-config-overview
+title: Cluster Config Overview
+sidebar_label: Cluster Config Overview
+original_id: cluster-config-overview
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+Heron clusters can be configured at two levels:
+
+1. **The system level** --- System-level configurations apply to the whole
+Heron cluster rather than to any specific component (e.g. logging configurations).
+2. **The component level** --- Component-level configurations enable you to establish 
+default configurations for different components. 
+These configurations are fixed at any stage of the topology's
+[lifecycle](heron-topology-concepts#topology-lifecycle), once the topology
+is deployed.
+
+Neither system- nor component-level configurations can be overridden by topology developers.
+
+All system-level configs and component-level defaults are declared in a
+[YAML](http://www.yaml.org/) config file in `heron/config/src/yaml/conf/{cluster}/heron_internals.yaml`
+in the Heron codebase. You can leave that file as is when [compiling
+Heron](compiling-overview) or modify the values to suit your use
+case.
+
+## The System Level
+
+There are a small handful of system-level configs for Heron. These are detailed
+in [System-level Configuration](cluster-config-system-level).
+
+## The Component Level
+
+There is a wide variety of component-level configurations that you can establish
+as defaults in your Heron cluster. These configurations tend to apply to
+specific components in a topology and are detailed in the docs below:
+
+* [Heron Instance](cluster-config-instance)
+* [Heron Metrics Manager](cluster-config-metrics)
+* [Heron Stream Manager](cluster-config-stream)
+* [Heron Topology Master](cluster-config-tmaster)
+
+### Overriding Heron Cluster Configuration
+
+The Heron configuration applies globally to a cluster. 
+It is discouraged to modify the configuration to suit one topology.
+It is not possible to override the Heron configuration
+for a topology via Heron client or other Heron tools.
+
+More on Heron's CLI tool can be found in [Managing Heron
+Topologies](user-manuals-heron-cli).
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-stream.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-stream.md
new file mode 100644
index 0000000..e047c08
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-stream.md
@@ -0,0 +1,55 @@
+---
+id: version-0.20.0-cluster-config-stream
+title: Stream Manager
+sidebar_label: Stream Manager
+original_id: cluster-config-stream
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can configure the [Stream
+Manager](heron-architecture#stream-manager) (SM) in a
+topology using the parameters below, including how the SM handles [back
+pressure](#back-pressure-parameters).
+
+## Back Pressure Parameters
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.streammgr.network.backpressure.threshold` | The number of times the SM should wait to see a buffer full while enqueueing data before declaring the start of backpressure | `3`
+`heron.streammgr.network.backpressure.highwatermark.mb` | The high water mark on the number of megabytes that can be left outstanding on a connection | `50`
+`heron.streammgr.network.backpressure.lowwatermark.md` | The low water mark on the number of megabytes that can be left outstanding on a connection | `30`
+`heron.streammgr.network.options.maximum.packet.mb` | The maximum packet size, in megabytes, for the SM's network options | `100`
+
+## Timeout Interval
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.streammgr.xormgr.rotatingmap.nbuckets` | TODO | `3`
+
+## Other Parameters
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.streammgr.packet.maximum.size.bytes` | Maximum size (in bytes) of packets sent out from the SM | `102400`
+`heron.streammgr.cache.drain.frequency.ms` | The frequency (in milliseconds) at which the SM's tuple cache is drained | `10`
+`heron.streammgr.cache.drain.size.mb` | The size threshold (in megabytes) at which the SM's tuple cache is drained | `100`
+`heron.streammgr.client.reconnect.interval.sec` | The reconnect interval to other SMs for the SM client (in seconds) | `1`
+`heron.streammgr.client.reconnect.tmaster.interval.sec` | The reconnect interval to the Topology Master for the SM client (in seconds) | `10`
+`heron.streammgr.tmaster.heartbeat.interval.sec` | The interval (in seconds) at which a heartbeat is sent to the Topology Master | `10`
+`heron.streammgr.connection.read.batch.size.mb` | The maximum batch size (in megabytes) at which the SM reads from the socket | `1`
+`heron.streammgr.connection.write.batch.size.mb` | The maximum batch size (in megabytes) to write by the stream manager to the socket | `1`
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-system-level.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-system-level.md
new file mode 100644
index 0000000..3d878ca
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-system-level.md
@@ -0,0 +1,43 @@
+---
+id: version-0.20.0-cluster-config-system-level
+title: System Level Configuration
+sidebar_label: System Level Configuration
+original_id: cluster-config-system-level
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+The parameters in the sections below are set at the system level and thus do not
+apply to any specific component.
+
+## General
+
+Config | Meaning | Default
+:----- |:------- |:-------
+`heron.check.tmaster.location.interval.sec` | The interval, in seconds, after which to check if the topology master location has been fetched or not | 120
+`heron.metrics.export.interval` | The interval, in seconds, at which components export metrics to the topology's Metrics Manager
+
+## Logging
+
+Config | Meaning | Default
+:----- |:------- |:-------
+`heron.logging.directory` | The relative path to the logging directory | `log-files`
+`heron.logging.maximum.size.mb` | The maximum log file size (in megabytes) | 100
+`heron.logging.maximum.files` | The maximum number of log files | 5
+`heron.logging.prune.interval.sec` | The time interval, in seconds, at which Heron prunes log files | 300
+`heron.logging.flush.interval.sec` | The time interval, in seconds, at which Heron flushes log files | 10
+`heron.logging.err.threshold` | The threshold level to log error | 3
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/cluster-config-tmaster.md b/website2/website/versioned_docs/version-0.20.0/cluster-config-tmaster.md
new file mode 100644
index 0000000..34b50e2
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/cluster-config-tmaster.md
@@ -0,0 +1,39 @@
+---
+id: version-0.20.0-cluster-config-tmaster
+title: Topology Master
+sidebar_label: Topology Master
+original_id: cluster-config-tmaster
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can configure the [Topology
+Master](heron-architecture#topology-master) (TM) for a topology
+using the parameters below.
+
+Parameter | Meaning | Default
+:-------- |:------- |:-------
+`heron.tmaster.metrics.collector.maximum.interval.min` | The maximum interval, in minutes, for metrics to be kept in the Topology Master | 180
+`heron.tmaster.establish.retry.times` | The maximum time to retry to establish the Topology Master | 30
+`heron.tmaster.establish.retry.interval.sec` | The interval to retry to establish the Topology Master | 1
+`heron.tmaster.network.master.options.maximum.packet.mb` | The maximum packet size, in megabytes, of the Topology Master's network options for Stream Managers to connect to | 16
+`heron.tmaster.network.controller.options.maximum.packet.mb` | The maximum packet size, in megabytes, of the Topology Master's network options for scheduler to connect to | 1
+`heron.tmaster.network.stats.options.maximum.packet.mb` | The maximum packet size, in megabytes, of the Topology Master's network options for stat queries | 1
+`heron.tmaster.metrics.collector.purge.interval.sec` | The interval, in seconds, at which the Topology Master purges metrics from the socket | 60
+`heron.tmaster.metrics.collector.maximum.exception` | The maximum number of exceptions to be stored in the topology's metrics collector, to prevent potential out-of-memory issues | 256
+`heron.tmaster.metrics.network.bindallinterfaces` | Whether the metrics reporter binds on all interfaces | `False`
+`heron.tmaster.stmgr.state.timeout.sec` | The timeout, in seconds, for the Stream Manager, compared with (current time - last heartbeat time) | 60
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-code-organization.md b/website2/website/versioned_docs/version-0.20.0/compiling-code-organization.md
new file mode 100644
index 0000000..32eb47b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-code-organization.md
@@ -0,0 +1,200 @@
+---
+id: version-0.20.0-compiling-code-organization
+title: Code Organization
+sidebar_label: Code Organization
+original_id: compiling-code-organization
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+This document contains information about the Heron codebase intended primarily
+for developers who want to contribute to Heron. The Heron codebase lives on
+[github]({{% githubMaster %}}).
+
+If you're looking for documentation about developing topologies for a Heron
+cluster, see [Building Topologies](topology-development-topology-api-java) instead.
+
+## Languages
+
+The primary programming languages for Heron are C++, Java, and Python.
+
+* **C++ 11** is used for most of Heron's core components, including the
+[Topology Master](heron-architecture#topology-master), and
+[Stream Manager](heron-architecture#stream-manager).
+
+* **Java 8** is used primarily for Heron's [topology
+API](heron-topology-concepts), and [Heron Instance](heron-architecture#heron-instance).
+It is currently the only language in which topologies can be written. Instructions can be found
+in [Building Topologies](../../developers/java/topologies), while documentation for the Java
+API can be found [here](/api/org/apache/heron/api/topology/package-summary.html). Please note that Heron topologies do not
+require Java 8 and can be written in Java 7 or later.
+
+* **Python 2** (specifically 2.7) is used primarily for Heron's [CLI interface](user-manuals-heron-cli) and UI components such as [Heron UI](user-manuals-heron-ui) and the [Heron Tracker](user-manuals-heron-tracker-runbook).
+
+## Main Tools
+
+* **Build tool** --- Heron uses [Bazel](http://bazel.io/) as its build tool.
+Information on setting up and using Bazel for Heron can be found in [Compiling Heron](compiling-overview).
+
+* **Inter-component communication** --- Heron uses [Protocol
+Buffers](https://developers.google.com/protocol-buffers/?hl=en) for
+communication between components. Most `.proto` definition files can be found in
+[`heron/proto`]({{% githubMaster %}}/heron/proto).
+
+* **Cluster coordination** --- Heron relies heavily on ZooKeeper for cluster
+coordination for distributed deployment, be it for [Aurora](schedulers-aurora-cluster) or for a [custom
+scheduler](extending-heron-scheduler) that you build. More information on ZooKeeper
+components in the codebase can be found in the [State
+Management](#state-management) section below.
+
+## Common Utilities
+
+The [`heron/common`]({{% githubMaster %}}/heron/common) contains a variety of
+utilities for each of Heron's languages, including useful constants, file
+utilities, networking interfaces, and more.
+
+## Cluster Scheduling
+
+Heron supports two cluster schedulers out of the box:
+[Aurora](schedulers-aurora-cluster) and a [local
+scheduler](schedulers-local). The Java code for each of those
+schedulers can be found in [`heron/schedulers`]({{% githubMaster %}}/heron/schedulers)
+, while the underlying scheduler API can be found [here](/api/org/apache/heron/spi/scheduler/package-summary.html)
+
+Info on custom schedulers can be found in [Implementing a Custom
+Scheduler](extending-heron-scheduler); info on the currently available schedulers
+can be found in [Deploying Heron on
+Aurora](schedulers-aurora-cluster) and [Local
+Deployment](schedulers-local).
+
+## State Management
+
+The parts of Heron's codebase related to
+[ZooKeeper](http://zookeeper.apache.org/) are mostly contained in
+[`heron/state`]({{% githubMaster %}}/heron/state). There are ZooKeeper-facing
+interfaces for [C++]({{% githubMaster %}}/heron/state/src/cpp),
+[Java]({{% githubMaster %}}/heron/state/src/java), and
+[Python]({{% githubMaster %}}/heron/state/src/python) that are used in a variety of
+Heron components.
+
+## Topology Components
+
+### Topology Master
+
+The C++ code for Heron's [Topology
+Master](heron-architecture#topology-master) can be
+found in [`heron/tmaster`]({{% githubMaster %}}/heron/tmaster).
+
+### Stream Manager
+
+The C++ code for Heron's [Stream
+Manager](heron-architecture#stream-manager) can be found in
+[`heron/stmgr`]({{% githubMaster %}}/heron/stmgr).
+
+### Heron Instance
+
+The Java code for [Heron
+instances](heron-architecture#heron-instance) can be found in
+[`heron/instance`]({{% githubMaster %}}/heron/instance).
+
+### Metrics Manager
+
+The Java code for Heron's [Metrics
+Manager](heron-architecture#metrics-manager) can be found in
+[`heron/metricsmgr`]({{% githubMaster %}}/heron/metricsmgr).
+
+If you'd like to implement your own custom metrics handler (known as a **metrics
+sink**), see [Implementing a Custom Metrics Sink](extending-heron-metric-sink).
+
+## Developer APIs
+
+### Topology API
+
+Heron's API for writing topologies is written in Java. The code for this API can
+be found in [`heron/api`]({{% githubMaster %}}/heron/api).
+
+Documentation for writing topologies can be found in [Building
+Topologies](topology-development-topology-api-java), while API documentation can be found
+[here](/api/org/apache/heron/api/topology/package-summary.html).
+
+### Simulator
+
+Heron enables you to run topologies in [`Simulator`](guides-simulator-mode)
+for debugging purposes.
+
+The Java API for simulator can be found in
+[`heron/simulator`](/api/org/apache/heron/simulator/package-summary.html).
+
+### Example Topologies
+
+Heron's codebase includes a wide variety of example
+[topologies](heron-topology-concepts) built using Heron's topology API for
+Java. Those examples can be found in
+[`heron/examples`]({{% githubMaster %}}/heron/examples).
+
+## User Interface Components
+
+### Heron CLI
+
+Heron has a tool called `heron` that is used to both provide a CLI interface
+for [managing topologies](user-manuals-heron-cli) and to perform much of
+the heavy lifting behind assembling physical topologies in your cluster.
+The Python code for `heron` can be found in
+[`heron/tools/cli`]({{% githubMaster %}}/heron/tools/cli).
+
+Sample configurations for different Heron schedulers
+
+* [Local scheduler](schedulers-local) config can be found in [`heron/config/src/yaml/conf/local`]({{% githubMaster %}}/heron/config/src/yaml/conf/local),
+* [Aurora scheduler](schedulers-aurora-cluster) config can be found in [`heron/config/src/yaml/conf/aurora`]({{% githubMaster %}}/heron/config/src/yaml/conf/aurora).
+
+### Heron Tracker
+
+The Python code for the [Heron Tracker](user-manuals-heron-tracker-runbook) can be
+found in [`heron/tools/tracker`]({{% githubMaster %}}/heron/tools/tracker).
+
+The Tracker is a web server written in Python. It relies on the
+[Tornado](http://www.tornadoweb.org/en/stable/) framework. You can add new HTTP
+routes to the Tracker in
+[`main.py`]({{% githubMaster %}}/heron/tools/tracker/src/python/main.py) and
+corresponding handlers in the
+[`handlers`]({{% githubMaster %}}/heron/tools/tracker/src/python/handlers) directory.
+
+### Heron UI
+
+The Python code for the [Heron UI](user-manuals-heron-ui) can be found in
+[`heron/tools/ui`]({{% githubMaster %}}/heron/tools/ui).
+
+Like Heron Tracker, Heron UI is a web server written in Python that relies on
+the [Tornado](http://www.tornadoweb.org/en/stable/) framework. You can add new
+HTTP routes to Heron UI in
+[`main.py`]({{% githubMaster %}}/heron/web/source/python/main.py) and corresponding
+handlers in the [`handlers`]({{% githubMaster %}}/heron/web/source/python/handlers)
+directory.
+
+### Heron Shell
+
+The Python code for the [Heron Shell](user-manuals-heron-shell) can be
+found in [`heron/shell`]({{% githubMaster %}}/heron/shell). The HTTP handlers and
+web server are defined in
+[`main.py`]({{% githubMaster %}}/heron/shell/src/python/main.py) while the HTML,
+JavaScript, CSS, and images for the web UI can be found in the
+[`assets`]({{% githubMaster %}}/heron/shell/assets) directory.
+
+## Tests
+
+There are a wide variety of tests for Heron that are scattered throughout the
+codebase. For more info see [Testing Heron](compiling-running-tests).
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-docker.md b/website2/website/versioned_docs/version-0.20.0/compiling-docker.md
new file mode 100644
index 0000000..552f739
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-docker.md
@@ -0,0 +1,203 @@
+---
+id: version-0.20.0-compiling-docker
+title: Compiling With Docker
+sidebar_label: Compiling With Docker
+original_id: compiling-docker
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+For developing Heron, you will need to compile it for the environment that you
+want to use it in. If you'd like to use Docker to create that build environment,
+Heron provides a convenient script to make that process easier.
+
+Currently, only Ubuntu 14.04, Ubuntu 15.10, and CentOS 7 are supported, but if you
+need another platform there are instructions for adding new ones
+[below](#contributing-new-environments).
+
+### Requirements
+
+* [Docker](https://docs.docker.com)
+
+### Running Docker in a Virtual Machine
+
+If you are running Docker in a virtual machine (VM), it is recommended that you
+adjust your settings to help speed up the build. To do this, open
+[VirtualBox](https://www.virtualbox.org/wiki/Downloads) and go to the container
+in which Docker is running (usually "default" or whatever name you used to
+create the VM), click on the VM, and then click on **Settings**.
+
+**Note**: You will need to stop the VM before modifying these settings.
+
+![VirtualBox Processors](assets/virtual-box-processors.png)
+![VirtualBox Memory](assets/virtual-box-memory.png)
+
+## Building Heron
+
+Heron provides a `build-artifacts.sh` script for Docker located in the
+`docker` folder. To run that script:
+
+```bash
+$ cd /path/to/heron/repo
+$ docker/build-artifacts.sh
+```
+
+Running the script by itself will display usage information:
+
+```
+Usage: docker/build-artifacts.sh <platform> <version_string> [source-tarball] <output-directory>
+
+Platforms Supported: darwin, ubuntu14.04, ubuntu15.10, centos7
+
+Example:
+  ./build-artifacts.sh ubuntu14.04 0.12.0 .
+
+NOTE: If running on OSX, the output directory will need to
+      be under /Users so virtualbox has access to it.
+```
+
+The following arguments are required:
+
+* `platform` --- Currently, this can be one of: `ubuntu14.04`, `centos7`. You
+  can add other platforms using the [instructions
+  below](#contributing-new-environments).
+* `version-string` --- The Heron release for which you'd like to build
+  artifacts.
+* `output-directory` --- The directory in which you'd like the release to be
+  built.
+
+Here's an example usage:
+
+```bash
+$ docker/scripts/build-artifacts.sh ubuntu14.04 0.12.0 ~/heron-release
+```
+
+This will build a Docker container specific to Ubuntu 14.04, create a source
+tarball of the Heron repository, run a full release build of Heron, and then
+copy the artifacts into the `~/heron-release` directory.
+
+Optionally, you can also include a tarball of the Heron source if you have one.
+By default, the script will create a tarball of the current source in the Heron
+repo and use that to build the artifacts.
+
+**Note**: If you are running on Mac OS X, Docker must be run inside a VM.
+Therefore, you must make sure that both the source tarball and destination
+directory are somewhere under your home directory. For example, you cannot
+output the Heron artifacts to `/tmp` because `/tmp` refers to the directory
+inside the VM, not on the host machine. Your home directory, however, is
+automatically linked in to the VM and can be accessed normally.
+
+After the build has completed, you can go to your output directory and see all
+of the generated artifacts:
+
+```bash
+$ ls ~/heron-release
+heron-0.12.0-ubuntu14.04.tar
+heron-0.12.0-ubuntu14.04.tar.gz
+heron-core-0.12.0-ubuntu14.04.tar.gz
+heron-install-0.12.0-ubuntu14.04.sh
+heron-layer-0.12.0-ubuntu14.04.tar
+heron-tools-0.12.0-ubuntu14.04.tar.gz
+```
+
+## Contributing New Environments
+
+You'll notice that there are multiple
+[Dockerfiles](https://docs.docker.com/engine/reference/builder/) in the `docker`
+directory of Heron's source code, one for each of the currently supported
+platforms.
+
+To add support for a new platform, add a new `Dockerfile` to that directory and
+append the name of the platform to the name of the file. If you'd like to add
+support for Debian 8, for example, add a file named `Dockerfile.debian8`. Once
+you've done that, follow the instructions in the [Docker
+documentation](https://docs.docker.com/engine/articles/dockerfile_best-practices/).
+
+You should make sure that your `Dockerfile` specifies *at least* all of the
+following:
+
+### Step 1 --- The OS being used in a [`FROM`](https://docs.docker.com/engine/reference/builder/#from) statement.
+
+Here's an example:
+
+```dockerfile
+FROM centos:centos7
+```
+
+### Step 2 --- A `TARGET_PLATFORM` environment variable using the [`ENV`](https://docs.docker.com/engine/reference/builder/#env) instruction.
+
+Here's an example:
+
+```dockerfile
+ENV TARGET_PLATFORM centos
+```
+
+### Step 3 --- A general dependency installation script using a [`RUN`](https://docs.docker.com/engine/reference/builder/#run) instruction.
+
+Here's an example:
+
+```dockerfile
+RUN apt-get update && apt-get -y install \
+         automake \
+         build-essential \
+         cmake \
+         curl \
+         libssl-dev \
+         git \
+         libtool \
+         libunwind8 \
+         libunwind-setjmp0-dev \
+         python \
+         python2.7-dev \
+         python-software-properties \
+         software-properties-common \
+         python-setuptools \
+         unzip \
+         wget
+```
+
+### Step 4 --- An installation script for Java 8 and a `JAVA_HOME` environment variable
+
+Here's an example:
+
+```dockerfile
+RUN \
+     echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
+     add-apt-repository -y ppa:webupd8team/java && \
+     apt-get update && \
+     apt-get install -y oracle-java8-installer && \
+     rm -rf /var/lib/apt/lists/* && \
+     rm -rf /var/cache/oracle-jdk8-installer
+
+ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
+```
+
+#### Step 5 - An installation script for [Bazel](http://bazel.io/) version {{% bazelVersion %}} or above.
+Here's an example:
+
+```dockerfile
+RUN wget -O /tmp/bazel.sh https://github.com/bazelbuild/bazel/releases/download/0.23.2/bazel-0.23.2-installer-linux-x86_64.sh \
+         && chmod +x /tmp/bazel.sh \
+         && /tmp/bazel.sh
+```
+
+### Step 6 --- Add the `bazelrc` configuration file for Bazel and the `compile.sh` script (from the `docker` folder) that compiles Heron
+
+```dockerfile
+ADD bazelrc /root/.bazelrc
+ADD compile.sh /compile.sh
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-linux.md b/website2/website/versioned_docs/version-0.20.0/compiling-linux.md
new file mode 100644
index 0000000..eac1e71
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-linux.md
@@ -0,0 +1,240 @@
+---
+id: version-0.20.0-compiling-linux
+title: Compiling on Linux
+sidebar_label: Compiling on Linux
+original_id: compiling-linux
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron can currently be built on the following Linux platforms:
+
+* [Ubuntu 14.04](#building-on-ubuntu-14.04)
+* [CentOS 7](#building-on-centos-7)
+
+## Building on Ubuntu 14.04
+
+To build Heron on a fresh Ubuntu 14.04 installation:
+
+### Step 1 --- Update Ubuntu
+
+```bash
+$ sudo apt-get update -y
+$ sudo apt-get upgrade -y
+```
+
+### Step 2 --- Install required libraries
+
+```bash
+$ sudo apt-get install git build-essential automake cmake libtool-bin zip \
+  libunwind-setjmp0-dev zlib1g-dev unzip pkg-config python-setuptools -y
+```
+
+#### Step 3 --- Set the following environment variables
+
+```bash
+export CC=/usr/bin/gcc
+export CCX=/usr/bin/g++
+```
+
+### Step 4 --- Install JDK 8 and set JAVA_HOME
+
+```bash
+$ sudo add-apt-repository ppa:webupd8team/java
+$ sudo apt-get update -y
+$ sudo apt-get install oracle-java8-installer -y
+$ export JAVA_HOME="/usr/lib/jvm/java-8-oracle"
+```
+
+#### Step 5 - Install Bazel {{% bazelVersion %}}
+
+```bash
+wget -O /tmp/bazel.sh https://github.com/bazelbuild/bazel/releases/download/0.23.2/bazel-0.23.2-installer-linux-x86_64.sh
+chmod +x /tmp/bazel.sh
+/tmp/bazel.sh --user
+```
+
+Make sure to download the appropriate version of Bazel (currently {{%
+bazelVersion %}}).
+
+### Step 6 --- Install python development tools
+```bash
+$ sudo apt-get install  python-dev python-pip
+```
+
+### Step 7 --- Make sure the Bazel executable is in your `PATH`
+
+```bash
+$ export PATH="$PATH:$HOME/bin"
+```
+
+### Step 8 --- Fetch the latest version of Heron's source code
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+```
+
+### Step 9 --- Configure Heron for building with Bazel
+
+```bash
+$ ./bazel_configure.py
+```
+
+### Step 10 --- Build the project
+
+```bash
+$ bazel build --config=ubuntu heron/...  
+```
+
+### Step 11 --- Build the packages
+
+```bash
+$ bazel build --config=ubuntu scripts/packages:binpkgs  
+$ bazel build --config=ubuntu scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
+
+## Manually Installing Libraries
+
+If you encounter errors with [libunwind](http://www.nongnu.org/libunwind), [libtool](https://www.gnu.org/software/libtool), or
+[gperftools](https://github.com/gperftools/gperftools/releases), we recommend
+installing them manually.
+
+### Compiling and installing libtool
+
+```bash
+$ wget http://ftpmirror.gnu.org/libtool/libtool-2.4.6.tar.gz
+$ tar -xvf libtool-2.4.6.tar.gz
+$ cd libtool-2.4.6
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Compiling and installing libunwind
+
+```bash
+$ wget http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz
+$ tar -xvf libunwind-1.1.tar.gz
+$ cd libunwind-1.1
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Compiling and installing gperftools
+
+```bash
+$ wget https://github.com/gperftools/gperftools/releases/download/gperftools-2.5/gperftools-2.5.tar.gz
+$ tar -xvf gperftools-2.5.tar.gz
+$ cd gperftools-2.5
+$ ./configure
+$ make
+$ sudo make install
+```
+
+## Building on CentOS 7
+
+To build Heron on a fresh CentOS 7 installation:
+
+### Step 1 --- Install the required dependencies
+
+```bash
+$ sudo yum install gcc gcc-c++ kernel-devel wget unzip zlib-devel zip git automake cmake patch libtool -y
+```
+
+### Step 2 --- Install libunwind from source
+
+```bash
+$ wget http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz
+$ tar xvf libunwind-1.1.tar.gz
+$ cd libunwind-1.1
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Step 3 --- Set the following environment variables
+
+```bash
+$ export CC=/usr/bin/gcc
+$ export CCX=/usr/bin/g++
+```
+
+### Step 4 --- Install JDK 8
+
+```bash
+$ cd /opt/
+$ sudo wget --no-cookies --no-check-certificate \
+  --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
+  "http://download.oracle.com/otn-pub/java/jdk/8u91-b14/jdk-8u91-linux-x64.tar.gz"
+$ sudo tar xzf jdk-8u91-linux-x64.tar.gz
+```
+
+Use `alternatives` to configure the Java version:
+
+```bash
+$ cd /opt/jdk1.8.0_91/
+$ sudo alternatives --install /usr/bin/java java /opt/jdk1.8.0_91/bin/java 2
+$ sudo alternatives --config java
+```
+
+Set the `javac` and `jar` commands:
+
+```bash
+$ sudo alternatives --install /usr/bin/jar jar /opt/jdk1.8.0_91/bin/jar 2
+$ sudo alternatives --install /usr/bin/javac javac /opt/jdk1.8.0_91/bin/javac 2
+$ sudo alternatives --set jar /opt/jdk1.8.0_91/bin/jar
+$ sudo alternatives --set javac /opt/jdk1.8.0_91/bin/javac
+```
+
+Export Java-related environment variables:
+
+```bash
+export JAVA_HOME=/opt/jdk1.8.0_91
+export JRE_HOME=/opt/jdk1.8.0_91/jre
+export PATH=$PATH:/opt/jdk1.8.0_91/bin:/opt/jdk1.8.0_91/jre/bin
+```
+
+#### Step 5 - Install Bazel {{% bazelVersion %}}
+
+```bash
+wget -O /tmp/bazel.sh https://github.com/bazelbuild/bazel/releases/download/0.23.2/bazel-0.23.2-installer-linux-x86_64.sh
+chmod +x /tmp/bazel.sh
+/tmp/bazel.sh --user
+```
+
+Make sure to download the appropriate version of Bazel (currently {{%
+bazelVersion %}}).
+
+### Step 6 --- Download Heron and compile it
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+$ ./bazel_configure.py
+$ bazel build --config=centos heron/...
+```
+
+### Step 7 --- Build the binary packages
+
+```bash
+$ bazel build --config=centos scripts/packages:binpkgs
+$ bazel build --config=centos scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-osx.md b/website2/website/versioned_docs/version-0.20.0/compiling-osx.md
new file mode 100644
index 0000000..1bbcc85
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-osx.md
@@ -0,0 +1,81 @@
+---
+id: version-0.20.0-compiling-osx
+title: Compiling on OS X
+sidebar_label: Compiling on OS X
+original_id: compiling-osx
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+This is a step-by-step guide to building Heron on Mac OS X (versions 10.10 and
+  10.11).
+
+### Step 1 --- Install Homebrew
+
+If [Homebrew](http://brew.sh/) isn't yet installed on your system, you can
+install it using this one-liner:
+
+```bash
+$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+```
+
+### Step 2 --- Install bazel and other required libraries
+
+```bash
+brew install bazel
+brew install automake
+brew install cmake
+brew install libtool
+```
+
+### Step 3 --- Set the following environment variables
+
+```bash
+$ export CC=/usr/bin/clang
+$ export CXX=/usr/bin/clang++
+$ echo $CC $CXX
+```
+
+### Step 4 --- Fetch the latest version of Heron's source code
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+```
+
+### Step 5 --- Configure Heron for building with Bazel
+
+```bash
+$ ./bazel_configure.py
+```
+
+If this configure script fails with missing dependencies, Homebrew can be used
+to install those dependencies.
+
+### Step 6 --- Build the project
+
+```bash
+$ bazel build --config=darwin heron/...
+```
+
+### Step 7 --- Build the packages
+
+```bash
+$ bazel build --config=darwin scripts/packages:binpkgs
+$ bazel build --config=darwin scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-overview.md b/website2/website/versioned_docs/version-0.20.0/compiling-overview.md
new file mode 100644
index 0000000..41a8b12
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-overview.md
@@ -0,0 +1,142 @@
+---
+id: version-0.20.0-compiling-overview
+title: Compiling Heron
+sidebar_label: Compiling Overview
+original_id: compiling-overview
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron is currently available for [Mac OS X 10.14](compiling-osx),
+[Ubuntu 14.04](compiling-linux), and [CentOS
+7](compiling-linux#building-on-centos-7). This guide describes the basics of the
+Heron build system. For step-by-step build instructions for a specific platform,
+the following guides are available:
+
+* [Building on Linux Platforms](compiling-linux)
+* [Building on Mac OS X](compiling-mac)
+
+Heron can be built either [in its entirety](#building-all-components) or as [individual components](#building-specific-components).
+
+Instructions on running unit tests for Heron can also be found in [Testing Heron](compiling-running-tests).
+
+## Requirements
+
+You must have the following installed to compile Heron:
+
+* [Bazel](http://bazel.io/docs/install.html) = {{bazel:version}}. Later
+  versions might work but have not been tested. See
+  [Installing Bazel](#installing-bazel) below.
+* [Java 8](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html)
+  is required by Bazel and Heron;
+  [topologies](heron-topology-concepts) can be written in Java 7 or above,
+  but Heron jars are required to run with a Java 8 JRE.
+* [Autoconf](http://www.gnu.org/software/autoconf/autoconf.html) >=
+  2.6.3
+* [Automake](https://www.gnu.org/software/automake/) >= 1.11.1
+* [GNU Make](https://www.gnu.org/software/make/) >= 3.81
+* [GNU Libtool](http://www.gnu.org/software/libtool/) >= 2.4.6
+* [gcc/g++](https://gcc.gnu.org/) >= 4.8.1 (Linux platforms)
+* [CMake](https://cmake.org/) >= 2.6.4
+* [Python](https://www.python.org/) >= 2.7 (not including Python 3.x)
+* [Perl](https://www.perl.org/) >= 5.8.8
+
+Export the `CC` and `CXX` environment variables with a path specific to your
+machine:
+
+```bash
+$ export CC=/your-path-to/bin/c_compiler
+$ export CXX=/your-path-to/bin/c++_compiler
+$ echo $CC $CXX
+```
+
+## Installing Bazel
+
+Heron uses the [Bazel](http://bazel.io) build tool. Bazel releases can be found
+[here](https://github.com/bazelbuild/bazel/releases/tag/{{bazel:version}})
+and installation instructions can be found [here](http://bazel.io/docs/install.html).
+
+To ensure that Bazel has been installed, run `bazel version` and check the
+version (listed next to `Build label` in the script's output) to ensure that you
+have Bazel {{bazel:version}}.
+
+## Configuring Bazel
+
+There is a Python script that you can run to configure Bazel on supported
+platforms:
+
+```bash
+$ cd /path/to/heron
+$ ./bazel_configure.py
+```
+
+## Building
+
+### Bazel OS Environments
+
+Bazel builds are specific to a given OS. When building you must specify an
+OS-specific configuration using the `--config` flag. The following OS values
+are supported:
+
+* `darwin` (Mac OS X)
+* `ubuntu` (Ubuntu 14.04)
+* `centos5` (CentOS 5)
+
+For example, on Mac OS X (`darwin`), the following command will build all
+packages:
+
+```bash
+$ bazel build --config=darwin heron/...
+```
+
+Production release packages include additional performance optimizations
+not enabled by default. Enabling these optimizations increases build time.
+To enable production optimizations, include the `opt` flag:
+```bash
+$ bazel build -c opt --config=PLATFORM heron/...
+```
+
+### Building All Components
+
+The Bazel build process can produce either executable install scripts or
+bundled tars. To build executables or tars for all Heron components at once,
+use the following `bazel build` commands, respectively:
+
+```bash
+$ bazel build --config=PLATFORM scripts/packages:binpkgs
+$ bazel build --config=PLATFORM scripts/packages:tarpkgs
+```
+
+Resulting artifacts can be found in subdirectories below the `bazel-bin`
+directory. The `heron-tracker` executable, for example, can be found at
+`bazel-bin/heron/tools/tracker/src/python/heron-tracker`.
+
+### Building Specific Components
+
+As an alternative to building a full release, you can build Heron executables
+for a single Heron component (such as the [Heron
+Tracker](user-manuals-heron-tracker-runbook)) by passing a target to the `bazel
+build` command. For example, the following command would build the Heron Tracker:
+
+```bash
+$ bazel build --config=darwin heron/tools/tracker/src/python:heron-tracker
+```
+
+## Testing Heron
+
+Instructions for running Heron unit tests can be found at [Testing
+Heron](compiling-running-tests).
diff --git a/website2/website/versioned_docs/version-0.20.0/compiling-running-tests.md b/website2/website/versioned_docs/version-0.20.0/compiling-running-tests.md
new file mode 100644
index 0000000..167411c
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/compiling-running-tests.md
@@ -0,0 +1,92 @@
+---
+id: version-0.20.0-compiling-running-tests
+title: Running Tests
+sidebar_label: Running Tests
+original_id: compiling-running-tests
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron uses [Bazel](compiling-overview#installing-bazel) for building
+and running unit tests. Before running tests, first set up your build environment
+as described in [Compiling Heron](compiling-overview).
+
+### Running Unit Tests
+
+The following command will run all tests:
+
+```bash
+$ bazel test --config=darwin heron/...
+```
+
+To run a specific [test
+target](http://bazel.io/docs/test-encyclopedia.html), pass the test target name.
+
+```bash
+$ bazel test --config=darwin heron/statemgrs/tests/java:localfs-statemgr_unittest
+```
+
+### Discovering Unit Test Targets
+
+To see a full listing of all Bazel test targets:
+
+```bash
+$ bazel query 'kind(".*_test rule", ...)'
+```
+
+For **Java** targets only:
+
+```bash
+$ bazel query 'kind("java_test rule", ...)'
+```
+
+For **C++** targets:
+
+```bash
+$ bazel query 'kind("cc_test rule", ...)'
+```
+
+For **Python** targets:
+
+```bash
+$ bazel query 'kind("pex_test rule", ...)'
+```
+
+### Running Integration Tests
+
+Integration tests are divided into two categories:
+
+* Functional integration tests
+
+    These integration tests are designed for testing the functionality of 
+    Heron, such as topologies and groupings.
+    To run the functional integration tests on a Mac OS X, do the following:
+
+    ```bash
+    $ ./scripts/run_integration_test.sh
+    ```
+
+* Failure integration tests
+
+    These integration tests are designed for testing recovery from failure/restart
+    in certain processes, such as Topology Master and Metrics Manager.
+    To run the failure integration tests on a Mac OS X, do the following:
+
+    ```bash
+    $ bazel build --config=darwin integration_test/src/...
+    $ ./bazel-bin/integration_test/src/python/local_test_runner/local-test-runner
+    ```
diff --git a/website2/website/versioned_docs/version-0.20.0/deployment-api-server.md b/website2/website/versioned_docs/version-0.20.0/deployment-api-server.md
new file mode 100644
index 0000000..ff5facb
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/deployment-api-server.md
@@ -0,0 +1,95 @@
+---
+id: version-0.20.0-deployment-api-server
+title: The Heron API Server
+sidebar_label: The Heron API Server
+original_id: deployment-api-server
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+The **Heron API server** is a necessary component of any non-local Heron deployment.
+
+> If you're running Heron [locally](getting-started-local-single-node) on your laptop, you won't need to run the Heron API server separately; its functions will be handled automatically.
+
+## Installation
+
+The Heron API server executable (`heron-apiserver`) is installed automatically when you install the [Heron tools](getting-started-local-single-node#step-1-download-the-heron-tools).
+
+## Running the Heron API server
+
+You can start up the Heron API server using the `heron-apiserver` command. When you do so you'll need to specify two things:
+
+* A [base template](#base-templates) for the scheduler that the API server will be interacting with
+* A [cluster name](#cluster-name) for the Heron cluster
+
+Here's an example:
+
+```bash
+$ heron-apiserver \
+  --base-template mesos \
+  --cluster sandbox
+```
+
+## Base templates
+
+The Heron API server works by accepting incoming commands from the [Heron CLI tool](user-manuals-heron-cli) and interacts with a variety of Heron components, including:
+
+* a scheduler ([Mesos](schedulers-mesos-local-mac), [Aurora](schedulers-aurora-cluster), the [local filesystem](schedulers-local), etc.)
+* an uploader ([Amazon S3](uploaders-amazon-s3), the [local filesystem](uploaders-local-fs), etc.)
+
+When you [install](#installation) the Heron tools, a directory will automatically be created in `~/.herontools/conf` on MacOS and `/usr/local/herontools/conf` on other platforms. That directory contains a number of base templates for all of the currently supported schedulers. Modify the configuration for your scheduler, for example [Mesos](schedulers-mesos-local-mac) using the YAML files in the `mesos` folder, and then select the proper base template using the `--base-template` flag. Here's an example for Mesos:
+
+```bash
+$ heron-apiserver \
+  --base-template mesos \
+  --cluster my-cluster
+```
+
+> For a full guide to Heron configuration, see [Configuring a cluster](cluster-config-overview).
+
+## Cluster name
+
+In addition to specifying a base template when starting up the API server, you also need to specify a name for the cluster that the Heron API server will be serving. Here's an example:
+
+```bash
+$ heron-apiserver \
+  --base-template mesos \
+  --cluster us-west-prod
+```
+
+{{< alert "api-server-cluster-name" >}}
+
+## Other options
+
+In addition to specifying a base template and cluster name, you can also specify:
+
+Flag | Description
+:----|:-----------
+`--config-path` | A non-default path to a base configuration
+`--port` | The port to bind to (the default is 9000)
+
+## Configuration overrides
+
+When you specify a [base template](#base-templates) when running the Heron API server, the server will use whatever configuration is found in the template files. You can override configuration on a per-parameter basis, however, using the `-D` flag. Here's an example:
+
+```bash
+$ heron-apiserver \
+  --base-template aurora \
+  --cluster us-west-prod \
+  -D heron.statemgr.connection.string=zk-1:2181,zk-2:2181,zk-3:2181 \
+  -D heron.class.uploader=com.acme.uploaders.MyCustomUploader
+```
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/deployment-configuration.md b/website2/website/versioned_docs/version-0.20.0/deployment-configuration.md
new file mode 100644
index 0000000..956da9d
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/deployment-configuration.md
@@ -0,0 +1,114 @@
+---
+id: version-0.20.0-deployment-configuration
+title: Configuring a Cluster
+sidebar_label: Configuration
+original_id: deployment-configuration
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+To setup a Heron cluster, you need to configure a few files. Each file configures
+a component of the Heron streaming framework.
+
+* **scheduler.yaml** --- This file specifies the required classes for launcher,
+scheduler, and for managing the topology at runtime. Any other specific parameters
+for the scheduler go into this file.
+
+* **statemgr.yaml** --- This file contains the classes and the configuration for state manager.
+The state manager maintains the running state of the topology as logical plan, physical plan,
+scheduler state, and execution state.
+
+* **uploader.yaml** --- This file specifies the classes and configuration for the uploader,
+which uploads the topology jars to storage. Once the containers are scheduled, they will
+download these jars from the storage for running.
+
+* **heron_internals.yaml** --- This file contains parameters that control
+how heron behaves. Tuning these parameters requires advanced knowledge of heron architecture and its
+components. For starters, the best option is just to copy the file provided with sample
+configuration. Once you are familiar with the system you can tune these parameters to achieve
+high throughput or low latency topologies.
+
+* **metrics_sinks.yaml** --- This file specifies where the run-time system and topology metrics
+will be routed. By default, the `file sink` and `tmaster sink` need to be present. In addition,
+`scribe sink` and `graphite sink` are also supported.
+
+* **packing.yaml** --- This file specifies the classes for `packing algorithm`, which defaults
+to Round Robin, if not specified.
+
+* **client.yaml** --- This file controls the behavior of the `heron` client. This is optional.
+
+# Assembling the Configuration
+
+All configuration files are assembled together to form the cluster configuration. For example,
+a cluster named `devcluster` that uses the Aurora for scheduler, ZooKeeper for state manager and
+HDFS for uploader will have the following set of configurations.
+
+## scheduler.yaml (for Aurora)
+
+```yaml
+# scheduler class for distributing the topology for execution
+heron.class.scheduler: org.apache.heron.scheduler.aurora.AuroraScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher: org.apache.heron.scheduler.aurora.AuroraLauncher
+
+# location of java
+heron.directory.sandbox.java.home: /usr/lib/jvm/java-1.8.0-openjdk-amd64/
+
+# Invoke the IScheduler as a library directly
+heron.scheduler.is.service: False
+```
+
+## statemgr.yaml (for ZooKeeper)
+
+```yaml
+# zookeeper state manager class for managing state in a persistent fashion
+heron.class.state.manager: org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager
+
+# zookeeper state manager connection string
+heron.statemgr.connection.string:  "127.0.0.1:2181"
+
+# path of the root address to store the state in zookeeper  
+heron.statemgr.root.path: "/heron"
+
+# create the zookeeper nodes, if they do not exist
+heron.statemgr.zookeeper.is.initialize.tree: True
+```
+
+## uploader.yaml (for HDFS)
+```yaml
+# Directory of config files for hadoop client to read from
+heron.uploader.hdfs.config.directory:              "/home/hadoop/hadoop/conf/"
+
+# The URI of the directory for uploading topologies in the HDFS
+heron.uploader.hdfs.topologies.directory.uri:      "hdfs:///heron/topology/"
+```
+
+## packing.yaml (for Round Robin)
+```yaml
+# packing algorithm for packing instances into containers
+heron.class.packing.algorithm:    org.apache.heron.packing.roundrobin.RoundRobinPacking
+```
+
+## client.yaml (for heron cli)
+```yaml
+# should the role parameter be required
+heron.config.role.required: false
+
+# should the environ parameter be required
+heron.config.env.required: false
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/deployment-overview.md b/website2/website/versioned_docs/version-0.20.0/deployment-overview.md
new file mode 100644
index 0000000..7b6d6ac9
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/deployment-overview.md
@@ -0,0 +1,75 @@
+---
+id: version-0.20.0-deployment-overview
+title: Deployment Overview
+sidebar_label: Deployment Overview
+original_id: deployment-overview
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron is designed to be run in clustered, scheduler-driven environments. It can
+be run in `multi-tenant` or `dedicated` clusters. Furthermore, Heron supports
+`multiple clusters` and a user can submit topologies to any of these clusters. Each
+of the clusters can use a `different scheduler`. A typical Heron deployment is shown
+in the following figure.
+
+<br />
+![Heron Deployment](assets/heron-deployment.png)
+<br/>
+
+A Heron deployment requires several components working together. The following must
+be deployed to run Heron topologies in a cluster:
+
+* **Scheduler** --- Heron requires a scheduler to run its topologies. It can
+be deployed on an existing cluster running alongside other big data frameworks.
+Alternatively, it can be deployed on a cluster of its own. Heron currently
+supports several scheduler options:
+  * [Aurora](schedulers-aurora-cluster)
+  * [Local](schedulers-local)
+  * [Slurm](schedulers-slurm)
+  * [YARN](schedulers-yarn)
+  * [Kubernetes By Hand](schedulers-k8s-by-hand)
+  * [Kubernetes with Helm](schedulers-k8s-with-helm)
+
+* **State Manager** --- Heron state manager tracks the state of all deployed
+topologies. The topology state includes its logical plan,
+physical plan, and execution state. Heron supports the following state managers:
+  * [Local File System](state-managers-local-fs)
+  * [Zookeeper](state-managers-zookeeper)
+
+* **Uploader** --- The Heron uploader distributes the topology jars to the
+servers that run them. Heron supports several uploaders
+  * [HDFS](uploaders-hdfs)
+  * [Local File System](uploaders-local-fs)
+  * [Amazon S3](uploaders-amazon-s3)
+
+* **Metrics Sinks** --- Heron collects several metrics during topology execution.
+These metrics can be routed to a sink for storage and offline analysis.
+Currently, Heron supports the following sinks
+
+  * `File Sink`
+  * `Graphite Sink`
+  * `Scribe Sink`
+
+* **Heron Tracker** --- Tracker serves as the gateway to explore the topologies.
+It exposes a REST API for exploring logical plan, physical plan of the topologies and
+also for fetching metrics from them.
+
+* **Heron UI** --- The UI provides the ability to find and explore topologies visually.
+UI displays the DAG of the topology and how the DAG is mapped to physical containers
+running in clusters. Furthermore, it allows the ability to view logs, take heap dump, memory
+histograms, show metrics, etc.
diff --git a/website2/website/versioned_docs/version-0.20.0/extending-heron-metric-sink.md b/website2/website/versioned_docs/version-0.20.0/extending-heron-metric-sink.md
new file mode 100644
index 0000000..caf312f
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/extending-heron-metric-sink.md
@@ -0,0 +1,179 @@
+---
+id: version-0.20.0-extending-heron-metric-sink
+title: Implementing a Custom Metrics Sink
+sidebar_label: Custom Metrics Sink
+original_id: extending-heron-metric-sink
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Each Heron container has its own centralized [Metrics
+Manager](heron-architecture#metrics-manager) (MM), which collects
+metrics from all [Heron Instances](heron-architecture#heron-instance) in
+the container. You can define how the MM processes metrics by implementing a
+**metrics sink**, which specifies how the MM handles incoming
+[`MetricsRecord`](/api/org/apache/heron/spi/metricsmgr/metrics/MetricsRecord.html)
+objects.
+
+> Java is currently the only supported language for custom metrics sinks. This may change in the future.
+
+## Currently supported Sinks
+
+Heron comes equipped out of the box with three metrics sinks that you can apply
+for a specific topology. The code for these sinks may prove helpful for
+implementing your own.
+
+Sink | How it works
+:----|:------------
+[Prometheus](observability-prometheus) | [`PrometheusSink`](/api/org/apache/heron/metricsmgr/sink/PrometheusSink.html) sends each `MetricsRecord` object to a specified path in the [Prometheus](https://prometheus.io) instance.
+[Graphite](observability-graphite) | [`GraphiteSink`](/api/org/apache/heron/metricsmgr/sink/GraphiteSink.html) sends each `MetricsRecord` object to a [Graphite](http://graphite.wikidot.com/) instance according to a Graphite prefix.
+[Scribe](observability-scribe) | [`ScribeSink`](/api/org/apache/heron/metricsmgr/sink/ScribeSink.html) sends each `MetricsRecord` object to a [Scribe](https://github.com/facebookarchive/scribe) instance according to a Scribe category and namespace.
+Local filesystem | [`FileSink`](/api/org/apache/heron/metricsmgr/sink/FileSink.html) writes each `MetricsRecord` object to a JSON file at a specified path.
+
+## Java Setup
+
+In order to create a custom metrics sink, you need to import the `heron-spi`
+library into your project.
+
+#### Maven
+
+```xml
+<dependency>
+  <groupId>org.apache.heron</groupId>
+  <artifactId>heron-spi</artifactId>
+  <version>{{% heronVersion %}}</version>
+</dependency>
+```
+
+#### Gradle
+
+```groovy
+dependencies {
+  compile group: "org.apache.heron", name: "heron-spi", version: "{{% heronVersion %}}"
+}
+```
+
+## The `IMetricsSink` Interface
+
+Each metrics sink must implement the
+[`IMetricsSink`](/api/org/apache/heron/spi/metricsmgr/sink/IMetricsSink.html)
+interface, which requires you to implement the following methods:
+
+Method | Description
+:------|:-----------
+[`init`](/api/org/apache/heron/spi/metricsmgr/sink/IMetricsSink.html#init-java.util.Map-org.apache.heron.spi.metricsmgr.sink.SinkContext-) | Defines the initialization behavior of the sink. The `conf` map is the configuration that is passed to the sink by the `.yaml` configuration file at `heron/config/metrics_sink.yaml`; the [`SinkContext`](/api/org/apache/heron/spi/metricsmgr/sink/SinkContext.html) object enables you to access values from the sink's runtime context (the ID of the metrics manager, the ID of the sink, and the name of the topology).
+[`processRecord`](/api/org/apache/heron/spi/metricsmgr/sink/IMetricsSink.html#processRecord-org.apache.heron.spi.metricsmgr.metrics.MetricsRecord-) | Defines how each [`MetricsRecord`](/api/org/apache/heron/spi/metricsmgr/metrics/MetricsRecord.html) that passes through the sink is processed.
+[`flush`](/api/org/apache/heron/spi/metricsmgr/sink/IMetricsSink.html#flush--) | Flush any buffered metrics; this function is called at the interval specified by the `flush-frequency-ms` parameter. More info can be found in the [Stream Manager](heron-architecture#stream-manager) documentation.
+[`close`](/api/org/apache/heron/spi/metricsmgr/sink/IMetricsSink.html#close--) | Closes the stream and releases any system resources associated with it; if the stream is already closed, invoking `close()` has no effect.
+
+Your implementation of those interfaces will need to be packaged into a JAR file
+and distributed to the `heron-core/lib/metricsmgr` folder of your [Heron
+release](compiling-overview).
+
+## Example Implementation
+
+Below is an example implementation that simply prints the contents of each
+metrics record as it passes through:
+
+```java
+import java.util.Map;
+
+import org.apache.heron.spi.metricsmgr.metrics.MetricsInfo;
+import org.apache.heron.spi.metricsmgr.metrics.MetricsRecord;
+import org.apache.heron.spi.metricsmgr.sink.IMetricsSink;
+import org.apache.heron.spi.metricsmgr.sink.SinkContext;
+
+public class PrintSink implements IMetricsSink {
+    @Override
+    public void init(Map<String, Object> conf, SinkContext context) {
+        System.out.println("Sink configuration:");
+        // This will print out each config in the supplied configuration
+        for (Map.Entry<String, Object> config : conf.entrySet()) {
+            System.out.println(String.format("%s: %s", config.getKey(), config.getValue()));
+        }
+        System.out.println(String.format("Topology name: %s", context.getTopologyName()));
+        System.out.println(String.format("Sink ID: %s", context.getSinkId()));
+    }
+
+    @Override
+    public void processRecord(MetricsRecord record) {
+        String recordString = String.format("Record received: %s", record.toString());
+        System.out.println(recordString);
+    }
+
+    @Override
+    public void flush() {
+        // Since we're just printing to stdout in this sink, we don't need to
+        // specify any flush() behavior
+    }
+
+    @Override
+    public void close() {
+        // Since we're just printing to stdout in this sink, we don't need to
+        // specify any close() behavior
+    }
+}
+```
+
+## Configuring Your Custom Sink
+
+The configuration for your sink needs to be provided in the
+`metrics_sinks.yaml` configuration file in your scheduler's base configuration template.
+
+At the top of that file there's a `sinks` parameter that lists each available
+sink by name. You should add the sink you want to use to that list. Here's an example:
+
+```yaml
+sinks:
+  - file-sink
+  - scribe-sink
+  - tmaster-sink
+  - print-sink
+  - prometheus-sink
+```
+
+For each sink you are required to specify the following:
+
+Parameter | Description
+:---------|:-----------
+`class` | The Java class name of your custom implementation of the `IMetricsSink` interface, e.g. `biz.acme.heron.metrics.PrintSink`.
+`flush-frequency-ms` | The frequency (in milliseconds) at which the `flush()` method is called in your implementation of `IMetricsSink`.
+`sink-restart-attempts` | The number of times that a sink will attempt to restart if it throws exceptions and dies. If you do not set this, the default is 0; if you set it to -1, the sink will attempt to restart forever.
+
+Here is an example `metrics_sinks.yaml` configuration:
+
+```yaml
+sinks:
+  - custom-sink
+
+custom-sink:
+  class: "biz.acme.heron.metrics.CustomSink"
+  flush-frequency-ms: 60000 # One minute
+  sink-restart-attempts: -1 # Attempt to restart forever
+  some-other-config: false
+```
+
+It is optional to add other configurations for the sink. All configurations will be constructed
+as an unmodifiable map `Map<String, Object> conf` and passed to the sink via the `init` function.
+
+## Using Your Custom Sink
+
+Once you've made a JAR for your custom Java sink, distributed that JAR to
+`heron-core/lib/metricsmgr` folder, and changed the configuration in
+`metrics_sinks.yaml` file in the base configuration template, any topology submitted using that configuration will include the custom sink.
+
+You must [re-compile
+Heron](compiling-overview) if you want to include the configuration in a new distribution of [Heron CLI](user-manuals-heron-cli).
+
diff --git a/website2/website/versioned_docs/version-0.20.0/extending-heron-scheduler.md b/website2/website/versioned_docs/version-0.20.0/extending-heron-scheduler.md
new file mode 100644
index 0000000..9049b48
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/extending-heron-scheduler.md
@@ -0,0 +1,97 @@
+---
+id: version-0.20.0-extending-heron-scheduler
+title: Implementing a Custom Scheduler
+sidebar_label: Custom Scheduler
+original_id: extending-heron-scheduler
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+To run a Heron topology, you’ll need to set up a scheduler that is responsible 
+for topology management. Note: one scheduler is managing only one topology, 
+for the purpose of better isolation. Heron currently supports the following schedulers out of the box:
+
+* [Aurora](schedulers-aurora-cluster)
+* [Kubernetes](schedulers-k8s-by-hand)
+* [Kubernetes-Helm](schedulers-k8s-with-helm)
+* [Nomad](schedulers-nomad)
+* [Local scheduler](schedulers-local)
+* [Slurm scheduler](schedulers-slurm)
+
+If you'd like to run Heron on a not-yet-supported system, such as
+[Amazon ECS](https://aws.amazon.com/ecs/), you can create your own scheduler
+using Heron's spi, as detailed in the
+sections below.
+
+Java is currently the only supported language for custom schedulers. This may
+change in the future.
+
+## Java Setup
+
+In order to create a custom scheduler, you need to import the `heron-spi`
+library into your project.
+
+#### Maven
+
+```xml
+<dependency>
+  <groupId>org.apache.heron</groupId>
+  <artifactId>heron-spi</artifactId>
+  <version>{{% heronVersion %}}</version>
+</dependency>
+```
+
+#### Gradle
+
+```groovy
+dependencies {
+  compile group: "org.apache.heron", name: "heron-spi", version: "{{% heronVersion %}}"
+}
+```
+
+## Interfaces
+
+Creating a custom scheduler involves implementing each of the following Java
+interfaces:
+
+Interface | Role | Examples
+:-------- |:---- |:--------
+[`IPacking`](/api/org/apache/heron/spi/packing/IPacking.html) | Defines the algorithm used to generate physical plan for a topology. | [RoundRobin](/api/org/apache/heron/packing/roundrobin/RoundRobinPacking.html)
+[`ILauncher`](/api/org/apache/heron/spi/scheduler/ILauncher.html) | Defines how the scheduler is launched | [Aurora](/api/org/apache/heron/scheduler/aurora/AuroraLauncher.html), [local](/api/org/apache/heron/scheduler/local/LocalLauncher.html)
+[`IScheduler`](/api/org/apache/heron/spi/scheduler/IScheduler.html) | Defines the scheduler object used to construct topologies | [local](/api/org/apache/heron/scheduler/local/LocalScheduler.html)
+[`IUploader`](/api/org/apache/heron/spi/uploader/IUploader.html) | Uploads the topology to a shared location accessible to the runtime environment of the topology | [local](/api/org/apache/heron/uploader/localfs/LocalFileSystemUploader.html) [HDFS](/api/org/apache/heron/uploader/hdfs/HdfsUploader.html) [S3](/api/org/apache/heron/uploader/s3/S3Uploader.html)
+
+Heron provides a number of built-in implementations out of box.
+
+## Running the Scheduler
+
+To run the a custom scheduler, the implementation of the interfaces above must be specified in the [config](deployment-configuration).
+By default, the heron-cli looks for configurations under `${HERON_HOME}/conf/`. The location can be overridden using option `--config-path`. 
+Below is an example showing the command for [topology
+submission](user-manuals-heron-cli#submitting-a-topology):
+
+```bash
+$ heron submit [cluster-name-storing-your-new-config]/[role]/[env] \
+    --config-path [config-folder-path-storing-your-new-config] \
+    /path/to/topology/my-topology.jar \
+    biz.acme.topologies.MyTopology 
+```
+
+The implementation for each of the interfaces listed above must be on Heron's
+[classpath](https://docs.oracle.com/javase/tutorial/essential/environment/paths.html). 
+
+
diff --git a/website2/website/versioned_docs/version-0.20.0/getting-started-local-single-node.md b/website2/website/versioned_docs/version-0.20.0/getting-started-local-single-node.md
new file mode 100644
index 0000000..f600634
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/getting-started-local-single-node.md
@@ -0,0 +1,286 @@
+---
+id: version-0.20.0-getting-started-local-single-node
+title: Local (Single Node)
+sidebar_label: Local (Single Node)
+original_id: getting-started-local-single-node
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> The current version of Heron is **{{heron:version}}**
+
+
+The easiest way to get started learning Heron is to install the Heron client tools, which are currently available for:
+
+* [MacOS](#macos-homebrew)
+* [Ubuntu >= 14.04](#using-installation-scripts)
+* [CentOS](#using-installation-scripts)
+
+For other platforms, you need to build from source. Please refer to the [guide to compiling Heron](compiling-overview).
+
+## Step 1 --- Download the Heron tools
+
+Heron tools can be installed on [macOS](#macos-homebrew) using [Homebrew](https://brew.sh) and on Linux using [installation scripts](#using-installation-scripts).
+
+> You can install using [installation scripts](#using-installation-scripts) on macOS as well.
+
+## macOS/Homebrew
+
+The easiest way to get started with Heron on macOS is using [Homebrew](https://brew.sh):
+
+```bash
+$ brew tap streamlio/homebrew-formulae
+$ brew update
+$ brew install heron
+```
+
+This will install a variety of executables and other resources by default in `/usr/local/opt/heron`.
+
+> Homebrew may use a different folder than `/usr/local/opt/heron`. To check which folder is being used, run `brew --prefix heron`.
+
+## Using installation scripts
+
+To install Heron binaries directly, using installation scripts, go to Heron's [releases page](https://github.com/apache/incubator-heron/releases) on GitHub
+and see a full listing of Heron releases for each available platform. The installation script for macOS (`darwin`), for example, is named
+`heron-install-{{% heronVersion %}}-darwin.sh`.
+
+Download the for your platform either from the releases page or using [`wget`](https://www.gnu.org/software/wget/). Here's a `wget` example for Ubuntu:
+
+```bash
+$ wget https://github.com/apache/incubator-heron/releases/download/{{% heronVersion %}}/heron-install-{{% heronVersion %}}-ubuntu.sh
+```
+
+Once you've downloaded the script, make it executable using [chmod](https://en.wikipedia.org/wiki/Chmod):
+
+```bash
+$ chmod +x heron-*.sh
+```
+
+> The script will install executables in the `~/bin` folder. You should add that folder to your `PATH` using `export PATH=~/bin:$PATH`.
+
+Now run the [Heron client](user-manuals-heron-cli) installation script with the `--user` flag set. Here's an example for Ubuntu:
+
+```bash
+$ ./heron-install-{{% heronVersion %}}-ubuntu.sh --user
+Heron installer
+---------------
+
+Uncompressing...done
+...
+Heron is now installed!
+```
+
+To check that Heron is successfully installed, run `heron version`:
+
+```bash
+$ heron version
+heron.build.git.revision : 26bb4096130a05f9799510bbce6c37a69a7342ef
+heron.build.git.status : Clean
+heron.build.host : ...
+heron.build.time : Sat Aug  6 12:35:47 PDT {{% currentYear %}}
+heron.build.timestamp : 1470512147000
+heron.build.user : ...
+heron.build.version : {{% heronVersion %}}
+```
+
+## Step 2 --- Launch an example topology
+
+> #### Note for macOS users
+
+> If you want to run topologies locally on macOS, you may need to add your
+> hostname to your `/etc/hosts` file under `localhost`. Here's an example line:
+> `127.0.0.1 localhost My-Mac-Laptop.local`. You can fetch your hostname by simply
+> running `hostname` in your shell.
+
+If you set the `--user` flag when running the installation scripts, some example
+topologies will be installed in your `~/.heron/examples` directory. You can
+launch an example [topology](heron-topology-concepts) locally (on your machine)
+using the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  WindowedWordCountTopology \
+  --deploy-deactivated
+```
+
+The output should look something like this:
+
+```bash
+INFO: Launching topology 'WindowedWordCountTopology'
+
+...
+
+INFO: Topology 'WindowedWordCountTopology' launched successfully
+INFO: Elapsed time: 3.409s.
+```
+
+This will *submit* the topology to your locally running Heron cluster but it
+won't *activate* the topology because the `--deploy-deactivated` flag was set.
+Activating the topology will be explored in [step
+5](#step-5-explore-topology-management-commands) below.
+
+Note that the output shows whether the topology has been launched successfully as well
+the working directory for the topology.
+
+To check what's under the working directory, run:
+
+```bash
+$ ls -al ~/.herondata/topologies/local/${ROLE}/WindowedWordCountTopology
+-rw-r--r--   1 username  staff     6141 Oct 12 09:58 WindowedWordCountTopology.defn
+-rw-r--r--   1 username  staff        5 Oct 12 09:58 container_1_flatmap1_4.pid
+-rw-r--r--   1 username  staff        5 Oct 12 09:58 container_1_logger1_3.pid
+# etc.
+```
+
+All instances' log files can be found in `log-files` under the working directory:
+
+```bash
+$ ls -al ~/.herondata/topologies/local/${ROLE}/WindowedWordCountTopology/log-files
+total 408
+-rw-r--r--   1 username  staff   5055 Oct 12 09:58 container_1_flatmap1_4.log.0
+-rw-r--r--   1 username  staff      0 Oct 12 09:58 container_1_flatmap1_4.log.0.lck
+-rw-r--r--   1 username  staff   5052 Oct 12 09:58 container_1_logger1_3.log.0
+# etc.
+```
+
+## Step 3 --- Start Heron Tracker
+
+The [Heron Tracker](user-manuals-heron-tracker-runbook) is a web service that
+continuously gathers information about your Heron cluster. You can launch the
+tracker by running the `heron-tracker` command (which is already installed):
+
+```bash
+$ heron-tracker
+... Running on port: 8888
+... Using config file: $HOME/.herontools/conf/heron_tracker.yaml
+```
+
+You can reach Heron Tracker in your browser at [http://localhost:8888](http://localhost:8888)
+and see something like the following upon successful submission of the topology:
+![Heron Tracker](assets/heron-tracker.png)
+
+To explore Heron Tracker, please refer to [Heron Tracker Rest API](user-manuals-tracker-rest)
+
+## Step 4 --- Start Heron UI
+
+[Heron UI](user-manuals-heron-ui-runbook) is a user interface that uses Heron Tracker to
+provide detailed visual representations of your Heron topologies. To launch
+Heron UI:
+
+```bash
+$ heron-ui
+... Running on port: 8889
+... Using tracker url: http://localhost:8888
+```
+
+You can open Heron UI in your browser at [http://localhost:8889](http://localhost:8889)
+and see something like this upon successful submission of the topology:
+![Heron UI](assets/heron-ui.png)
+
+To play with Heron UI, please refer to [Heron UI Usage Guide](guides-ui-guide)
+
+## Step 5 --- Explore topology management commands
+
+In step 2 you submitted a topology to your local cluster. The `heron` CLI tool
+also enables you to activate, deactivate, and kill topologies and more.
+
+```bash
+$ heron activate local WindowedWordCountTopology
+$ heron deactivate local WindowedWordCountTopology
+$ heron kill local WindowedWordCountTopology
+```
+
+Upon successful actions, a message similar to the following will appear:
+
+```bash
+INFO: Successfully activated topology 'WindowedWordCountTopology'
+INFO: Elapsed time: 1.980s.
+```
+
+For more info on these commands, read about [topology
+lifecycles](heron-topology-concepts#topology-lifecycle).
+
+To list the available CLI commands, run `heron` by itself:
+
+```bash
+usage: heron <command> <options> ...
+
+Available commands:
+    activate           Activate a topology
+    deactivate         Deactivate a topology
+    help               Prints help for commands
+    kill               Kill a topology
+    restart            Restart a topology
+    submit             Submit a topology
+    version            Print version of heron-cli
+
+For detailed documentation, go to http://heronstreaming.io
+```
+
+To invoke help output for a command, run `heron help COMMAND`. Here's an
+example:
+
+```bash
+$ heron help submit
+usage: heron submit [options] cluster/[role]/[environ] topology-file-name topology-class-name [topology-args]
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and environ to run topology
+  topology-file-name    Topology jar/tar/zip file
+  topology-class-name   Topology class name
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "$HOME/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --deploy-deactivated (a boolean; default: "false")
+  -D DEFINE             Define a system property to pass to java -D when
+                        running main.
+  --verbose (a boolean; default: "false")
+```
+
+## Step 6 --- Explore other example topologies
+
+The source code for the example topologies can be found
+[on
+GitHub]({{% githubMaster %}}/examples/src/java/org/apache/heron/examples).
+The included example topologies:
+
+* `AckingTopology.java` --- A topology with acking enabled.
+* `ComponentJVMOptionsTopology.java` --- A topology that supplies JVM options
+  for each component.
+* `CustomGroupingTopology.java` --- A topology that implements custom grouping.
+* `ExclamationTopology.java` --- A spout that emits random words to a bolt that
+  then adds an exclamation mark.
+* `MultiSpoutExclamationTopology.java` --- a topology with multiple spouts.
+* `MultiStageAckingTopology.java` --- A three-stage topology. A spout emits to a
+  bolt that then feeds to another bolt.
+* `TaskHookTopology.java` --- A topology that uses a task hook to subscribe to
+   event notifications.
+
+## Troubleshooting
+In case of any issues, please refer to [Quick Start Troubleshooting](getting-started-troubleshooting-guide).
+
+### Next Steps
+
+* [Migrate Storm topologies](getting-started-migrate-storm-topologies) to Heron with simple `pom.xml`
+  changes
+* [Deploy topologies](deployment-overview) in clustered, scheduler-driven
+  environments (such as on [Aurora](schedulers-aurora-cluster)
+  and [locally](schedulers-local))
+* [Develop topologies](heron-architecture) for Heron
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/getting-started-migrate-storm-topologies.md b/website2/website/versioned_docs/version-0.20.0/getting-started-migrate-storm-topologies.md
new file mode 100644
index 0000000..b3413d9
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/getting-started-migrate-storm-topologies.md
@@ -0,0 +1,128 @@
+---
+id: version-0.20.0-getting-started-migrate-storm-topologies
+title: Migrate Storm Topologies
+sidebar_label: Migrate Storm Topologies
+original_id: getting-started-migrate-storm-topologies
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron is designed to be fully backwards compatible with existing [Apache
+Storm](http://storm.apache.org/index.html) v1 projects, which means that you can
+migrate an existing Storm [topology](heron-topology-concepts) to Heron by making
+just a few adjustments to the topology's `pom.xml` [Maven configuration
+file](https://maven.apache.org/pom.html).
+
+## Step 1. Add Heron dependencies to  `pom.xml`
+
+Copy the [`pom.xml`](https://maven.apache.org/pom.html) segments below and paste
+them into your existing Storm `pom.xml` file in the [dependencies
+block](https://maven.apache.org/pom.html#Dependencies).
+
+```xml
+<dependency>
+  <groupId>org.apache.heron</groupId>
+  <artifactId>heron-api</artifactId>
+  <version>{{< heronVersion >}}</version>
+  <scope>compile</scope>
+</dependency>
+<dependency>
+  <groupId>org.apache.heron</groupId>
+  <artifactId>heron-storm</artifactId>
+  <version>{{< heronVersion >}}</version>
+  <scope>compile</scope>
+</dependency>
+```
+
+## Step 2. Remove Storm dependencies from `pom.xml`
+
+Delete the Storm dependency, which looks like this:
+
+```xml
+<dependency>
+  <groupId>org.apache.storm</groupId>
+  <artifactId>storm-core</artifactId>
+  <version>storm-VERSION</version>
+  <scope>provided</scope>
+</dependency>
+```
+
+## Step 3 (if needed). Remove the Clojure plugin from `pom.xml`
+
+Delete the [Clojure plugin](https://maven.apache.org/pom.html#Plugins), which
+should look like this:
+
+```xml
+<plugin>
+  <groupId>com.theoryinpractise</groupId>
+  <artifactId>clojure-maven-plugin</artifactId>
+  <version>1.3.12</version>
+  <extensions>true</extensions>
+  <configuration>
+    <sourceDirectories>
+      <sourceDirectory>src/clj</sourceDirectory>
+    </sourceDirectories>
+  </configuration>
+</plugin>
+```
+
+## Step 4. Run Maven commands
+
+Run the following [Maven lifecycle](https://maven.apache.org/run.html) commands:
+
+```bash
+$ mvn clean
+$ mvn compile
+$ mvn package
+```
+
+> [Storm Distribute RPC](http://storm.apache.org/releases/0.10.0/Distributed-RPC.html) is deprecated in Heron.
+
+## Step 4 (optional). Launch your upgraded Heron topology
+
+You can launch the compiled Maven project on your [local
+cluster](schedulers-local) using `heron submit`.
+
+First, modify your project's base directory `{basedir}` and
+`{PATH-TO-PROJECT}.jar`, which is located in `${basedir}/target` by [Maven
+convention](https://maven.apache.org/guides/getting-started/). Then modify the
+`TOPOLOGY-FILE-NAME` and `TOPOLOGY-CLASS-NAME` for your project:
+
+```bash
+$ heron submit local \
+  ${basedir}/target/PATH-TO-PROJECT.jar \
+  TOPOLOGY-FILE-NAME \
+  TOPOLOGY-CLASS-NAME
+```
+
+Here's an example submit command using the example topology from the [Quick
+Start Guide](getting-started-local-single-node) guide:
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-api-examples.jar \ # The path of the topology's jar file
+  org.apache.heron.examples.api.ExclamationTopology \ # The topology's Java class
+  ExclamationTopology # The name of the topology
+```
+
+### Next Steps
+
+* [Deploy topologies](deployment-overview) in clustered, scheduler-driven
+  environments (such as on [Aurora](schedulers-aurora-cluster)
+  and
+  [locally](schedulers-local)
+* [Develop topologies](heron-architecture) for Heron
diff --git a/website2/website/versioned_docs/version-0.20.0/getting-started-troubleshooting-guide.md b/website2/website/versioned_docs/version-0.20.0/getting-started-troubleshooting-guide.md
new file mode 100644
index 0000000..ff32fcf
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/getting-started-troubleshooting-guide.md
@@ -0,0 +1,141 @@
+---
+id: version-0.20.0-getting-started-troubleshooting-guide
+title: Troubleshooting Guide
+sidebar_label: Troubleshooting Guide
+original_id: getting-started-troubleshooting-guide
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+This guide provides basic help for issues frequently encountered when deploying topologies.
+
+### 1. How can I get more debugging information?
+
+Enable the `--verbose` flag to see more debugging information, for example
+
+```bash
+heron submit ... ExclamationTopology --verbose        
+```
+
+### 2. Why does the topology launch successfully but fail to start?
+
+Even if the topology is submitted successfully, it could still fail to
+start some component. For example, TMaster may fail to start due to unfulfilled
+dependencies.
+
+For example, the following message can appear:
+
+```bash
+$ heron activate local ExclamationTopology
+
+...
+
+[2016-05-27 12:02:38 -0600] org.apache.heron.common.basics.FileUtils SEVERE: \
+Failed to read from file.
+java.nio.file.NoSuchFileException: \
+/home//.herondata/repository/state/local/pplans/ExclamationTopology
+
+...
+
+[2016-05-27 12:02:38 -0600] org.apache.heron.spi.utils.TMasterUtils SEVERE: \
+Failed to get physical plan for topology ExclamationTopology
+
+...
+
+ERROR: Failed to activate topology 'ExclamationTopology'
+INFO: Elapsed time: 1.883s.
+```
+
+#### What to do
+
+* This file will show if any specific components have failed to start.
+
+    ```bash
+    ~/.herondata/topologies/{cluster}/{role}/{TopologyName}/heron-executor.stdout
+    ```
+
+    For example, there may be errors when trying to spawn a Stream Manager process in the file:
+
+    ```bash
+    Running stmgr-1 process as ./heron-core/bin/heron-stmgr ExclamationTopology \
+    ExclamationTopology0a9c6550-7f3d-44fb-97ea-5c779fac6924 ExclamationTopology.defn LOCALMODE \
+    /Users/${USERNAME}/.herondata/repository/state/local stmgr-1 \
+    container_1_word_2,container_1_exclaim1_1 58106 58110 58109 ./heron-conf/heron_internals.yaml
+    2016-06-09 16:20:28:  stdout:
+    2016-06-09 16:20:28:  stderr: error while loading shared libraries: libunwind.so.8: \
+    cannot open shared object file: No such file or directory
+    ```
+
+    Then fix it correspondingly.
+
+* It is also possible that the host has an issue with resolving localhost.
+To check, run the following command in a shell.
+
+    ```bash
+    $ python -c "import socket; print socket.gethostbyname(socket.gethostname())"
+    Traceback (most recent call last):
+      File "<string>", line 1, in <module>
+    socket.gaierror: [Errno 8] nodename nor servname provided, or not known
+    ```
+
+    If the output looks like a normal IP address, such as `127.0.0.1`,
+    you don't have this issue.
+    If the output is similar to the above, you need to modify the `/etc/hosts`
+    file to correctly resolve localhost, as shown below.
+
+    1. Run the following command, whose output is your computer's hostname.
+
+        ```bash
+        $ python -c "import socket; print socket.gethostname()"
+        ```
+
+    2. Open the `/etc/hosts` file as superuser and find a line containing
+
+        ```bash
+        127.0.0.1	localhost
+        ```
+
+    3. Append your hostname after the word "localhost" on the line.
+    For example, if your hostname was `tw-heron`, then the line should
+    look like the following:
+
+        ```bash
+        127.0.0.1   localhost   tw-heron
+        ```
+
+    4. Save the file. The change should usually be reflected immediately,
+    although rebooting might be necessary depending on your platform.
+
+### 3. Why does the process fail during runtime?
+
+If a component (e.g., TMaster or Stream Manager) has failed during runtime, visit the component's logs in
+
+```bash
+~/.herondata/topologies/{cluster}/{role}/{TopologyName}/log-files/
+```
+
+### 4. How to force kill and clean up a topology?
+
+In general, it suffices to run:
+
+```bash
+heron kill ...
+```
+
+If returned error, the topology can still be killed by running
+    `kill pid` to kill all associated running process and `rm -rf ~/.herondata/`
+    to clean up the state.
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-data-model.md b/website2/website/versioned_docs/version-0.20.0/guides-data-model.md
new file mode 100644
index 0000000..123b479
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-data-model.md
@@ -0,0 +1,133 @@
+---
+id: version-0.20.0-guides-data-model
+title: Heron Data Model
+sidebar_label: Heron Data Model
+original_id: guides-data-model
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Tuple is Heron's core data type. All
+data that is fed into a Heron topology via
+[spouts](../../concepts/topologies#spouts) and then processed by
+[bolts](../../concepts/topologies#bolts) consists of tuples.
+
+Heron has a [`Tuple`](/api/org/apache/heron/api/tuple/Tuple.html)
+interface for working with tuples. Heron `Tuple`s can hold values of any type;
+values are accessible either by providing an index or a field name.
+
+## Using Tuples
+
+Heron's `Tuple` interface contains the methods listed in the [Javadoc
+definition](/api/org/apache/heron/api/tuple/Tuple.html).
+
+### Accessing Primitive Types By Index
+
+Heron `Tuple`s support a wide variety of primitive Java types, including
+strings, Booleans, byte arrays, and more.
+[`getString`](/api/org/apache/heron/api/tuple/Tuple.html#getString-int-)
+method, for example, takes an integer index and returns either a string or
+`null` if no string value is present at that index. Analogous methods can be
+found in the Javadoc.
+
+### Accessing Primitive Types By Field
+
+In addition to being accessible via index, values stored in Heron tuples are
+accessible via field name as well. The
+[`getStringByField`](/api/org/apache/heron/api/tuple/Tuple.html#getStringByField-java.lang.String-)
+method, for example, takes a field name string and returns either a string or
+`null` if no string value is present for that field name. Analogous methods can
+be found in the Javadoc.
+
+### Using Non-primitive Types
+
+In addition to primitive types, you can access any value in a Heron `Tuple` as a
+Java `Object`. As for primitive types, you can access `Object`s on the basis of
+an index or a field name. The following methods return either an `Object` or
+`null` if no object is present:
+
+* [`getValue`](/api/org/apache/heron/api/tuple/Tuple.html#getValue-int-)
+* [`getValueByField`](/api/org/apache/heron/api/tuple/Tuple.html#getValueByField-java.lang.String-)
+
+You can also retrieve all objects contained in a Heron `Tuple` as a Java
+[List](https://docs.oracle.com/javase/8/docs/api/java/util/List.html) using the
+[`getValues`](/api/org/apache/heron/api/tuple/Tuple.html#getValues--)
+method.
+
+### User-defined Types
+
+You use Heron tuples in conjunction with more complex, user-defined types using
+[type casting](http://www.studytonight.com/java/type-casting-in-java), provided
+that you've created and registered a [custom serializer](../serialization) for the type.
+Here's an example (which assumes that a serializer for the type
+`Tweet` has been created and registered):
+
+```java
+public void execute(Tuple input) {
+    // The following return null if no value is present or throws a
+    // ClassCastException if type casting fails:
+    Tweet tweet = (Tweet) input.getValue(0);
+    List<Tweet> allTweets = input.getValues();
+}
+```
+
+More info on custom serialization can be found in [Creating Custom Tuple
+Serializers](guides-tuple-serialization).
+
+### Fields
+
+The `getFields` method returns a
+[`Fields`](http://heronproject.github.io/topology-api/org/apache/heron/api/tuple/Fields)
+object that contains all of the fields in the tuple. More on fields can be found
+[below](#Fields).
+
+### Other Methods
+
+There are additional methods available for determining the size of Heron
+`Tuple`s, extracting contextual information, and more. For a full listing of
+methods, see the
+[Javadoc](/api/org/apache/heron/api/tuple/Tuple.html).
+
+## Fields
+
+From the methods in the list above you can see that you can retrieve single
+values from a Heron tuple on the basis of their index. You can also retrieve
+multiple values using a
+[`Fields`](/api/org/apache/heron/api/tuple/Fields.html) object,
+which can be initialized either using varargs or a list of strings:
+
+```java
+// Using varargs
+Fields fruits = new Fields("apple", "orange", "banana");
+
+// Using a list of strings
+List<String> fruitNames = new LinkedList<String>();
+fruitNames.add("apple");
+// Add "orange" and "banana" as well
+Fields fruits = new Fields(fruitNames);
+```
+
+You can then use that object in conjunction with a tuple:
+
+```java
+public void execute(Tuple input) {
+    List<Object> values = input.select(fruits);
+    for (Object value : values) {
+        System.out.println(value);
+    }
+}
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-effectively-once-java-topologies.md b/website2/website/versioned_docs/version-0.20.0/guides-effectively-once-java-topologies.md
new file mode 100644
index 0000000..21f0994
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-effectively-once-java-topologies.md
@@ -0,0 +1,286 @@
+---
+id: version-0.20.0-guides-effectively-once-java-topologies
+title: Effectively Once Java Topologies
+sidebar_label: Effectively Once Java Topologies
+original_id: guides-effectively-once-java-topologies
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> **This document pertains to the older, Storm-based, Heron Topology API.** Heron now offers several APIs for building topologies. Topologies created using the Topology API can still run on Heron and there are currently no plans to deprecate this API. We would, however, recommend that you use the Streamlet API for future work.
+
+You can create Heron topologies that have [effectively-once](heron-delivery-semantics#stateful-topologies) semantics by doing two things:
+
+1. Set the [delivery semantics](#specifying-delivery-semantics) of the topology to `EFFECTIVELY_ONCE`.
+2. Create topology processing logic in which each component (i.e. each spout and bolt) implements the [`IStatefulComponent`](/api/java/org/apache/heron/api/topology/IStatefulComponent.html) interface.
+
+## Specifying delivery semantics
+
+You can specify the [delivery semantics](heron-delivery-semantics) of a Heron topology via configuration. To apply effectively-once semantics to a topology:
+
+```java
+import org.apache.heron.api.Config;
+
+Config topologyConfig = new Config();
+topologyConfig.setTopologyReliabilityMode(Config.TopologyReliabilityMode.EFFECTIVELY_ONCE);
+```
+
+The other possible values for the `TopologyReliabilityMode` enum are `ATMOST_ONCE` and `ATLEAST_ONCE`.
+
+> Instead of "delivery semantics" terminology, the original Topology API for Heron uses "reliability mode" terminology. In spite of the terminological difference, the two sets of terms are synonymous.
+
+## Stateful components
+
+Stateful spouts and bolts need to implement the [`IStatefulComponent`](/api/java/org/apache/heron/api/topology/IStatefulComponent.html) interface, which requires implementing two methods (both of which are `void` methods):
+
+Method | Input | Description
+:------|:------|:-----------
+`preSave` | Checkpoint ID (`String`)| The action taken immediately prior to the component's state being saved. 
+`initState` | Initial state ([`State<K, V>`](/api/java/org/apache/heron/examples/api/StatefulWordCountTopology.ConsumerBolt.html#initState-org.apache.heron.api.state.State-)) | Initializes the state of the function or operator to that of a previous checkpoint.
+
+> Remember that stateful components automatically handle all state storage in the background using a State Manager (the currently available State Managers are [ZooKeeper](state-managers-zookeeper) and the [local filesystem](state-managers-local-fs)). You don't need to, for example, save state to an external database.
+
+## The `State` class
+
+Heron topologies with effectively-once semantics need to be stateful topologies (you can also create stateful topologies with at-least-once or at-most-once semantics). All state in stateful topologies is handled through a [`State`](/api/java/org/apache/heron/api/state/State.html) class which has the same semantics as a standard Java [`Map`](https://docs.oracle.com/javase/8/docs/api/java/util/Map.html), and so it includes methods like `get`, `set`, `put`, `putIfAbsent`, `keySet`, `compute`, `forEach`, `merge`, and so on.
+
+Each stateful spout or bolt must be associated with a single `State` object that handles the state, and that object must also be typed as `State<K, V>`, for example `State<String, Integer>`, `State<long, MyPojo>`, etc. An example usage of the state object can be found in the [example topology](#example-effectively-once-topology) below.
+
+## Example effectively-once topology
+
+In the sections below, we'll build a stateful topology with effectively-once semantics from scratch. The topology will work like this:
+
+* A [`RandomIntSpout`](#example-stateful-spout) will continuously emit random integers between 1 and 100
+* An [`AdditionBolt`](#example-stateful-bolt) will receive those random numbers and add each number to a running sum. When the sum reaches 1,000,000, it will go back to zero. The bolt won't emit any data but will simply log the current sum.
+
+> You can see the code for another stateful Heron topology with effectively-once semantics in [this word count example](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/StatefulWordCountTopology.java).
+
+### Example stateful spout
+
+The `RandomIntSpout` shown below continuously emits a never-ending series of random integers between 1 and 100 in the `random-int` field.
+
+> It's important to note that *all* components in stateful topologies must be stateful (i.e. implement the `IStatefulComponent` interface) for the topology to provide effectively-once semantics. That includes spouts, even simple ones like the spout in this example.
+
+```java
+import org.apache.heron.api.spout.BaseRichSpout;
+import org.apache.heron.api.spout.SpoutOutputCollector;
+import org.apache.heron.api.state.State;
+import org.apache.heron.api.topology.IStatefulComponent;
+import org.apache.heron.api.topology.TopologyContext;
+import org.apache.heron.api.tuple.Fields;
+import org.apache.heron.api.tuple.Values;
+
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+
+public class RandomIntSpout extends BaseRichSpout implements IStatefulComponent<String, Integer> {
+    private SpoutOutputCollector spoutOutputCollector;
+    private State<String, Integer> count;
+
+    public RandomIntSpout() {
+    }
+
+    // Generates a random integer between 1 and 100
+    private int randomInt() {
+        return ThreadLocalRandom.current().nextInt(1, 101);
+    }
+
+    // These two methods are required to implement the IStatefulComponent interface
+    @Override
+    public void preSave(String checkpointId) {
+        System.out.println(String.format("Saving spout state at checkpoint %s", checkpointId));
+    }
+
+    @Override
+    public void initState(State<String, Integer> state) {
+        count = state;
+    }
+
+    // These three methods are required to extend the BaseRichSpout abstract class
+    @Override
+    public void open(Map<String, Object> map, TopologyContext ctx, SpoutOutputCollector collector) {
+        spoutOutputCollector = collector;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("random-int"));
+    }
+
+    @Override
+    public void nextTuple() {
+        int randomInt = randomInt();
+        spoutOutputCollector.emit(new Values(randomInt));
+    }
+}
+```
+
+A few things to note in this spout:
+
+* All state is handled by the `count` variable, which is of type `State<String, Integer>`. In that state object, the key is always `count`, while the value is the current sum.
+* This is a very simple topology, so the `preSave` method simply logs the current checkpoint ID. This method could be used in a variety of more complex ways.
+* The `initState` method simply accepts the current state as-is. This method can be used for a wide variety of purposes, for example deserializing the `State` object to a user-defined type.
+* Only one field will be declared: the `random-int` field.
+
+### Example stateful bolt
+
+The `AdditionBolt` takes incoming tuples from the `RandomIntSpout` and adds each integer to produce a running sum. If the sum ever exceeds 1 million, then it resets to zero.
+
+```java
+import org.apache.heron.api.bolt.BaseRichBolt;
+import org.apache.heron.api.bolt.OutputCollector;
+import org.apache.heron.api.state.State;
+import org.apache.heron.api.topology.IStatefulComponent;
+import org.apache.heron.api.topology.TopologyContext;
+
+import java.util.Map;
+
+public class AdditionBolt extends BaseRichBolt implements IStatefulComponent<String, Integer> {
+    private OutputCollector outputCollector;
+    private State<String, Integer> count;
+
+    public AdditionBolt() {
+    }
+
+    // These two methods are required to implement the IStatefulComponent interface
+    @Override
+    public void preSave(String checkpointId) {
+        System.out.println(String.format("Saving spout state at checkpoint %s", checkpointId));
+    }
+
+    @Override
+    public void initState(State<String, Integer> state) {
+        count = state;
+    }
+
+    // These three methods are required to extend the BaseRichBolt abstract class
+    @Override
+    public void prepare(Map<String, Object> map, TopologyContext ctx, OutputCollector collector) {
+        outputCollector = collector;
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        // This bolt has no output fields, so none will be declared
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+        // Extract the incoming random integer from the arriving tuple
+        int incomingRandomInt = tuple.getInt(tuple.fieldIndex("random-int"));
+
+        // Get the current sum from the count object, defaulting to zero in case
+        // this is the first processing operation.
+        int currentSum = count.getOrDefault("count", 0);
+
+        int newSum = incomingRandomInt + currentSum;
+
+        // Reset the sum to zero if it exceeds 1,000,000
+        if (newSum > 1000000) {
+            newSum = 0;
+        }
+
+        // Update the count state
+        count.put("count", newSum);
+
+        System.out.println(String.format("The current saved sum is: %d", newSum));
+    }
+}
+```
+
+A few things to notice in this bolt:
+
+* As in the `RandomIntSpout`, all state is handled by the `count` variable, which is of type `State<String, Integer>`. In that state object, the key is always `count`, while the value is the current sum.
+* As in the `RandomIntSpout`, the `preSave` method simply logs the current checkpoint ID.
+* The bolt has no output (it simply logs the current stored sum), so no output fields need to be declared.
+
+### Putting the topology together
+
+Now that we have a stateful spout and bolt in place, we can build and configure the topology:
+
+```java
+import org.apache.heron.api.Config;
+import org.apache.heron.api.HeronSubmitter;
+import org.apache.heron.api.exception.AlreadyAliveException;
+import org.apache.heron.api.exception.InvalidTopologyException;
+import org.apache.heron.api.topology.TopologyBuilder;
+import org.apache.heron.api.tuple.Fields;
+
+public class EffectivelyOnceTopology {
+    public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
+        Config topologyConfig = new Config();
+
+        // Apply effectively-once semantics and set the checkpoint interval to 10 seconds
+        topologyConfig.setTopologyReliabilityMode(Config.TopologyReliabilityMode.EFFECTIVELY_ONCE);
+        topologyConfig.setTopologyStatefulCheckpointIntervalSecs(10);
+
+        // Build the topology out of the example spout and bolt
+        TopologyBuilder topologyBuilder = new TopologyBuilder();
+        topologyBuilder.setSpout("random-int-spout", new RandomIntSpout());
+        topologyBuilder.setBolt("addition-bolt", new AdditionBolt())
+                .fieldsGrouping("random-int-spout", new Fields("random-int"));
+
+        HeronSubmitter.submitTopology(args[0], topologyConfig, topologyBuilder.createTopology());
+    }
+}
+```
+
+### Submitting the topology
+
+The code for this topology can be found in [this GitHub repository](https://github.com/streamlio/heron-java-effectively-once-example). You can clone the repo locally like this:
+
+```bash
+$ git clone https://github.com/streamlio/heron-java-effectively-once-example
+```
+
+Once you have the repo locally, you can submit the topology to a [running Heron installation](getting-started-local-single-node) like this (if you have [Maven](https://maven.apache.org/) installed):
+
+```bash
+$ cd heron-java-effectively-once-example
+$ mvn assembly:assembly
+$ heron submit local \
+  target/effectivelyonce-latest-jar-with-dependencies.jar \
+  io.streaml.example.effectivelyonce.RunningSumTopology \
+  RunningSumTopology
+```
+
+> By default, Heron uses the [local filesystem](state-managers-local-fs) as a State Manager. If you're running Heron locally using the instructions in the [Quick Start Guide](getting-started-local-single-node) then you won't need to change any settings to run this example stateful topology with effectively-once semantics.
+
+From there, you can see the log output for the bolt by running the [Heron Tracker](user-manuals-heron-tracker-runbook) and [Heron UI](user-manuals-heron-ui):
+
+```bash
+$ heron-tracker
+
+# In a different terminal window
+$ heron-ui
+```
+
+> For installation instructions for the Heron Tracker and the Heron UI, see the [Quick Start Guide](getting-started-local-single-node).
+
+Once the Heron UI is running, navigate to http://localhost:8889 and click on the `RunningSumTopology` link. You should see something like this in the window that opens up:
+
+![Logical topology drilldown](assets/logical-topology.png)
+
+Click on **addition-bolt** on the right (under **1 Container and 1 Instances**) and then click on the blue **logs** button. You should see log output like this:
+
+```bash
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 0
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 68
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 93
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 117
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 123
+[2017-10-06 13:39:07 -0700] [STDOUT] stdout: The current saved sum is: 185
+```
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-packing-algorithms.md b/website2/website/versioned_docs/version-0.20.0/guides-packing-algorithms.md
new file mode 100644
index 0000000..baf39d0
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-packing-algorithms.md
@@ -0,0 +1,119 @@
+---
+id: version-0.20.0-guides-packing-algorithms
+title: Packing Algorithms
+sidebar_label: Packing Algorithms
+original_id: guides-packing-algorithms
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Overview
+
+This guide provides basic steps at using and tuning the First Fit Decreasing packing algorithm in
+order to utilize the resources efficiently. This packing algorithm aims at utilizing as few
+containers as possible, thus limiting the overall resources used. The algorithm is based on the
+First Fit Decreasing heuristic for the [Binpacking problem](https://en.wikipedia.org/wiki/Bin_packing_problem).
+The algorithm is useful in the following scenarios:
+
+1. The user does not know how many containers to use. This algorithm decides the number of
+   containers to be used and thus the user does not have to specify the number of containers
+   in the topology configuration.
+2. The user wants to minimize the resource consumption. The First Fit Decreasing packing algorithm
+   uses a minimum number of containers in order to reduce the resources allocated to the topology.
+   Note that for each new container, a stream manager process will be launched which will increase
+   the amount of resources used. Thus, reducing the number of containers can result in further
+   resource savings.
+3. The user expects that the provisioned per-instance RAM would be either the one specified in the
+   component RAM or the default value. The algorithm guarantees that the placement of instances in
+   the containers will never result in an allocation that assigns to one or more instances a smaller
+   amount of RAM than expected.
+
+To enable the First Fit Decreasing
+algorithm, update the `packing.yaml` file as follows:
+
+```yaml
+# packing algorithm for packing instances into containers
+heron.class.packing.algorithm:    org.apache.heron.packing.binpacking.FirstFitDecreasingPacking
+```
+
+The algorithm accepts as input the values of the following parameters:
+
+1. Component RAM
+2. Hint for the maximum container RAM
+   (`org.apache.heron.api.Config.TOPOLOGY_CONTAINER_MAX_RAM_HINT`)
+3. Hint for the maximum container CPU
+   (`org.apache.heron.api.Config.TOPOLOGY_CONTAINER_MAX_CPU_HINT`)
+4. Hint for the maximum container Disk
+   (`org.apache.heron.api.Config.TOPOLOGY_CONTAINER_MAX_DISK_HINT`)
+5. Padding percentage (`org.apache.heron.api.Config.TOPOLOGY_CONTAINER_PADDING_PERCENTAGE`)
+6. Component Parallelisms
+
+Parameter 1 determines the RAM requirement of each component in the topology.
+If the requirement is not specified then a default value of 1GB is used. The First Fit Decreasing
+algorithm guarantees that the amount of memory allocated to a component is either the one specified
+by the user or the default one.
+
+The parameters 2-4 determine the maximum container size with respect to RAM, CPU cores and disk.
+If one of these parameters is not specified by the user then
+the hint for the corresponding maximum container resource is set to the default resource requirement
+of 4 Heron instances.
+
+Note that these values take into account only the resources allocated for the user's instances.
+Additional per container resources for system-related processes such as the stream manager can be
+added to the maximum container size defined above. Thus, the algorithm might eventually produce
+containers slightly bigger than the boundary determined by parameters 2-4. The amount of the
+additional resources allocated to each container to account for additional internal Heron resource
+requirements, is determined by the padding percentage specified in parameter 5. If the user does
+not specify the padding percentage, then the system will use a default value of 10.
+In this case, after a container has been filled with user instances, an additional 10% of resources
+will be allocated to it.
+
+Based on these parameters, the algorithm decides how to place the instances in the containers
+and how many containers to use. More specifically, the algorithm first sorts the instances in
+decreasing order of their RAM requirements. It then picks the instance on the head of the sorted
+list and places it in the first container that has enough resources (RAM, CPU cores, disk) to
+accommodate it. If none of the existing containers have the required resources, then a new container
+is allocated. Note that if the RAM requirements of an instance exceed the value of
+parameter 2, then the algorithm returns an empty packing plan. After all the instances have
+been allocated to the containers, the algorithm adds the per-container padding resources
+as specified by parameter 5. The packing plan produced by the First Fit Decreasing packing algorithm
+can contain heterogeneous containers. Note that the algorithm does not require the number of
+containers as input.
+
+### Configuring the First Fit Decreasing Packing Algorithm
+
+1. The methods `org.apache.heron.api.Config.setContainerMaxRamHint(long bytes)`,
+   `org.apache.heron.api.Config.setContainerMaxCpuHint(float ncpus)`,
+   `org.apache.heron.api.Config.setContainerMaxDiskHint(long bytes)`
+   can be used to set parameters 2-4 when defining a topology.
+
+2. The `org.apache.heron.api.Config.setContainerPaddingPercentage(int percentage)`
+   method can be used to set the padding percentage
+   defined in parameter 5 when defining a topology.
+
+   Here's an example code snippet for setting these parameters when defining a topology:
+
+   ```java
+
+     // Set up the topology and its config
+     org.apache.heron.api.Config topologyConfig = new org.apache.heron.api.Config();
+
+     long maxContainerRam = 10L * Constants.GB;
+
+     topologyConfig.setContainerMaxRamHint(maxContainerRam);
+     topologyConfig.setContainerPaddingPercentage(5);
+   ```
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-python-topologies.md b/website2/website/versioned_docs/version-0.20.0/guides-python-topologies.md
new file mode 100644
index 0000000..2ae4d53
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-python-topologies.md
@@ -0,0 +1,364 @@
+---
+id: version-0.20.0-guides-python-topologies
+title: Python Topologies
+sidebar_label: Python Topologies
+original_id: guides-python-topologies
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> The current version of `heronpy` is [{{heronpy:version}}](https://pypi.python.org/pypi/heronpy/{{heronpy:version}}).
+
+Support for developing Heron topologies in Python is provided by a Python library called [`heronpy`](https://pypi.python.org/pypi/heronpy).
+
+> #### Python API docs
+> You can find API docs for the `heronpy` library [here](/api/python).
+
+## Setup
+
+First, you need to install the `heronpy` library using [pip](https://pip.pypa.io/en/stable/), [EasyInstall](https://wiki.python.org/moin/EasyInstall), or an analogous tool:
+
+```shell
+$ pip install heronpy
+$ easy_install heronpy
+```
+
+Then you can include `heronpy` in your project files. Here's an example:
+
+```python
+from heronpy.api.bolt.bolt import Bolt
+from heronpy.api.spout.spout import Spout
+from heronpy.api.topology import Topology
+```
+
+## Writing topologies in Python
+
+Heron [topologies](heron-topology-concepts) are networks of [spouts](topology-development-topology-api-python#spouts) that pull data into a topology and [bolts](topology-development-topology-api-python#bolts) that process that ingested data.
+
+> You can see how to create Python spouts in the [Implementing Python Spouts](topology-development-topology-api-python#spouts) guide and how to create Python bolts in the [Implementing Python Bolts](topology-development-topology-api-python#bolts) guide.
+
+Once you've defined spouts and bolts for a topology, you can then compose the topology in one of two ways:
+
+* You can use the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class inside of a main function.
+
+    Here's an example:
+
+    ```python
+    #!/usr/bin/env python
+    from heronpy.api.topology import TopologyBuilder
+
+
+    if __name__ == "__main__":
+        builder = TopologyBuilder("MyTopology")
+        # Add spouts and bolts
+        builder.build_and_submit()
+    ```
+
+* You can subclass the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class.
+
+    Here's an example:
+
+    ```python
+    from heronpy.api.stream import Grouping
+    from heronpy.api.topology import Topology
+
+
+    class MyTopology(Topology):
+        my_spout = WordSpout.spec(par=2)
+        my_bolt = CountBolt.spec(par=3, inputs={my_spout: Grouping.fields("word")})
+    ```
+
+## Defining topologies using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class
+
+If you create a Python topology using a [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder), you need to instantiate a `TopologyBuilder` inside of a standard Python main function, like this:
+
+```python
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("MyTopology")
+```
+
+Once you've created a `TopologyBuilder` object, you can add bolts using the [`add_bolt`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_bolt) method and spouts using the [`add_spout`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_spout) method. Here's an example:
+
+```python
+builder = TopologyBuilder("MyTopology")
+builder.add_bolt("my_bolt", CountBolt, par=3)
+builder.add_spout("my_spout", WordSpout, par=2)
+```
+
+Both the `add_bolt` and `add_spout` methods return the corresponding [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) object.
+
+The `add_bolt` method takes three arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt | |
+`bolt_cls` | class | The subclass of [`Bolt`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt) that defines this bolt | |
+`par` | `int` | The number of instances of this bolt in the topology | |
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+The `add_spout` method takes three arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout | |
+`spout_cls` | class | The subclass of [`Spout`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout) that defines this spout | |
+`par` | `int` | The number of instances of this spout in the topology | |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+The following is an example implementation of a word count topology in Python that uses the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class.
+
+```python
+from your_spout import WordSpout
+from your_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("WordCountTopology")
+    # piece together the topology
+    word_spout = builder.add_spout("word_spout", WordSpout, par=2)
+    count_bolt = builder.add_bolt("count_bolt", CountBolt, par=2, inputs={word_spout: Grouping.fields("word")})
+    # submit the topology
+    builder.build_and_submit()
+```
+
+Note that arguments to the main method can be passed by providing them in the
+`heron submit` command.
+
+### Topology-wide configuration
+
+If you're building a Python topology using a `TopologyBuilder`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api import api_constants
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    topology_config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    builder = TopologyBuilder("MyTopology")
+    builder.set_config(topology_config)
+    # Add bolts and spouts, etc.
+```
+
+### Launching the topology
+
+If you want to [submit](../../../operators/heron-cli#submitting-a-topology) Python topologies to a Heron cluster, they need to be packaged as a [PEX](https://pex.readthedocs.io/en/stable/whatispex.html) file. In order to produce PEX files, we recommend using a build tool like [Pants](http://www.pantsbuild.org/python_readme.html) or [Bazel](https://github.com/benley/bazel_rules_pex).
+
+Suppose you defined your topology using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class and built a `word_count.pex` file for that topology in the `~/topology` folder. You can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/word_count.pex \
+  - # No class specified
+```
+
+Note the `-` in this submission command. If you define a topology by subclassing `TopologyBuilder` you do not need to instruct Heron where your main method is located.
+
+> #### Example topologies buildable as PEXs
+> * See [this repo](https://github.com/streamlio/pants-dev-environment) for an example of a Heron topology written in Python and deployable as a Pants-packaged PEX.
+> * See [this repo](https://github.com/streamlio/bazel-dev-environment) for an example of a Heron topology written in Python and deployable as a Bazel-packaged PEX.
+
+## Defining a topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class
+
+If you create a Python topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class, you need to create a new topology class, like this:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class MyTopology(Topology):
+    my_spout = WordSpout.spec(par=2)
+    my_bolt_inputs = {my_spout: Grouping.fields("word")}
+    my_bolt = CountBolt.spec(par=3, inputs=my_bolt_inputs)
+```
+
+All you need to do is place [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s as the class attributes
+of your topology class, which are returned by the `spec()` method of
+your spout or bolt class. You do *not* need to run a `build` method or anything like that; the `Topology` class will automatically detect which spouts and bolts are included in the topology.
+
+> If you use this method to define a new Python topology, you do *not* need to have a main function.
+
+For bolts, the [`spec`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt.spec) method takes three optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt or `None` if you want to use the variable name of the returned `HeronComponentSpec` as the unique identifier for this bolt | |
+`par` | `int` | The number of instances of this bolt in the topology | |
+`config` | `dict` | Specifies the configuration for this bolt | `None`
+
+
+For spouts, the [`spec`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout.spec) method takes four optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout or `None` if you want to use the variable name of the returned `HeronComponentSpec` as the unique identifier for this spout | `None` |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used
+`par` | `int` | The number of instances of this spout in the topology | `1` |
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+Here's an example topology definition with one spout and one bolt:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class WordCount(Topology):
+    word_spout = WordSpout.spec(par=2)
+    count_bolt = CountBolt.spec(par=2, inputs={word_spout: Grouping.fields("word")})
+```
+
+### Launching
+
+If you defined your topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class,
+your main Python file should *not* contain a main method. You will, however, need to instruct Heron which class contains your topology definition.
+
+Let's say that you've defined a topology by subclassing `Topology` and built a PEX stored in `~/topology/dist/word_count.pex`. The class containing your topology definition is `topology.word_count.WordCount`. You can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/dist/word_count.pex \
+  topology.word_count.WordCount \ # Specifies the topology class definition
+  WordCountTopology
+```
+
+### Topology-wide configuration
+
+If you're building a Python topology by subclassing `Topology`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and the values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api.topology import Topology
+from heronpy.api import api_constants
+
+
+class MyTopology(Topology):
+    config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    # Add bolts and spouts, etc.
+```
+
+## Multiple streams
+
+To specify that a component has multiple output streams, instead of using a list of
+strings for `outputs`, you can specify a list of `Stream` objects, in the following manner.
+
+```python
+class MultiStreamSpout(Spout):
+    outputs = [
+        Stream(fields=["normal", "fields"], name="default"),
+        Stream(fields=["error_message"], name="error_stream"),
+    ]
+```
+
+To select one of these streams as the input for your bolt, you can simply
+use `[]` to specify the stream you want. Without any stream specified, the `default`
+stream will be used.
+
+```python
+class MultiStreamTopology(Topology):
+    spout = MultiStreamSpout.spec()
+    error_bolt = ErrorBolt.spec(inputs={spout["error_stream"]: Grouping.LOWEST})
+    consume_bolt = ConsumeBolt.spec(inputs={spout: Grouping.SHUFFLE})
+```
+
+## Declaring output fields using the `spec()` method
+
+In Python topologies, the output fields of your spouts and bolts
+need to be declared by placing `outputs` class attributes, as there is
+no `declareOutputFields()` method. `heronpy` enables you to dynamically declare output fields as a list using the
+`optional_outputs` argument in the `spec()` method.
+
+This is useful in a situation like below.
+
+```python
+class IdentityBolt(Bolt):
+    # Statically declaring output fields is not allowed
+    def process(self, tup):
+        emit([tup.values])
+
+
+class DynamicOutputField(Topology):
+    spout = WordSpout.spec()
+    bolt = IdentityBolt.spec(inputs={spout: Grouping.ALL}, optional_outputs=["word"])
+```
+
+You can also declare outputs in the `add_spout()` and the `add_bolt()`
+method for the `TopologyBuilder` in the same way.
+
+## Example topologies
+
+There are a number of example topologies that you can peruse in the [`examples/src/python`]({{% githubMaster %}}/examples/src/python) directory of the [Heron repo]({{% githubMaster %}}):
+
+Topology | File | Description
+:--------|:-----|:-----------
+Word count | [`word_count_topology.py`]({{% githubMaster %}}/examples/src/python/word_count_topology.py) | The [`WordSpout`]({{% githubMaster %}}/examples/src/python/spout/word_spout.py) spout emits random words from a list, while the [`CountBolt`]({{% githubMaster %}}/examples/src/python/bolt/count_bolt.py) bolt counts the number of words that have been emitted.
+Multiple streams | [`multi_stream_topology.py`]({{% githubMaster %}}/examples/src/python/multi_stream_topology.py) | The [`MultiStreamSpout`]({{% githubMaster %}}/examples/src/python/spout/multi_stream_spout.py) emits multiple streams to downstream bolts.
+Half acking | [`half_acking_topology.py`]({{% githubMaster %}}/examples/src/python/half_acking_topology.py) | The [`HalfAckBolt`]({{% githubMaster %}}/examples/src/python/bolt/half_ack_bolt.py) acks only half of all received tuples.
+Custom grouping | [`custom_grouping_topology.py`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py) | The [`SampleCustomGrouping`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py#L26) class provides a custom field grouping.
+
+You can build the respective PEXs for these topologies using the following commands:
+
+```shell
+$ bazel build examples/src/python:word_count
+$ bazel build examples/src/python:multi_stream
+$ bazel build examples/src/python:half_acking
+$ bazel build examples/src/python:custom_grouping
+```
+
+All built PEXs will be stored in `bazel-bin/examples/src/python`. You can submit them to Heron like so:
+
+```shell
+$ heron submit local \
+  bazel-bin/examples/src/python/word_count.pex - \
+  WordCount
+$ heron submit local \
+  bazel-bin/examples/src/python/multi_stream.pex \
+  heron.examples.src.python.multi_stream_topology.MultiStream
+$ heron submit local \
+  bazel-bin/examples/src/python/half_acking.pex - \
+  HalfAcking
+$ heron submit local \
+  bazel-bin/examples/src/python/custom_grouping.pex \
+  heron.examples.src.python.custom_grouping_topology.CustomGrouping
+```
+
+By default, the `submit` command also activates topologies. To disable this behavior, set the `--deploy-deactivated` flag.
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-simulator-mode.md b/website2/website/versioned_docs/version-0.20.0/guides-simulator-mode.md
new file mode 100644
index 0000000..43c86a0
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-simulator-mode.md
@@ -0,0 +1,80 @@
+---
+id: version-0.20.0-guides-simulator-mode
+title: Simulator Mode
+sidebar_label: Simulator Mode
+original_id: guides-simulator-mode
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Simulator mode is specifically designed for topology developers to easily debug or optimize their 
+topologies.
+
+Simulator mode simulates a heron cluster in a single JVM process, which is useful for developing and 
+testing topologies. Running topologies under simulator mode is similar to running topologies on a 
+cluster.
+
+# Develop a topology using simulator mode
+
+To run in simulator mode, use the ``SimulatorMode`` class, which is
+in ``storm-compatibility-unshaded_deploy.jar``  (under ``bazel-bin/storm-compatibility/src/java``).
+
+For example:
+
+```java
+import org.apache.heron.simulator.Simulator;
+Simulator simulator = new Simulator();
+```
+
+You can then submit topologies using the ``submitTopology`` method on the ``Simulator`` object. Just
+like the corresponding method on ``StormSubmitter``, ``submitTopology`` takes a name, a topology 
+configuration, and a topology object.
+
+For example:
+
+```java
+simulator.submitTopology("test", conf, builder.createTopology());
+```
+
+Other lifecycle methods to use with simulator mode are:
+
+```java
+simulator.killTopology("test");
+simulator.activate("test");
+simulator.deactivate("test");
+simulator.shutdown();
+```
+
+To kill a topology, one could also terminate the process.
+
+The simulator mode will run in separate threads other than the main thread. All the above methods are 
+thread-safe. This means that one could invoke these methods in other threads and monitor the 
+corresponding behaviors interactively.
+
+# Debug topology using IntelliJ
+
+Bolts and Spouts run as separate threads in simulator. To add breakpoints inside a bolt/spout, the 
+Suspend Policy of the breakpoint needs to be set to Thread. To change the Suspend Policy, right 
+click on the breakpoint as shown in the following image:
+
+![Set Breakpoint](assets/intellij-set-breakpoint.jpg)
+
+If it's not convenient to check the output and logs in the IntelliJ console, save them to a local file 
+by choosing `Run -> Edit Configurations....` as shown in the following image:
+
+![Save Console](assets/intellij-save-console.jpg)
+
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-topology-tuning.md b/website2/website/versioned_docs/version-0.20.0/guides-topology-tuning.md
new file mode 100644
index 0000000..f350ec9
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-topology-tuning.md
@@ -0,0 +1,76 @@
+---
+id: version-0.20.0-guides-topology-tuning
+title: Topology Tuning Guide
+sidebar_label: Topology Tuning Guide
+original_id: guides-topology-tuning
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Overview
+
+This guide provides basic steps at tuning a topology to utilize resources efficiently. Currently, resources are primarily measured in terms of CPU cores and RAM. In Heron, some of the basic parameters that are available to tune a topology are, but not limited to, the following:
+
+1. Container RAM
+2. Container CPU
+3. Component RAMs
+4. Component Parallelisms
+5. Number of Containers
+
+Note that tuning a topology may be difficult and may take multiple iterations. Before
+proceeding, please make sure you understand Heron's core concepts and terminology,
+as well as the reasoning behind taking these steps.
+
+### Steps to Tune a Topology
+
+1. Launch the topology with an initial estimate of resources. These can be based
+   on input data size, component logic, or experience from another working
+   topology.
+
+2. Resolve any backpressure issues by increasing the parallelism, container
+   RAM, or CPU, or by addressing the external service if the backpressure is caused by one.
+
+3. Make sure there is no spout lag. In steady state, the topology should be able
+   to consume all of the incoming data.
+
+4. Repeat steps 2 and 3 until there is no backpressure and no spout lag.
+
+5. By now, the CPU usage and RAM usage are stable. Based on daily or weekly data
+   trends, leave appropriate room for usage spikes, and cut down the rest of the
+   unused resources allocated to the topology.
+
+While these steps seem simple, it might take some time to get the topology to
+its optimal usage. Below are some of the tips that can be helpful during tuning
+or in general.
+
+### Additional Tips
+
+1. If a component RAM is provided for every component, that will be the RAM
+   assigned to those instances. Configure each component's RAM according to its
+   functionality to save on resources. By default, every instance is assigned
+   1GB of RAM, which can be higher than what it requires. Note that if container
+   RAM is specified, after setting aside some RAM for the internal components of
+   Heron, the rest is equally divided among all the instances present in the
+   container.
+
+2. A memory intensive operation in bolts can result in GC issues. Be aware of
+   objects that might enter old generation, and cause memory starvation.
+
+3. You can use `Scheme`s in spouts to sample down the data. This can be helpful
+   when dealing with issues when writing to external services, or when trying to
+   get an early estimate of usage without utilizing many resources. Note that
+   this would still require 100% resource usage in spouts.
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-troubeshooting-guide.md b/website2/website/versioned_docs/version-0.20.0/guides-troubeshooting-guide.md
new file mode 100644
index 0000000..5e978cc
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-troubeshooting-guide.md
@@ -0,0 +1,238 @@
+---
+id: version-0.20.0-guides-troubeshooting-guide
+title: Topology Troubleshooting Guide
+sidebar_label: Topology Troubleshooting Guide
+original_id: guides-troubeshooting-guide
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Overview
+
+This guide provides basic steps to troubleshoot a topology.
+These are starting steps to troubleshoot potential issues and identify root causes easily.
+
+This guide is organized into following broad sections:
+
+* [Determine topology running status and health](#running)
+* [Identify topology problems](#problem)
+* [Frequently seen issues](#frequent)
+
+This guide is useful for topology developers. Issues related to Heron configuration setup or
+its [internal architecture](heron-architecture), like `schedulers`, etc, are discussed in Configuration and Heron Developers respectively, and not discussed here.
+
+<a name="running"></a>
+
+### Determine topology running status and health
+
+#### 1. Estimate your data rate
+
+It is important to estimate how much data a topology is expected to consume.
+A useful approach is to begin by estimating a data rate in terms of items per minute. The emit count (tuples per minute) of each spout should match the data rate for the corresponding data
+stream. If spouts are not consuming and emitting the data at the same rate as it
+is produced, this is called `spout lag`.
+
+Some spouts, like `Kafka Spout` have a lag metric that can be
+directly used to measure health. It is recommended to have some kind of lag
+metric for a custom spout, so that it's easier to check and create monitoring alerts.
+
+#### 2. Absent Backpressure
+
+Backpressure initiated by an instance means that the concerned instance is not
+able to consume data at the same rate at which it is receiving it. This
+results in all spouts getting clamped (they will not consume any more data)
+until the backpressure is relieved by the instance.
+
+Backpressure is measured in milliseconds per minute, the time an instance was under backpressure.  For example, a value of 60,000 means an instance was under backpressure for the whole minute (60 seconds).
+
+A healthy topology should not have backpressure. Backpressure usually results in the
+spout lag build up since spouts get clamped, but it should not be considered as
+a cause, only a symptom.  
+
+Therefore, adjust and iterate Topology until backpressure is absent.
+
+#### 3. Absent failures
+
+Failed tuples are generally considered bad for a topology, unless it is a required feature (for instance, lowest possible latency is needed at the expense of possible dropped tuples). If
+`acking` is disabled, or even when enabled and not handled properly in spouts,
+this can result in data loss, without adding spout lag.
+
+
+<a name="problem"></a>
+### Identify topology problems
+
+#### 1. Look at instances under backpressure
+
+Backpressure metrics identifies which instances have been under backpressure. Therefore, jump directly to the logs of that instance to see what is going wrong with the
+instance. Some of the known causes of backpressure are discussed in the [frequently seen issues](#frequent) section below.
+
+#### 2. Look at items pending to be acked
+
+Spouts export a metric which is a sampled value of the number of tuples
+still in flight in the topology. Sometimes, `max-spout-pending` config limits
+the consumption rate of the topology. Increasing that spout's parallelism
+generally solves the issue.
+
+<a name="frequent"></a>
+
+### Frequently seen issues
+
+#### 1. Topology does not launch
+
+*Symptom* - Heron client fails to launch the topology.
+
+Note that heron client will execute the topology's `main` method on the local
+system, which means spouts and bolts get instantiated locally, serialized, and then
+sent over to schedulers as part of `topology.defn`. It is important to make sure
+that:
+
+1. All spouts and bolts are serializable.
+2. Don't instantiate a non-serializable attribute in constructor. Leave those to
+   a bolt's `prepare` or a spout's `open` method, which gets called during start
+   time of the instances.
+3. The `main` method should not try to access anything that your local machine
+   may not have access to.
+
+#### 2. Topology does not start
+
+We assume here that heron client has successfully launched the topology.
+
+*Symptom* - Physical plan or logical plan does not show up on UI
+
+*Possible Cause* - One or more stream managers have not yet connected to
+Tmaster.
+
+*What to do* -
+
+1. Go to the Tmaster logs for the topology. The zeroth container is reserved for
+   Tmaster. Go to the container and browse to
+
+        log-files/heron-tmaster-<topology-name><topology-id>.INFO
+
+    and see which stream managers have not yet connected. The `stmgr` ID
+    corresponds to the container number. For example, `stmgr-10` corresponds to
+    container 10, and so on.
+
+2. Visit that container to
+    see what is wrong in stream manager's logs, which can be found in `log-files`
+    directory similar to Tmaster.
+
+#### 3. Instances are not starting up
+
+A topology would not start until all the instances are running. This may be a cause of a topology not starting.
+
+*Symptom* - The stream manager logs for that instance never showed that the
+instance connected to it.
+
+*Possible Cause* - Bad configs being passed when the instance process was
+getting launched.
+
+*What to do* -
+
+1. Visit the container and browse to `heron-executor.stdout` and
+   `heron-executor.stderr` files. All commands to instantiate the instances and
+   stream managers are redirected to these files.
+
+2. Check JVM configs for anything amiss.
+
+3. If `Xmx` is too low, increase `containerRAM` or `componentRAM`. Note that
+   because heron sets aside some RAM for its internal components, like stream
+   manager and metrics manager, having a large number of instances and low
+   `containerRAM` may starve off these instances.
+
+#### 4. Metrics for a component are missing/absent
+
+*Symptom* - The upstream component is emitting data, but this component is not
+executing any, and no metrics are being reported.
+
+*Possible Cause* - The component might be stuck in a deadlock. Since one
+instance is a single JVM process and user code is called from the main thread,
+it is possible that execution is stuck in `execute` method.
+
+*What to do* -
+
+1. Check logs for one of the concerned instances. If `open` (in a spout) or
+   `prepare` (in a bolt) method is not completed, check the code logic to see
+   why the method is not completed.
+
+2. Check the code logic if there is any deadlock in a bolt's `execute` or a
+   spout's `nextTuple`, `ack` or `fail` methods. These methods should be
+   non-blocking.
+
+#### 5. There is backpressure from internal bolt
+
+A bolt is called internal if it does not talk to any external service. For example,
+the last bolt might be talking to some database to write its results, and would
+not be called an internal bolt.
+
+This is invariably due to lack of resources given to this bolt. Increasing
+parallelism or RAM (based on code logic) can solve the issue.
+
+#### 6. There is backpressure from external bolt
+
+By the same definition as above, an external bolt is the one which is accessing
+an external service. It might still be emitting data downstream.
+
+*Possible Cause 1* - External service is slowing down this bolt.
+
+*What to do* -
+
+1. Check if the external service is the bottleneck, and see if adding resources
+   to it can solve it.
+
+2. Sometimes, changing bolt logic to tune caching vs write rate can make a
+   difference.
+
+*Possible Cause 2* - Resource crunch for this bolt, just like an internal bolt
+above.
+
+*What to do* -
+
+1. This should be handled in the same way as an internal bolt - by increasing the
+   parallelism or RAM for the component.
+
+#### 7. Debugging Java topologies.
+The jar containing the code for building the topology, along with the spout and bolt 
+code, is deployed in the containers. A Heron Instance is started in each container, 
+with each Heron Instance responsible for running a bolt or a spout. One way to debug 
+Java code is to write debug logs to the log files for tracking and debugging purposes.
+
+Logging is the preferred mode for debugging as it makes it easier to find issues in both
+the short and long term in the topology. If you want to perform step-by-step debugging
+of a JVM process, however, this can be achieved by enabling remote debugging for the Heron Instance.
+
+Follow these steps to enable remote debugging:
+
+1. Add the Java options to enable debugging on all the Heron Instances that will be started.
+   This can be achieved by adding the options ```-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n```. Here's an example:
+
+    ```java
+    conf.setDebug(true);
+    conf.setMaxSpoutPending(10);
+    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
+    conf.setComponentJvmOptions("word",
+           "-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n");
+    conf.setComponentJvmOptions("exclaim1",
+           "-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n");
+    ```
+
+2. Use the steps given in the tutorial to set up remote debugging with Eclipse:
+   [set up Remote Debugging in Eclipse](http://help.eclipse.org/neon/index.jsp?topic=%2Forg.eclipse.jdt.doc.user%2Ftasks%2Ftask-remotejava_launch_config.htm).
+   To set up remote debugging with IntelliJ, use the [remote debugging instructions](https://www.jetbrains.com/help/idea/2016.2/run-debug-configuration-remote.html).
+ 
+3. Once the topology is activated, start the debugger at ```localhost:{port}``` for a standalone
+   local deployment, or ``` {IP}/{hostname}:{port}``` for a multi-container remote deployment. You will then be able to debug the code step by step.
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-tuple-serialization.md b/website2/website/versioned_docs/version-0.20.0/guides-tuple-serialization.md
new file mode 100644
index 0000000..7e8819c
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-tuple-serialization.md
@@ -0,0 +1,56 @@
+---
+id: version-0.20.0-guides-tuple-serialization
+title: Tuple Serialization
+sidebar_label: Tuple Serialization
+original_id: guides-tuple-serialization
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+The tuple is Heron's core data type. Heron's native
+[`Tuple`](/api/org/apache/heron/api/tuple/Tuple.html) interface supports
+a broad range of [basic data types](guides-data-model#using-tuples), such as
+strings, integers, and booleans, out of the box, but tuples can contain values
+of any type. You can use data types beyond the core types by providing a custom
+serializer using the instructions below.
+
+## Kryo
+
+Heron uses [Kryo](https://github.com/EsotericSoftware/kryo) for tuple
+serialization and deserialization. You can create a custom tuple serializer by
+extending Kryo's abstract
+[`Serializer`](http://code.google.com/p/kryo/source/browse/trunk/src/com/esotericsoftware/kryo/Serializer.java)
+class. More information can be found in [Kryo's
+documentation](https://github.com/EsotericSoftware/kryo#serializers).
+
+## Registering a Serializer
+
+Once you've created a custom Kryo serializer for a type:
+
+1. Make sure that the code for the serializer is on Heron's
+[classpath](../compiling/compiling/#classpath).
+2. Register the class with Kryo using the `topology.kryo.register` parameter for
+your topology. Here's an example:
+
+  ```yaml
+  topology.kryo.register:
+    - biz.acme.heron.datatypes.CustomType1 # This type will use the default FieldSerializer
+    - biz.acme.heron.datatypes.CustomType2: com.example.heron.serialization.CustomSerializer
+  ```
+
+Once your custom serializer is on Heron's classpath and Heron is aware of its
+existence, you must [re-compile](compiling-overview) Heron.
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/guides-ui-guide.md b/website2/website/versioned_docs/version-0.20.0/guides-ui-guide.md
new file mode 100644
index 0000000..30a082b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/guides-ui-guide.md
@@ -0,0 +1,206 @@
+---
+id: version-0.20.0-guides-ui-guide
+title: Heron UI Guide
+sidebar_label: Heron UI Guide
+original_id: guides-ui-guide
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Overview
+
+This guide describes how to make best use of Heron UI for monitoring and
+debugging topologies.
+
+The UI provides a lot of information about a topology or a part of it quickly,
+thus reducing debugging time considerably. Some of these features are
+listed below. A complete set of features can be found in following sections.
+
+1. See logical plan of a topology
+2. See physical plan of a topology
+3. Configs of a topology
+4. See some basic metrics for each of the instances and components
+5. Links to get logs, memory histogram, jstack, heapdump and exceptions of
+   a particular instance
+
+#### Topologies Page
+
+Heron UI is a user interface that uses the Heron Tracker to display detailed, colorful visual representations of topologies, including the logical and physical plan for each topology. 
+
+Start the Heron Tracker using `heron-tracker &`, which uses the default heron_tracker.yaml configuration file. It's a centralized gateway for cluster-wide information about topologies, including which topologies are running, being launched, being killed, etc. It exposes a JSON REST endpoint and relies on ZooKeeper nodes.
+
+Launch the Heron UI with the command:
+
+```bash
+heron-ui &
+```
+
+By default Heron UI will be started at `http://localhost:8888`
+
+Below is the home page of Heron UI.
+
+The following information or actions can be found on this page.
+
+1. List of all topologies
+2. Number of topologies filtered after search (total by default)
+3. A topology's overview
+4. Filter the topologies using `cluster`
+5. Filter the topologies using string matching in names, clusters, environs,
+   roles, versions, or submitters
+6. Sort the topologies based on a particular column
+7. Click on the topology name to find more info about the topology
+
+![All topologies](assets/all-topologies.png)
+
+#### Topology Page
+
+Below is the main page to monitor a topology.
+
+1. Name of the topology
+2. [Logical plan](heron-topology-concepts#logical-plan) of the topology
+3. [Physical plan](heron-topology-concepts#physical-plan) of the topology
+4. Health metrics for the topology
+5. General info about the topology
+6. General metrics for the topology
+7. Click components for more details
+8. Click instances for more details
+9. Click on aggregated metrics to color instances by metrics
+10. Link to topology level configs
+11. Link to job page only if the scheduler provides a link
+   <!-- (TODO: Link to this guide) -->
+12. Link to viz dashboard for this topology only if Tracker is configured with
+   one. <!-- (TODO: Link to this configuration) -->
+
+![Topology1](assets/topology1.png)
+
+![Topology2](assets/topology2.png)
+
+Each node in logical plan can be clicked for more specific info about that
+component.
+
+1. Averaged or max metrics for all instances of this component
+2. Aggregated metrics for all instances of this component
+3. List of all instances and their aggregated metrics
+4. [Instance level operations](#instance-actions-pages), which are described in more details below
+
+![Topology Component](assets/topology-component.png)
+
+Clicking on an instance will highlight that instance in the list.
+
+1. Aggregated metrics are only for this instance
+2. Quick access to logs, exceptions and job pages for this instance
+3. Component counters are still aggregated for all instances
+4. The selected instance is highlighted
+
+![Topology Instance](assets/topology-instance.png)
+
+#### Aggregate Topology Metrics
+
+Selecting a metric will highlight the components and instances based on their
+health with respect to the metric, green being healthy, red indicating a problem.
+This is a quick way to find out which instances are having issues.
+
+![Topology Capacity](assets/topology-capacity.png)
+
+![Topology Failures](assets/topology-failures.png)
+
+#### Config Page
+
+These are the topology configurations <!-- (TODO: Add link to Topology
+Configurations) --> that your topology is configured with. Note that spout and
+bolt level configurations are not part of topology config.
+
+![Config](assets/config.png)
+
+#### <a name="instance-actions-pages">Instance Action Pages</a>
+
+These actions are available for all the instances. They are described in the
+next sections.
+
+![Instance Links](assets/topology-instance-links.png)
+
+#### Logs Page
+
+These are the logs generated by the selected instance. The whole logs file can
+also be downloaded.
+
+![Logs](assets/logs.png)
+
+#### Job Page
+
+Below is the directory view of the container. All instances from a container
+will point to the same job page. Following information is available on this page,
+amongst other things.
+
+1. The jar or tar file associated with this topology
+2. Logs for heron-executor <!-- TODO: Link heron-executor -->
+3. `log-files` folder which has instance logs, as well as `stream manager` or
+   `tmaster` logs.
+
+![Jobpage](assets/jobpage1.png)
+
+![Jobpage logfiles](assets/jobpage2-logfiles.png)
+
+#### Exceptions Page
+
+This page lists all exceptions logged by this instance. The exceptions are
+deduplicated, and for each exception, the page shows the number of times this
+exception occurred, the latest and the oldest occurrence times.
+
+![Exceptions](assets/exceptions.png)
+
+#### PID Page
+
+This link can be used to find the process ID for an instance. Since each instance
+runs in its own JVM process, this will be unique for a host. The PID is also
+used for other tasks, such as getting jstack or heap dump for an instance.
+
+![PID](assets/pid.png)
+
+#### Jstack Page
+
+Click on this link to run the `jstack` command on the host against the PID for
+the instance. The output of the command is printed on the page in the browser
+itself.
+
+![Jstack](assets/jstack.png)
+
+#### Memory Histogram Page
+
+Click on this link to run the `jmap -histo` command on the host against the PID
+for the instance. The output of the command is printed on the page in the
+browser itself.
+
+![Histo](assets/histo.png)
+
+#### Memory Dump page
+
+Click on this link to run the `jmap -dump:format=b,file=/tmp/heap.bin` command
+against the PID for the instance. Follow the instructions on the page to download
+the heap dump file. This link does not download the file.
+
+![Memory Dump](assets/dump.png)
+
+#### Kill Heron UI server
+
+To kill Heron UI server run the following command:
+
+```bash
+kill $(pgrep -f heron-ui)
+```
+
+To stop all the Heron tools, kill the Heron Tracker as well using `kill $(pgrep -f heron-tracker)`.
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-architecture.md b/website2/website/versioned_docs/version-0.20.0/heron-architecture.md
new file mode 100644
index 0000000..1b4c025
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-architecture.md
@@ -0,0 +1,318 @@
+---
+id: version-0.20.0-heron-architecture
+title: Heron Architecture
+sidebar_label: Heron Architecture
+original_id: heron-architecture
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron is a general-purpose stream processing engine designed for speedy performance,
+low latency, isolation, reliability, and ease of use for developers and administrators
+alike. Heron was [open
+sourced](https://blog.twitter.com/engineering/en_us/topics/open-source/2016/open-sourcing-twitter-heron.html)
+by [Twitter](https://twitter.github.io/).
+
+> We recommend reading [Heron's Design Goals](heron-design-goals) and [Heron Topologies](../topologies) in conjunction with this guide.
+
+The sections below:
+
+* clarify the distinction between Heron and [Apache Storm](#relationship-with-apache-storm)
+* describe Heron's basic [system architecture](#basic-system-architecture)
+* explain the role of major [components](#topology-components) of Heron's architecture
+* provide an overview of what happens when you [submit a topology](#topology-submission)
+
+## Topologies
+
+You can think of a Heron cluster as a mechanism for managing the lifecycle of
+stream-processing entities called **topologies**. Topologies can be written in
+Java or Python.
+
+
+More information can be found
+in the [Heron Topologies](heron-topology-concepts) document.
+
+## Relationship with Apache Storm
+
+[Apache Storm](https://storm.apache.org) is a stream processing system originally
+open sourced by Twitter in 2011. Heron, also developed at Twitter, was created
+to overcome many of the shortcomings that Storm exhibited when run in production
+at Twitter scale.
+
+Shortcoming | Solution
+:-----------|:--------
+Resource isolation | Heron uses process-based isolation both between topologies and between containers within topologies, which is more reliable and easier to monitor and debug than Storm's model, which involves shared communication threads in the same [JVM](https://en.wikipedia.org/wiki/Java_virtual_machine)
+Resource efficiency | Storm requires [scheduler](#schedulers) resources to be provisioned up front, which can lead to over-provisioning. Heron avoids this problem by using cluster resources on demand.
+Throughput | For a variety of architectural reasons, Heron has consistently been shown to provide much higher throughput and much lower latency than Storm
+
+### Storm compatibility
+
+Heron was built to be fully backwards compatible with Storm and thus to enable
+[topology](heron-topology-concepts) developers to use Heron to run topologies created using
+Storm's [topology API](http://storm.apache.org/about/simple-api.html).
+
+Currently, Heron is compatible with topologies written using:
+
+1. The new [Heron Streamlet API](topology-development-streamlet-api) 
+1. The [Heron Topology API](topology-development-topology-api-java)
+
+If you have existing topologies created using the [Storm API](http://storm.apache.org/about/simple-api.html),
+you can make them Heron compatible by following [these simple instructions](../../migrate-storm-to-heron)
+
+Heron was initially developed at Twitter with a few main goals in mind:
+
+1. Providing blazing-fast performance, reliability, and easy troubleshooting by leveraging a process-based computing model and full topology isolation.
+2. Retaining full compatibility with Storm's data model and [topology API](http://storm.apache.org/about/simple-api.html).
+
+For a more in-depth discussion of Heron and Storm, see the [Twitter Heron:
+Stream Processing at Scale](http://dl.acm.org/citation.cfm?id=2742788) paper.
+
+Heron thus enables you to achieve major gains along a variety of axes---throughput,
+latency, reliability---without needing to sacrifice engineering resources.
+
+## Heron Design Goals
+
+For a description of the core goals of Heron as well as the principles that have
+guided its development, see [Heron Design Goals](heron-design-goals).
+
+## Basic system architecture
+
+
+# Topology Components
+
+From an architectural standpoint, Heron was built as an interconnected set of modular
+components. 
+
+
+The following core components of Heron topologies are discussed in depth in
+the sections below:
+
+* [Topology Master](#topology-master)
+* [Containers](#containers)
+* [Stream Manager](#stream-manager)
+* [Heron Instance](#heron-instance)
+* [Metrics Manager](#metrics-manager)
+* [Heron Tracker](#heron-tracker)
+
+### Topology Master
+
+The **Topology Master** (TM) manages a topology throughout its entire lifecycle,
+from the time it's submitted until it's ultimately killed. When `heron` deploys
+a topology it starts a single TM and multiple [containers](#containers).
+The **TM** creates an ephemeral [ZooKeeper](http://zookeeper.apache.org) node to
+ensure that there's only one **TM** for the topology and that the **TM** is easily
+discoverable by any process in the topology. The **TM** also constructs the [physical
+plan](heron-topology-concepts#physical-plan) for a topology which it relays to different
+components.
+
+![Topology Master](assets/tmaster.png)
+
+#### Topology Master Configuration
+
+TMs have a variety of [configurable
+parameters](cluster-config-tmaster) that you can adjust at each
+phase of a topology's [lifecycle](heron-topology-concepts#topology-lifecycle).
+
+### Containers
+
+Each Heron topology consists of multiple **containers**, each of which houses
+multiple [Heron Instances](#heron-instance), a [Stream
+Manager](#stream-manager), and a [Metrics Manager](#metrics-manager). Containers
+communicate with the topology's **TM** to ensure that the topology forms a fully
+connected graph.
+
+For an illustration, see the figure in the [Topology Master](#topology-master)
+section above.
+
+> In Heron, all topology containerization is handled by the scheduler, be it [Mesos](schedulers-mesos-local-mac), [Kubernetes](schedulers-k8s-with-helm), [YARN](schedulers-k8s-by-hand), or something else. Heron schedulers typically use [cgroups](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01) to manage Heron topology processes.
+
+### Stream Manager
+
+The **Stream Manager** (SM) manages the routing of tuples between topology
+components. Each [Heron Instance](#heron-instance) in a topology connects to its
+local **SM**, while all of the **SMs** in a given topology connect to one another to
+form a network. Below is a visual illustration of a network of **SMs**:
+
+![Heron Data Flow](assets/data-flow.png)
+
+In addition to being a routing engine for data streams, **SMs** are responsible for
+propagating [back pressure](https://en.wikipedia.org/wiki/Back_pressure)
+within the topology when necessary. Below is an illustration of back pressure:
+
+![Back Pressure 1](assets/backpressure1.png)
+
+In the diagram above, assume that bolt **B3** (in container **A**) receives all
+of its inputs from spout **S1**. **B3** is running more slowly than other
+components. In response, the SM for container **A** will refuse input from the
+SMs in containers **C** and **D**, which will lead to the socket buffers in
+those containers filling up, which could lead to throughput collapse.
+
+In a situation like this, Heron's back pressure mechanism will kick in. The **SM**
+in container **A** will send a message to all the other **SMs**, then all
+**SMs** will cut off inputs from local spouts and no new data will be accepted
+into the topology.
+
+![Back Pressure 2](assets/backpressure2.png)
+
+Once the lagging bolt (**B3**) begins functioning normally, the **SM** in container
+**A** will notify the other **SMs** and stream routing within the topology will
+return to normal.
+
+#### Stream Manager Configuration
+
+**SMs** have a variety of [configurable
+parameters](state-managers-local-fs) that you can adjust at each
+phase of a topology's [lifecycle](heron-topology-concepts#topology-lifecycle).
+
+### Heron Instance
+
+A **Heron Instance** (HI) is a process that handles a single task of a
+[spout](../topologies#spouts) or [bolt](heron-topology-concepts#bolts), which allows
+for easy debugging and profiling.
+
+Currently, Heron only supports Java, so all
+**HIs** are [JVM](https://en.wikipedia.org/wiki/Java_virtual_machine) processes, but
+this will change in the future.
+
+#### Heron Instance Configuration
+
+**HIs** have a variety of [configurable
+parameters](cluster-config-instance) that you can adjust at
+each phase of a topology's [lifecycle](heron-topology-concepts##topology-lifecycle).
+
+### Metrics Manager
+
+Each topology runs a **Metrics Manager** (MM) that collects and exports metrics from
+all components in a [container](#containers). It then routes those metrics to
+both the [Topology Master](#topology-master) and to external collectors, such as
+[Scribe](https://github.com/facebookarchive/scribe),
+[Graphite](http://graphite.wikidot.com/), or analogous systems.
+
+You can adapt Heron to support additional systems by implementing your own
+[custom metrics sink](extending-heron-metric-sink).
+
+# Cluster-level Components
+
+All of the components listed in the sections above can be found in each
+topology. The components listed below are cluster-level components that function
+outside of particular topologies.
+
+### Heron CLI
+
+Heron has a **CLI** tool called `heron` that is used to manage topologies.
+Documentation can be found in [Managing
+Topologies](user-manuals-heron-cli).
+
+### Heron API server
+
+The [Heron API server](deployment-api-server) handles all requests from
+the [Heron CLI tool](#heron-cli), uploads topology artifacts to the designated storage
+system, and interacts with the scheduler.
+
+> When running Heron [locally](getting-started-local-single-node), you won't need to deploy
+> or configure the Heron API server.
+
+### Heron Tracker
+
+The **Heron Tracker** (or just Tracker) is a centralized gateway for
+cluster-wide information about topologies, including which topologies are
+running, being launched, being killed, etc. It relies on the same
+[ZooKeeper](http://zookeeper.apache.org) nodes as the topologies in the cluster
+and exposes that information through a JSON REST API. The Tracker can be
+run within your Heron cluster (on the same set of machines managed by your
+Heron [scheduler](schedulers-local)) or outside of it.
+
+Instructions on running the tracker including JSON API docs can be found in [Heron
+Tracker](user-manuals-heron-tracker-runbook).
+
+### Heron UI
+
+**Heron UI** is a rich visual interface that you can use to interact with
+topologies. Through **Heron UI** you can see color-coded visual representations of
+the [logical](heron-topology-concepts#logical-plan) and
+[physical](heron-topology-concepts#physical-plan) plan of each topology in your cluster.
+
+For more information, see the [Heron UI](user-manuals-heron-ui) document.
+
+<!--
+## Topology Submit Sequence
+
+[Topology Lifecycle](../topologies#topology-lifecycle) describes the lifecycle states of a Heron
+topology. The diagram below illustrates the sequence of interactions amongst the Heron architectural
+components during the `submit` and `deactivate` client actions. Additionally, the system interaction
+while viewing a topology on the Heron UI is shown.
+
+The source for this diagram lives here:
+https://docs.google.com/drawings/d/10d1Q_VO0HFtOHftDV7kK6VbZMVI5EpEYHrD-LR7SczE
+
+<img src="assets/topology-submit-sequence-diagram.png" alt="Topology Sequence Diagram"/>
+-->
+
+## Topology submission
+
+The diagram below illustrates what happens when you submit a Heron topology:
+
+![Topology submission](https://www.lucidchart.com/publicSegments/view/766a2ee5-7a07-4eff-9fde-dd79d6cc355e/image.png)
+
+Component | Description
+:---------|:-----------
+Client | When a topology is submitted using the [`heron submit`](user-manuals-heron-cli#submitting-a-topology) command of the [Heron CLI tool](user-manuals-heron-cli), it first executes the `main` function of the topology and creates a `.defn` file containing the topology's [logical plan](heron-topology-concepts#logical-plan). Then, it runs [`org.apache.heron.scheduler.SubmitterMain`](/api/java/org/apache/heron/scheduler/SubmitterMain.html), which is responsible for uploading the topology artifact to the [Heron API server](deployment-api-server).
+Heron API server | When the [Heron API server](deployment-api-server) has been notified that a topology is being submitted, it does two things. First, it uploads the topology artifacts (a JAR for Java or a PEX for Python, plus a few other files) to a storage service; Heron supports multiple uploaders for a variety of storage systems, such as [Amazon S3](uploaders-amazon-s3), [HDFS](uploaders-hdfs), and the [local filesystem](uploaders-local-fs).
+Heron scheduler | When the Heron CLI (client) submits a topology to the Heron API server, the API server notifies the Heron scheduler and also provides the scheduler with the topology's [logical plan](heron-topology-concepts#logical-plan), [physical plan](heron-topology-concepts#physical-plan), and some other artifacts. The scheduler, be it [Mesos](schedulers-mesos-local-mac), [Aurora](schedulers-aurora-cluster), the [local filesystem](schedulers-local), or something else, then deploys the topology using containers.
+Storage | When the topology is deployed to containers by the scheduler, the code running in those containers then downloads the remaining necessary topology artifacts (essentially the code that will run in those containers) from the storage system.
+
+* Shared Services
+
+    When the main scheduler (`org.apache.heron.scheduler.SchedulerMain`) is invoked
+    by the launcher, it fetches the submitted topology artifact from the
+    topology storage, initializes the **State Manager**, and prepares a physical plan that
+    specifies how multiple instances should be packed into containers. Then, it starts
+    the specified scheduler, such as `org.apache.heron.scheduler.local.LocalScheduler`,
+    which invokes the `heron-executor` for each container.
+
+* Topologies
+
+    `heron-executor` process is started for each container and is responsible for
+    executing the **Topology Master** or **Heron Instances** (Bolt/Spout) that are
+    assigned to the container. Note that the **Topology Master** is always executed
+    on container 0. When `heron-executor` executes normal **Heron Instances**
+    (i.e. except for container 0), it first prepares
+    the **Stream Manager** and the **Metrics Manager** before starting
+    `org.apache.heron.instance.HeronInstance` for each instance that is
+    assigned to the container.
+    
+    **Heron Instance** has two threads: the gateway thread and the slave thread.
+    The gateway thread is mainly responsible for communicating with the **Stream Manager**
+    and the **Metrics Manager** using `StreamManagerClient` and `MetricsManagerClient`
+    respectively, as well as sending/receiving tuples to/from the slave
+    thread. On the other hand, the slave thread runs either Spout or Bolt
+    of the topology based on the physical plan.
+    
+    When a new **Heron Instance** is started, its `StreamManagerClient` establishes
+    a connection and registers itself with the **Stream Manager**.
+    After the successful registration, the gateway thread sends its physical plan to
+    the slave thread, which then executes the assigned instance accordingly.
+    
+
+## Codebase
+
+Heron is primarily written in **Java**, **C++**, and **Python**.
+
+A detailed guide to the Heron codebase can be found
+[here](compiling-code-organization).
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-delivery-semantics.md b/website2/website/versioned_docs/version-0.20.0/heron-delivery-semantics.md
new file mode 100644
index 0000000..a2a2f66
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-delivery-semantics.md
@@ -0,0 +1,81 @@
+---
+id: version-0.20.0-heron-delivery-semantics
+title: Heron Delivery Semantics
+sidebar_label: Heron Delivery Semantics
+original_id: heron-delivery-semantics
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron provides support for multiple delivery semantics, and you can select delivery semantics on a topology-by-topology basis. Thus, if you have topologies for which [at-most-once](#available-semantics) semantics are perfectly acceptable, for example, you can run them alongside topologies with more stringent semantics (such as effectively once).
+
+## Available semantics
+
+Heron supports three delivery semantics:
+
+Semantics | Description | When to use?
+:---------|:------------|:------------
+At most once | Heron processes tuples using a best-effort strategy. With at-most-once semantics, it's possible that some of the tuples delivered into the system may be lost due to some combination of processing, machine, and network failures. What sets at-most-once semantics apart from the others is that Heron will not attempt to retry a processing step upon failure, which means that the tuple may fail to be delivered. | When some amount of data loss is acceptable
+At least once | Tuples injected into the Heron topology are guaranteed to be processed at least once; no tuple will fail to be processed. It's possible, however, that any given tuple is processed more than once in the presence of various failures, retries, or other contingencies. | When you need to guarantee no data loss
+Effectively once | Heron ensures that the data it receives is processed effectively once---even in the presence of various failures---leading to accurate results. **This applies only to [stateful topologies](#stateful-topologies)**. "Effectively" in this case means that there's a guarantee that tuples that cause [state changes](#stateful-topologies) will be processed once (that is, they will have *an effect* on state once). | When you're using [stateful topologies](#stateful-topologies) and need strong processing guarantees
+
+You can see a visual representation of these different delivery semantics in the figure below:
+
+![Heron delivery semantics](https://www.lucidchart.com/publicSegments/view/f35df5fd-bfc1-4270-aad6-40766abae024/image.png)
+
+In this diagram, you see three Heron topologies, each of which is processing a series of tuples (`(1,2,3)`, `(7,8,11)`, etc.).
+
+* The topology in the upper left offers at-most-once semantics, which means that each tuple will either be delivered once or fail to be processed. In this case, the `(1,5)` tuple fails to be processed.
+* The topology in the lower left offers at-least-once semantics, which means that each tuple will be delivered either once or more than once. In this case, the `(7,8,11)` tuple is processed more than once (perhaps due to a network glitch or a retry).
+* The topology in the upper right offers effectively once semantics, which means that every tuple is delivered one time and one time only. This does *not* mean that every tuple is processed exactly one time. Some tuples may be processed multiple times *within the topology*, but we use the "effectively once" terminology here to express that
+
+## Requirements for effectively once
+
+In order to use effectively-once semantics with a topology, that topology must satisfy two conditions:
+
+1. It must be a [stateful, idempotent topology](#stateful-topologies).
+2. The input stream into the topology must be strongly consistent. In order to provide effectively-once semantics, topologies need to be able to "rewind" state in case of failure. The state that it "rewinds" needs to be reliable state---preferably durably stored.
+
+    If the input to the topology is, for example, a messaging system that cannot ensure stream consistency, then effectively-once semantics cannot be applied, as the state "rewind" may return differing results. To put it somewhat differently, Heron can only provide delivery semantics as stringent as its data input sources can themselves provide.
+
+### Exactly-once semantics?
+
+There has been a lot of discussion recently surrounding so-called "exactly-once" processing semantics. We'll avoid this term in the Heron documentation because we feel that it's misleading. "Exactly-once" semantics would mean that no processing step is ever performed more than once---and thus that no processing step is ever retried.
+
+It's important to always keep in mind that *no system* can provide exactly-once semantics in the face of failures (as [this article](http://bravenewgeek.com/you-cannot-have-exactly-once-delivery) argues). But that's okay because they don't really need to; the truly important thing is that a stream processing system be able to recover from failures by "rewinding" state to a previous, pre-failure point and to re-attempt to apply processing logic. We use the term **effectively once**, following [Viktor Klang](https://twitter.com/viktorklang/status/789036133434978304), for this style of semantics.
+
+Heron *can* provide effectively-once guarantees if a topology meets the conditions [outlined above](#requirements-for-effectively-once), but it cannot provide "exactly-once" semantics.
+
+## Stateful topologies
+
+The Heron topologies that you create can be either stateful or non-stateful.
+
+* In **stateful topologies**, each component must implement an interface that requires it to store its state every time it processes a tuple (both spouts *and* bolts must do so).
+* In **non-stateful topologies**, there is no requirement that any processing components store a state snapshot. Non-stateful topologies can provide at-most-once or at-least-once semantics, but never effectively-once semantics.
+
+> Heron currently supports two state managers: [ZooKeeper](state-managers-zookeeper) and the [local filesystem](state-managers-local-fs), although others are currently under development.
+
+Stateful topologies, in turn, are of two types:
+
+* **Idempotent** stateful topologies are stateful topologies in which applying the processing graph to an input more than once, it will continue to return the same result. A basic example is multiplying a number by 0. The first time you do so, the number will change (always to 0), but if you apply that transformation again and again, it will not change.
+
+    For topologies to provide effectively-once semantics, they need to transform tuple inputs idempotently as well. If they don't, and applying the topology's processing graph multiple times yields different results, then effectively-once semantics *cannot* be achieved.
+
+    If you'd like to create idempotent stateful topologies, make sure to write tests to ensure that idempotency requirements are being met.
+* **Non-idempotent** stateful topologies are stateful topologies that do not apply processing logic along the model of "multiply by zero" and thus cannot provide effectively-once semantics. An example of a non-idempotent operation is adding a number to a running total: applying it more than once changes the result.
+
+> Remember: effectively-once semantics can only be applied to topologies that are: (a) stateful and (b) idempotent.
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-design-goals.md b/website2/website/versioned_docs/version-0.20.0/heron-design-goals.md
new file mode 100644
index 0000000..7c4e7e6
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-design-goals.md
@@ -0,0 +1,99 @@
+---
+id: version-0.20.0-heron-design-goals
+title: Heron Design Goals
+sidebar_label: Heron Design Goals
+original_id: heron-design-goals
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+From the beginning, Heron was envisioned as a new kind of stream processing
+system, built to meet the most demanding of technological requirements, to
+handle even the most massive of workloads, and to meet the needs of organizations
+of all sizes and degrees of complexity. Amongst these requirements:
+
+* The ability to process billions of events per minute
+* Extremely low end-to-end latency
+* Predictable behavior regardless of scale and in the face of issues like extreme traffic spikes and pipeline congestion
+* Simple administration, including:
+  * The ability to deploy on shared infrastructure
+  * Powerful monitoring capabilities
+  * Fine-grained configurability
+* Easy debuggability
+
+To meet these requirements, a few core design goals have guided---and continue to
+guide---Heron's development:
+
+* [Modularity](#modularity)
+* [Extensibility](#extensibility)
+* [Isolation](#isolation)
+* [Constrained resource usage](#constrained-resource-usage)
+* [Apache Storm compatibility](#apache-storm-compatibility)
+* [Backpressure handling](#backpressure-handling)
+* [Multiple delivery semantics](#multiple-delivery-semantics)
+
+### Modularity
+
+Heron was designed to serve a wide range of requirements, use cases, platforms,
+programming languages and so on. In order to suit varying---and often
+unforeseeable---needs, Heron provides support for multiple:
+
+* [schedulers](heron-architecture#schedulers)
+* metrics sinks
+* operating systems and platforms
+* topology [uploaders](heron-architecture#uploaders)
+
+### Extensibility
+
+Due to its fundamentally [modular](#modularity) character, Heron is remarkably
+easy to extend to meet your needs, with simple APIs that you can use to add
+support for new schedulers, programming languages (for topologies), topology
+uploaders, etc.
+
+### Isolation
+
+Heron topologies should be process based rather than
+thread based, and each process should run in isolation for the sake of easy
+debugging, profiling, and troubleshooting.
+
+### Constrained resource usage
+
+Heron topologies should use only those resources that they are
+initially allocated and never exceed those bounds. This makes Heron safe to run
+in shared infrastructure.
+
+### Apache Storm compatibility
+
+Although Heron has a [Functional API](topology-development-streamlet-api)
+that we recommend for all future topology development, Heron is fully API and
+data model compatible with [Apache Storm](http://storm.apache.org), making it
+easy for developers to transition from Storm to Heron.
+
+### Backpressure handling
+
+In a distributed system like Heron, there are no guarantees that all system
+components will execute at the same speed. Heron has built-in [back pressure
+mechanisms](heron-architecture#stream-manager) to ensure that topologies can
+self-adjust in case components lag.
+
+### Multiple delivery semantics
+
+Heron provides support for
+[at-most-once](heron-delivery-semantics#available-semantics),
+[at-least-once](heron-delivery-semantics#available-semantics), and
+[effectively-once](heron-delivery-semantics#available-semantics) processing
+semantics.
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-resources-resources.md b/website2/website/versioned_docs/version-0.20.0/heron-resources-resources.md
new file mode 100644
index 0000000..c0abdae
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-resources-resources.md
@@ -0,0 +1,69 @@
+---
+id: version-0.20.0-heron-resources-resources
+title: Heron Resources
+sidebar_label: Heron Resources
+original_id: heron-resources-resources
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron Resources outside this documentation:
+
+## Books
+
+* [Stream Processing with Heron](http://shop.oreilly.com/product/0636920203131.do)
+
+## Conference & Journal Papers
+
+* [Streaming@Twitter - Bulletin of the IEEE Computer Society Technical Committee on Data Engineering](http://sites.computer.org/debull/A15dec/p15.pdf) (Jul, 2016)
+* [Twitter Heron: Stream Processing at
+  Scale - SIGMOD’15](http://dl.acm.org/citation.cfm?id=2742788) (May, 2015)
+* [Storm@Twitter - SIGMOD'14](http://dl.acm.org/citation.cfm?id=2595641) (Jun, 2014)
+
+## Videos
+
+* [Twitter Heron on Apache Aurora - #compute event @Twitter](https://m.youtube.com/watch?v=ua0ufmr9sQI&feature=youtu.be) (Apr, 2016)
+* [Flying Faster with Heron - InfoQ](http://www.infoq.com/presentations/twitter-heron) (Apr, 2016)
+* [Twitter Heron: Stream Processing at
+  Scale - @Scale](https://www.youtube.com/watch?v=pUaFOuGgmco) (Sep, 2015)
+* [Stream Processing and Anomaly Detection - Velocity O'Reilly - Note: requires O'Reilly login](
+https://player.oreilly.com/videos/9781491927977?login=true) (Jun, 2015)
+* [Building Apache Heron - BazelCon 2018](https://www.youtube.com/watch?v=yBTSfA4YDtY&t=1s) (Oct, 2018)
+
+##  Blog Posts
+* [Leaving the Nest: Heron donated to Apache Software Foundation](https://blog.twitter.com/engineering/en_us/topics/open-source/2018/heron-donated-to-apache-software-foundation.html) (Feb, 2018)
+* [Open Sourcing Twitter Heron](https://blog.twitter.com/2016/open-sourcing-twitter-heron) (May, 2016)
+* [Flying Faster with Twitter
+  Heron](https://blog.twitter.com/2015/flying-faster-with-twitter-heron) (June, 2015)
+* [Deploying Heron on a Cluster of Machines with Apache Aurora](http://streamanalytics.blogspot.com/2016/06/deploying-heron-on-cluster-of-machines.html) (Supun Kamburugamuve, Jun, 2016)
+* [Setting up Heron Locally with Apache Aurora](http://pulasthisupun.blogspot.com/2016/06/setting-up-heron-cluster-with-apache.html) (Pulasthi Supun, Jun, 2016)
+* [Introducing Heron’s ECO; A Flexible Way To Manage Topologies](https://1904labs.com/2018/02/14/introducing-herons-eco-flexible-way-manage-topologies/) (Feb, 2018)
+
+## Slides
+
+* [Real-Time Analytics: Algorithms and Systems - Twitter University ](
+http://www.slideshare.net/arunkejariwal/real-time-analytics-algorithms-and-systems) (May, 2016)
+* [Stream Processing and Anomaly Detection - Velocity O'Reilly](http://www.slideshare.net/arunkejariwal/velocity-2015final) (Jun, 2015)
+
+## Press 
+
+* [Heron, Twitter's Data Streaming Platform, Has Been Open Sourced](http://www.benzinga.com/tech/16/06/8119962/heron-twitters-data-streaming-platform-has-been-open-sourced) (BenZinga, Jun, 2016)
+* [Twitter Open Sources Heron -- Data Streaming For Dummies](http://www.forbes.com/sites/adrianbridgwater/2016/06/16/twitter-open-sources-heron-data-streaming-for-dummies/#6f8984319b50) (Forbes, Jun, 2016)
+* [Getting Started with Heron on Apache Mesos and Apache Kafka](https://allthingshadoop.com/2016/05/30/getting-started-with-heron-on-apache-mesos-and-apache-kafka/) (All Things Hadoop, May, 2016)
+* [Twitter open-sources Heron, its real-time stream-processing engine](http://venturebeat.com/2016/05/25/twitter-open-sources-heron-its-real-time-stream-processing-engine/) (VentureBeat, May, 2016)
+* [Twitter's Heron Will Start a New Chapter in Real-Time Streaming](http://www.forbes.com/sites/janakirammsv/2015/06/08/twitters-heron-will-start-a-new-chapter-in-real-time-streaming/#62c8645b2306) (Forbes, Jun, 2015)
+* [Twitter Has Replaced Storm with Heron](https://www.infoq.com/news/2015/06/twitter-storm-heron) (InfoQ, Jun, 2015)
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-streamlet-concepts.md b/website2/website/versioned_docs/version-0.20.0/heron-streamlet-concepts.md
new file mode 100644
index 0000000..2294caa
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-streamlet-concepts.md
@@ -0,0 +1,811 @@
+---
+id: version-0.20.0-heron-streamlet-concepts
+title: Heron Streamlets
+sidebar_label: Heron Streamlets
+original_id: heron-streamlet-concepts
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+When it was first released, Heron offered a **Topology API**---heavily indebted to the [Storm API](http://storm.apache.org/about/simple-api.html)---for developing topology logic. In the original Topology API, developers creating topologies were required to explicitly:
+
+* define the behavior of every [spout](topology-development-topology-api-java#spouts) and [bolt](topology-development-topology-api-java#bolts) in the topology 
+* specify how those spouts and bolts are meant to be interconnected
+
+### Problems with the Topology API
+
+Although the Storm-inspired API provided a powerful low-level interface for creating topologies, the spouts-and-bolts model also presented a variety of drawbacks for Heron developers:
+
+Drawback | Description
+:--------|:-----------
+Verbosity | In the original Topology API for both Java and Python, creating spouts and bolts required substantial boilerplate and forced developers to both provide implementations for spout and bolt classes and also to specify the connections between those spouts and bolts.
+Difficult debugging | When spouts, bolts, and the connections between them need to be created "by hand," it can be challenging to trace the origin of problems in the topology's processing chain
+Tuple-based data model | In the older topology API, spouts and bolts passed [tuples](https://en.wikipedia.org/wiki/Tuple) and nothing but tuples within topologies. Although tuples are a powerful and flexible data type, the topology API forced *all* spouts and bolts to implement their own serialization/deserialization logic.
+
+### Advantages of the Streamlet API
+
+In contrast with the Topology API, the Heron Streamlet API offers:
+
+Advantage | Description
+:---------|:-----------
+Boilerplate-free code | Instead of needing to implement spout and bolt classes over and over again, the Heron Streamlet API enables you to create stream processing logic out of functions, such as map, flatMap, join, and filter functions, instead.
+Easy debugging | With the Heron Streamlet API, you don't have to worry about spouts and bolts, which means that you can more easily surface problems with your processing logic.
+Completely flexible, type-safe data model | Instead of requiring that all processing components pass tuples to one another (which implicitly requires serialization to and deserializaton from your application-specific types), the Heron Streamlet API enables you to write your processing logic in accordance with whatever types you'd like---including tuples, if you wish.<br /><br />In the Streamlet API for [Java](topology-development-streamlet-api), all streamlets are typed (e.g. `Streamlet<MyApplicationType>`), which means that type errors can be caught at compile time rather than at runtime.
+
+## Streamlet API topology model
+
+Instead of spouts and bolts, as with the Topology API, the Streamlet API enables you to create **processing graphs** that are then automatically converted to spouts and bolts under the hood. Processing graphs consist of the following components:
+
+* **Sources** supply the processing graph with data from random generators, databases, web service APIs, filesystems, pub-sub messaging systems, or anything that implements the [source](#source-operations) interface.
+* **Operators** supply the graph's processing logic, operating on data passed into the graph by sources.
+* **Sinks** are the terminal endpoints of the processing graph, determining what the graph *does* with the processed data. Sinks can involve storing data in a database, logging results to stdout, publishing messages to a topic in a pub-sub messaging system, and much more.
+
+The diagram below illustrates both the general model (with a single source, three operators, and one sink), and a more concrete example that includes two sources (an [Apache Pulsar](https://pulsar.incubator.apache.org) topic and the [Twitter API](https://developer.twitter.com/en/docs)), three operators (a [join](#join-operations), [flatMap](#flatmap-operations), and [reduce](#reduce-operations) operation), and two [sinks](#sink-operations) (an [Apache Cassandra](http://cassandra.apache.org/) table and an [Apache Spark](https://spark.apache.org/) job).
+
+![Topology Operators](https://www.lucidchart.com/publicSegments/view/d84026a1-d12e-4878-b8d5-5aa274ec0415/image.png)
+
+### Streamlets
+
+The core construct underlying the Heron Streamlet API is that of the **streamlet**. A streamlet is an unbounded, ordered collection of **elements** of some data type (streamlets can consist of simple types like integers and strings or more complex, application-specific data types).
+
+**Source streamlets** supply a Heron processing graph with data inputs. These inputs can come from a wide variety of sources, such as pub-sub messaging systems like [Apache
+Kafka](http://kafka.apache.org/) and [Apache Pulsar](https://pulsar.incubator.apache.org) (incubating), random generators, or static files like CSV or [Apache Parquet](https://parquet.apache.org/) files.
+
+Source streamlets can then be manipulated in a wide variety of ways. You can, for example:
+
+* apply [map](#map-operations), [filter](#filter-operations), [flatMap](#flatmap-operations), and many other operations to them
+* apply operations, such as [join](#join-operations) and [union](#union-operations) operations, that combine streamlets together
+* [reduce](#reduce-by-key-and-window-operations) all elements in a streamlet to some single value, based on key
+* send data to [sinks](#sink-operations) (store elements)
+
+The diagram below shows an example streamlet:
+
+![Streamlet](https://www.lucidchart.com/publicSegments/view/5c451e53-46f8-4e36-86f4-9a11ca015c21/image.png)
+
+
+In this diagram, the **source streamlet** is produced by a random generator that continuously emits random integers between 1 and 100. From there:
+
+* A filter operation is applied to the source streamlet that filters out all values less than or equal to 30
+* A *new streamlet* is produced by the filter operation (with the Heron Streamlet API, you're always transforming streamlets into other streamlets)
+* A map operation adds 15 to each item in the streamlet, which produces the final streamlet in our graph. We *could* hypothetically go much further and add as many transformation steps to the graph as we'd like.
+* Once the final desired streamlet is created, each item in the streamlet is sent to a sink. Sinks are where items leave the processing graph. 
+
+### Supported languages
+
+The Heron Streamlet API is currently available for:
+
+* [Java](topology-development-streamlet-api)
+* [Scala](topology-development-streamlet-scala)
+
+### The Heron Streamlet API and topologies
+
+With the Heron Streamlet API *you still create topologies*, but only implicitly. Heron automatically performs the heavy lifting of converting the streamlet-based processing logic that you create into spouts and bolts and, from there, into containers that are then deployed using whichever [scheduler](schedulers-local) your Heron cluster relies upon.
+
+From the standpoint of both operators and developers [managing topologies' lifecycles](#topology-lifecycle), the resulting topologies are equivalent. From a development workflow standpoint, however, the difference is profound. You can think of the Streamlet API as a highly convenient tool for creating spouts, bolts, and the logic that connects them.
+
+The basic workflow looks like this:
+
+![Streamlet](https://www.lucidchart.com/publicSegments/view/6b2e9b49-ef1f-45c9-8094-1e2cefbaed7b/image.png)
+
+When creating topologies using the Heron Streamlet API, you simply write code (example [below](#java-processing-graph-example)) in a highly functional style. From there:
+
+* that code is automatically converted into spouts, bolts, and the necessary connective logic between spouts and bolts
+* the spouts and bolts are automatically converted into a [logical plan](topology-development-topology-api-java#logical-plan) that specifies how the spouts and bolts are connected to each other
+* the logical plan is automatically converted into a [physical plan](topology-development-topology-api-java#physical-plan) that determines how the spout and bolt instances (the colored boxes above) are distributed across the specified number of containers (in this case two)
+
+With a physical plan in place, the Streamlet API topology can be submitted to a Heron cluster.
+
+#### Java processing graph example
+
+The code below shows how you could implement the processing graph shown [above](#streamlets) in Java:
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.heron.streamlet.Builder;
+import org.apache.heron.streamlet.Config;
+import org.apache.heron.streamlet.Runner;
+
+Builder builder = Builder.newBuilder();
+
+// Function for generating random integers
+int randomInt(int lower, int upper) {
+    return ThreadLocalRandom.current().nextInt(lower, upper + 1);
+}
+
+// Source streamlet
+builder.newSource(() -> randomInt(1, 100))
+    // Filter operation
+    .filter(i -> i > 30)
+    // Map operation
+    .map(i -> i + 15)
+    // Log sink
+    .log();
+
+Config config = new Config();
+// This topology will be spread across two containers
+config.setNumContainers(2);
+
+// Submit the processing graph to Heron as a topology
+new Runner("IntegerProcessingGraph", config, builder).run();
+```
+
+As you can see, the Java code for the example streamlet processing graph requires very little boilerplate and is heavily indebted to Java 8 [lambda](https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html) patterns.
+
+## Streamlet operations
+
+In the Heron Streamlet API, processing data means *transforming streamlets into other streamlets*. This can be done using a wide variety of available operations, including many that you may be familiar with from functional programming:
+
+Operation | Description
+:---------|:-----------
+[map](#map-operations) | Returns a new streamlet by applying the supplied mapping function to each element in the original streamlet
+[flatMap](#flatmap-operations) | Like a map operation but with the important difference that each element of the streamlet is flattened into a collection type
+[filter](#filter-operations) | Returns a new streamlet containing only the elements that satisfy the supplied filtering function
+[union](#union-operations) | Unifies two streamlets into one, without [windowing](#windowing) or modifying the elements of the two streamlets
+[clone](#clone-operations) | Creates any number of identical copies of a streamlet
+[transform](#transform-operations) | Transform a streamlet using whichever logic you'd like (useful for transformations that don't neatly map onto the available operations, such as modifying the elements from an incoming streamlet while updating the topology's state)
+[keyBy](#key-by-operations) | Returns a new key-value streamlet by applying the supplied extractors to each element in the original streamlet
+[reduceByKey](#reduce-by-key-operations) | Produces a streamlet of key-value pairs on each key and in accordance with a reduce function that you apply to all the accumulated values
+[reduceByKeyAndWindow](#reduce-by-key-and-window-operations) |  Produces a streamlet of key-value pairs on each key, within a [time window](#windowing), and in accordance with a reduce function that you apply to all the accumulated values
+[countByKey](#count-by-key-operations) | A special reduce operation of counting number of tuples on each key
+[countByKeyAndWindow](#count-by-key-and-window-operations) | A special reduce operation of counting number of tuples on each key, within a [time window](#windowing)
+[split](#split-operations) | Split a streamlet into multiple streamlets with different id.
+[withStream](#with-stream-operations) | Select a stream with id from a streamlet that contains multiple streams
+[applyOperator](#apply-operator-operations) | Returns a new streamlet by applying an user defined operator to the original streamlet
+[join](#join-operations) | Joins two separate key-value streamlets into a single streamlet on a key, within a [time window](#windowing), and in accordance with a join function
+[log](#log-operations) | Logs the final streamlet output of the processing graph to stdout
+[toSink](#sink-operations) | Sink operations terminate the processing graph by storing elements in a database, logging elements to stdout, etc.
+[consume](#consume-operations) | Consume operations are like sink operations except they don't require implementing a full sink interface (consume operations are thus suited for simple operations like logging)
+
+### Map operations
+
+Map operations create a new streamlet by applying the supplied mapping function to each element in the original streamlet.
+
+#### Java example
+
+```java
+import org.apache.heron.streamlet.Builder;
+
+Builder processingGraphBuilder = Builder.newBuilder();
+
+Streamlet<Integer> ones = processingGraphBuilder.newSource(() -> 1);
+Streamlet<Integer> thirteens = ones.map(i -> i + 12);
+```
+
+In this example, a supplier streamlet emits an indefinite series of 1s. The `map` operation then adds 12 to each incoming element, producing a streamlet of 13s. The effect of this operation is to transform the `Streamlet<Integer>` into a `Streamlet<Integer>` with different values (map operations can also convert streamlets into streamlets of a different type).
+
+### FlatMap operations
+
+FlatMap operations are like [map operations](#map-operations) but with the important difference that each element of the streamlet is "flattened" into a collection type. In the Java example below, a supplier streamlet emits the same sentence over and over again; the `flatMap` operation transforms each sentence into a Java `List` of individual words.
+
+#### Java example
+
+```java
+Streamlet<String> sentences = builder.newSource(() -> "I have nothing to declare but my genius");
+Streamlet<List<String>> words = sentences
+        .flatMap((sentence) -> Arrays.asList(sentence.split("\\s+")));
+```
+
+The effect of this operation is to transform the `Streamlet<String>` into a `Streamlet<List<String>>` containing each word emitted by the source streamlet.
+
+### Filter operations
+
+Filter operations retain some elements in a streamlet and exclude other elements on the basis of a provided filtering function.
+
+#### Java example
+
+```java
+Streamlet<Integer> randomInts =
+    builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11));
+Streamlet<Integer> lessThanSeven = randomInts
+        .filter(i -> i <= 7);
+```
+
+In this example, a source streamlet consisting of random integers between 1 and 10 is modified by a filter operation that removes all streamlet elements that are greater than 7.
+
+### Union operations
+
+Union operations combine two streamlets of the same type into a single streamlet without modifying the elements.
+
+#### Java example
+
+```java
+Streamlet<String> oohs = builder.newSource(() -> "ooh");
+Streamlet<String> aahs = builder.newSource(() -> "aah");
+
+Streamlet<String> combined = oohs
+        .union(aahs);
+```
+
+Here, one streamlet is an endless series of "ooh"s while the other is an endless series of "aah"s. The `union` operation combines them into a single streamlet of alternating "ooh"s and "aah"s.
+
+### Clone operations
+
+Clone operations enable you to create any number of "copies" of a streamlet. Each of the "copy" streamlets contains all the elements of the original and can be manipulated just like the original streamlet.
+
+#### Java example
+
+```java
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+Streamlet<Integer> integers = builder.newSource(() -> ThreadLocalRandom.current().nextInt(100));
+
+List<Streamlet<Integer>> copies = integers.clone(5);
+Streamlet<Integer> ints1 = copies.get(0);
+Streamlet<Integer> ints2 = copies.get(1);
+Streamlet<Integer> ints3 = copies.get(2);
+// and so on...
+```
+
+In this example, a streamlet of random integers between 1 and 100 is split into 5 identical streamlets.
+
+### Transform operations
+
+Transform operations are highly flexible operations that are most useful for:
+
+* operations involving state in [stateful topologies](heron-delivery-semantics#stateful-topologies)
+* operations that don't neatly fit into the other categories or into a lambda-based logic
+
+Transform operations require you to implement three different methods:
+
+* A `setup` method that enables you to pass a context object to the operation and to specify what happens prior to the `transform` step
+* A `transform` operation that performs the desired transformation
+* A `cleanup` method that allows you to specify what happens after the `transform` step
+
+The context object available to a transform operation provides access to:
+
+* the current state of the topology
+* the topology's configuration
+* the name of the stream
+* the stream partition
+* the current task ID
+
+Here's a Java example of a transform operation in a topology where a stateful record is kept of the number of items processed:
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.SerializableTransformer;
+
+import java.util.function.Consumer;
+
+public class CountNumberOfItems implements SerializableTransformer<String, String> {
+    private int numberOfItems;
+
+    public void setup(Context context) {
+        numberOfItems = (int) context.getState("number-of-items");
+        context.getState().put("number-of-items", numberOfItems + 1);
+    }
+
+    public void transform(String in, Consumer<String> consumer) {
+        String transformedString = // Apply some operation to the incoming value
+        consumer.accept(transformedString);
+    }
+
+    public void cleanup() {
+        System.out.println(
+                String.format("Successfully processed new state: %d", numberOfItems));
+    }
+}
+```
+
+This operation does a few things:
+
+* In the `setup` method, the [`Context`](/api/java/org/apache/heron/streamlet/Context.html) object is used to access the current state (which has the semantics of a Java `Map`). The current number of items processed is incremented by one and then saved as the new state.
+* In the `transform` method, the incoming string is transformed in some way and then "accepted" as the new value.
+* In the `cleanup` step, the current count of items processed is logged.
+
+Here's that operation within the context of a streamlet processing graph:
+
+```java
+builder.newSource(() -> "Some string over and over")
+        .transform(new CountNumberOfItems())
+        .log();
+```
+
+### Key by operations
+
+Key by operations convert each item in the original streamlet into a key-value pair and return a new streamlet.
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .keyBy(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (get the length of each word)
+        word -> word.length()
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value).
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key and window operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a [time window](heron-topology-concepts#window-operations) across which the operation will take place
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key and window operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value, as well as information about the window in which the operation took place).
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Count by key operations
+
+Count by key operations extract keys from data in the original streamlet and count the number of times a key has been encountered.
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKey(word -> word)
+    // The result is logged
+    .log();
+```
+
+### Count by key and window operations
+
+Count by key and window operations extract keys from data in the original streamlet and count the number of times a key has been encountered within each [time window](#windowing).
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50)
+    )
+    // The result is logged
+    .log();
+```
+
+### Split operations
+
+Split operations split a streamlet into multiple streamlets with different ids by getting the corresponding stream ids from each item in the original streamlet.
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+Map<String, SerializablePredicate<String>> splitter = new HashMap<>();
+    splitter.put("long_word", s -> s.length() >= 4);
+    splitter.put("short_word", s -> s.length() < 4);
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Splits the stream into streams of long and short words
+    .split(splitter)
+    // Choose the stream of the short words
+    .withStream("short_word")
+    // The result is logged
+    .log();
+```
+
+### With stream operations
+
+With stream operations select a stream with id from a streamlet that contains multiple streams. They are often used with [split](#split-operations).
+
+### Apply operator operations
+
+Apply operator operations apply a user defined operator (like a bolt) to each element of the original streamlet and return a new streamlet.
+
+#### Java example
+
+```java
+import java.util.Arrays;
+
+private class MyBoltOperator extends MyBolt implements IStreamletRichOperator<Double, Double> {
+}
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Apply user defined operation
+    .applyOperator(new MyBoltOperator())
+    // The result is logged
+    .log();
+```
+
+### Join operations
+
+Join operations in the Streamlet API take two streamlets (a "left" and a "right" streamlet) and join them together:
+
+* based on a key extractor for each streamlet
+* over key-value elements accumulated during a specified [time window](#windowing)
+* based on a [join type](#join-types) ([inner](#inner-joins), [outer left](#outer-left-joins), [outer right](#outer-right-joins), or [outer](#outer-joins))
+* using a join function that specifies *how* values will be processed
+
+You may already be familiar with `JOIN` operations in SQL databases, like this:
+
+```sql
+SELECT username, email
+FROM all_users
+INNER JOIN banned_users ON all_users.username = banned_users.username;
+```
+
+> If you'd like to unite two streamlets into one *without* applying a window or a join function, you can use a [union](#union-operations) operation, which is available for key-value streamlets as well as normal streamlets.
+
+All join operations are performed:
+
+1. Over elements accumulated during a specified [time window](#windowing)
+1. In accordance with a key and value extracted from each streamlet element (you must provide extractor functions for both)
+1. In accordance with a join function that produces a "joined" value for each pair of streamlet elements
+
+#### Join types
+
+The Heron Streamlet API supports four types of joins:
+
+Type | What the join operation yields | Default?
+:----|:-------------------------------|:--------
+[Inner](#inner-joins) | All key-values with matched keys across the left and right stream | Yes
+[Outer left](#outer-left-joins) | All key-values with matched keys across both streams plus unmatched keys in the left stream |
+[Outer right](#outer-right-joins) | All key-values with matched keys across both streams plus unmatched keys in the right stream |
+[Outer](#outer-joins) | All key-values across both the left and right stream, regardless of whether or not any given element has a matching key in the other stream |
+
+#### Inner joins
+
+Inner joins operate over the [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) of the left stream and the right stream, i.e. over the whole set of ordered pairs between the two streams. Imagine this set of key-value pairs accumulated within a time window:
+
+Left streamlet | Right streamlet
+:--------------|:---------------
+("player1", 4) | ("player1", 10)
+("player1", 5) | ("player1", 12)
+("player1", 17) | ("player2", 27)
+
+An inner join operation would thus apply the join function to all key-values with matching keys — **3 &times; 2 = 6** pairs in total — drawn from this set of key-values:
+
+Included key-values |
+:-------------------|
+("player1", 4) |
+("player1", 5) |
+("player1", 10) |
+("player1", 12) |
+("player1", 17) |
+
+> Note that the `("player2", 27)` key-value pair was *not* included in the stream because there's no matching key-value in the left streamlet.
+
+If the supplied join function, say, added the values together, then the resulting joined stream would look like this:
+
+Operation | Joined Streamlet
+:---------|:----------------
+4 + 10 | ("player1", 14)
+4 + 12 | ("player1", 16)
+5 + 10 | ("player1", 15)
+5 + 12 | ("player1", 17)
+17 + 10 | ("player1", 27)
+17 + 12 | ("player1", 29)
+
+> Inner joins are the "default" join type in the Heron Streamlet API. If you call the `join` method without specifying a join type, an inner join will be applied.
+
+##### Java example
+
+```java
+class Score {
+    String playerUsername;
+    int playerScore;
+
+    // Setters and getters
+}
+
+Streamlet<Score> scores1 = /* A stream of player scores */;
+Streamlet<Score> scores2 = /* A second stream of player scores */;
+
+scores1
+    .join(
+        scores2,
+        // Key extractor for the left stream (scores1)
+        score -> score.getPlayerUsername(),
+        // Key extractor for the right stream (scores2)
+        score -> score.getPlayerUsername(),
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+        // Join function (selects the larger score as the value using
+        // using a ternary operator)
+        (x, y) ->
+            (x.getPlayerScore() >= y.getPlayerScore()) ?
+                x.getPlayerScore() :
+                y.getPlayerScore()
+    )
+    .log();
+```
+
+In this example, two streamlets consisting of `Score` objects are joined. In the `join` function, a key and value extractor are supplied along with a window configuration and a join function. The resulting, joined streamlet will consist of key-value pairs in which each player's username will be the key and the joined---in this case highest---score will be the value.
+
+By default, an [inner join](#inner-joins) is applied in join operations but you can also specify a different join type. Here's a Java example for an [outer right](#outer-right-joins) join:
+
+```java
+import org.apache.heron.streamlet.JoinType;
+
+scores1
+    .join(
+        scores2,
+        // Key extractor for the left stream (scores1)
+        score -> score.getPlayerUsername(),
+        // Key extractor for the right stream (scores2)
+        score -> score.getPlayerUsername(),
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+        // Join type
+        JoinType.OUTER_RIGHT,
+        // Join function (selects the larger score as the value using
+        // using a ternary operator)
+        (x, y) ->
+            (x.getPlayerScore() >= y.getPlayerScore()) ?
+                x.getPlayerScore() :
+                y.getPlayerScore()
+    )
+    .log();
+```
+
+#### Outer left joins
+
+An outer left join includes the results of an [inner join](#inner-joins) *plus* all of the unmatched keys in the left stream. Take this example left and right streamlet:
+
+Left streamlet | Right streamlet
+:--------------|:---------------
+("player1", 4) | ("player1", 10)
+("player2", 5) | ("player4", 12)
+("player3", 17) |
+
+The resulting set of key-values within the time window:
+
+Included key-values |
+:-------------------|
+("player1", 4) |
+("player1", 10) |
+("player2", 5) |
+("player3", 17) |
+
+In this case, key-values with a key of `player4` are excluded because they are in the right stream but have no matching key with any element in the left stream.
+
+#### Outer right joins
+
+An outer right join includes the results of an [inner join](#inner-joins) *plus* all of the unmatched keys in the right stream. Take this example left and right streamlet (from [above](#outer-left-joins)):
+
+Left streamlet | Right streamlet
+:--------------|:---------------
+("player1", 4) | ("player1", 10)
+("player2", 5) | ("player4", 12)
+("player3", 17) |
+
+The resulting set of key-values within the time window:
+
+Included key-values |
+:-------------------|
+("player1", 4) |
+("player1", 10) |
+("player2", 5) |
+("player4", 12) |
+
+In this case, key-values with a key of `player3` are excluded because they are in the left stream but have no matching key with any element in the right stream.
+
+#### Outer joins
+
+Outer joins include *all* key-values across both the left and right stream, regardless of whether or not any given element has a matching key in the other stream. If you want to ensure that no element is left out of a resulting joined streamlet, use an outer join. Take this example left and right streamlet (from [above](#outer-left-joins)):
+
+Left streamlet | Right streamlet
+:--------------|:---------------
+("player1", 4) | ("player1", 10)
+("player2", 5) | ("player4", 12)
+("player3", 17) |
+
+The resulting set of key-values within the time window:
+
+Included key-values |
+:-------------------|
+("player1", 4)
+("player1", 10)
+("player2", 5)
+("player4", 12)
+("player3", 17)
+
+> Note that *all* key-values were indiscriminately included in the joined set.
+
+### Sink operations
+
+In processing graphs like the ones you build using the Heron Streamlet API, **sinks** are essentially the terminal points in your graph, where your processing logic comes to an end. A processing graph can end with writing to a database, publishing to a topic in a pub-sub messaging system, and so on. With the Streamlet API, you can implement your own custom sinks.
+
+#### Java example
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.Sink;
+
+public class FormattedLogSink<T> implements Sink<T> {
+    private String streamletName;
+
+    public void setup(Context context) {
+        streamletName = context.getStreamletName();
+    }
+
+    public void put(T element) {
+        String message = String.format("Streamlet %s has produced an element with a value of: '%s'",
+                streamletName,
+                element.toString());
+        System.out.println(message);
+    }
+
+    public void cleanup() {}
+}
+```
+
+In this example, the sink fetches the name of the enclosing streamlet from the context passed in the `setup` method. The `put` method specifies how the sink handles each element that is received (in this case, a formatted message is logged to stdout). The `cleanup` method enables you to specify what happens after the element has been processed by the sink.
+
+Here is the `FormattedLogSink` at work in an example processing graph:
+
+```java
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Here is a string to be passed to the sink")
+        .toSink(new FormattedLogSink());
+```
+
+> [Log operations](#log-operations) rely on a log sink that is provided out of the box. You'll need to implement other sinks yourself.
+
+### Consume operations
+
+Consume operations are like [sink operations](#sink-operations) except they don't require implementing a full sink interface. Consume operations are thus suited for simple operations like formatted logging.
+
+#### Java example
+
+```java
+Builder builder = Builder.newBuilder()
+        .newSource(() -> generateRandomInteger())
+        .filter(i -> i % 2 == 0)
+        .consume(i -> {
+            String message = String.format("Even number found: %d", i);
+            System.out.println(message);
+        });
+```
+
+## Partitioning
+
+In the topology API, processing parallelism can be managed via adjusting the number of spouts and bolts performing different operations, enabling you to, for example, increase the relative parallelism of a bolt by using three of that bolt instead of two.
+
+The Heron Streamlet API provides a different mechanism for controlling parallelism: **partitioning**. To understand partitioning, keep in mind that rather than physical spouts and bolts, the core processing construct in the Heron Streamlet API is the processing step. With the Heron Streamlet API, you can explicitly assign a number of partitions to each processing step in your graph (the default is one partition).
+
+The example topology [above](#streamlets), for example, has five steps:
+
+* the random integer source
+* the "add one" map operation
+* the union operation
+* the filtering operation
+* the logging operation.
+
+You could apply varying numbers of partitions to each step in that topology like this:
+
+```java
+Builder builder = Builder.newBuilder();
+
+Streamlet<Integer> zeroes = builder.newSource(() -> 0)
+        .setName("zeroes");
+
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .setName("random-ints")
+        .setNumPartitions(3)
+        .map(i -> i + 1)
+        .setName("add-one")
+        .repartition(3)
+        .union(zeroes)
+        .setName("unify-streams")
+        .repartition(2)
+        .filter(i -> i != 2)
+        .setName("remove-all-twos")
+        .repartition(1)
+        .log();
+```
+
+### Repartition operations
+
+As explained [above](#partitioning), when you set a number of partitions for a specific operation (including for source streamlets), the same number of partitions is applied to all downstream operations *until* a different number is explicitly set.
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+    .repartition(4, (element, numPartitions) -> {
+        if (element > 5) {
+            return Arrays.asList(0, 1);
+        } else {
+            return Arrays.asList(2, 3);
+        }
+    });
+```
+
diff --git a/website2/website/versioned_docs/version-0.20.0/heron-topology-concepts.md b/website2/website/versioned_docs/version-0.20.0/heron-topology-concepts.md
new file mode 100644
index 0000000..873e005
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/heron-topology-concepts.md
@@ -0,0 +1,207 @@
+---
+id: version-0.20.0-heron-topology-concepts
+title: Heron Topologies
+sidebar_label: Heron Topologies
+original_id: heron-topology-concepts
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> **Don't want to manually create spouts and bolts? Try the Heron Streamlet API.** If you find manually creating and connecting spouts and bolts to be overly cumbersome, we recommend trying out the [Heron Streamlet API](topology-development-streamlet-api) for Java, which enables you to create your topology logic using highly streamlined logic inspired by functional programming concepts.
+
+
+A Heron **topology** is a [directed acyclic
+graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph) (DAG) used to process
+streams of data. Topologies can be stateless or 
+[stateful](heron-delivery-semantics#stateful-topologies) depending on your use case.
+
+Heron topologies consist of two basic components:
+
+* [Spouts](topology-development-topology-api-java#spouts) inject data into Heron topologies, potentially from external sources like pub-sub messaging systems (Apache Kafka, Apache Pulsar, etc.)
+* [Bolts](topology-development-topology-api-java#bolts) apply user-defined processing logic to data supplied by spouts
+
+Spouts and bolts are connected to one another via **streams** of data. Below is a
+visual illustration of a simple Heron topology:
+
+![Heron topology](assets/topology.png)
+
+In the diagram above, spout **S1** feeds data to bolts **B1** and **B2** for
+processing; in turn, bolt **B1** feeds processed data to bolts **B3** and
+**B4**, while bolt **B2** feeds processed data to bolt **B4**. This is just a
+simple example; you can create arbitrarily complex topologies in Heron.
+
+## Creating topologies
+
+There are currently two APIs available that you can use to build Heron topologies:
+
+1. The higher-level [Heron Streamlet API](topology-development-streamlet-api), which enables you to create topologies in a declarative, developer-friendly style inspired by functional programming concepts (such as map, flatMap, and filter operations)
+1. The lower-level [topology API](topology-development-topology-api-java) , based on the original [Apache Storm](http://storm.apache.org/about/simple-api.html) API, which requires you to specify spout and bolt logic directly
+
+## Topology Lifecycle
+
+Once you've set up a Heron cluster, you
+can use Heron's [CLI tool](user-manuals-heron-cli) to manage the entire
+lifecycle of a topology, which typically goes through the following stages:
+
+1. [Submit](user-manuals-heron-cli#submitting-a-topology) the topology
+   to the cluster. The topology is not yet processing streams but is ready to be
+   activated.
+2. [Activate](user-manuals-heron-cli#activating-a-topology) the
+   topology. The topology will begin processing streams in accordance with
+   the topology architecture that you've created.
+3. [Restart](user-manuals-heron-cli#restarting-a-topology) an
+   active topology if, for example, you need to update the topology configuration.
+4. [Deactivate](user-manuals-heron-cli#deactivating-a-topology) the
+   topology. Once deactivated, the topology will stop processing but
+   remain running in the cluster.
+5. [Kill](user-manuals-heron-cli#killing-a-topology) a topology to completely
+   remove it from the cluster.  It is no longer known to the Heron cluster and
+   can no longer be activated. Once killed, the only way to run that topology is
+   to re-submit and re-activate it.
+
+## Logical Plan
+
+A topology's **logical plan** is analogous to a database [query
+plan](https://en.wikipedia.org/wiki/Query_plan) in that it maps out the basic
+operations associated with a topology. Here's an example logical plan for the
+example Streamlet API topology [below](#streamlet-api-example):
+
+![Topology logical Plan](https://www.lucidchart.com/publicSegments/view/4e6e1ede-45f1-471f-b131-b3ecb7b7c3b5/image.png)
+
+Whether you use the [Heron Streamlet API](#the-heron-streamlet-api) or the [topology
+API](#the-topology-api), Heron automatically transforms the processing logic that
+you create into both a logical plan and a [physical plan](#physical-plan).
+
+## Physical Plan
+
+A topology's **physical plan** is related to its logical plan but with the
+crucial difference that a physical plan determines the "physical" execution
+logic of a topology, i.e. how topology processes are divided between containers. Here's a
+basic visual representation of a
+physical plan:
+
+![Topology Physical Plan](https://www.lucidchart.com/publicSegments/view/5c2fe0cb-e4cf-4192-9416-b1b64b5ce958/image.png)
+
+In this example, a Heron topology consists of two [spouts](topology-development-topology-api-java#spouts) and five
+different [bolts](topology-development-topology-api-java#bolts) (each of which has multiple instances) that have automatically 
+been distributed between five different containers.
+
+
+## Window operations
+
+**Windowed computations** gather results from a topology or topology component within a specified finite time frame rather than, say, on a per-tuple basis.
+
+Here are some examples of window operations:
+
+* Counting how many customers have purchased a product during each one-hour period in the last 24 hours.
+* Determining which player in an online game has the highest score within the last 1000 computations.
+
+### Sliding windows
+
+**Sliding windows** are windows that overlap, as in this figure:
+
+![Sliding time window](https://www.lucidchart.com/publicSegments/view/57d2fcbb-591b-4403-9258-e5b8e1e25de2/image.png)
+
+For sliding windows, you need to specify two things:
+
+1. The length or duration of the window (length if the window is a [count window](#count-windows), duration if the window is a [time window](#time-windows)).
+1. The sliding interval, which determines when the window slides, i.e. at what point during the current window the new window begins.
+
+In the figure above, the duration of the window is 20 seconds, while the sliding interval is 10 seconds. Each new window begins ten seconds into the current window.
+
+> With sliding time windows, data can be processed in more than one window. Tuples 3, 4, and 5 above are processed in both window 1 and window 2 while tuples 6, 7, and 8 are processed in both window 2 and window 3.
+
+Setting the duration of a window to 16 seconds and the sliding interval to 12 seconds would produce this window arrangement:
+
+![Sliding time window with altered time interval](https://www.lucidchart.com/publicSegments/view/44bd4835-a692-44e6-a5d8-8e47151e3167/image.png)
+
+Here, the sliding interval determines that a new window is always created 12 seconds into the current window.
+
+### Tumbling windows
+
+**Tumbling windows** are windows that don't overlap, as in this figure:
+
+![Tumbling time window](https://www.lucidchart.com/publicSegments/view/881f99ee-8f93-448f-a178-b9f72dce6491/image.png)
+
+Tumbling windows don't overlap because a new window doesn't begin until the current window has elapsed. For tumbling windows, you only need to specify the length or duration of the window but *no sliding interval*.
+
+> With tumbling windows, data are *never* processed in more than one window because the windows never overlap. Also, in the figure above, the duration of the window is 20 seconds.
+
+### Count windows
+
+**Count windows** are specified on the basis of the number of operations rather than a time interval. A count window of 100 would mean that a window would elapse after 100 tuples have been processed, *with no relation to clock time*.
+
+With count windows, this scenario (for a count window of 50) would be completely normal:
+
+Window | Tuples processed | Clock time
+:------|:-----------------|:----------
+1 | 50 | 10 seconds
+2 | 50 | 12 seconds
+3 | 50 | 1 hour, 12 minutes
+4 | 50 | 5 seconds
+
+### Time windows
+
+**Time windows** differ from [count windows](#count-windows) because you need to specify a time duration (in seconds) rather than a number of tuples processed.
+
+With time windows, this scenario (for a time window of 30 seconds) would be completely normal:
+
+Window | Tuples processed | Clock time
+:------|:-----------------|:----------
+1 | 150 | 30 seconds
+2 | 50 | 30 seconds
+3 | 0 | 30 seconds
+4 | 375 | 30 seconds
+
+### All window types
+
+As explained above, windows differ along two axes: sliding (overlapping) vs. tumbling (non-overlapping) and count vs. time. This produces four total types:
+
+1. [Sliding](#sliding-windows) [time](#time-windows) windows
+1. [Sliding](#sliding-windows) [count](#count-windows) windows
+1. [Tumbling](#tumbling-windows) [time](#time-windows) windows
+1. [Tumbling](#tumbling-windows) [count](#count-windows) windows
+
+## Resource allocation with the Heron Streamlet API
+
+When creating topologies using the Streamlet API, there are three types of resources that you can specify:
+
+1. The number of containers into which the topology's [physical plan](#physical-plan) will be split
+1. The total number of CPUs allocated to be used by the topology
+1. The total amount of RAM allocated to be used by the topology
+
+For each topology, there are defaults for each resource type:
+
+Resource | Default | Minimum
+:--------|:--------|:-------
+Number of containers | 1 | 1
+CPU | 1.0 | 1.0
+RAM | 512 MB | 192MB
+
+### Allocating resources to topologies
+
+For instructions on allocating resources to topologies, see the language-specific documentation for:
+
+* [Java](topology-development-streamlet-api#containers-and-resources)
+
+
+## Data Model
+
+Heron's original topology API required using a fundamentally tuple-driven data model.
+You can find more information in [Heron's Data Model](guides-data-model).
+
+
diff --git a/website2/website/versioned_docs/version-0.20.0/observability-graphite.md b/website2/website/versioned_docs/version-0.20.0/observability-graphite.md
new file mode 100644
index 0000000..60fceff
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/observability-graphite.md
@@ -0,0 +1,102 @@
+---
+id: version-0.20.0-observability-graphite
+title: Observability with Graphite
+sidebar_label: Graphite
+original_id: observability-graphite
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+To observe Heron's runtime metrics, you can integrate Heron and the Heron UI with
+[Graphite](http://graphite.readthedocs.io/en/latest/overview.html) and
+[Grafana](http://grafana.org/).
+
+To accomplish this, you need to do the following:
+
+* Export topology metrics from Heron
+* Gather Aurora and Linux metrics with Diamond
+* Set up a scripted dashboard with Grafana
+* Configure the Heron UI to link to Grafana
+
+### Exporting Topology Metrics From Heron
+
+Heron supports custom metric exporters from the Metrics Manager. You can either build your own Graphite metrics sink or use the [provided Graphite sink](extending-heron-metric-sink).
+
+### Gathering Metrics From Aurora
+
+In addition to the topology-specific data available from Heron, much more data is available directly
+from Aurora and the Linux kernel. These can help identify many operational problems, such as
+CPU throttling or crashes.
+
+A common way to collect data from these sources is via a system metrics collection daemon such as
+[collectd](https://collectd.org/) or [Diamond](https://github.com/python-diamond/Diamond)
+
+Diamond has the following relevant collectors available:
+
+* [Aurora](https://github.com/python-diamond/Diamond/tree/master/src/collectors/aurora)
+
+
+### Creating A Scripted Grafana Dashboard
+
+A convenient way to view topology-specific metrics in Graphite is to create a
+[scripted dashboard in Grafana](http://docs.grafana.org/reference/scripting/). The scripted
+dashboard should accept information such as the topology name as query arguments, which will allow
+the Heron UI to deep link to a specific topology's dashboard.
+
+Suggested targets for the scripted dashboard include:
+
+**Component Backpressure**:
+
+```python
+'aliasByNode(sortByMaxima(highestAverage(heron.' + topology_name + '.stmgr.stmgr-*.
+time_spent_back_pressure_by_compid.*, 5)), 5)'
+```
+
+**Fail Count by Component**:
+
+```python
+'sumSeriesWithWildcards(aliasByNode(heron.' + topology_name + '.*.*.fail-count.default,2),3)'
+```
+
+**CPU Throttling periods**:
+
+```python
+aliasByNode(nonNegativeDerivative(mesos.tasks.prod.*.' + topology_name + '.*.cpu.
+nr_throttled), 4,5)
+```
+
+**JVM Deaths**:
+
+```python
+'aliasByNode(drawAsInfinite(maximumAbove(removeAboveValue(heron.' + topology_name + '.*.*.jvm.
+uptime-secs, 60),1)),2,3)'
+```
+
+**Top 5 worst GC components**:
+
+```python
+'aliasByNode(highestMax(nonNegativeDerivative(heron.' + topology_name + '.*.*.jvm.gc-time-ms.
+PS-*),5), 2,3,6)'
+```
+
+### Configuring The Heron UI Link To Grafana
+
+Finally, you can configure the Heron UI to deep link to scripted dashboards by specifying an
+[observability URL format](https://github.com/apache/incubator-heron/blob/master/heron/tools/config/src/yaml/tracker/heron_tracker.yaml)
+(`viz.url.format`) in the Heron Tracker's configuration. This will add topology-specific buttons to
+the Heron UI enabling you to drill-down into your Grafana dashboards.
diff --git a/website2/website/versioned_docs/version-0.20.0/observability-prometheus.md b/website2/website/versioned_docs/version-0.20.0/observability-prometheus.md
new file mode 100644
index 0000000..6457cbd
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/observability-prometheus.md
@@ -0,0 +1,45 @@
+---
+id: version-0.20.0-observability-prometheus
+title: Observability with Prometheus
+sidebar_label: Prometheus
+original_id: observability-prometheus
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can integrate Heron with [Prometheus](https://prometheus.io/) to monitor and gather runtime metrics exported by Heron topologies.
+
+## Exporting topology metrics from Heron to Prometheus
+
+Heron supports custom metric exporters from the Metrics Manager. You can either build your own Prometheus metrics sink or use the [provided Prometheus sink](extending-heron-metric-sink).
+
+To set up your Heron cluster to export to Prometheus, you need to make two changes to the `metrics_sinks.yaml` configuration file:
+
+* Add `prometheus-sink` to the `sinks` list
+* Add a `prometheus-sink` map to the file that sets values for the [parameters](#prometheus-parameters) listed below. You can uncomment the existing `prometheus-sink` map to get the default configuration.
+
+### Prometheus parameters
+
+Parameter | Description | Default
+:---------|:------------|:-------
+`class` | The Java class used to control Prometheus sink behavior | [`org.apache.heron.metricsmgr.sink.PrometheusSink`](/api/org/apache/heron/metricsmgr/sink/PrometheusSink.html)
+`port` | The port on which Prometheus will scrape for metrics. **Note**: You must supply a `port` *or* `port-file` config. | 8080
+`port-file` | The path to a text file that contains the port number to be used by Prometheus for metrics scraping. **Note**: You must supply a `port` *or* `port-file` config. | `metrics.port`
+`path` | The Prometheus path on which to publish metrics | `/metrics`
+`include-topology-name` | Whether the name of the Heron topology will be included in names for metrics | `true`
+`metrics-cache-max-size` | The maximum number of metrics cached and published | 1000000
+`metrics-cache-ttl-sec` | The time to live (TTL) for metrics, i.e. the time, in seconds after which a metric that was collected will stop being published | 600 (10 minutes)
diff --git a/website2/website/versioned_docs/version-0.20.0/observability-scribe.md b/website2/website/versioned_docs/version-0.20.0/observability-scribe.md
new file mode 100644
index 0000000..3e091db
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/observability-scribe.md
@@ -0,0 +1,49 @@
+---
+id: version-0.20.0-observability-scribe
+title: Observability with Scribe
+sidebar_label: Scribe
+original_id: observability-scribe
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+You can integrate Heron with [Scribe](https://github.com/facebookarchive/scribe/wiki) to monitor and gather runtime metrics exported by Heron topologies.
+
+## Exporting topology metrics from Heron to Scribe
+
+Heron supports custom metric exporters from the Metrics Manager. You can either build your own Scribe metrics sink or use the [provided Scribe sink](extending-heron-metric-sink).
+
+To set up your Heron cluster to export to Scribe, you need to make two changes to the `metrics_sinks.yaml` configuration file:
+
+* Add `scribe-sink` to the `sinks` list
+* Add a `scribe-sink` map to the file that sets values for the [parameters](#scribe-parameters) listed below. You can uncomment the existing `scribe-sink` map to get the default configuration.
+
+### Scribe parameters
+
+Parameter | Description | Default
+:---------|:------------|:-------
+`class` | The Java class used to control Scribe sink behavior | [`org.apache.heron.metricsmgr.sink.ScribeSink`](/api/org/apache/heron/metricsmgr/sink/ScribeSink.html)
+`flush-frequency-ms` | How frequently, in milliseconds, the `flush()` method is called | 60000 (one minute)
+`sink-restart-attempts` | How many times Heron should attempt to publish metrics to Scribe before no longer attempting | -1 (forever)
+`scribe-host` | The Scribe host to export metrics to | 127.0.0.1
+`scribe-port` | The Scribe port to export metrics to | 1463
+`scribe-category` | The Scribe category to export metrics to | `scribe-category`
+`service-namespace` | The service name for the category in `scribe-category` | `heron`
+`scribe-timeout-ms` | The timeout, in milliseconds, when attempting to export metrics to Scribe | 200
+`scribe-connect-server-attempts` | The maximum number of retry attempts when connecting to Scribe on the configured host and port | 2
+`scribe-retry-attempts` | The maximum number of retry attempts when writing metrics to Scribe | 5
+`scribe-retry-interval-ms` | The time interval, in milliseconds, at which Heron attempts to write metrics to Scribe | 100
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-cluster.md b/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-cluster.md
new file mode 100644
index 0000000..fd777bc
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-cluster.md
@@ -0,0 +1,139 @@
+---
+id: version-0.20.0-schedulers-aurora-cluster
+title: Aurora Cluster
+sidebar_label: Aurora Cluster
+original_id: schedulers-aurora-cluster
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron supports deployment on [Apache Aurora](http://aurora.apache.org/) out of
+the box. A step by step guide on how to setup Heron with Apache Aurora locally 
+can be found in [Setting up Heron with Aurora Cluster Locally on Linux](schedulers-aurora-local). You can also run Heron on
+a [local scheduler](schedulers-local). 
+
+## How Heron on Aurora Works
+
+Aurora doesn't have a Heron scheduler *per se*. Instead, when a topology is
+submitted to Heron, `heron` cli interacts with Aurora to automatically deploy
+all the [components](heron-architecture) necessary to [manage
+topologies](user-manuals-heron-cli).
+
+## ZooKeeper
+
+To run Heron on Aurora, you'll need to set up a ZooKeeper cluster and configure
+Heron to communicate with it. Instructions can be found in [Setting up
+ZooKeeper](state-managers-zookeeper).
+
+## Hosting Binaries
+
+To deploy Heron, the Aurora cluster needs access to the
+Heron core binary, which can be hosted wherever you'd like, so long as
+it's accessible to Aurora (for example in [Amazon
+S3](https://aws.amazon.com/s3/) or using a local blob storage solution). You
+can download the core binary from github or build it using the instructions
+in [Creating a New Heron Release](compiling-overview#building-all-components).
+
+Command for fetching the binary is in the `heron.aurora` config file. By default it is 
+using a `curl` command to fetch the binary. For example, if the binary is hosted in 
+HDFS, you need to change the fetch user package command in `heron.aurora` to use the 
+`hdfs` command instead of `curl`.
+
+### `heron.aurora` example binary fetch using HDFS
+
+```bash
+fetch_heron_system = Process(
+  name = 'fetch_heron_system',
+  cmdline = 'hdfs dfs -get %s %s && tar zxf %s' % (heron_core_release_uri, 
+        core_release_file, core_release_file)
+)
+```
+
+Once your Heron binaries are hosted somewhere that is accessible to Aurora, you
+should run tests to ensure that Aurora can successfully fetch them.
+
+## Uploading the Topologies
+
+Heron uses an uploader to upload the topology to a shared location so that a worker can fetch 
+the topology to its sandbox. The configuration for an uploader is in the `uploader.yaml` 
+config file. For distributed Aurora deployments, Heron can use `HdfsUploader` or `S3Uploader`. 
+Details on configuring the uploaders can be found in the documentation for the 
+[HDFS](uploaders-hdfs) and [S3](uploaders-amazon-s3) uploaders. 
+
+After configuring an uploader, the `heron.aurora` config file needs to be modified accordingly to 
+fetch the topology. 
+
+### `heron.aurora` example topology fetch using HDFS
+
+```bash
+fetch_user_package = Process(
+  name = 'fetch_user_package',
+  cmdline = 'hdfs dfs -get %s %s && tar zxf %s' % (heron_topology_jar_uri, 
+          topology_package_file, topology_package_file)
+)
+```
+
+## Aurora Scheduler Configuration
+
+To configure Heron to use Aurora scheduler, modify the `scheduler.yaml`
+config file specific for the Heron cluster. The following must be specified
+for each cluster:
+
+* `heron.class.scheduler` --- Indicates the class to be loaded for Aurora scheduler.
+You should set this to `org.apache.heron.scheduler.aurora.AuroraScheduler`
+
+* `heron.class.launcher` --- Specifies the class to be loaded for launching and
+submitting topologies. To configure the Aurora launcher, set this to
+`org.apache.heron.scheduler.aurora.AuroraLauncher`
+
+* `heron.package.core.uri` --- Indicates the location of the heron core binary package.
+The local scheduler uses this URI to download the core package to the working directory.
+
+* `heron.directory.sandbox.java.home` --- Specifies the java home to
+be used when running topologies in the containers.
+
+* `heron.scheduler.is.service` --- This config indicates whether the scheduler
+is a service. In the case of Aurora, it should be set to `False`.
+
+### Example Aurora Scheduler Configuration
+
+```yaml
+# scheduler class for distributing the topology for execution
+heron.class.scheduler: org.apache.heron.scheduler.aurora.AuroraScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher: org.apache.heron.scheduler.aurora.AuroraLauncher
+
+# location of the core package
+heron.package.core.uri: file:///vagrant/.herondata/dist/heron-core-release.tar.gz
+
+# location of java - pick it up from shell environment
+heron.directory.sandbox.java.home: /usr/lib/jvm/java-1.8.0-openjdk-amd64/
+
+# Invoke the IScheduler as a library directly
+heron.scheduler.is.service: False
+```
+
+## Working with Topologies
+
+After setting up ZooKeeper and generating an Aurora-accessible Heron core binary
+release, any machine that has the `heron` cli tool can be used to manage Heron
+topologies (i.e. can submit topologies, activate and deactivate them, etc.).
+
+The most important thing at this stage is to ensure that `heron` cli is available
+across all machines. Once the cli is available, Aurora as a scheduler
+can be enabled by specifying the proper configuration when managing topologies.
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-local.md b/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-local.md
new file mode 100644
index 0000000..28b4ade
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-aurora-local.md
@@ -0,0 +1,330 @@
+---
+id: version-0.20.0-schedulers-aurora-local
+title: Setting up Heron with Aurora Cluster Locally on Linux
+sidebar_label: Aurora Locally
+original_id: schedulers-aurora-local
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+It is possible to setup Heron with a locally running Apache Aurora cluster.
+This is a step by step guide on how to configure and setup all the necessary
+components.
+
+## Setting Up Apache Aurora Cluster locally
+
+You first need to setup Apache Aurora locally. More detailed description of the
+following steps can be found in [A local Cluster with Vagrant](http://aurora.apache.org/documentation/latest/getting-started/vagrant/)
+
+### Step 1: Install VirtualBox and Vagrant
+
+Download and install VirtualBox and Vagrant on your machine. If vagrant is successfully
+installed in your machine the following command should list several common commands
+for this tool
+
+```bash
+$ vagrant
+```
+
+### Step 2: Clone the Aurora repository
+
+You can get the source repository for Aurora with the following command
+
+```bash
+$ git clone git://git.apache.org/aurora.git
+```
+
+Once the clone is complete cd into the aurora folder
+
+```bash
+$ cd aurora
+```
+
+### Step 3: Starting Local Aurora Cluster
+
+To start the local cluster all you have to do is execute the following command. It will install all the needed dependencies like Apache Mesos and Zookeeper in the VM.
+
+```bash
+$ vagrant up
+```
+
+Additionally to get rid of some of the warning messages that you get during up command execute the following command
+
+```bash
+$ vagrant plugin install vagrant-vbguest
+```
+
+You can verify that the Aurora cluster is properly running by opening the following links in your web-browser
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+If you go into http://192.168.33.7:8081/scheduler you can notice that the default cluster that is set up in Aurora is
+named `devcluster`. This will be important to note when submitting topologies from Heron.
+
+![Heron topology](assets/aurora-local-cluster-start.png)
+
+## Installing Heron within the Cluster VM
+
+Now that the Aurora cluster is setup you need to install heron within the cluster VM in order to be able to get the Heron
+deployment working. Since this is a fresh VM instance you will have to install the basic software such as "unzip" and set
+the JAVA_HOME path as an environmental variable ( Just need to add this to .bashrc file). After you have the basic stuff
+working follow the following steps to install Heron in the VM. You can ssh into the VM with the following command
+
+```bash
+$ vagrant ssh
+```
+
+### Step 1.a : Download installation script files
+
+You can download the script files that match your Linux distribution from
+https://github.com/apache/incubator-heron/releases/tag/{{heron:version}}
+
+For example, for the {{heron:version}} release the files you need to download for Ubuntu will be the following.
+
+* `heron-install-{{heron:version}}-ubuntu.sh`
+
+Optionally, you may also need the following for later steps in this guide:
+
+* `heron-api-install-{{heron:version}}-ubuntu.sh`
+* `heron-core-{{heron:version}}-ubuntu.tar.gz`
+
+### Step 1.b: Execute the client and tools shell scripts
+
+
+```bash
+$ chmod +x heron-install-VERSION-PLATFORM.sh
+$ ./heron-install-VERSION-PLATFORM.sh --user
+Heron client installer
+----------------------
+
+Uncompressing......
+Heron is now installed!
+
+Make sure you have "/home/vagrant/bin" in your path.
+```
+
+After this you need to add the path "/home/vagrant/bin". You can just execute the following command
+or add it to the end of the `.bashrc` file (which is more convenient).
+
+```bash
+$ export PATH=$PATH:/home/vagrant/bin
+```
+
+Install the following packages to make sure that you have all the needed dependencies in the VM.
+You might have to do sudo apt-get update before you execute the following.
+
+```bash
+$ sudo apt-get install git build-essential automake cmake libtool zip libunwind-setjmp0-dev zlib1g-dev unzip pkg-config -y
+```
+
+## Configuring State Manager ( Apache Zookeeper )
+
+Since Heron only uses Apache Zookeeper for coordination, the load on the Zookeeper
+node is minimal. Because of this it is sufficient to use a single Zookeeper node, or
+if you have a Zookeeper instance running for some other task you can simply use that.
+Since Apache Aurora already uses a Zookeeper instance, you can directly use that instance
+to execute State Manager tasks of Heron. First you need to configure Heron to work with
+the Zookeeper instance. You can find meanings of each attribute in [Setting Up ZooKeeper
+State Manager](state-managers-zookeeper). Configurations for State manager are
+located in the directory `/home/vagrant/.heron/conf/aurora`.
+
+Open the file `statemgr.yaml` using vim ( or some other text editor you prefer )
+and add/edit the file to include the following.
+
+```yaml
+# local state manager class for managing state in a persistent fashion
+heron.class.state.manager: org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager
+
+# local state manager connection string
+heron.statemgr.connection.string:  "127.0.0.1:2181"
+
+# path of the root address to store the state in a local file system
+heron.statemgr.root.path: "/heronroot"
+
+# create the zookeeper nodes, if they do not exist
+heron.statemgr.zookeeper.is.initialize.tree: True
+
+# timeout in ms to wait before considering zookeeper session is dead
+heron.statemgr.zookeeper.session.timeout.ms: 30000
+
+# timeout in ms to wait before considering zookeeper connection is dead
+heron.statemgr.zookeeper.connection.timeout.ms: 30000
+
+# timeout in ms to wait before considering zookeeper connection is dead
+heron.statemgr.zookeeper.retry.count: 10
+
+# duration of time to wait until the next retry
+heron.statemgr.zookeeper.retry.interval.ms: 10000
+```
+
+## Creating Paths in Zookeeper
+
+Next you need to create some paths within Zookeeper since some of the paths
+are not created by Heron automatically. So you need to create them manually.
+Since Aurora installation already installed Zookeeper, you can use the Zookeeper
+cli to create the manual paths.
+
+```bash
+$ sudo /usr/share/zookeeper/bin/zkCli.sh
+```
+
+This will connect to the Zookeeper instance running locally. Then execute the
+following commands from within the client to create paths `/heronroot/topologies`
+and `/heron/topologies`. Later in "Associating new Aurora cluster into Heron UI"
+you will see that you only need to create `/heronroot/topologies` but for now lets
+create both to make sure you don't get any errors when you run things.
+
+```bash
+create /heronroot null
+create /heronroot/topologies null
+```
+
+```bash
+create /heron null
+create /heron/topologies null
+```
+
+## Configuring Scheduler ( Apache Aurora )
+
+Next you need to configure Apache Aurora to be used as the Scheduler for our Heron
+local cluster. In order to do this you need to edit the `scheduler.yaml` file that is
+also located in `/home/vagrant/.heron/conf/aurora`. Add/Edit the file to include the
+following. More information regarding parameters can be found in [Aurora Cluster](schedulers-aurora-cluster)
+
+```yaml
+# scheduler class for distributing the topology for execution
+heron.class.scheduler: org.apache.heron.scheduler.aurora.AuroraScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher: org.apache.heron.scheduler.aurora.AuroraLauncher
+
+# location of the core package
+heron.package.core.uri: file:///home/vagrant/.heron/dist/heron-core.tar.gz
+
+# location of java - pick it up from shell environment
+heron.directory.sandbox.java.home: /usr/lib/jvm/java-1.8.0-openjdk-amd64/
+
+# Invoke the IScheduler as a library directly
+heron.scheduler.is.service: False
+```
+
+Additionally edit the `client.yaml` file and change the core uri to make it consistent.
+
+```yaml
+# location of the core package
+heron.package.core.uri: file:///home/vagrant/.heron/dist/heron-core.tar.gz
+```
+
+### Important Step: Change folder name `aurora` to `devcluster`
+
+Next you need to change the folder name of `/home/vagrant/.heron/conf/aurora` to
+`/home/vagrant/.heron/conf/devcluster`. This is because the name of your aurora
+cluster is `devcluster` as you noted in a previous step. You can do this with the
+following commands
+
+```bash
+$ cd /home/vagrant/.heron/conf/
+$ mv aurora devcluster
+```
+
+## Submitting Example Topology to Aurora cluster
+
+Now you can submit a topology to the Aurora cluster. This can be done with the following command.
+
+```bash
+$ heron submit devcluster/heronuser/devel --config-path ~/.heron/conf/ ~/.heron/examples/heron-api-examples.jar org.apache.heron.examples.api.ExclamationTopology ExclamationTopology
+```
+
+Now you should be able to see the topology in the Aurora UI ( http://192.168.33.7:8081/scheduler/heronuser ) .
+
+![Heron topology](assets/aurora-local-topology-submitted.png)
+
+### Understanding the parameters
+
+
+Below is a brief explanation of some of the important parameters used in this command. The first
+parameter `devcluster/heronuser/devel` defines cluster, role and env ( env can have values `prod | devel | test | staging` ).
+The cluster is the name of the aurora cluster which is `devcluster` in our case. You can give something like your
+name for the role name and for env you need to choose from one of the env values.
+
+`--config-path` points to the config folder. the program will automatically look for a folder with the cluster name.
+This is why you had to change the name of the aurora conf folder to devcluster.
+
+Now that everything is working you need to perform one last step to be able to see the topologies that you can see in Aurora UI in Heron UI.
+
+## Associating new Aurora cluster into Heron UI
+
+Heron UI uses information it gets from the Heron Tracker when displaying the information in the Heron UI interface.
+So in-order to allow the Heron UI to show Aurora cluster information you need to modify configuration of the Heron tracker
+so that it can identify the Aurora Cluster.
+
+Heron Tracker configurations are located at `/home/vagrant/.herontools/conf` the configuration file is named `heron_tracker.yaml`.
+By default you should see the following in the file
+
+```yaml
+statemgrs:
+  -
+    type: "file"
+    name: "local"
+    rootpath: "~/.herondata/repository/state/local"
+    tunnelhost: "localhost"
+  -
+    type: "zookeeper"
+    name: "localzk"
+    hostport: "localhost:2181"
+    rootpath: "/heron"
+    tunnelhost: "localhost"
+```
+
+You can see that there are already two entries. Earlier, you had to create the path `/heron/topologies` in Zookeeper
+because of the entry named `localzk` in this file. If you remove this entry you will not need to create that path in Zookeeper.
+Now all you have to do is add a new entry for the Aurora cluster into this file ( lets comment out `localzk` ).
+Then the file would look like below.
+
+```yaml
+statemgrs:
+  -
+    type: "file"
+    name: "local"
+    rootpath: "~/.herondata/repository/state/local"
+    tunnelhost: "localhost"
+  #-
+   #type: "zookeeper"
+   # name: "localzk"
+   # hostport: "localhost:2181"
+   # rootpath: "/heron"
+   # tunnelhost: "localhost"
+  -
+    type: "zookeeper"
+    name: "devcluster"
+    hostport: "localhost:2181"
+    rootpath: "/heronroot"
+    tunnelhost: "localhost"
+```
+
+Now you can start Heron tracker and then Heron UI, Now you will be able to see the aurora cluster from the
+Heron UI ( http://192.168.33.7:8889/topologies ) as below
+
+```bash
+$ heron-tracker
+$ heron-ui
+```
+
+![Heron topology](assets/heron-ui-topology-submitted.png)
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-by-hand.md b/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-by-hand.md
new file mode 100644
index 0000000..13191fa9
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-by-hand.md
@@ -0,0 +1,589 @@
+---
+id: version-0.20.0-schedulers-k8s-by-hand
+title: Kubernetes by hand
+sidebar_label: Kubernetes by hand
+original_id: schedulers-k8s-by-hand
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> This document shows you how to install Heron on Kubernetes in a step-by-step, "by hand" fashion. An easier way to install Heron on Kubernetes is to use the [Helm](https://helm.sh) package manager. For instructions on doing so, see [Heron on Kubernetes with Helm](schedulers-k8s-with-helm).
+
+Heron supports deployment on [Kubernetes](https://kubernetes.io/) (sometimes called **k8s**). Heron deployments on Kubernetes use Docker as the containerization format for Heron topologies and use the Kubernetes API for scheduling.
+
+You can use Heron on Kubernetes in multiple environments:
+
+* Locally using [Minikube](#minikube)
+* In the cloud on [Google Container Engine](#google-container-engine) (GKE)
+* In [any other](#general-kubernetes-clusters) Kubernetes cluster
+
+## Requirements
+
+In order to run Heron on Kubernetes, you will need:
+
+* A Kubernetes cluster with at least 3 nodes (unless you're running locally on [Minikube](#minikube))
+* The [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) CLI tool installed and set up to communicate with your cluster
+* The [`heron`](getting-started-local-single-node) CLI tool
+
+Any additional requirements will depend on where you're running Heron on Kubernetes.
+
+## How Heron on Kubernetes Works
+
+When deploying to Kubernetes, each Heron container is deployed as a Kubernetes
+[pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) inside of a Docker container. If there
+are 20 containers that are going to be deployed with a topology, for example, then there will be 20 pods
+deployed to your Kubernetes cluster for that topology.
+
+## Minikube
+
+[Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) enables you to run a Kubernetes cluster locally on a single machine.
+
+### Requirements
+
+To run Heron on Minikube you'll need to [install Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/#installation) in addition to the other requirements listed [above](#requirements).
+
+### Starting Minikube
+
+First you'll need to start up Minikube using the `minikube start` command. We recommend starting Minikube with:
+
+* at least 7 GB of memory
+* 5 CPUs
+* 20 GB of storage
+
+This command will accomplish precisely that:
+
+```bash
+$ minikube start \
+  --memory=7168 \
+  --cpus=5 \
+  --disk-size=20G
+```
+
+### Starting components
+
+There are a variety of Heron components that you'll need to start up separately *and in order*. Make sure that the necessary pods are up and in the `RUNNING` state before moving on to the next step. You can track the progress of the pods using this command:
+
+```bash
+$ kubectl get pods -w
+```
+
+#### ZooKeeper
+
+Heron uses [ZooKeeper](https://zookeeper.apache.org) for a variety of coordination- and configuration-related tasks. To start up ZooKeeper on Minikube:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/minikube/zookeeper.yaml
+```
+
+#### BookKeeper
+
+When running Heron on Kubernetes, [Apache BookKeeper](https://bookkeeper.apache.org) is used for things like topology artifact storage. You can start up BookKeeper using this command:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/minikube/bookkeeper.yaml
+```
+
+#### Heron tools
+
+The so-called "Heron tools" include the [Heron UI](user-manuals-heron-ui) and the [Heron Tracker](user-manuals-heron-tracker-runbook). To start up the Heron tools:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/minikube/tools.yaml
+```
+
+#### Heron API server
+
+The Heron API server is the endpoint that the Heron CLI client uses to interact with the other components of Heron. To start up the Heron API server on Minikube:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/minikube/apiserver.yaml
+```
+
+### Managing topologies
+
+Once all of the [components](#starting-components) have been successfully started up, you need to open up a proxy port to your Minikube Kubernetes cluster using the [`kubectl proxy`](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) command:
+
+```bash
+$ kubectl proxy -p 8001
+```
+
+> Note: All of the following Kubernetes specific urls are valid with the Kubernetes 1.10.0 release.
+
+Now, verify that the Heron API server running on Minikube is available using curl:
+
+```bash
+$ curl http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy/api/v1/version
+```
+
+You should get a JSON response like this:
+
+```json
+{
+  "heron.build.git.revision" : "ddbb98bbf173fb082c6fd575caaa35205abe34df",
+  "heron.build.git.status" : "Clean",
+  "heron.build.host" : "ci-server-01",
+  "heron.build.time" : "Sat Mar 31 09:27:19 UTC 2018",
+  "heron.build.timestamp" : "1522488439000",
+  "heron.build.user" : "release-agent",
+  "heron.build.version" : "0.17.8"
+}
+```
+
+Success! You can now manage Heron topologies on your Minikube Kubernetes installation. To submit an example topology to the cluster:
+
+```bash
+$ heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking
+```
+
+You can also track the progress of the Kubernetes pods that make up the topology. When you run `kubectl get pods` you should see pods with names like `acking-0` and `acking-1`.
+
+Another option is to set the service URL for Heron using the `heron config` command:
+
+```bash
+$ heron config kubernetes set service_url \
+  http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy
+```
+
+That would enable you to manage topologies without setting the `--service-url` flag.
+
+### Heron UI
+
+The [Heron UI](user-manuals-heron-ui) is an in-browser dashboard that you can use to monitor your Heron [topologies](heron-topology-concepts). It should already be running in Minikube.
+
+You can access [Heron UI](user-manuals-heron-ui) in your browser by navigating to http://localhost:8001/api/v1/namespaces/default/services/heron-ui:8889/proxy/topologies.
+
+## Google Container Engine
+
+You can use [Google Container Engine](https://cloud.google.com/container-engine/) (GKE) to run Kubernetes clusters on [Google Cloud Platform](https://cloud.google.com/).
+
+### Requirements
+
+To run Heron on GKE, you'll need to create a Kubernetes cluster with at least three nodes. This command would create a three-node cluster in your default Google Cloud Platform zone and project:
+
+```bash
+$ gcloud container clusters create heron-gke-cluster \
+  --machine-type=n1-standard-4 \
+  --num-nodes=3
+```
+
+You can specify a non-default zone and/or project using the `--zone` and `--project` flags, respectively.
+
+Once the cluster is up and running, enable your local `kubectl` to interact with the cluster by fetching your GKE cluster's credentials:
+
+```bash
+$ gcloud container clusters get-credentials heron-gke-cluster
+Fetching cluster endpoint and auth data.
+kubeconfig entry generated for heron-gke-cluster.
+```
+
+Finally, you need to create a Kubernetes [secret](https://kubernetes.io/docs/concepts/configuration/secret) that specifies the Cloud Platform connection credentials for your service account. First, download your Cloud Platform credentials as a JSON file, say `key.json`. This command will download your credentials:
+
+```bash
+$ gcloud iam service-accounts keys create key.json \
+  --iam-account=YOUR-ACCOUNT
+```
+
+### Topology artifact storage
+
+Heron on Google Container Engine supports two static file storage options for topology artifacts:
+
+* [Google Cloud Storage](#google-cloud-storage-setup)
+* [BookKeeper](#bookkeeper-setup)
+
+#### Google Cloud Storage setup
+
+If you're running Heron on GKE, you can use either [Google Cloud Storage](https://cloud.google.com/storage/) or [Apache BookKeeper](https://bookkeeper.apache.org) for topology artifact storage.
+
+> If you'd like to use BookKeeper instead of Google Cloud Storage, skip to the [BookKeeper](#bookkeeper-setup) section below.
+
+To use Google Cloud Storage for artifact storage, you'll need to create a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. Here's an example bucket creation command using [`gsutil`](https://cloud.google.com/storage/docs/gsutil):
+
+```bash
+$ gsutil mb gs://my-heron-bucket
+```
+
+Cloud Storage bucket names must be globally unique, so make sure to choose a bucket name carefully. Once you've created a bucket, you need to create a Kubernetes [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/) that specifies the bucket name. Here's an example:
+
+```bash
+$ kubectl create configmap heron-apiserver-config \
+  --from-literal=gcs.bucket=BUCKET-NAME
+```
+
+> You can list your current service accounts using the `gcloud iam service-accounts list` command.
+
+Then you can create the secret like this:
+
+```bash
+$ kubectl create secret generic heron-gcs-key \
+  --from-file=key.json=key.json
+```
+
+Once you've created a bucket, a `ConfigMap`, and a secret, you can move on to [starting up](#starting-components) the various components of your Heron installation.
+
+### Starting components
+
+There are a variety of Heron components that you'll need to start up separately *and in order*. Make sure that the necessary pods are up and in the `RUNNING` state before moving on to the next step. You can track the progress of the pods using this command:
+
+```bash
+$ kubectl get pods -w
+```
+
+#### ZooKeeper
+
+Heron uses [ZooKeeper](https://zookeeper.apache.org) for a variety of coordination- and configuration-related tasks. To start up ZooKeeper on your GKE cluster:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/zookeeper.yaml
+```
+
+#### BookKeeper setup
+
+> If you're using [Google Cloud Storage](#google-cloud-storage-setup) for topology artifact storage, skip to the [Heron tools](#heron-tools-gke) section below.
+
+To start up an [Apache BookKeeper](https://bookkeeper.apache.org) cluster for Heron:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/bookkeeper.yaml
+```
+
+#### Heron tools <a id="heron-tools-gke"></a>
+
+The so-called "Heron tools" include the [Heron UI](user-manuals-heron-ui) and the [Heron Tracker](user-manuals-heron-tracker-runbook). To start up the Heron tools:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/tools.yaml
+```
+
+#### Heron API server
+
+The [Heron API server](deployment-api-server) is the endpoint that the [Heron CLI client](user-manuals-heron-cli) uses to interact with the other components of Heron. Heron on Google Container Engine has two separate versions of the Heron API server that you can run depending on which artifact storage system you're using ([Google Cloud Storage](#google-cloud-storage-setup) or [Apache BookKeeper](#bookkeeper-setup)).
+
+If you're using Google Cloud Storage:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/gcs-apiserver.yaml
+```
+
+If you're using Apache BookKeeper:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/bookkeeper-apiserver.yaml
+```
+
+### Managing topologies
+
+Once all of the [components](#starting-components) have been successfully started up, you need to open up a proxy port to your GKE Kubernetes cluster using the [`kubectl proxy`](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) command:
+
+```bash
+$ kubectl proxy -p 8001
+```
+
+> Note: All of the following Kubernetes specific urls are valid with the Kubernetes 1.10.0 release.
+
+Now, verify that the Heron API server running on GKE is available using curl:
+
+```bash
+$ curl http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy/api/v1/version
+```
+
+You should get a JSON response like this:
+
+```json
+{
+  "heron.build.git.revision" : "bf9fe93f76b895825d8852e010dffd5342e1f860",
+  "heron.build.git.status" : "Clean",
+  "heron.build.host" : "ci-server-01",
+  "heron.build.time" : "Sun Oct  1 20:42:18 UTC 2017",
+  "heron.build.timestamp" : "1506890538000",
+  "heron.build.user" : "release-agent1",
+  "heron.build.version" : "0.16.2"
+}
+```
+
+Success! You can now manage Heron topologies on your GKE Kubernetes installation. To submit an example topology to the cluster:
+
+```bash
+$ heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking
+```
+
+You can also track the progress of the Kubernetes pods that make up the topology. When you run `kubectl get pods` you should see pods with names like `acking-0` and `acking-1`.
+
+Another option is to set the service URL for Heron using the `heron config` command:
+
+```bash
+$ heron config kubernetes set service_url \
+  http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy
+```
+
+That would enable you to manage topologies without setting the `--service-url` flag.
+
+### Heron UI
+
+The [Heron UI](user-manuals-heron-ui) is an in-browser dashboard that you can use to monitor your Heron [topologies](heron-topology-concepts). It should already be running in your GKE cluster.
+
+You can access [Heron UI](user-manuals-heron-ui) in your browser by navigating to http://localhost:8001/api/v1/namespaces/default/services/heron-ui:8889/proxy/topologies.
+
+## General Kubernetes clusters
+
+Although [Minikube](#minikube) and [Google Container Engine](#google-container-engine) provide two easy ways to get started running Heron on Kubernetes, you can also run Heron on any Kubernetes cluster. The instructions in this section are tailored to non-Minikube, non-GKE Kubernetes installations.
+
+### Requirements
+
+To run Heron on a general Kubernetes installation, you'll need to fulfill the [requirements](#requirements) listed at the top of this doc. Once those requirements are met, you can begin starting up the various [components](#starting-components) that comprise a Heron on Kubernetes installation.
+
+### Starting components
+
+There are a variety of Heron components that you'll need to start up separately *and in order*. Make sure that the necessary pods are up and in the `RUNNING` state before moving on to the next step. You can track the progress of the pods using this command:
+
+```bash
+$ kubectl get pods -w
+```
+
+#### ZooKeeper
+
+Heron uses [ZooKeeper](https://zookeeper.apache.org) for a variety of coordination- and configuration-related tasks. To start up ZooKeeper on your Kubernetes cluster:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/general/zookeeper.yaml
+```
+
+#### BookKeeper
+
+When running Heron on Kubernetes, [Apache BookKeeper](https://bookkeeper.apache.org) is used for things like topology artifact storage (unless you're running on GKE). You can start up BookKeeper using this command:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/general/bookkeeper.yaml
+```
+
+#### Heron tools
+
+The so-called "Heron tools" include the [Heron UI](user-manuals-heron-ui) and the [Heron Tracker](user-manuals-heron-tracker-runbook). To start up the Heron tools:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/general/tools.yaml
+```
+
+#### Heron API server
+
+The Heron API server is the endpoint that the Heron CLI client uses to interact with the other components of Heron. To start up the Heron API server on your Kubernetes cluster:
+
+```bash
+$ kubectl create -f https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/general/apiserver.yaml
+```
+
+### Managing topologies
+
+Once all of the [components](#starting-components) have been successfully started up, you need to open up a proxy port to your Kubernetes cluster using the [`kubectl proxy`](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) command:
+
+```bash
+$ kubectl proxy -p 8001
+```
+
+> Note: All of the following Kubernetes specific urls are valid with the Kubernetes 1.10.0 release.
+
+Now, verify that the Heron API server running on your Kubernetes cluster is available using curl:
+
+```bash
+$ curl http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy/api/v1/version
+```
+
+You should get a JSON response like this:
+
+```json
+{
+  "heron.build.git.revision" : "ddbb98bbf173fb082c6fd575caaa35205abe34df",
+  "heron.build.git.status" : "Clean",
+  "heron.build.host" : "ci-server-01",
+  "heron.build.time" : "Sat Mar 31 09:27:19 UTC 2018",
+  "heron.build.timestamp" : "1522488439000",
+  "heron.build.user" : "release-agent",
+  "heron.build.version" : "0.17.8"
+}
+```
+
+Success! You can now manage Heron topologies on your Kubernetes installation. To submit an example topology to the cluster:
+
+```bash
+$ heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking
+```
+
+You can also track the progress of the Kubernetes pods that make up the topology. When you run `kubectl get pods` you should see pods with names like `acking-0` and `acking-1`.
+
+Another option is to set the service URL for Heron using the `heron config` command:
+
+```bash
+$ heron config kubernetes set service_url \
+  http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy
+```
+
+That would enable you to manage topologies without setting the `--service-url` flag.
+
+### Heron UI
+
+The [Heron UI](user-manuals-heron-ui) is an in-browser dashboard that you can use to monitor your Heron [topologies](heron-topology-concepts). It should already be running in your Kubernetes cluster.
+
+You can access [Heron UI](user-manuals-heron-ui) in your browser by navigating to http://localhost:8001/api/v1/namespaces/default/services/heron-ui:8889/proxy/topologies.
+
+## Heron on Kubernetes configuration
+
+You can configure Heron on Kubernetes using a variety of YAML config files, listed in the sections below.
+
+### client.yaml
+
+#### Configuration for the `heron` CLI tool.
+
+| name                          | description                                             | default                                                   |
+|-------------------------------|---------------------------------------------------------|-----------------------------------------------------------|
+| heron.package.core.uri        | Location of the core Heron package                      | file:///vagrant/.herondata/dist/heron-core-release.tar.gz |
+| heron.config.is.role.required | Whether a role is required to submit a topology         | False                                                     |
+| heron.config.is.env.required  | Whether an environment is required to submit a topology | False                                                     |
+
+### heron_internals.yaml
+
+#### Configuration for a wide variety of Heron components, including logging, each topology's stream manager and topology master, and more.
+
+| name                                                               | description                                                                                                                                         | default   |
+|--------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|-----------|
+| heron.logging.directory                                            | The relative path to the logging directory                                                                                                          | log-files |
+| heron.logging.maximum.size.mb                                      | The maximum log file size (in MB)                                                                                                                   | 100       |
+| heron.logging.maximum.files                                        | The maximum number of log files                                                                                                                     | 5         |
+| heron.check.tmaster.location.interval.sec                          | The interval, in seconds, after which to check if the topology master location has been fetched or not                                              | 120       |
+| heron.logging.prune.interval.sec                                   | The interval, in seconds, at which to prune C++ log files                                                                                           | 300       |
+| heron.logging.flush.interval.sec                                   | The interval, in seconds, at which to flush C++ log files                                                                                           | 10        |
+| heron.logging.err.threshold                                        | The threshold level at which to log errors                                                                                                          | 3         |
+| heron.metrics.export.interval.sec                                  | The interval, in seconds, at which different components export metrics to the metrics manager                                                       | 60        |
+| heron.metrics.max.exceptions.per.message.count                     | The maximum count of exceptions in one `MetricPublisherPublishMessage` protobuf message                                                             | 1024      |
+| heron.streammgr.cache.drain.frequency.ms                           | The frequency, in milliseconds, at which to drain the tuple cache in the stream manager                                                             | 10        |
+| heron.streammgr.stateful.buffer.size.mb                            | The sized-based threshold (in MB) for buffering data tuples waiting for checkpoint markers before giving up                                         | 100       |
+| heron.streammgr.cache.drain.size.mb                                | The sized-based threshold (in MB) for draining the tuple cache                                                                                      | 100       |
+| heron.streammgr.xormgr.rotatingmap.nbuckets                        | For efficient acknowledgements                                                                                                                      | 3         |
+| heron.streammgr.mempool.max.message.number                         | The max number of messages in the memory pool for each message type                                                                                 | 512       |
+| heron.streammgr.client.reconnect.interval.sec                      | The reconnect interval to other stream managers (in seconds) for the stream manager client                                                          | 1         |
+| heron.streammgr.client.reconnect.tmaster.interval.sec              | The reconnect interval to the topology master (in seconds) for the stream manager client                                                            | 10        |
+| heron.streammgr.client.reconnect.tmaster.max.attempts              | The max reconnect attempts to tmaster for stream manager client                                                                                     | 30        |
+| heron.streammgr.network.options.maximum.packet.mb                  | The maximum packet size (in MB) of the stream manager's network options                                                                             | 10        |
+| heron.streammgr.tmaster.heartbeat.interval.sec                     | The interval (in seconds) at which to send heartbeats                                                                                               | 10        |
+| heron.streammgr.connection.read.batch.size.mb                      | The maximum batch size (in MB) for the stream manager to read from socket                                                                           | 1         |
+| heron.streammgr.connection.write.batch.size.mb                     | Maximum batch size (in MB) for the stream manager to write to socket                                                                                | 1         |
+| heron.streammgr.network.backpressure.threshold                     | The number of times Heron should wait to see a buffer full while enqueueing data before declaring the start of backpressure                         | 3         |
+| heron.streammgr.network.backpressure.highwatermark.mb              | The high-water mark on the number (in MB) that can be left outstanding on a connection                                                              | 100       |
+| heron.streammgr.network.backpressure.lowwatermark.mb               | The low-water mark on the number (in MB) that can be left outstanding on a connection                                                               |           |
+| heron.tmaster.metrics.collector.maximum.interval.min               | The maximum interval (in minutes) for metrics to be kept in the topology master                                                                     | 180       |
+| heron.tmaster.establish.retry.times                                | The maximum number of times to retry establishing connection with the topology master                                                               | 30        |
+| heron.tmaster.establish.retry.interval.sec                         | The interval at which to retry establishing connection with the topology master                                                                     | 1         |
+| heron.tmaster.network.master.options.maximum.packet.mb             | Maximum packet size (in MB) of topology master's network options to connect to stream managers                                                      | 16        |
+| heron.tmaster.network.controller.options.maximum.packet.mb         | Maximum packet size (in MB) of the topology master's network options to connect to scheduler                                                        | 1         |
+| heron.tmaster.network.stats.options.maximum.packet.mb              | Maximum packet size (in MB) of the topology master's network options for stat queries                                                               | 1         |
+| heron.tmaster.metrics.collector.purge.interval.sec                 | The interval (in seconds) at which the topology master purges metrics from socket                                                                   | 60        |
+| heron.tmaster.metrics.collector.maximum.exception                  | The maximum number of exceptions to be stored in the topology metrics collector, to prevent out-of-memory errors                                    | 256       |
+| heron.tmaster.metrics.network.bindallinterfaces                    | Whether the metrics reporter should bind on all interfaces                                                                                          | False     |
+| heron.tmaster.stmgr.state.timeout.sec                              | The timeout (in seconds) for the stream manager, compared with (current time - last heartbeat time)                                                 | 60        |
+| heron.metricsmgr.network.read.batch.time.ms                        | The maximum batch time (in milliseconds) for the metrics manager to read from socket                                                                | 16        |
+| heron.metricsmgr.network.read.batch.size.bytes                     | The maximum batch size (in bytes) to read from socket                                                                                               | 32768     |
+| heron.metricsmgr.network.write.batch.time.ms                       | The maximum batch time (in milliseconds) for the metrics manager to write to socket                                                                 | 32768     |
+| heron.metricsmgr.network.options.socket.send.buffer.size.bytes     | The maximum socket send buffer size (in bytes)                                                                                                      | 6553600   |
+| heron.metricsmgr.network.options.socket.received.buffer.size.bytes | The maximum socket received buffer size (in bytes) for the metrics manager's network options                                                        | 8738000   |
+| heron.metricsmgr.network.options.maximum.packetsize.bytes          | The maximum packet size that the metrics manager can read                                                                                           | 1048576   |
+| heron.instance.network.options.maximum.packetsize.bytes            | The maximum size of packets that Heron instances can read                                                                                           | 10485760  |
+| heron.instance.internal.bolt.read.queue.capacity                   | The queue capacity (num of items) in bolt for buffer packets to read from stream manager                                                            | 128       |
+| heron.instance.internal.bolt.write.queue.capacity                  | The queue capacity (num of items) in bolt for buffer packets to write to stream manager                                                             | 128       |
+| heron.instance.internal.spout.read.queue.capacity                  | The queue capacity (num of items) in spout for buffer packets to read from stream manager                                                           | 1024      |
+| heron.instance.internal.spout.write.queue.capacity                 | The queue capacity (num of items) in spout for buffer packets to write to stream manager                                                            | 128       |
+| heron.instance.internal.metrics.write.queue.capacity               | The queue capacity (num of items) for metrics packets to write to metrics manager                                                                   | 128       |
+| heron.instance.network.read.batch.time.ms                          | Time based, the maximum batch time in ms for instance to read from stream manager per attempt                                                       | 16        |
+| heron.instance.network.read.batch.size.bytes                       | Size based, the maximum batch size in bytes to read from stream manager                                                                             | 32768     |
+| heron.instance.network.write.batch.time.ms                         | Time based, the maximum batch time (in milliseconds) for the instance to write to the stream manager per attempt                                    | 16        |
+| heron.instance.network.write.batch.size.bytes                      | Size based, the maximum batch size in bytes to write to stream manager                                                                              | 32768     |
+| heron.instance.network.options.socket.send.buffer.size.bytes       | The maximum socket's send buffer size in bytes                                                                                                      | 6553600   |
+| heron.instance.network.options.socket.received.buffer.size.bytes   | The maximum socket's received buffer size in bytes of instance's network options                                                                    | 8738000   |
+| heron.instance.set.data.tuple.capacity                             | The maximum number of data tuple to batch in a HeronDataTupleSet protobuf                                                                           | 1024      |
+| heron.instance.set.data.tuple.size.bytes                           | The maximum size in bytes of data tuple to batch in a HeronDataTupleSet protobuf                                                                    | 8388608   |
+| heron.instance.set.control.tuple.capacity                          | The maximum number of control tuple to batch in a HeronControlTupleSet protobuf                                                                     | 1024      |
+| heron.instance.ack.batch.time.ms                                   | The maximum time in ms for a spout to do acknowledgement per attempt, the ack batch could also break if there are no more ack tuples to process     | 128       |
+| heron.instance.emit.batch.time.ms                                  | The maximum time in ms for a spout instance to emit tuples per attempt                                                                              | 16        |
+| heron.instance.emit.batch.size.bytes                               | The maximum batch size in bytes for a spout to emit tuples per attempt                                                                              | 32768     |
+| heron.instance.execute.batch.time.ms                               | The maximum time in ms for a bolt instance to execute tuples per attempt                                                                            | 16        |
+| heron.instance.execute.batch.size.bytes                            | The maximum batch size in bytes for a bolt instance to execute tuples per attempt                                                                   | 32768     |
+| heron.instance.state.check.interval.sec                            | The time interval for an instance to check the state change, for example, the interval a spout uses to check whether activate/deactivate is invoked | 5         |
+| heron.instance.force.exit.timeout.ms                               | The time to wait before the instance exits forcibly when uncaught exception happens                                                                 | 2000      |
+| heron.instance.reconnect.streammgr.interval.sec                    | Interval in seconds to reconnect to the stream manager, including the request timeout in connecting                                                 | 5         |
+| heron.instance.reconnect.streammgr.times                           | The max number of reconnect attempts to the stream manager                                                                                          | 60        |
+| heron.instance.reconnect.metricsmgr.interval.sec                   | Interval in seconds to reconnect to the metrics manager, including the request timeout in connecting                                                | 5         |
+| heron.instance.reconnect.metricsmgr.times                          | The max number of reconnect attempts to the metrics manager                                                                                         | 60        |
+| heron.instance.metrics.system.sample.interval.sec                  | The interval in second for an instance to sample its system metrics, for instance, CPU load.                                                        | 10        |
+| heron.instance.slave.fetch.pplan.interval.sec                      | The time interval (in seconds) at which Heron instances fetch the physical plan from slaves                                                         | 1         |
+| heron.instance.acknowledgement.nbuckets                            | For efficient acknowledgement                                                                                                                       | 10        |
+| heron.instance.tuning.expected.bolt.read.queue.size                | The expected size on read queue in bolt                                                                                                             | 8         |
+| heron.instance.tuning.expected.bolt.write.queue.size               | The expected size on write queue in bolt                                                                                                            | 8         |
+| heron.instance.tuning.expected.spout.read.queue.size               | The expected size on read queue in spout                                                                                                            | 512       |
+| heron.instance.tuning.expected.spout.write.queue.size              | The expected size on write queue in spout                                                                                                           | 8         |
+| heron.instance.tuning.expected.metrics.write.queue.size            | The expected size on metrics write queue                                                                                                            | 8         |
+| heron.instance.tuning.current.sample.weight                        |                                                                                                                                                     | 0.8       |
+| heron.instance.tuning.interval.ms                                  | Interval in ms to tune the size of in & out data queue in instance                                                                                  | 100       |
+
+### packing.yaml
+
+| name                          | description                                             | default                                               |
+|-------------------------------|---------------------------------------------------------|-------------------------------------------------------|
+| heron.class.packing.algorithm | Packing algorithm for packing instances into containers | org.apache.heron.packing.roundrobin.RoundRobinPacking |
+
+### scheduler.yaml
+
+| name                              | description                                                 | default                                                   |
+|-----------------------------------|-------------------------------------------------------------|-----------------------------------------------------------|
+| heron.class.scheduler             | scheduler class for distributing the topology for execution | org.apache.heron.scheduler.kubernetes.KubernetesScheduler |
+| heron.class.launcher              | launcher class for submitting and launching the topology    | org.apache.heron.scheduler.kubernetes.KubernetesLauncher  |
+| heron.directory.sandbox.java.home | location of java - pick it up from shell environment        | $JAVA_HOME                                                |
+| heron.kubernetes.scheduler.uri    | The URI of the Kubernetes API                               |                                                           |
+| heron.scheduler.is.service        | Invoke the IScheduler as a library directly                 | false                                                     |
+| heron.executor.docker.image       | docker repo for executor                                    | heron/heron:latest                                        |
+
+### stateful.yaml
+
+| name                            | description                                            | default                                                         |
+|---------------------------------|--------------------------------------------------------|-----------------------------------------------------------------|
+| heron.statefulstorage.classname | The type of storage to be used for state checkpointing | org.apache.heron.statefulstorage.localfs.LocalFileSystemStorage |
+
+### statemgr.yaml
+
+| name                                           | description                                                           | default                                                         |
+|------------------------------------------------|-----------------------------------------------------------------------|-----------------------------------------------------------------|
+| heron.class.state.manager                      | local state manager class for managing state in a persistent fashion  | org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager |
+| heron.statemgr.connection.string               | local state manager connection string                                 |                                                                 |
+| heron.statemgr.root.path                       | path of the root address to store the state in a local file system    | /heron                                                          |
+| heron.statemgr.zookeeper.is.initialize.tree    | create the zookeeper nodes, if they do not exist                      | True                                                            |
+| heron.statemgr.zookeeper.session.timeout.ms    | timeout in ms to wait before considering zookeeper session is dead    | 30000                                                           |
+| heron.statemgr.zookeeper.connection.timeout.ms | timeout in ms to wait before considering zookeeper connection is dead | 30000                                                           |
+| heron.statemgr.zookeeper.retry.count           | number of times to retry the zookeeper connection before giving up    | 10                                                              |
+| heron.statemgr.zookeeper.retry.interval.ms     | duration of time to wait until the next retry                         | 10000                                                           |
+
+### uploader.yaml
+
+| name                         | description                                                                             | default                                 |
+|------------------------------|-----------------------------------------------------------------------------------------|-----------------------------------------|
+| heron.class.uploader         | uploader class for transferring the topology files (jars, tars, PEXes, etc.) to storage | org.apache.heron.uploader.s3.S3Uploader |
+| heron.uploader.s3.bucket     | S3 bucket in which topology assets will be stored (if AWS S3 is being used)             |                                         |
+| heron.uploader.s3.access_key | AWS access key (if AWS S3 is being used)                                                |                                         |
+| heron.uploader.s3.secret_key | AWS secret access key (if AWS S3 is being used)                                         |                                         |
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-with-helm.md b/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-with-helm.md
new file mode 100644
index 0000000..2923838
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-k8s-with-helm.md
@@ -0,0 +1,307 @@
+---
+id: version-0.20.0-schedulers-k8s-with-helm
+title: Kubernetes with Helm
+sidebar_label: Kubernetes with Helm
+original_id: schedulers-k8s-with-helm
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> If you'd prefer to install Heron on Kubernetes *without* using the [Helm](https://helm.sh) package manager, see the [Heron on Kubernetes by hand](schedulers-k8s-by-hand) document.
+
+[Helm](https://helm.sh) is an open source package manager for [Kubernetes](https://kubernetes.io) that enables you to quickly and easily install even the most complex software systems on Kubernetes. Heron has a Helm [chart](https://docs.helm.sh/developing_charts/#charts) that you can use to install Heron on Kubernetes using just a few commands. The chart can be used to install Heron on the following platforms:
+
+* [Minikube](#minikube) (the default)
+* [Google Kubernetes Engine](#google-kubernetes-engine)
+* [Amazon Web Services](#amazon-web-services)
+* [Bare metal](#bare-metal)
+
+## Requirements
+
+In order to install Heron on Kubernetes using Helm, you'll need to have an existing Kubernetes cluster on one of the supported [platforms](#specifying-a-platform) (which includes [bare metal](#bare-metal) installations).
+
+## Installing the Helm client
+
+In order to get started, you need to install Helm on your machine. Installation instructions for [macOS](#helm-for-macos) and [Linux](#helm-for-linux) are below.
+
+### Helm for macOS
+
+You can install Helm on macOS using [Homebrew](https://brew.sh):
+
+```bash
+$ brew install kubernetes-helm
+```
+
+### Helm for Linux
+
+You can install Helm on Linux using a simple installation script:
+
+```bash
+$ curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > install-helm.sh
+$ chmod 700 install-helm.sh
+$ ./install-helm.sh
+```
+
+## Installing Helm in your Kubernetes cluster
+
+To run Helm on Kubernetes, you need to first make sure that [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) is using the right configuration context for your cluster. To check which context is being used:
+
+```bash
+$ kubectl config current-context
+```
+
+If the context is correct, then you can get Helm running using just one command:
+
+```bash
+$ helm init
+```
+
+If the output of that command includes `Happy Helming!` then Helm is ready to go.
+
+## Installing Heron on Kubernetes
+
+Once you've installed the Helm client on your machine and gotten Helm running in your Kubernetes cluster, you need to make your client aware of the `heron-charts` Helm repository, which houses the chart for Heron:
+
+```bash
+$ helm repo add heron-charts https://storage.googleapis.com/heron-charts
+"heron-charts" has been added to your repositories
+```
+
+Now you can install the Heron package:
+
+```bash
+$ helm install heron-charts/heron
+```
+
+This will install Heron and provide the installation with a random name like `jazzy-anaconda`. To provide the installation with a name, such as `heron-kubernetes`:
+
+```bash
+$ helm install heron-charts/heron \
+  --name heron-kubernetes
+```
+
+### Specifying a platform
+
+The default platform for running Heron on Kubernetes is [Minikube](#minikube). To specify a different platform, you can use the `--set platform=PLATFORM` flag. Here's an example:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=gke
+```
+
+The available platforms are:
+
+Platform | Tag
+:--------|:---
+[Minikube](#minikube) | `minikube`
+[Google Kubernetes Engine](#google-kubernetes-engine) | `gke`
+[Amazon Web Services](#amazon-web-services) | `aws`
+[Bare metal](#bare-metal) | `baremetal`
+
+#### Minikube
+
+To run Heron on Minikube, you need to first [install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/). Once Minikube is installed, you can start it by running `minikube start`. Please note, however, that Heron currently requires the following resources:
+
+* 7 GB of memory
+* 5 CPUs
+* 20 GB of disk space
+
+To start up Minikube with the minimum necessary resources:
+
+```bash
+$ minikube start \
+  --memory=7168 \
+  --cpus=5 \
+  --disk-size=20g
+```
+
+Once Minikube is running, you can then install Heron in one of two ways:
+
+```bash
+# Use the Minikube default
+$ helm install heron-charts/heron
+
+# Explicitly select Minikube
+$ helm install heron-charts/heron \
+  --set platform=minikube
+```
+
+#### Google Kubernetes Engine
+
+The resources required to run Heron on [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) vary based on your use case. To run a basic Heron cluster intended for development and experimentation, you'll need at least:
+
+* 3 nodes
+* [n1-standard-4](https://cloud.google.com/compute/docs/machine-types#standard_machine_types) machines
+
+To create a cluster with those resources using the [gcloud](https://cloud.google.com/sdk/gcloud/) tool:
+
+```bash
+$ gcloud container clusters create heron-gke-dev-cluster \
+  --num-nodes=3 \
+  --machine-type=n1-standard-2
+```
+
+For a production-ready cluster you'll want a larger cluster with:
+
+* *at least* 8 nodes
+* [n1-standard-4 or n1-standard-8](https://cloud.google.com/compute/docs/machine-types#standard_machine_types) machines (preferably the latter)
+
+To create such a cluster:
+
+```bash
+$ gcloud container clusters create heron-gke-prod-cluster \
+  --num-nodes=8 \
+  --machine-type=n1-standard-8
+```
+
+Once the cluster has been successfully created, you'll need to install that cluster's credentials locally so that they can be used by [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). You can do this in just one command:
+
+```bash
+$ gcloud container clusters get-credentials heron-gke-dev-cluster # or heron-gke-prod-cluster
+```
+
+Once the cluster is running (that could take a few minutes), you can initialize Helm on the cluster:
+
+```bash
+$ helm init
+```
+
+Then, you'll need to adjust some RBAC permissions for your cluster:
+
+```bash
+$ kubectl create serviceaccount tiller \
+  --namespace kube-system
+$ kubectl create clusterrolebinding tiller-cluster-rule \
+  --clusterrole cluster-admin \
+  --serviceaccount kube-system:tiller
+$ kubectl patch deploy tiller-deploy \
+  --namespace kube-system \
+  --patch '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+```
+
+Finally, you can install Heron:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=gke
+```
+
+##### Resource configurations
+
+Helm enables you to supply sets of variables via YAML files. There are currently a handful of different resource configurations that can be applied to your Heron on GKE cluster upon installation:
+
+Configuration | Description
+:-------------|:-----------
+[`small.yaml`](https://github.com/apache/incubator-heron/blob/master/deploy/kubernetes/gke/small.yaml) | Smaller Heron cluster intended for basic testing, development, and experimentation
+[`medium.yaml`](https://github.com/apache/incubator-heron/blob/master/deploy/kubernetes/gke/medium.yaml) | Closer geared for production usage
+
+To apply the `small` configuration, for example:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=gke \
+  --values https://raw.githubusercontent.com/apache/incubator-heron/master/deploy/kubernetes/gcp/small.yaml
+```
+
+#### Amazon Web Services
+
+To run Heron on Kubernetes on Amazon Web Services (AWS), you'll need to specify the `aws` platform when installing the chart:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=aws
+```
+
+##### Using S3 uploader
+
+You can make Heron use S3 to distribute the user topologies. First, you need to set up an S3 bucket and configure an IAM user with sufficient permissions over it. Get access keys for the user. Then you can deploy Heron like this:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=aws \
+  --set uploader.class=s3 \
+  --set uploader.s3Bucket=heron \
+  --set uploader.s3PathPrefix=topologies \
+  --set uploader.s3AccessKey=XXXXXXXXXXXXXXXXXXXX \
+  --set uploader.s3SecretKey=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \
+  --set uploader.s3Region=us-west-1
+```
+
+#### Bare metal
+
+To run Heron on a bare metal Kubernetes cluster:
+
+```bash
+$ helm install heron-charts/heron \
+  --set platform=baremetal
+```
+
+### Managing topologies
+
+> When setting the `heron` CLI configuration, make sure that the cluster name matches the name of the Helm installation. This can be either the name auto-generated by Helm or the name you supplied via the `--name` flag upon installation (in some of the examples above, the `heron-kubernetes` name was used). Make sure to adjust the name accordingly if necessary.
+
+Once all of the components have been successfully started up, you need to open up a proxy port to your Kubernetes cluster using the [`kubectl proxy`](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) command:
+
+```bash
+$ kubectl proxy -p 8001
+```
+> Note: All of the following Kubernetes specific urls are valid with the Kubernetes 1.10.0 release.
+ 
+Now, verify that the Heron API server running on Minikube is available using curl:
+
+```bash
+$ curl http://localhost:8001/api/v1/namespaces/default/services/heron-kubernetes-apiserver:9000/proxy/api/v1/version
+```
+
+
+You should get a JSON response like this:
+
+```json
+{
+  "heron.build.git.revision" : "ddbb98bbf173fb082c6fd575caaa35205abe34df",
+  "heron.build.git.status" : "Clean",
+  "heron.build.host" : "ci-server-01",
+  "heron.build.time" : "Sat Mar 31 09:27:19 UTC 2018",
+  "heron.build.timestamp" : "1522488439000",
+  "heron.build.user" : "release-agent",
+  "heron.build.version" : "0.17.8"
+}
+```
+
+## Running topologies on Heron on Kubernetes
+
+Once you have a Heron cluster up and running on Kubernetes via Helm, you can use the [`heron` CLI tool](user-manuals-heron-cli) like normal if you set the proper URL for the [Heron API server](deployment-api-server). When running Heron on Kubernetes, that URL is:
+
+```bash
+$ http://localhost:8001/api/v1/namespaces/default/services/heron-kubernetes-apiserver:9000/proxy
+```
+
+To set that URL:
+
+```bash
+$ heron config heron-kubernetes set service_url \
+  http://localhost:8001/api/v1/namespaces/default/services/heron-kubernetes-apiserver:9000/proxy
+```
+
+To test your cluster, you can submit an example topology:
+
+```bash
+$ heron submit heron-kubernetes \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  WindowedWordCount
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-local.md b/website2/website/versioned_docs/version-0.20.0/schedulers-local.md
new file mode 100644
index 0000000..18f434b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-local.md
@@ -0,0 +1,87 @@
+---
+id: version-0.20.0-schedulers-local
+title: Local Cluster
+sidebar_label: Local Cluster
+original_id: schedulers-local
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+In addition to out-of-the-box schedulers for
+[Aurora](schedulers-aurora-cluster), Heron can also be deployed in a local environment, which
+stands up a mock Heron cluster on a single machine. This can be useful for
+experimenting with Heron's features, testing a wide variety of possible cluster
+events, and so on.
+
+One of two state managers can be used for coordination when deploying locally:
+
+* [ZooKeeper](state-managers-zookeeper)
+* [Local File System](state-managers-local-fs)
+
+**Note**: Deploying a Heron cluster locally is not to be confused with Heron's
+[simulator mode](guides-simulator-mode). Simulator mode enables
+you to run topologies in a cluster-agnostic JVM process for the purpose of
+development and debugging, while the local scheduler stands up a Heron cluster
+on a single machine.
+
+## How Local Deployment Works
+
+Using the local scheduler is similar to deploying Heron on other schedulers.
+The [Heron](user-manuals-heron-cli) cli is used to deploy and manage topologies
+as would be done using a distributed scheduler. The main difference is in
+the configuration.
+
+## Local Scheduler Configuration
+
+To configure Heron to use local scheduler, specify the following in `scheduler.yaml`
+config file.
+
+* `heron.class.scheduler` --- Indicates the class to be loaded for local scheduler.
+Set this to `org.apache.heron.scheduler.local.LocalScheduler`
+
+* `heron.class.launcher` --- Specifies the class to be loaded for launching
+topologies. Set this to `org.apache.heron.scheduler.local.LocalLauncher`
+
+* `heron.scheduler.local.working.directory` --- Provides the working
+directory for topology. The working directory is essentially a scratch pad where
+topology jars, heron core release binaries, topology logs, etc are generated and kept.
+
+* `heron.package.core.uri` --- Indicates the location of the heron core binary package.
+The local scheduler uses this URI to download the core package to the working directory.
+
+* `heron.directory.sandbox.java.home` --- Specifies the java home to
+be used when running topologies in the containers. Set to `${JAVA_HOME}` to
+use the value set in the bash environment variable $JAVA_HOME.
+
+### Example Local Scheduler Configuration
+
+```yaml
+# scheduler class for distributing the topology for execution
+heron.class.scheduler: org.apache.heron.scheduler.local.LocalScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher: org.apache.heron.scheduler.local.LocalLauncher
+
+# working directory for the topologies
+heron.scheduler.local.working.directory: ${HOME}/.herondata/topologies/${CLUSTER}/${TOPOLOGY}
+
+# location of the core package
+heron.package.core.uri: file://${HERON_DIST}/heron-core.tar.gz
+
+# location of java - pick it up from shell environment
+heron.directory.sandbox.java.home: ${JAVA_HOME}
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-mesos-local-mac.md b/website2/website/versioned_docs/version-0.20.0/schedulers-mesos-local-mac.md
new file mode 100644
index 0000000..c3be0f0
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-mesos-local-mac.md
@@ -0,0 +1,168 @@
+---
+id: version-0.20.0-schedulers-mesos-local-mac
+title: Setting up Heron with Mesos Cluster Locally on Mac
+sidebar_label: Mesos Cluster Locally
+original_id: schedulers-mesos-local-mac
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+This is a step by step guide to run Heron on a Mesos cluster locally.
+
+## Install Heron
+Follow [Quick Start Guide](getting-started-local-single-node) to install Heron.
+
+## Setting up an Apache Mesos Cluster Locally
+
+Follow [Installing Mesos on your Mac with Homebrew](https://mesosphere.com/blog/2014/07/07/installing-mesos-on-your-mac-with-homebrew/)
+to install and run Mesos. To confirm the Mesos cluster is ready for accepting Heron topologies, access
+the Mesos management console [http://localhost:5050](http://localhost:5050) and confirm there are
+activated slaves.
+
+![console page](assets/mesos-management-console.png)
+
+## Configure Heron
+
+### State Manager
+By default, Heron uses Local File System State Manager on Mesos to manage states. Modify
+`$HOME/.heron/conf/mesos/statemgr.yaml` to use ZooKeeper. For more details see [Setting up
+ZooKeeper](state-managers-zookeeper).
+
+### Scheduler
+Heron needs to know where to load the lib to interact with Mesos. Change the config
+`heron.mesos.native.library.path` in `$HOME/.heron/conf/mesos/scheduler.yaml` to the library path
+of the Mesos install. If Mesos is installed through `brew`, the library path should be 
+`/usr/local/Cellar/mesos/your_mesos_version/lib`.
+
+> Mesos only offers a C++ interface, which is not portable across platforms.
+
+
+## Run Topology in Mesos
+
+After setting up Heron and Mesos, submit a topology using the following command. By default this
+command loads the config in `$HOME/.heron/conf`. Add `--config-path=your_conf_path` to change the
+config path.
+
+```bash
+heron submit mesos --verbose ~/.heron/examples/heron-api-examples.jar \
+org.apache.heron.examples.api.ExclamationTopology ExclamationTopology
+```
+
+The following will be displayed upon a successful submit.
+
+```bash
+[2016-07-25 22:04:41 -0700] org.apache.heron.scheduler.mesos.MesosLauncher INFO: \
+For checking the status and logs of the topology, use the working directory \
+$HOME/.herondata/topologies/mesos/$USER/ExclamationTopology
+[2016-07-25 22:04:41 -0700] org.apache.heron.scheduler.SubmitterMain FINE:  Topology \
+ExclamationTopology submitted successfully
+INFO: Topology 'ExclamationTopology' launched successfully
+INFO: Elapsed time: 4.114s.
+``` 
+
+Note that this doesn't necessarily mean the topology was successfully launched in Mesos. To verify,
+check the working directory as shown in the output. You will see:
+
+* `heron-examples.jar`: the jar which contains the topology submitted.
+* `heron-conf`: configurations used to launch the topology.
+* `log-files`: directory containing Mesos scheduler's log.
+
+The log file will show whether the launch succeeded. If it succeeded, at the end of the log file
+it will show the task is running.
+
+```bash
+[2016-07-25 22:15:47 -0700] org.apache.heron.scheduler.mesos.framework.MesosFramework INFO: \
+Received status update [...]
+[2016-07-25 22:15:47 -0700] org.apache.heron.scheduler.mesos.framework.MesosFramework INFO: \
+Task with id 'container_1_1469510147073:0' RUNNING
+``` 
+
+If the launch fails, an error message will be included. For example, if the Mesos library isn't
+found in the configured location, the following exception will occur.
+
+```bash
+[2016-07-25 22:04:42 -0700] stderr STDERR:  Failed to load native Mesos library from \
+/usr/lib/mesos/0.28.1/lib
+[2016-07-25 22:04:42 -0700] stderr STDERR:  Exception in thread "main"
+[2016-07-25 22:04:42 -0700] stderr STDERR:  java.lang.UnsatisfiedLinkError: no mesos in \ 
+java.library.path
+[2016-07-25 22:04:42 -0700] stderr STDERR:      at \
+java.lang.ClassLoader.loadLibrary(ClassLoader.java:1867)
+...
+```
+
+## Mesos Management Console
+
+Another way to check your topology is running is to look at the Mesos management console. If it
+was launched successfully, two containers will be running.
+
+![result page](assets/mesos-management-console-with-topology.png)
+
+To view the process logs, click the `sandbox` on the right side. The sandbox of the heron container
+is shown below.
+
+![container-container-sandbox](assets/container-container-sandbox.png)
+
+The `log-files` directory includes the application and GC log of the processes running in this
+container.
+
+![container-log-files](assets/container-log-files.png)
+
+The bolt log of the ExclamationTopology is `container_1_exclaim1_1.log.0`. Below is a sample of it.
+
+![bolt-log](assets/bolt-log.png)
+
+## Heron UI
+
+Install Heron tools to monitor the topology with the `heron-ui` (see the
+[Quick Start Guide](getting-started-local-single-node)). Configure the value of `statemgrs.rootpath` in
+`$HOME/.herontools/conf/heron_tracker.yaml` to `$HOME/.herondata/repository/state/mesos` before
+starting the tracker. This configuration sets the location of the state manager root path. Start
+tracker and the UI.
+
+```bash
+$ heron-tracker
+... Running on port: 8888
+... Using config file: $HOME/.herontools/conf/heron_tracker.yaml
+```
+
+```bash
+$ heron-ui
+... Running on port: 8889
+... Using tracker url: http://localhost:8888
+```
+
+Go to the UI at [http://localhost:8889](http://localhost:8889) to see the topology.
+
+![mesos-local-heron-ui](assets/mesos-local-heron-ui.png)
+
+To see the metrics, click on the topology.
+
+![mesos-local-heron-ui-more](assets/mesos-local-heron-ui-more.png)
+
+To enter the Mesos Management Console page, click the `job` button.
+
+![mesos-local-heron-ui-to-mesos-console](assets/mesos-local-heron-ui-to-mesos-console.png)
+
+## Kill Topology
+
+To kill the topology, run:
+
+```bash
+heron kill mesos ExclamationTopology
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-nomad.md b/website2/website/versioned_docs/version-0.20.0/schedulers-nomad.md
new file mode 100644
index 0000000..bf2496a
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-nomad.md
@@ -0,0 +1,439 @@
+---
+id: version-0.20.0-schedulers-nomad
+title: Nomad
+sidebar_label: Nomad
+original_id: schedulers-nomad
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron supports [Hashicorp](https://hashicorp.com)'s [Nomad](https://nomadproject.io) as a scheduler. You can use Nomad for either small- or large-scale Heron deployments or to run Heron locally in [standalone mode](schedulers-standalone).
+
+> Update: Heron now supports running on Nomad via [raw exec driver](https://www.nomadproject.io/docs/drivers/raw_exec.html) and [docker driver](https://www.nomadproject.io/docs/drivers/docker.html)
+
+## Nomad setup
+
+Setting up a nomad cluster will not be covered here. See the [official Nomad docs](https://www.nomadproject.io/intro/getting-started/install.html) for instructions.
+
+**Instructions on running Heron on Nomad via raw execs are located here**:
+
+Below are instructions on how to run Heron on Nomad via raw execs.  In this mode, Heron executors will run as raw processes on the host machines. 
+
+The advantage of this mode is that it is incredibly lightweight and likely does not require sudo privileges to set up and run.  However, in this mode the setup procedure may be a little more complex compared to running via Docker, since there are more things to consider.  Also, resource allocation is considered but not enforced.
+
+## Requirements
+
+When setting up your Nomad cluster, the following are required:
+
+* The [Heron CLI tool](user-manuals-heron-cli) must be installed on each machine used to deploy Heron topologies
+* Python 2.7, Java 7 or 8, and [curl](https://curl.haxx.se/) must be installed on every machine in the cluster
+* A [ZooKeeper cluster](https://zookeeper.apache.org)
+
+## Configuring Heron settings
+
+Before running Heron via Nomad, you'll need to configure some settings. Once you've [installed Heron](getting-started-local-single-node), all of the configurations you'll need to modify will be in the `~/.heron/conf/nomad` directory.
+
+First, make sure that the `heron.nomad.driver` is set to "raw_exec" in `~/.heron/conf/nomad/scheduler.yaml` e.g.
+
+```yaml
+heron.nomad.driver: "raw_exec"
+```
+
+You'll need to use a topology uploader to deploy topology packages to nodes in your cluster. You can use one of the following uploaders:
+
+* The HTTP uploader in conjunction with Heron's [API server](deployment-api-server). The Heron API server acts like a file server to which users can upload topology packages. The API server distributes the packages, along with the Heron core package, to the relevant machines. You can also use the API server to submit your Heron topology to Nomad (described [below](#deploying-with-the-api-server)) <!-- TODO: link to upcoming HTTP uploader documentation -->
+* [Amazon S3](uploaders-amazon-s3). Please note that the S3 uploader requires an AWS account.
+* [SCP](uploaders-scp). Please note that the SCP uploader requires SSH access to nodes in the cluster.
+
+You can modify the `heron.class.uploader` parameter in `~/.heron/conf/nomad/uploader.yaml` to choose an uploader.
+
+In addition, you must update the `heron.statemgr.connection.string` parameter in the `statemgr.yaml` file in `~/.heron/conf/nomad` to your ZooKeeper connection string. Here's an example:
+
+```yaml
+heron.statemgr.connection.string: 127.0.0.1:2181
+```
+
+Then, update the `heron.nomad.scheduler.uri` parameter in `scheduler.yaml` to the URL of the Nomad server to which you'll be submitting jobs. Here's an example:
+
+```yaml
+heron.nomad.scheduler.uri: http://127.0.0.1:4646
+```
+
+You may also want to configure where Heron will store files on your machine if you're running Nomad locally (in `scheduler.yaml`). Here's an example:
+
+```yaml
+heron.scheduler.local.working.directory: ${HOME}/.herondata/topologies/${CLUSTER}/${ROLE}/${TOPOLOGY_ID}
+```
+
+> Heron uses string interpolation to fill in the missing values for `CLUSTER`, `ROLE`, etc.
+
+## Distributing Heron core
+
+The Heron core package needs to be made available for every machine in the cluster to download. You'll need to provide a URI for the Heron core package. Here are the currently supported protocols:
+
+* `file://` (local FS)
+* `http://` (HTTP)
+
+You can do this in one of several ways:
+
+* Use the Heron API server to distribute `heron-core.tar.gz` (see [here](deployment-api-server) for more info)
+* Copy `heron-core.tar.gz` onto every node in the cluster
+* Mount a network drive to every machine in the cluster that contains `heron-core.tar.gz`
+* Upload `heron-core.tar.gz` to an S3 bucket and expose an HTTP endpoint
+* Upload `heron-core.tar.gz` to be hosted on a file server and expose an HTTP endpoint
+
+> A copy of `heron-core.tar.gz` is located at `~/.heron/dist/heron-core.tar.gz` on the machine on which you installed the Heron CLI.
+
+You'll need to set the URL for `heron-core.tar.gz` in the `client.yaml` configuration file in `~/.heron/conf/nomad`. Here are some examples:
+
+```yaml
+# local filesystem
+heron.package.core.uri: file:///path/to/heron/heron-core.tar.gz
+
+# from a web server
+heron.package.core.uri: http://some.webserver.io/heron-core.tar.gz
+```
+
+## Submitting Heron topologies to the Nomad cluster
+
+You can submit Heron topologies to a Nomad cluster via the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit nomad \
+  <topology package path> \
+  <topology classpath> \
+  <topology CLI args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  ~/.heron/examples/heron-streamlet-examples.jar \           # Package path
+  org.apache.heron.examples.api.WindowedWordCountTopology \ # Topology classpath
+  windowed-word-count                                        # Args passed to topology
+```
+
+## Deploying with the API server
+
+The advantage of running the [Heron API Server](deployment-api-server) is that it can act as a file server to help you distribute topology package files and submit jobs to Nomad, so that you don't need to modify the configuration files mentioned above.  By using Heron’s API Server, you can set configurations such as the URI of ZooKeeper and the Nomad server once and not need to configure each machine from which you want to submit Heron topologies.
+
+## Running the API server
+
+You can run the Heron API server on any machine that can be reached by machines in your Nomad cluster via HTTP. Here's a command you can use to run the API server:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  -D heron.statemgr.connection.string=<ZooKeeper URI> \
+  -D heron.nomad.scheduler.uri=<Nomad URI> \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+You can also run the API server in Nomad itself, but you will need to have a local copy of the Heron API server executable on every machine in the cluster. Here's an example Nomad job for the API server:
+
+```hcl
+job "apiserver" {
+  datacenters = ["dc1"]
+  type = "service"
+  group "apiserver" {
+    count = 1
+    task "apiserver" {
+      driver = "raw_exec"
+      config {
+        command = <heron_apiserver_executable>
+        args = [
+        "--cluster", "nomad",
+        "--base-template", "nomad",
+        "-D", "heron.statemgr.connection.string=<zookeeper_uri>",
+        "-D", "heron.nomad.scheduler.uri=<scheduler_uri>",
+        "-D", "heron.class.uploader=org.apache.heron.uploader.http.HttpUploader",
+        "--verbose"]
+      }
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+      }
+    }
+  }
+}
+```
+
+Make sure to replace the following:
+
+* `<heron_apiserver_executable>` --- The local path to where the [Heron API server](deployment-api-server) executable is located (usually `~/.heron/bin/heron-apiserver`)
+* `<zookeeper_uri>` --- The URI for your ZooKeeper cluster
+* `<scheduler_uri>` --- The URI for your Nomad server
+
+## Using the Heron API server to distribute Heron topology packages
+
+Heron users can upload their Heron topology packages to the Heron API server using the HTTP uploader by modifying the `uploader.yaml` file to include the following:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:    org.apache.heron.uploader.http.HttpUploader
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+The [Heron CLI](user-manuals-heron-cli) will take care of the upload. When the topology is starting up, the topology package will be automatically downloaded from the API server.
+
+## Using the API server to distribute the Heron core package
+
+Heron users can use the Heron API server to distribute the Heron core package. When running the API server, just add this argument:
+
+```bash
+--heron-core-package-path <path to Heron core>
+```
+
+Here's an example:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  --download-hostname 127.0.0.1 \
+  --heron-core-package-path ~/.heron/dist/heron-core.tar.gz \
+  -D heron.statemgr.connection.string=127.0.0.1:2181 \
+  -D heron.nomad.scheduler.uri=127.0.0.1:4647 \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+Then change the `client.yaml` file in `~/.heron/conf/nomad` to the following:
+
+```yaml
+heron.package.use_core_uri: true
+heron.package.core.uri:     http://localhost:9000/api/v1/file/download/core
+```
+
+## Using the API server to submit Heron topologies
+
+Users can submit topologies using the [Heron CLI](user-manuals-heron-cli) by specifying a service URL to the API server. Here's the format of that command:
+
+```bash
+$ heron submit nomad \
+  --service-url=<Heron API server URL> \
+  <topology package path> \
+  <topology classpath> \
+  <topology args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  --service-url=http://localhost:9000 \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.api.WindowedWordCountTopology \
+  windowed-word-count
+```
+
+## Integration with Consul for metrics
+Each Heron executor that is part of a Heron topology serves metrics out of a port randomly generated by Nomad.  Thus, Consul is needed for service discovery so that users can determine which port the Heron executor is serving metrics on.
+Every Heron executor will automatically register itself as a service with Consul given that there is a Consul cluster running. The port Heron will be serving metrics will be registered with Consul.
+
+The service will be registered with the name with the following format:
+
+```yaml
+metrics-heron-<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+Each heron executor registered with Consul will be tagged with
+
+```yaml
+<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+To add additional tags, please specify them in a comma-delimited list via
+
+```yaml
+heron.nomad.metrics.service.additional.tags
+```
+
+in `scheduler.yaml`. For example:
+
+```yaml
+heron.nomad.metrics.service.additional.tags: "prometheus,metrics,heron"
+```
+
+Users can then configure Prometheus to scrape metrics for each Heron executor based on these tags
+
+
+Instructions on running Heron on Nomad via docker containers are located here:
+
+**Below are instructions on how to run Heron on Nomad via docker containers.**  In this mode, Heron executors will run as docker containers on host machines.
+
+## Requirements
+
+When setting up your Nomad cluster, the following are required:
+
+* The [Heron CLI tool](user-manuals-heron-cli) must be installed on each machine used to deploy Heron topologies
+* Python 2.7, Java 7 or 8, and [curl](https://curl.haxx.se/) must be installed on every machine in the cluster
+* A [ZooKeeper cluster](https://zookeeper.apache.org)
+* Docker installed and enabled on every machine
+* Each machine must also be able to pull the official Heron docker image from DockerHub or have the image preloaded.
+
+## Configuring Heron settings
+
+Before running Heron via Nomad, you'll need to configure some settings. Once you've [installed Heron](getting-started-local-single-node), all of the configurations you'll need to modify will be in the `~/.heron/conf/nomad` directory.
+
+First, make sure that the `heron.nomad.driver` is set to "docker" in `~/.heron/conf/nomad/scheduler.yaml` e.g.
+
+```yaml
+heron.nomad.driver: "docker"
+```
+
+You can also adjust which docker image to use for running Heron via the `heron.executor.docker.image` in `~/.heron/conf/nomad/scheduler.yaml` e.g.
+
+```yaml
+heron.executor.docker.image: 'heron/heron:latest'
+```
+
+You'll need to use a topology uploader to deploy topology packages to nodes in your cluster. You can use one of the following uploaders:
+
+* The HTTP uploader in conjunction with Heron's [API server](deployment-api-server). The Heron API server acts like a file server to which users can upload topology packages. The API server distributes the packages, along with the Heron core package, to the relevant machines. You can also use the API server to submit your Heron topology to Nomad (described [below](#deploying-with-the-api-server)) <!-- TODO: link to upcoming HTTP uploader documentation -->
+* [Amazon S3](uploaders-amazon-s3). Please note that the S3 uploader requires an AWS account.
+* [SCP](uploaders-scp). Please note that the SCP uploader requires SSH access to nodes in the cluster.
+
+You can modify the `heron.class.uploader` parameter in `~/.heron/conf/nomad/uploader.yaml` to choose an uploader.
+
+In addition, you must update the `heron.statemgr.connection.string` parameter in the `statemgr.yaml` file in `~/.heron/conf/nomad` to your ZooKeeper connection string. Here's an example:
+
+```yaml
+heron.statemgr.connection.string: 127.0.0.1:2181
+```
+
+Then, update the `heron.nomad.scheduler.uri` parameter in `scheduler.yaml` to the URL of the Nomad server to which you'll be submitting jobs. Here's an example:
+
+```yaml
+heron.nomad.scheduler.uri: http://127.0.0.1:4646
+```
+
+## Submitting Heron topologies to the Nomad cluster
+
+You can submit Heron topologies to a Nomad cluster via the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit nomad \
+  <topology package path> \
+  <topology classpath> \
+  <topology CLI args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  ~/.heron/examples/heron-streamlet-examples.jar \           # Package path
+  org.apache.heron.examples.api.WindowedWordCountTopology \ # Topology classpath
+  windowed-word-count                                        # Args passed to topology
+```
+
+## Deploying with the API server
+
+The advantage of running the [Heron API Server](deployment-api-server) is that it can act as a file server to help you distribute topology package files and submit jobs to Nomad, so that you don't need to modify the configuration files mentioned above.  By using Heron’s API Server, you can set configurations such as the URI of ZooKeeper and the Nomad server once and not need to configure each machine from which you want to submit Heron topologies.
+
+## Running the API server
+
+You can run the Heron API server on any machine that can be reached by machines in your Nomad cluster via HTTP. Here's a command you can use to run the API server:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  -D heron.statemgr.connection.string=<ZooKeeper URI> \
+  -D heron.nomad.scheduler.uri=<Nomad URI> \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+You can also run the API server in Nomad itself, but you will need to have a local copy of the Heron API server executable on every machine in the cluster. Here's an example Nomad job for the API server:
+
+```hcl
+job "apiserver" {
+  datacenters = ["dc1"]
+  type = "service"
+  group "apiserver" {
+    count = 1
+    task "apiserver" {
+      driver = "raw_exec"
+      config {
+        command = <heron_apiserver_executable>
+        args = [
+        "--cluster", "nomad",
+        "--base-template", "nomad",
+        "-D", "heron.statemgr.connection.string=<zookeeper_uri>",
+        "-D", "heron.nomad.scheduler.uri=<scheduler_uri>",
+        "-D", "heron.class.uploader=org.apache.heron.uploader.http.HttpUploader",
+        "--verbose"]
+      }
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+      }
+    }
+  }
+}
+```
+
+Make sure to replace the following:
+
+* `<heron_apiserver_executable>` --- The local path to where the [Heron API server](deployment-api-server) executable is located (usually `~/.heron/bin/heron-apiserver`)
+* `<zookeeper_uri>` --- The URI for your ZooKeeper cluster
+* `<scheduler_uri>` --- The URI for your Nomad server
+
+## Using the Heron API server to distribute Heron topology packages
+
+Heron users can upload their Heron topology packages to the Heron API server using the HTTP uploader by modifying the `uploader.yaml` file to include the following:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:    org.apache.heron.uploader.http.HttpUploader
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+## Integration with Consul for metrics
+Each container that is part of a Heron topology serves metrics out of a port randomly generated by Nomad.  Thus, Consul is needed for service discovery so that users can determine which port the container is serving metrics on.
+Every Heron executor running in a docker container will automatically register itself as a service with Consul given that there is a Consul cluster running. The port Heron will be serving metrics will be registered with Consul.
+  
+The service will be registered with the name with the following format:
+
+```yaml
+metrics-heron-<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+Each heron executor registered with Consul will be tagged with
+
+```yaml
+<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+To add additional tags, please specify them in a comma-delimited list via
+
+```yaml
+heron.nomad.metrics.service.additional.tags
+```
+
+in `scheduler.yaml`. For example:
+
+```yaml
+heron.nomad.metrics.service.additional.tags: "prometheus,metrics,heron"
+```
+
+Users can then configure Prometheus to scrape metrics for each container based on these tags
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-slurm.md b/website2/website/versioned_docs/version-0.20.0/schedulers-slurm.md
new file mode 100644
index 0000000..1267887
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-slurm.md
@@ -0,0 +1,89 @@
+---
+id: version-0.20.0-schedulers-slurm
+title: Slurm Cluster (Experimental)
+sidebar_label: Slurm Cluster
+original_id: schedulers-slurm
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+In addition to out-of-the-box scheduler for
+[Aurora](schedulers-aurora-cluster), Heron can also be deployed in a HPC cluster with the Slurm Scheduler.
+This allows a researcher to deploy Heron and execute streaming scientific work-flows.
+
+## How Slurm Deployment Works
+
+Using the Slurm scheduler is similar to deploying Heron on other systems. [The Heron CLI](user-manuals-heron-cli)  is used to deploy and manage topologies similar to other
+schedulers. The main difference is in the configuration.
+
+A set of default configuration files are provided with Heron in the [conf/slurm](https://github.com/apache/incubator-heron/tree/master/heron/config/src/yaml/conf/slurm) directory.
+The default configuration uses the local file system based state manager. It is
+possible that the local file system is mounted using NFS.
+
+When a Heron topology is submitted, the Slurm scheduler allocates the nodes required to
+run the job and starts the Heron processes in those nodes. It uses a `slurm.sh` script found in
+[conf/slurm](https://github.com/apache/incubator-heron/tree/master/heron/config/src/yaml/conf/slurm)
+directory to submit the topology as a batch job to the slurm scheduler.
+
+## Slurm Scheduler Configuration
+
+To configure Heron to use slurm scheduler, specify the following in `scheduler.yaml`
+config file:
+
+* `heron.class.scheduler` --- Indicates the class to be loaded for slurm scheduler.
+Set this to `org.apache.heron.scheduler.slurm.SlurmScheduler`
+
+* `heron.class.launcher` --- Specifies the class to be loaded for launching
+topologies. Set this to `org.apache.heron.scheduler.slurm.SlurmLauncher`
+
+* `heron.scheduler.local.working.directory` --- The shared directory to be used as
+Heron sandbox directory.
+
+* `heron.package.core.uri` --- Indicates the location of the heron core binary package.
+The local scheduler uses this URI to download the core package to the working directory.
+
+* `heron.directory.sandbox.java.home` --- This is used to specify the java home to
+be used when running topologies in the containers. Set to `${JAVA_HOME}` to use
+the value set in the bash environment variable $JAVA_HOME.
+
+* `heron.scheduler.is.service` --- Indicate whether the scheduler
+is a service. In the case of Slurm, it should be set to `False`.
+
+### Example Slurm Scheduler Configuration
+
+```yaml
+# scheduler class for distributing the topology for execution
+heron.class.scheduler: org.apache.heron.scheduler.slurm.SlurmScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher: org.apache.heron.scheduler.slurm.SlurmLauncher
+
+# working directory for the topologies
+heron.scheduler.local.working.directory: ${HOME}/.herondata/topologies/${CLUSTER}/${TOPOLOGY}
+
+# location of java - pick it up from shell environment
+heron.directory.sandbox.java.home: ${JAVA_HOME}
+
+# Invoke the IScheduler as a library directly
+heron.scheduler.is.service: False
+```
+
+## Slurm Script `slurm.sh`
+
+The script `slurm.sh` is used by the scheduler to submit the Heron job to the Slurm scheduler.
+Edit this file to set specific slurm settings like time, account. The script and `scheduler.yaml`
+must be included with other cluster configuration files.
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-standalone.md b/website2/website/versioned_docs/version-0.20.0/schedulers-standalone.md
new file mode 100644
index 0000000..a6506f3
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-standalone.md
@@ -0,0 +1,243 @@
+---
+id: version-0.20.0-schedulers-standalone
+title: Heron Multi-node Standalone Cluster
+sidebar_label: Heron Multi-node Standalone Cluster
+original_id: schedulers-standalone
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron enables you to easily run a multi-node cluster in **standalone mode**. The difference between standalone mode and [local mode](schedulers-local) for Heron is that standalone mode involves running multiple compute nodes---using [Hashicorp](https://www.hashicorp.com/)'s [Nomad](https://www.nomadproject.io/) as a scheduler---rather than just one.
+
+## Installation
+
+You can use Heron in standalone mode using the `heron-admin` CLI tool, which can be installed using the instructions [here](getting-started-local-single-node).
+
+## Requirements
+
+In order to run Heron in standalone mode, you'll need to run a [ZooKeeper](https://zookeeper.apache.org) cluster. You will also need to be able to ssh into all the nodes that you want to have in your standalone cluster from the node on which you are setting up the cluster.
+
+## Configuration
+
+Once you have the `heron-admin` CLI tool installed, you need to provide a list of hosts for both the Heron cluster itself and for [ZooKeeper](https://zookeeper.apache.org).
+
+You can easily do this by running the following command:
+
+```bash
+$ heron-admin standalone set
+```
+
+That will open up an `inventory.yaml` file in whichever editor is specified in your `EDITOR` environment variable. The default is [Vim](http://www.vim.org/). That YAML file looks like this:
+
+```yaml
+cluster:
+- 127.0.0.1
+zookeepers:
+- 127.0.0.1
+```
+
+You can modify the file to include all hosts for your standalone cluster and for ZooKeeper. Remember you need to be able to ssh into all the nodes listed in the cluster section. Once you've added the lists of hosts for the Heron standalone cluster and ZooKeeper and saved the file, you can move on to [starting the cluster](#starting-and-stopping-the-cluster).
+
+> To run Heron in standalone mode locally on your laptop, use the defaults that are already provided in the `inventory.yaml` file.
+
+## Starting and stopping the cluster
+
+To start Heron in standalone mode once the host configuration has been applied:
+
+```bash
+$ heron-admin standalone cluster start
+```
+
+You should see output like this:
+
+```bash
+[2018-01-22 10:37:06 -0800] [INFO]: Roles:
+[2018-01-22 10:37:06 -0800] [INFO]:  - Master Servers: ['127.0.0.1']
+[2018-01-22 10:37:06 -0800] [INFO]:  - Slave Servers: ['127.0.0.1']
+[2018-01-22 10:37:06 -0800] [INFO]:  - Zookeeper Servers: ['127.0.0.1']
+[2018-01-22 10:37:06 -0800] [INFO]: Updating config files...
+[2018-01-22 10:37:06 -0800] [INFO]: Starting master on 127.0.0.1
+[2018-01-22 10:37:06 -0800] [INFO]: Done starting masters
+[2018-01-22 10:37:06 -0800] [INFO]: Starting slave on 127.0.0.1
+[2018-01-22 10:37:06 -0800] [INFO]: Done starting slaves
+[2018-01-22 10:37:06 -0800] [INFO]: Waiting for cluster to come up... 0
+[2018-01-22 10:37:08 -0800] [INFO]: Starting Heron API Server on 127.0.0.1
+[2018-01-22 10:37:08 -0800] [INFO]: Waiting for API server to come up... 0
+[2018-01-22 10:37:09 -0800] [INFO]: Waiting for API server to come up... 1
+[2018-01-22 10:37:16 -0800] [INFO]: Done starting Heron API Server
+[2018-01-22 10:37:16 -0800] [INFO]: Starting Heron Tools on 127.0.0.1
+[2018-01-22 10:37:16 -0800] [INFO]: Waiting for API server to come up... 0
+[2018-01-22 10:37:17 -0800] [INFO]: Done starting Heron Tools
+[2018-01-22 10:37:17 -0800] [INFO]: Heron standalone cluster complete!
+```
+
+If you see the `Heron standalone cluster complete!` message, that means that the cluster is ready for you to [submit](#submitting-a-topology) and manage topologies.
+
+You can stop the cluster at any time using the `stop` command:
+
+```bash
+$ heron-admin standalone cluster stop
+```
+
+You will be prompted to confirm that you want to stop the cluster by typing **yes** or **y** (or **no** or **n** if you don't want to). If you enter **yes** or **y** and press **Enter**, all Heron-related jobs will be de-scheduled on Nomad.
+
+## Fetching info about your standalone cluster
+
+At any time, you can retrieve information about your standalone cluster by running:
+
+```bash
+$ heron-admin standalone info
+```
+
+This will return a JSON string containing a list of hosts for Heron and ZooKeeper as well as URLs for the [Heron API server](deployment-api-server), [Heron UI](user-manuals-heron-ui), and [Heron Tracker](user-manuals-heron-tracker-runbook). Here is a cluster info JSON string if all defaults are retained:
+
+```json
+{
+  "numNodes": 1,
+  "nodes": [
+    "127.0.0.1"
+  ],
+  "roles": {
+    "masters": [
+      "127.0.0.1"
+    ],
+    "slaves": [
+      "127.0.0.1"
+    ],
+    "zookeepers": [
+      "127.0.0.1"
+    ]
+  },
+  "urls": {
+    "serviceUrl": "http://127.0.0.1:9000",
+    "heronUi": "http://127.0.0.1:8889",
+    "heronTracker": "http://127.0.0.1:8888"
+  }
+}
+```
+
+You can also get more specific bits of info using the `get` command:
+
+```bash
+# Heron Tracker URL
+$ heron-admin standalone get heron-tracker-url
+
+# Heron UI URL
+$ heron-admin standalone get heron-ui-url
+
+# Heron cluster service URL
+$ heron-admin standalone get service-url
+```
+
+## Setting the service URL
+
+Once your standalone cluster is running, there's one final step before you can interact with the cluster: you need to specify the service URL for the [Heron API server](deployment-api-server) for the standalone cluster. You can fetch that URL in two different ways:
+
+```bash
+# Using the "get" command
+$ heron-admin standalone get service-url
+
+# Using the "info" command
+$ heron-admin standalone info | jq .urls.serviceUrl | tr -d '"'
+```
+
+Once you have the URL, you can use the `heron config` command to set the service URL:
+
+```bash
+$ heron config standalone set service_url SERVICE_URL
+```
+
+Here are some more convenient ways to set the service URL:
+
+```bash
+# Using the "get" command
+$ heron config standalone set service_url \
+  $(heron-admin standalone get service-url)
+
+# Using the "info" command
+$ heron config standalone set service_url \
+  $(heron-admin standalone info | jq .urls.serviceUrl | tr -d '"')
+```
+
+If you're running a standalone cluster locally on your laptop, the service URL will always be `http://localhost:9000`:
+
+```bash
+$ heron config standalone set service_url http://localhost:9000
+```
+
+## Submitting a topology
+
+Once your standalone cluster is up and running and you've set the service URL for the [`heron` CLI tool](user-manuals-heron-cli), you can submit and manage topologies by specifying the `standalone` cluster. Here's an example topology submission command:
+
+```bash
+$ heron submit standalone \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  WindowedWordCount
+```
+
+You can also specify the service url as part of the submission command:
+
+```bash
+$ heron submit standalone \
+  --service-url http://localhost:9000 \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  WindowedWordCount
+```
+
+
+Once the topology has been submitted, it can be deactivated, killed, updated, and so on, just like topologies on any other scheduler.
+
+## Managing Nomad
+
+Heron standalone uses [Nomad](https://www.nomadproject.io/) as a scheduler. For the most part, you shouldn't need to interact with Nomad when managing your Heron standalone cluster. If you do need to manage Nomad directly, however, you can do so using the `heron-nomad` executable, which is installed at `~/.heron/bin/heron-nomad`. That executable is essentially an alias for the `nomad` CLI tool. You can find documentation in the [official Nomad docs](https://www.nomadproject.io/docs/commands/index.html).
+
+You can also access the [Nomad Web UI](https://www.nomadproject.io/guides/ui.html) on port 4646 of any master node in the Heron cluster. You can see a list of master nodes by running `heron-admin standalone info`. If you're running a standalone cluster locally on your machine, you can access the Nomad UI at `localhost:4646`.
+
+## Debugging Help
+
+The logs for the Nomad Server (master node) and Nomad Clients (slave nodes) are located at `/tmp/nomad_server_log` and `/tmp/nomad_client.log` respectively. Please look through these logs to see if there was an error setting up the Nomad cluster.
+
+### Common Problems
+
+If you see the following in '/tmp/nomad_server_log'
+
+```bash
+Error starting agent: Failed to start Consul server: Failed to start lan serf: Failed to parse advertise address!
+```
+
+The Nomad server cannot determine the network address to advertise itself on.  You will need to manually set that address. You can do that by modifying the configuration file:
+
+`~/.heron/conf/standalone/resources/master.hcl`
+
+You will need to add a stanza like:
+
+```hcl
+advertise {
+  # Defaults to the node's hostname. If the hostname resolves to a loopback
+  # address you must manually configure advertise addresses.
+  http = "1.2.3.4"
+  rpc  = "1.2.3.4"
+  serf = "1.2.3.4:5648" # non-default ports may be specified
+}
+```
+
+Please reference the [Nomad agent configuration documentation](https://www.nomadproject.io/docs/agent/configuration/index.html).
+
+You can then stop the cluster and restart.
+
diff --git a/website2/website/versioned_docs/version-0.20.0/schedulers-yarn.md b/website2/website/versioned_docs/version-0.20.0/schedulers-yarn.md
new file mode 100644
index 0000000..c38503d
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/schedulers-yarn.md
@@ -0,0 +1,168 @@
+---
+id: version-0.20.0-schedulers-yarn
+title: Apache Hadoop YARN Cluster (Experimental)
+sidebar_label: YARN Cluster
+original_id: schedulers-yarn
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+In addition to out-of-the-box schedulers for [Aurora](schedulers-aurora-cluster), Heron can also be deployed on a
+YARN cluster with the YARN scheduler. The YARN scheduler is implemented using the
+[Apache REEF](https://reef.apache.org/) framework.
+
+**Key features** of the YARN scheduler:
+
+* **Heterogeneous container allocation:** The YARN scheduler will request heterogeneous containers
+from the YARN ResourceManager [RM](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html). In other words the topology will not request more resources than what is really needed.
+
+* **Container reuse:** The REEF framework allows the YARN scheduler to retain containers
+across events like topology restarts.
+
+## Topology deployment on a YARN Cluster
+
+Using the YARN scheduler is similar to deploying Heron on other clusters, i.e. using the
+[Heron CLI](user-manuals-heron-cli).
+This document assumes that the Hadoop yarn client is installed and configured.
+
+Following steps are executed when a Heron topology is submitted:
+
+1. The REEF client copies the `Heron Core package` and the `topology package` on the distributed file system.
+1. It then starts the YARN Application Master (AM) for the topology.
+1. The AM subsequently invokes the `Heron Scheduler` in the same process.
+1. This is followed by container allocation for the topology's master and workers. As a result `N+2`
+containers are allocated for each topology.
+
+### Configuring the Heron client classpath
+
+**Under 0.14.2 version (including 0.14.2)**
+
+  1. Command `hadoop classpath` provides a list of jars needed to submit a hadoop job. Copy all jars to `HERON_INSTALL_DIR/lib/scheduler`.
+     * Do not copy commons-cli jar if it is older than version 1.3.1.
+  1. Create a jar containing core-site.xml and yarn-site.xml. Add this jar to `HERON_INSTALL_DIR/lib/scheduler` too.
+
+**After 0.14.3 version released**
+
+It is unnecessary to copy hadoop-classpath-jars to `HERON_INSTALL_DIR/lib/scheduler` like what 0.14.2 version requested. [#1245](https://github.com/apache/incubator-heron/issues/1245) added `extra-launch-classpath` arguments, which makes it easier and more convenient to submit a topology to YARN.
+
+> **Tips**
+>
+>***No matter which version of Heron you are using, there is something users should pay attention to*** if you want to submit a topology to YARN.
+>
+>For `localfs-state-manager`
+>
+>* The version of common-cli jar should be greater than or equal to 1.3.1.
+>
+>For `zookeeper-state-manager`
+>
+>* The version of common-cli jar should be greater than or equal to 1.3.1.
+>* The version of curator-framework jar should be greater than or equal to 2.10.0
+>* The version of curator-client jar should be greater than or equal to 2.10.0
+
+### Configure the YARN scheduler
+
+A set of default configuration files are provided with Heron in the [conf/yarn](https://github.com/apache/incubator-heron/tree/master/heron/config/src/yaml/conf/yarn) directory.
+The default configuration uses the local state manager. This will work with single-node local
+YARN installation only. A Zookeeper based state management will be needed for topology
+deployment on a multi-node YARN cluster.
+
+1. Custom Heron Launcher for YARN: `YarnLauncher`
+1. Custom Heron Scheduler for YARN: `YarnScheduler`
+1. State manager for multi-node deployment:
+`org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager`
+1. `YarnLauncher` performs the job of uploader also. So `NullUploader` is used.
+
+## Topology management
+
+### Topology Submission
+**Command**
+
+**Under 0.14.2 version (including 0.14.2)**
+
+`$ heron submit yarn heron-api-examples.jar org.apache.heron.examples.api.AckingTopology AckingTopology`
+
+
+**After 0.14.3 version released**
+
+`$ heron submit yarn heron-api-examples.jar org.apache.heron.examples.api.AckingTopology AckingTopology --extra-launch-classpath <extra-classpath-value>`
+
+>**Tips**
+>
+>1. More details for using the `--extra-launch-classpath` argument in 0.14.3 version. It supports both a single directory which including all `hadoop-lib-jars` and multiple directories separated by colon such as what `hadoop classpath` gives. ***The submit operation will fail if any path is invalid or if any file is missing.***
+>2. If you want to submit a topology to a specific YARN queue, you can set the `heron.scheduler.yarn.queue` argument in `--config-property`. For instance, `--config-property heron.scheduler.yarn.queue=test`. This configuration could be found in the [conf/yarn/scheduler](https://github.com/apache/incubator-heron/blob/master/heron/config/src/yaml/conf/yarn/scheduler.yaml) file too. `default` would be the YARN default queue as YARN provided.
+
+**Sample Output**
+
+```bash
+INFO: Launching topology 'AckingTopology'
+...
+...
+Powered by
+     ___________  ______  ______  _______
+    /  ______  / /  ___/ /  ___/ /  ____/
+   /     _____/ /  /__  /  /__  /  /___
+  /  /\  \     /  ___/ /  ___/ /  ____/
+ /  /  \  \   /  /__  /  /__  /  /
+/__/    \__\ /_____/ /_____/ /__/
+
+...
+...
+org.apache.heron.scheduler.yarn.ReefClientSideHandlers INFO:  Topology AckingTopology is running, jobId AckingTopology.
+```
+
+**Verification**
+
+Visit the YARN http console or execute command `yarn application -list` on a yarn client host.
+
+```bash
+Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):1
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+application_1466548964728_0004	      AckingTopology	                YARN	     heron	   default	           RUNNING	         UNDEFINED	             0%	                                N/A
+```
+
+### Topology termination
+**Command**
+
+`$ heron kill yarn AckingTopology`
+
+
+### Log File location
+
+Assuming HDFS as the file system, Heron logs and REEF logs can be found in the following locations:
+
+1. Logs generated when the topologies AM starts:
+`<LOG_DIR>/userlogs/application_1466548964728_0004/container_1466548964728_0004_01_000001/driver.stderr`
+
+1. The scheduler's logs are created on the first/AM container:
+`<NM_LOCAL_DIR>/usercache/heron/appcache/application_1466548964728_0004/container_1466548964728_0004_01_000001/log-files`
+
+1. Logs generated when the TMaster starts in its container:
+`<LOG_DIR>/userlogs/application_1466548964728_0004/container_1466548964728_0004_01_000002/evaluator.stderr`
+
+1. The TMaster's logs are created on the second container owned by the topology app:
+`<NM_LOCAL_DIR>/usercache/heron/appcache/application_1466548964728_0004/container_1466548964728_0004_01_000002/log-files`
+
+1. Worker logs are created on the remaining containers in the YARN NodeManager's local directory.
+
+
+## Work in Progress
+
+1. The YARN Scheduler will restart any failed workers and TMaster containers. However [AM HA](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/ResourceManagerHA.html)  is not
+ supported yet. As a result AM failure will result in topology failure.
+ Issue: [#949](https://github.com/apache/incubator-heron/issues/949)
+1. TMaster and Scheduler are started in separate containers. Increased network latency can result
+ in warnings or failures. Issue: [#951](https://github.com/apache/incubator-heron/issues/951)
diff --git a/website2/website/versioned_docs/version-0.20.0/state-managers-local-fs.md b/website2/website/versioned_docs/version-0.20.0/state-managers-local-fs.md
new file mode 100644
index 0000000..990fda6
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/state-managers-local-fs.md
@@ -0,0 +1,67 @@
+---
+id: version-0.20.0-state-managers-local-fs
+title: Local File System
+sidebar_label: Local File System
+original_id: state-managers-local-fs
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron can use the local file system as a state manager for storing various book
+keeping information. Use of local file system is recommended mainly for single
+node server and laptop. This configuration is ideal for deploying in edge devices.
+Heron developers can use this setting for developing and debugging various heron
+components in their laptop or server.
+
+### Local File System State Manager Configuration
+
+You can make Heron aware of the ZooKeeper cluster by modifying the
+`statemgr.yaml` config file specific for the Heron cluster. You'll
+need to specify the following for each cluster:
+
+* `heron.class.state.manager` --- Indicates the class to be loaded for local file system
+state manager. You should set this to `org.apache.heron.statemgr.localfs.LocalFileSystemStateManager`
+
+* `heron.statemgr.connection.string` --- This should be `LOCALMODE` since it is always localhost.
+
+* `heron.statemgr.root.path` --- The root path in the local file system where state information
+is stored.  We recommend providing Heron with an exclusive directory; if you do not, make sure that
+the following sub-directories are unused: `/tmasters`, `/topologies`, `/pplans`, `/executionstate`,
+`/schedulers`.
+
+* `heron.statemgr.localfs.is.initialize.file.tree` --- Indicates whether the nodes under root
+`/tmasters`, `/topologies`, `/pplans`, `/executionstate`, and `/schedulers` need to be created, if they
+are not found. Set it to `True` if you would like Heron to create those directories. If those
+directories are already there, set it to `False`. The absence of this configuration implies `True`.
+
+### Example Local File System State Manager Configuration
+
+Below is an example configuration (in `statemgr.yaml`) for a local file system running in `localhost`:
+
+```yaml
+# local state manager class for managing state in a persistent fashion
+heron.class.state.manager: org.apache.heron.statemgr.localfs.LocalFileSystemStateManager
+
+# local state manager connection string
+heron.statemgr.connection.string: LOCALMODE
+
+# path of the root address to store the state in a local file system
+heron.statemgr.root.path: ${HOME}/.herondata/repository/state/${CLUSTER}
+
+# create the sub directories, if needed
+heron.statemgr.localfs.is.initialize.file.tree: True
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/state-managers-zookeeper.md b/website2/website/versioned_docs/version-0.20.0/state-managers-zookeeper.md
new file mode 100644
index 0000000..12fb108
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/state-managers-zookeeper.md
@@ -0,0 +1,100 @@
+---
+id: version-0.20.0-state-managers-zookeeper
+title: Zookeeper
+sidebar_label: Zookeeper
+original_id: state-managers-zookeeper
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron relies on ZooKeeper for a wide variety of cluster coordination tasks. You
+can use either a shared or dedicated ZooKeeper cluster.
+
+There are a few things you should be aware of regarding Heron and ZooKeeper:
+
+* Heron uses ZooKeeper only for coordination, *not* for message passing, which
+  means that ZooKeeper load should generally be fairly low. A single-node
+  and/or shared ZooKeeper *may* suffice for your Heron cluster, depending on
+  usage.
+* Heron uses ZooKeeper more efficiently than Storm. This makes Heron less likely
+  than Storm to require a bulky or dedicated ZooKeeper cluster, but your use
+  case may require one.
+* We strongly recommend running ZooKeeper [under
+  supervision](http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_supervision).
+
+### ZooKeeper State Manager Configuration
+
+You can make Heron aware of the ZooKeeper cluster by modifying the
+`statemgr.yaml` config file specific for the Heron cluster. You'll
+need to specify the following for each cluster:
+
+* `heron.class.state.manager` --- Indicates the class to be loaded for managing
+the state in ZooKeeper and this class is loaded using reflection. You should set this
+to `org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager`
+
+* `heron.statemgr.connection.string` --- The host IP address and port to connect to ZooKeeper
+cluster (e.g) "127.0.0.1:2181".
+
+* `heron.statemgr.root.path` --- The root ZooKeeper node to be used by Heron. We recommend
+providing Heron with an exclusive root node; if you do not, make sure that the following child
+nodes are unused: `/tmasters`, `/topologies`, `/pplans`, `/executionstate`, `/schedulers`.
+
+* `heron.statemgr.zookeeper.is.initialize.tree` --- Indicates whether the nodes under ZooKeeper
+root `/tmasters`, `/topologies`, `/pplans`, `/executionstate`, and `/schedulers` need to be created,
+if they are not found. Set it to `True` if you would like Heron to create those nodes. If those
+nodes are already there, set it to `False`. The absence of this configuration implies `True`.
+
+* `heron.statemgr.zookeeper.session.timeout.ms` --- Specifies how much time in milliseconds
+to wait before declaring the ZooKeeper session is dead.
+
+* `heron.statemgr.zookeeper.connection.timeout.ms` --- Specifies how much time in milliseconds
+to wait before the connection to ZooKeeper is dead.
+
+* `heron.statemgr.zookeeper.retry.count` --- Count of the number of retry attempts to connect
+to ZooKeeper
+
+* `heron.statemgr.zookeeper.retry.interval.ms`: Time in milliseconds to wait between each retry
+
+### Example ZooKeeper State Manager Configuration
+
+Below is an example configuration (in `statemgr.yaml`) for a ZooKeeper running in `localhost`:
+
+```yaml
+# local state manager class for managing state in a persistent fashion
+heron.class.state.manager: org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager
+
+# local state manager connection string
+heron.statemgr.connection.string:  "127.0.0.1:2181"
+
+# path of the root address to store the state in a local file system
+heron.statemgr.root.path: "/heron"
+
+# create the zookeeper nodes, if they do not exist
+heron.statemgr.zookeeper.is.initialize.tree: True
+
+# timeout in ms to wait before considering zookeeper session is dead
+heron.statemgr.zookeeper.session.timeout.ms: 30000
+
+# timeout in ms to wait before considering zookeeper connection is dead
+heron.statemgr.zookeeper.connection.timeout.ms: 30000
+
+# timeout in ms to wait before considering zookeeper connection is dead
+heron.statemgr.zookeeper.retry.count: 10
+
+# duration of time to wait until the next retry
+heron.statemgr.zookeeper.retry.interval.ms: 10000
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/topology-development-eco-api.md b/website2/website/versioned_docs/version-0.20.0/topology-development-eco-api.md
new file mode 100644
index 0000000..5caa6cf
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/topology-development-eco-api.md
@@ -0,0 +1,580 @@
+---
+id: version-0.20.0-topology-development-eco-api
+title: The ECO API for Java
+sidebar_label: The ECO API for Java
+original_id: topology-development-eco-api
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> **The Heron ECO API is in beta**. The Heron ECO API can be used to build and test topologies on your local or on a cluster.  The API still needs some testing and feedback from the community to understand how we  should continue to develop ECO.
+
+Heron processing topologies can be written using an API called the **Heron ECO API**. The ECO API is currently available to work with spouts and bolts from the following packages:
+
+* `org.apache.storm`
+* `org.apache.heron`
+
+> Although this document focuses on the ECO API, both the [Streamlet API](heron-streamlet-concepts) and [Topology API](heron-topology-concepts) topologies you have built can still be used with Heron
+
+## The Heron ECO API vs. The Streamlet and Topology APIs
+
+Heron's ECO offers one major difference over the Streamlet and Topology APIs and that is extensibility without recompilation.
+With Heron's ECO developers now have a way to alter the way data flows through spouts and bolts without needing to get into their code and make changes.
+Topologies can now be defined through a YAML based format.
+
+## Why the name ECO?
+
+/ˈekoʊ/ (Because all software should come with a pronunciation guide these days)
+ECO is an acronym that stands for:
+* Extensible
+* Component
+* Orchestrator
+
+
+## What about Storm Flux?  Is it compatible with Eco?
+
+ECO is an extension of Flux.  Most Storm Flux topologies should be able to deployed in Heron with minimal changes.
+Start reading [Migrate Storm Topologies To Heron](../../../migrate-storm-to-heron) to learn how to migrate your Storm Flux topology then come back.
+
+## Getting started
+
+In order to use the Heron ECO API for Java, you'll need to install the `heron-api` and the `heron-storm` library, which is available
+via [Maven Central](http://search.maven.org/).
+
+### Maven setup
+
+To install the `heron-api` library using Maven, add this to the `dependencies` block of your `pom.xml`
+configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{heron:version}}</version>
+    <scope>compile</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-storm</artifactId>
+    <version>{{heron:version}}</version>
+    <scope>compile</scope>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Java topology in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn assembly:assembly
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example ECO topology submission command using a compiled JAR:
+
+```bash
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  org.apache.heron.eco.Eco \
+  --eco-config-file path/to/your/topology-definition.yaml
+```
+
+### Reference Links
+[Topology Name](#topology-name)
+
+[Configuration](#configuration)
+
+[Components](#components)
+
+[Property Injection](#property-injection)
+
+[The Topology Definition](#the-topology-definition)
+
+[Streams and Groupings](#streams-and-groupings)
+
+[Handling Enums](#handling-enums)
+
+[Property Substitution](#property-substitution)
+
+[Environment Variable Substitution](#environment-variable-substitution)
+
+[Other ECO Examples](#other-eco-examples)
+
+Notice how the above example submission command is referencing the main class `org.apache.heron.eco.Eco`.  This part of the command
+needs to stay the same.  Eco is the main class that will assemble your topology from the `--eco-config-file` you specify.
+
+## Defining Your ECO Topology File
+
+An ECO topology definition consists of the following:
+
+* A topology name
+* An optional list of topology "components" (named Java objects that will be made available for configuration in the topology)
+* A DSL topology definition that contains:
+  - A list of spouts, each identified by a unique ID
+  - A list of bolts, each identified by a unique ID
+  - A list of "stream" objects representing a flow of tuples between spouts and bolts
+
+
+An example of a simple YAML DSL definition is below:
+
+```yaml
+
+name: "fibonacci-topology"
+
+config:
+  topology.workers: 1
+
+components:
+  - id: "property-holder"
+    className: "org.apache.heron.examples.eco.TestPropertyHolder"
+    constructorArgs:
+      - "some argument"
+    properties:
+      - name: "numberProperty"
+        value: 11
+      - name: "publicProperty"
+        value: "This is public property"
+
+spouts:
+  - id: "spout-1"
+    className: "org.apache.heron.examples.eco.TestFibonacciSpout"
+    constructorArgs:
+      - ref: "property-holder"
+    parallelism: 1
+
+bolts:
+  - id: "even-and-odd-bolt"
+    className: "org.apache.heron.examples.eco.EvenAndOddBolt"
+    parallelism: 1
+
+  - id: "ibasic-print-bolt"
+    className: "org.apache.heron.examples.eco.TestIBasicPrintBolt"
+    parallelism: 1
+    configMethods:
+      - name: "sampleConfigurationMethod"
+        args:
+          - "${ecoPropertyOne}"
+          - MB
+
+  - id: "sys-out-bolt"
+    className: "org.apache.heron.examples.eco.TestPrintBolt"
+    parallelism: 1
+
+streams:
+  - from: "spout-1"
+    to: "even-and-odd-bolt"
+    grouping:
+      type: SHUFFLE
+
+  - from: "even-and-odd-bolt"
+    to: "ibasic-print-bolt"
+    grouping:
+      type: SHUFFLE
+      streamId: "odds"
+
+  - from: "even-and-odd-bolt"
+    to: "sys-out-bolt"
+    grouping:
+      type: SHUFFLE
+      streamId: "evens"
+
+```
+
+If you want to stop here and try to deploy the above topology you can execute:
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-eco-examples.jar \
+  org.apache.heron.eco.Eco \
+  --eco-config-file ~/.heron/examples/storm_fibonacci.yaml
+```
+
+This ECO topology does not do anything spectacular, but it's a good starting point to go through some of ECO's concepts.
+
+## Taking a closer look at the YAML definition specs
+
+### Topology Name
+
+Each ECO definition file will be required to have a `name` defined.
+
+```yaml
+
+name: "simple-wordcount-topology"
+
+```
+
+### Configuration
+
+`config` is the section where you will list your properties to be inserted into a `org.apache.heron.api.Config` class. This section is optional.
+
+```yaml
+
+config:
+  topology.workers: 1
+
+```
+
+#### Specifying Component Level Resources
+
+You can specify component level JVM resources by referencing the `id` of the component and its `ram`.
+You can choose between Bytes `B`, Megabytes `MB`, or Gigabytes `GB`.  Examples would be `256MB` or `2GB`.
+The unit of measurement must be appended at the end of the numerical value with no spaces.  There are plans to support 
+component level `cpu` and `disk` configs in the future.
+
+```yaml
+ topology.component.resourcemap:
+
+    - id: "spout-1"
+      ram: 256MB # The minimum value for a component's specified RAM is 256MB
+      
+
+    - id: "bolt-1"
+      ram: 256MB # The minimum value for a component's specified RAM is 256MB
+      
+ ```
+ 
+ #### Specifying JVM Options
+ 
+ You can specify component level JVM resources by referencing the `id` of the component and a list
+ of the JVM options
+ 
+```yaml
+topology.component.jvmoptions:
+
+   - id: "spout-1"
+     options: ["-XX:NewSize=300m", "-Xms2g"]
+```
+
+#### Other Supported Configuration Parameters
+* `"topology.worker.childopts"` : Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS
+* `"topology.tick.tuple.freq.ms"` :  How often (in milliseconds) a tick tuple from the "__system" component and "__tick" stream should be sent to tasks. Meant to be used as a component-specific configuration.
+* `"topology.enable.message.timeouts"` : True if Heron should timeout messages or not. Defaults to true. This is meant to be used in unit tests to prevent tuples from being accidentally timed out during the test.
+* `"topology.debug"` : When set to true, Heron will log every message that's emitted.
+*  `"topology.stmgrs"` : The number of stmgr instances that should spin up to service this topology. All the executors will be evenly shared by these stmgrs.
+* `"topology.message.timeout.secs"` : The maximum amount of time given to the topology to fully process a message
+emitted by a spout. If the message is not acked within this time frame, Heron
+will fail the message on the spout. Some spout implementations will then replay
+the message at a later time.
+* `"topology.component.parallelism"` : The per component parallelism for a component in this topology.
+* `"topology.max.spout.pending"` : This config applies to individual tasks, not to spouts or topologies as a whole.
+ A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+ Note that this config parameter has no effect for unreliable spouts that don't tag
+ their tuples with a message id.
+* `"topology.auto.task.hooks"` :  A list of task hooks that are automatically added to every spout and bolt in the topology. An example
+ of when you'd do this is to add a hook that integrates with your internal
+monitoring system. These hooks are instantiated using the zero-arg constructor.
+* `"topology.serializer.classname"` : The serialization class that is used to serialize/deserialize tuples
+* `"topology.reliability.mode"` : A Heron topology can be run in any one of the TopologyReliabilityMode
+ mode. The format of this flag is the string encoded values of the
+underlying TopologyReliabilityMode value.  Values are `ATMOST_ONCE`, `ATLEAST_ONCE`, and `EFFECTIVELY_ONCE`.
+* `"topology.reliability.mode"` :  A Heron topology can be run in any one of the TopologyReliabilityMode
+mode. The format of this flag is the string encoded values of the
+underlying TopologyReliabilityMode value.
+* `"topology.container.cpu"` : Number of CPU cores per container to be reserved for this topology.
+* `"topology.container.ram"` : Amount of RAM per container to be reserved for this topology. In bytes.
+* `"topology.container.disk"` : Amount of disk per container to be reserved for this topology. In bytes.
+* `"topology.container.max.cpu.hint"` : Hint for max number of CPU cores per container to be reserved for this topology.
+* `"topology.container.max.ram.hint"` : Hint for max amount of RAM per container to be reserved for this topology.  In bytes.
+* `"topology.container.max.disk.hint"` : Hint for max amount of disk per container to be reserved for this topology. In bytes.
+* `"topology.container.padding.percentage"` : Percentage of per-container resources to reserve as padding for this topology.
+* `"topology.container.ram.padding"` : Amount of RAM to pad each container. In bytes.
+* `"topology.stateful.checkpoint.interval.seconds"` : What's the checkpoint interval for stateful topologies in seconds.
+* `"topology.stateful.start.clean"` :  Boolean flag that says that the stateful topology should start from clean state, i.e. ignore any checkpoint state.
+* `"topology.name"` :  Name of the topology. This config is automatically set by Heron when the topology is submitted.
+* `"topology.team.name"` : Name of the team which owns this topology.
+* `"topology.team.email"` : Email of the team which owns this topology.
+* `"topology.cap.ticket"` :  Cap ticket (if filed) for the topology. If the topology is in prod this has to be set or it cannot be deployed.
+* `"topology.project.name"` : Project name of the topology, to help us with tagging which topologies are part of which project. For example, if topology A and Topology B are part of the same project, we will like to aggregate them as part of the same project. This is required by Cap team.
+* `"topology.additional.classpath"` :  Any user defined classpath that needs to be passed to instances should be set in to config through this key. The value will be of the format "cp1:cp2:cp3..."
+* `"topology.update.deactivate.wait.secs"` : Amount of time to wait after deactivating a topology before updating it
+* `"topology.update.reactivate.wait.secs"` : After updating a topology, amount of time to wait for it to come back up before reactivating it
+* `"topology.environment"` : Topology-specific environment properties to be added to an Heron instance. This is added to the existing environment (that of the Heron instance).  This variable contains Map<String, String>
+* `"topology.timer.events"` : Timer events registered for a topology.  This is a Map<String, Pair<Duration, Runnable>>.  Where the key is the name and the value contains the frequency of the event and the task to run.
+* `"topology.remote.debugging.enable"` : Enable Remote debugging for java heron instances
+* `"topology.droptuples.upon.backpressure"` : Do we want to drop tuples instead of initiating Spout BackPressure
+* `"topology.component.output.bps"` : The per component output bytes per second in this topology
+
+
+### Components
+
+`components` are a list of instances that would be used as configuration objects for other components in your ECO file defined in the YAML DSL.
+The properties that are required for each `component` instance are `id` and `className`.  `id` can be any name you choose, `className`  is the fully qualified className of the Java class. The `id` field is used to identify
+the component for injection in the coming spouts and bolts defined in the topology.  `constructorArgs` is only needed
+if a component has a constructor that requires arguments.  We will get into constructor args in the next section.  `components` are optional.
+
+```yaml
+
+components:
+  - id: "property-holder"
+    className: "org.apache.heron.examples.eco.TestPropertyHolder"
+    constructorArgs:
+      - "some argument"
+
+```
+
+### Property Injection
+
+#### Constructor Injection
+
+`constructorArgs` can specify any object type.  Above you can see that the only constructor argument specified is a string
+that contained "some argument".  If declaring a number, you may omit the quotation marks.
+
+```yaml
+
+ constructorArgs:
+      - "some argument"
+      - 123.45
+
+```
+
+There is also a way to reference other components as arguments by using `ref`.  In the example
+below we are specifying an already defined component to be a constructor argument.  Any instance that is referenced by
+`ref` must have already been defined in the ECO definition file before it is to be used.
+
+```yaml
+
+constructorArgs:
+      - ref: "property-holder"
+
+```
+
+#### Setter and Public Field Injection
+
+Besides constructor injection, you may also use setter methods. In the below example, ECO will inspect the component
+for setters that match the names and values provided.  If no setter is defined, it will then look for a public field to set the property.
+
+
+```yaml
+
+properties:
+      - name: "numberProperty"
+        value: 11
+      - name: "publicProperty"
+        value: "This is public property"
+
+```
+
+## The Topology Definition
+
+Spouts and Bolts each have their own sections for defining in the ECO file.  They are extensions of `components` so they will
+be allowed the same property injection methods above.  One difference is the `parallelism` property they contain, it sets the `parallelism`
+property for each bolt or spout once the topology has been deployed into Heron.
+
+
+### Spouts
+
+```yaml
+
+spouts:
+  - id: "spout-1"
+    className: "org.apache.heron.examples.eco.TestFibonacciSpout"
+    constructorArgs:
+      - ref: "property-holder"
+    parallelism: 1
+
+```
+
+### Bolts
+
+```yaml
+
+bolts:
+  - id: "even-and-odd-bolt"
+    className: "org.apache.heron.examples.eco.EvenAndOddBolt"
+    parallelism: 1
+
+```
+
+### Streams and Groupings
+
+Streams are what connect your bolts and spouts together.  Stream Groupings are the specific way you are to connect those streams to the spouts and bolts.
+
+##### A Stream can contain the following fields.
+
+* `from` - references the `id` of the component where data is coming from
+* `to` - references the `id` of the component where the data is going
+* `grouping` - This is the grouping definition of how this stream will connect the two aforementioned components together
+
+##### A grouping can contain the following fields
+* `type` - The type of grouping.
+  - `SHUFFLE`
+  - `FIELDS`
+  - `ALL`
+  - `GLOBAL`
+  - `NONE`
+  - `CUSTOM`
+* `args` is specific to the `FIELDS` grouping type. You would specify this as a list like so `["arg1", "arg2"]`
+* `customClass` if you wanted to create a custom grouping, you could specify the fully qualified class name here
+
+In the below example, you can see the first Stream Definition declares that data will flow from `spout-1` to `even-and-odd-bolt` with a grouping type of `SHUFFLE`
+
+
+```yaml
+
+streams:
+  - from: "spout-1"
+    to: "even-and-odd-bolt"
+    grouping:
+      type: SHUFFLE
+
+  - from: "even-and-odd-bolt"
+    to: "ibasic-print-bolt"
+    grouping:
+      type: SHUFFLE
+      streamId: "odds"
+
+  - from: "even-and-odd-bolt"
+    to: "sys-out-bolt"
+    grouping:
+      type: SHUFFLE
+      streamId: "evens"
+
+```
+
+### Handling Enums
+
+The usage of Enums is supported in ECO.  You can use enums in constructor arguments, references,
+configuration methods, and properties.  In the examples below we reference the
+enum `TestUnits` from the `fibonacci-topology` mentioned above.  The enum is shown below.
+
+```java
+public enum TestUnits {
+  MB("MB"),
+  GB("GB"),
+  B("B");
+
+  String value;
+
+  TestUnits(String value) {
+    this.value = value;
+  }
+
+  public String getValue() {
+    return value;
+  }
+}
+```
+
+In the `ibasic-print-bolt` a `configMethod` is specified with the name `sampleConfigurationMethod`.  
+
+```yaml
+  - id: "ibasic-print-bolt"
+    ... excluded for simplicity
+        args:
+          - "someStringArgument"
+          - MB
+```
+This is the same as calling the following java method 
+
+```java
+
+public void sampleConfigurationMethod(String someProperty, TestUnits TestUnits) {
+    this.someProperty += someProperty;
+    this.TestUnits = TestUnits;
+  }
+  
+```
+
+### Property Substitution
+
+It's always nice to be able to define properties based on the environments you are in.  We haven't forgotten this with ECO. 
+You are able to substitute values into your ECO files by either environment variables or a properties file.  
+To turn on property substitution add the flag `--props` and specify a path to a `.properties` file. 
+
+To start over run 
+```bash
+$ heron kill local fibonacci-topology
+```
+After the topology has been killed, execute:
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-eco-examples.jar \
+  org.apache.heron.eco.Eco \
+  --eco-config-file ~/.heron/examples/fibonacci.yaml --props ~/.heron/examples/sample.properties
+```
+
+If you look above at the yaml file snippet at the beginning of the page you will see
+
+```yaml
+- id: "ibasic-print-bolt"
+    className: "org.apache.heron.examples.eco.TestIBasicPrintBolt"
+    parallelism: 1
+    configMethods:
+      - name: "sampleConfigurationMethod"
+        args:
+          - "${ecoPropertyOne}"
+          - MB
+ ```
+In the  `sample.properties` we have a key value set at `ecoPropertyOne=thisValueWasSetFromAPropertiesFile`.  You can check the logs
+for the `ibasic-print-bolt` and see the values are printing out.
+
+### Environment Variable Substitution
+
+ECO also allows you to do environment variable substitution.  To activate environment variable substitution pass the flag `--env-props` upon submitting a topology. If you have `SOME_VARIABLE` defined
+you can reference in your yaml file like below.
+
+```yaml
+${ENV-SOME_VARIABLE}
+```
+
+
+### Other ECO examples
+
+Run the simple wordcount example
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-eco-examples.jar \
+   org.apache.heron.eco.Eco \
+   --eco-config-file ~/.heron/examples/storm_wordcount.yaml
+```
+
+Run the simple windowing example
+
+```bash
+$ heron submit local \
+   ~/.heron/examples/heron-eco-examples.jar \
+   org.apache.heron.eco.Eco \
+   --eco-config-file ~/.heron/examples/storm_windowing.yaml
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-api.md b/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-api.md
new file mode 100644
index 0000000..74c649a
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-api.md
@@ -0,0 +1,627 @@
+---
+id: version-0.20.0-topology-development-streamlet-api
+title: The Heron Streamlet API for Java
+sidebar_label: The Heron Streamlet API for Java
+original_id: topology-development-streamlet-api
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+ > **The Heron Streamlet API is in beta.** 
+ > The Heron Streamlet API is well tested and can be used to build and test topologies locally. The API is not yet fully stable, however, and breaking changes are likely in the coming weeks.
+
+
+Heron processing topologies can be written using an API called the **Heron Streamlet API**. The Heron Streamlet API is currently available for the following languages:
+
+* [Java](topology-development-streamlet-api)
+* [Scala](topology-development-streamlet-scala)
+
+> Although this document covers the new Heron Streamlet API, topologies created using the original [topology API](topology-development-topology-api-java) can still be used with Heron (which means that all of your older topologies will still run).
+
+For a more in-depth conceptual guide to the new API, see [The Heron Streamlet API](topology-development-streamlet-api). A high-level overview can also be found in the section immediately [below](#the-heron-streamlet-api-vs-the-topology-api).
+
+## The Heron Streamlet API vs. The Topology API
+
+When Heron was first released, all Heron topologies needed to be written using an API based on the [Storm Topology API](topology-development-topology-api-java). Although this API is quite powerful (and can still be used), the **Heron Streamlet API** enables you to create topologies without needing to implement spouts and bolts directly or to connect spouts and bolts together.
+
+Here are some crucial differences between the two APIs:
+
+Domain | Original Topology API | Heron Streamlet API
+:------|:----------------------|:--------------------
+Programming style | Procedural, processing component based | Functional
+Abstraction level | **Low level**. Developers must think in terms of "physical" spout and bolt implementation logic. | **High level**. Developers can write processing logic in an idiomatic fashion in the language of their choice, without needing to write and connect spouts and bolts.
+Processing model | [Spout](heron-topology-concepts#spouts) and [bolt](heron-topology-concepts#bolts) logic must be created explicitly, and connecting spouts and bolts is the responsibility of the developer | Spouts and bolts are created for you automatically on the basis of the processing graph that you build
+
+The two APIs also have a few things in common:
+
+* Topologies' [logical](heron-topology-concepts#logical-plan) and [physical](heron-topology-concepts#physical-plan) plans are automatically created by Heron
+* Topologies are [managed](user-manuals-heron-cli) in the same way using the `heron` CLI tool
+
+## Getting started
+
+In order to use the Heron Streamlet API for Java, you'll need to install the `heron-api` library.
+
+### Maven setup
+
+In order to use the `heron-api` library, add this to the `dependencies` block of your `pom.xml` configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{heron:version}}</version>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Java topology created using the Heron Streamlet API in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn assembly:assembly
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example topology submission command using a compiled JAR:
+
+```bash
+$ mvn assembly:assembly
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  com.example.Main \
+  MyTopology arg1 arg2
+```
+
+### Java Streamlet API starter project
+
+If you'd like to up and running quickly with the Heron Streamlet API for Java, you can clone [this repository](https://github.com/streamlio/heron-java-streamlet-api-example), which includes an example topology built using the Streamlet API as well as the necessary Maven configuration. To build a JAR with dependencies of this example topology:
+
+```bash
+$ git clone https://github.com/streamlio/heron-java-streamlet-api-example
+$ cd heron-java-streamlet-api-example
+$ mvn assembly:assembly
+$ ls target/*.jar
+target/heron-java-streamlet-api-example-latest-jar-with-dependencies.jar
+target/heron-java-streamlet-api-example-latest.jar
+```
+
+If you're running a [local Heron cluster](getting-started-local-single-node), you can submit the built example topology like this:
+
+```bash
+$ heron submit local target/heron-java-streamlet-api-example-latest-jar-with-dependencies.jar \
+  io.streaml.heron.streamlet.WordCountStreamletTopology \
+  WordCountStreamletTopology
+```
+
+#### Selecting delivery semantics
+
+Heron enables you to apply one of three [delivery semantics](heron-delivery-semantics) to any Heron topology. For the [example topology](#java-streamlet-api-starter-project) above, you can select the delivery semantics when you submit the topology with the topology's second argument. This command, for example, would apply [effectively-once](heron-delivery-semantics) to the example topology:
+
+```bash
+$ heron submit local target/heron-java-streamlet-api-example-latest-jar-with-dependencies.jar \
+  io.streaml.heron.streamlet.WordCountStreamletTopology \
+  WordCountStreamletTopology \
+  effectively-once
+```
+
+The other options are `at-most-once` and `at-least-once`. If you don't explicitly select the delivery semantics, at-least-once semantics will be applied.
+
+## Streamlet API topology configuration
+
+Every Streamlet API topology needs to be configured using a `Config` object. Here's an example default configuration:
+
+```java
+import org.apache.heron.streamlet.Config;
+import org.apache.heron.streamlet.Runner;
+
+Config topologyConfig = Config.defaultConfig();
+
+// Apply topology configuration using the topologyConfig object
+Runner topologyRunner = new Runner();
+topologyRunner.run("name-for-topology", topologyConfig, topologyBuilder);
+```
+
+The table below shows the configurable parameters for Heron topologies:
+
+Parameter | Default
+:---------|:-------
+[Delivery semantics](#delivery-semantics) | At most once
+Serializer | [Kryo](https://github.com/EsotericSoftware/kryo)
+Number of total container topologies | 2
+Per-container CPU | 1.0
+Per-container RAM | 100 MB
+
+Here's an example non-default configuration:
+
+```java
+Config topologyConfig = Config.newBuilder()
+        .setNumContainers(5)
+        .setPerContainerRamInGigabytes(10)
+        .setPerContainerCpu(3.5f)
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE)
+        .setSerializer(Config.Serializer.JAVA)
+        .setUserConfig("some-key", "some-value")
+        .build();
+```
+
+### Delivery semantics
+
+You can apply [delivery semantics](heron-delivery-semantics) to a Streamlet API topology like this:
+
+```java
+topologyConfig
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE);
+```
+
+The other available options in the `DeliverySemantics` enum are `ATMOST_ONCE` and `ATLEAST_ONCE`.
+
+## Streamlets
+
+In the Heron Streamlet API for Java, processing graphs consist of streamlets. One or more supplier streamlets inject data into your graph to be processed by downstream operators.
+
+## Operations
+
+Operation | Description | Example
+:---------|:------------|:-------
+[`map`](#map-operations) | Create a new streamlet by applying the supplied mapping function to each element in the original streamlet | Add 1 to each element in a streamlet of integers
+[`flatMap`](#flatmap-operations) | Like a map operation but with the important difference that each element of the streamlet is flattened | Flatten a sentence into individual words
+[`filter`](#filter-operations) | Create a new streamlet containing only the elements that satisfy the supplied filtering function | Remove all inappropriate words from a streamlet of strings
+[`union`](#union-operations) | Unifies two streamlets into one, without modifying the elements of the two streamlets | Unite two different `Streamlet<String>`s into a single streamlet
+[`clone`](#clone-operations) | Creates any number of identical copies of a streamlet | Create three separate streamlets from the same source
+[`transform`](#transform-operations) | Transform a streamlet using whichever logic you'd like (useful for transformations that don't neatly map onto the available operations) |
+[`join`](#join-operations) | Create a new streamlet by combining two separate key-value streamlets into one on the basis of each element's key. Supported Join Types: Inner (as default), Outer-Left, Outer-Right and Outer. | Combine key-value pairs listing current scores (e.g. `("h4x0r", 127)`) for each user into a single per-user stream
+[`keyBy`](#key-by-operations) | Returns a new key-value streamlet by applying the supplied extractors to each element in the original streamlet |
+[`reduceByKey`](#reduce-by-key-operations) |  Produces a streamlet of key-value on each key, and in accordance with a reduce function that you apply to all the accumulated values | Count the number of times a value has been encountered
+[`reduceByKeyAndWindow`](#reduce-by-key-and-window-operations) |  Produces a streamlet of key-value on each key, within a time window, and in accordance with a reduce function that you apply to all the accumulated values | Count the number of times a value has been encountered within a specified time window
+[`countByKey`](#count-by-key-operations) | A special reduce operation of counting number of tuples on each key | Count the number of times a value has been encountered
+[`countByKeyAndWindow`](#count-by-key-and-window-operations) | A special reduce operation of counting number of tuples on each key, within a time window | Count the number of times a value has been encountered within a specified time window
+[`split`](#split-operations) | Split a streamlet into multiple streamlets with different id |
+[`withStream`](#with-stream-operations) | Select a stream with id from a streamlet that contains multiple streams |
+[`applyOperator`](#apply-operator-operations) | Returns a new streamlet by applying an user defined operator to the original streamlet | Apply an existing bolt as an operator
+[`repartition`](#repartition-operations) | Create a new streamlet by applying a new parallelism level to the original streamlet | Increase the parallelism of a streamlet from 5 to 10
+[`toSink`](#sink-operations) | Sink operations terminate the processing graph by storing elements in a database, logging elements to stdout, etc. | Store processing graph results in an AWS Redshift table
+[`log`](#log-operations) | Logs the final results of a processing graph to stdout. This *must* be the last step in the graph. |
+[`consume`](#consume-operations) | Consume operations are like sink operations except they don't require implementing a full sink interface (consume operations are thus suited for simple operations like logging) | Log processing graph results using a custom formatting function
+
+### Map operations
+
+Map operations create a new streamlet by applying the supplied mapping function to each element in the original streamlet. Here's an example:
+
+```java
+builder.newSource(() -> 1)
+    .map(i -> i + 12);
+```
+
+In this example, a supplier streamlet emits an indefinite series of 1s. The `map` operation then adds 12 to each incoming element, producing a streamlet of 13s.
+
+### FlatMap operations
+
+FlatMap operations are like `map` operations but with the important difference that each element of the streamlet is "flattened" into a collection type. In this example, a supplier streamlet emits the same sentence over and over again; the `flatMap` operation transforms each sentence into a Java `List` of individual words:
+
+```java
+builder.newSource(() -> "I have nothing to declare but my genius")
+    .flatMap((sentence) -> Arrays.asList(sentence.split("\\s+")));
+```
+
+The effect of this operation is to transform the `Streamlet<String>` into a `Streamlet<List<String>>`.
+
+> One of the core differences between `map` and `flatMap` operations is that `flatMap` operations typically transform non-collection types into collection types.
+
+### Filter operations
+
+Filter operations retain elements in a streamlet, while potentially excluding some or all elements, on the basis of a provided filtering function. Here's an example:
+
+```java
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .filter((i) -> i < 7);
+```
+
+In this example, a source streamlet consisting of random integers between 1 and 10 is modified by a `filter` operation that removes all streamlet elements that are greater than 6.
+
+### Union operations
+
+Union operations combine two streamlets of the same type into a single streamlet without modifying the elements. Here's an example:
+
+```java
+Streamlet<String> flowers = builder.newSource(() -> "flower");
+Streamlet<String> butterflies = builder.newSource(() -> "butterfly");
+
+Streamlet<String> combinedSpringStreamlet = flowers
+        .union(butterflies);
+```
+
+Here, one streamlet is an endless series of "flowers" while the other is an endless series of "butterflies". The `union` operation combines them into a single `Spring` streamlet of alternating "flowers" and "butterflies".
+
+### Clone operations
+
+Clone operations enable you to create any number of "copies" of a streamlet. Each of the "copy" streamlets contains all the elements of the original and can be manipulated just like the original streamlet. Here's an example:
+
+```java
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+Streamlet<Integer> integers = builder.newSource(() -> ThreadLocalRandom.current().nextInt(100));
+
+List<Streamlet<Integer>> copies = integers.clone(5);
+Streamlet<Integer> ints1 = copies.get(0);
+Streamlet<Integer> ints2 = copies.get(1);
+Streamlet<Integer> ints3 = copies.get(2);
+// and so on...
+```
+
+In this example, a streamlet of random integers between 1 and 100 is split into 5 identical streamlets.
+
+### Transform operations
+
+Transform operations are highly flexible operations that are most useful for:
+
+* operations involving state in [stateful topologies](heron-delivery-semantics#stateful-topologies)
+* operations that don't neatly fit into the other categories or into a lambda-based logic
+
+Transform operations require you to implement three different methods:
+
+* A `setup` method that enables you to pass a context object to the operation and to specify what happens prior to the `transform` step
+* A `transform` operation that performs the desired transformation
+* A `cleanup` method that allows you to specify what happens after the `transform` step
+
+The context object available to a transform operation provides access to:
+
+* the current state of the topology
+* the topology's configuration
+* the name of the stream
+* the stream partition
+* the current task ID
+
+Here's a Java example of a transform operation in a topology where a stateful record is kept of the number of items processed:
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.SerializableTransformer;
+
+import java.util.function.Consumer;
+
+public class CountNumberOfItems implements SerializableTransformer<String, String> {
+    private int numberOfItems;
+
+    public void setup(Context context) {
+        numberOfItems = (int) context.getState().get("number-of-items");
+        context.getState().put("number-of-items", numberOfItems + 1);
+    }
+
+    public void transform(String in, Consumer<String> consumer) {
+        String transformedString = // Apply some operation to the incoming value
+        consumer.accept(transformedString);
+    }
+
+    public void cleanup() {
+        System.out.println(
+                String.format("Successfully processed new state: %d", numberOfItems));
+    }
+}
+```
+
+This operation does a few things:
+
+* In the `setup` method, the [`Context`](/api/java/org/apache/heron/streamlet/Context.html) object is used to access the current state (which has the semantics of a Java `Map`). The current number of items processed is incremented by one and then saved as the new state.
+* In the `transform` method, the incoming string is transformed in some way and then "accepted" as the new value.
+* In the `cleanup` step, the current count of items processed is logged.
+
+Here's that operation within the context of a streamlet processing graph:
+
+```java
+builder.newSource(() -> "Some string over and over")
+        .transform(new CountNumberOfItems())
+        .log();
+```
+
+### Join operations
+
+> For a more in-depth conceptual discussion of joins, see the [Heron Streamlet API](../../../concepts/streamlet-api#join-operations) doc.
+
+Join operations unify two streamlets *on a key* (join operations thus require KV streamlets). Each `KeyValue` object in a streamlet has, by definition, a key. Here's an example of adding a join operation to a processing graph:
+
+```java
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder();
+
+KVStreamlet<String, String> streamlet1 =
+        builder.newKVSource(() -> new KeyValue<>("heron-api", "topology-api"));
+
+builder.newSource(() -> new KeyValue<>("heron-api", "streamlet-api"))
+    .join(streamlet1, WindowConfig.TumblingCountWindow(10), KeyValue::create);
+```
+
+In this case, the resulting streamlet would consist of an indefinite stream with two `KeyValue` objects with the key `heron-api` but different values (`topology-api` and `streamlet-api`).
+
+> The effect of a join operation is to create a new streamlet *for each key*.
+
+### Key by operations
+
+Key by operations convert each item in the original streamlet into a key-value pair and return a new streamlet. Here is an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .keyBy(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (get the length of each word)
+        word -> word.length()
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value). Here's an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key and window operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a [time window](../../../concepts/topologies#window-operations) across which the operation will take place
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key and window operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value, as well as information about the window in which the operation took place). Here's an example:
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Count by key operations
+
+Count by key operations extract keys from data in the original streamlet and count the number of times a key has been encountered. Here's an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKey(word -> word)
+    // The result is logged
+    .log();
+```
+
+### Count by key and window operations
+
+Count by key and window operations extract keys from data in the original streamlet and count the number of times a key has been encountered within each [time window](../../../concepts/topologies#window-operations). Here's an example:
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+    )
+    // The result is logged
+    .log();
+```
+
+### Split operations
+
+Split operations split a streamlet into multiple streamlets with different id by getting the corresponding stream ids from each item in the original streamlet. Here is an example:
+
+```java
+import java.util.Arrays;
+
+Map<String, SerializablePredicate<String>> splitter = new HashMap<>();
+    splitter.put("long_word", s -> s.length() >= 4);
+    splitter.put("short_word", s -> s.length() < 4);
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Splits the stream into streams of long and short words
+    .split(splitter)
+    // Choose the stream of the short words
+    .withStream("short_word")
+    // The result is logged
+    .log();
+```
+
+### With stream operations
+
+With stream operations select a stream with id from a streamlet that contains multiple streams. They are often used with [split](#split-operations).
+
+### Apply operator operations
+
+Apply operator operations apply a user defined operator (like a bolt) to each element of the original streamlet and return a new streamlet. Here is an example:
+
+```java
+import java.util.Arrays;
+
+private class MyBoltOperator extends MyBolt implements IStreamletRichOperator<String, String> {
+}
+
+Builder builder = Builder.newBuilder()
+    .newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Apply user defined operation
+    .applyOperator(new MyBoltOperator())
+    // The result is logged
+    .log();
+```
+
+### Repartition operations
+
+When you assign a number of [partitions](#partitioning-and-parallelism) to a processing step, each step that comes after it inherits that number of partitions. Thus, if you assign 5 partitions to a `map` operation, then any `mapToKV`, `flatMap`, `filter`, etc. operations that come after it will also be assigned 5 partitions. But you can also change the number of partitions for a processing step (as well as the number of partitions for downstream operations) using `repartition`. Here's an example:
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .setNumPartitions(5)
+        .map(i -> i + 1)
+        .repartition(2)
+        .filter(i -> i > 7 && i < 2)
+        .log();
+```
+
+In this example, the supplier streamlet emits random integers between one and ten. That operation is assigned 5 partitions. After the `map` operation, the `repartition` function is used to assign 2 partitions to all downstream operations.
+
+### Sink operations
+
+In processing graphs like the ones you build using the Heron Streamlet API, **sinks** are essentially the terminal points in your graph, where your processing logic comes to an end. A processing graph can end with writing to a database, publishing to a topic in a pub-sub messaging system, and so on. With the Streamlet API, you can implement your own custom sinks. Here's an example:
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.Sink;
+
+public class FormattedLogSink<T> implements Sink<T> {
+    private String streamletName;
+
+    public void setup(Context context) {
+        streamletName = context.getStreamName();
+    }
+
+    public void put(T element) {
+        String message = String.format("Streamlet %s has produced an element with a value of: '%s'",
+                streamletName,
+                element.toString());
+        System.out.println(message);
+    }
+
+    public void cleanup() {}
+}
+```
+
+In this example, the sink fetches the name of the enclosing streamlet from the context passed in the `setup` method. The `put` method specifies how the sink handles each element that is received (in this case, a formatted message is logged to stdout). The `cleanup` method enables you to specify what happens after the element has been processed by the sink.
+
+Here is the `FormattedLogSink` at work in an example processing graph:
+
+```java
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Here is a string to be passed to the sink")
+        .toSink(new FormattedLogSink());
+```
+
+> [Log operations](#log-operations) rely on a log sink that is provided out of the box. You'll need to implement other sinks yourself.
+
+### Log operations
+
+Log operations are special cases of consume operations that log streamlet elements to stdout.
+
+> Streamlet elements will be logged using their `toString` representations, at the `INFO` level.
+
+### Consume operations
+
+Consume operations are like [sink operations](#sink-operations) except they don't require implementing a full sink interface. Consume operations are thus suited for simple operations like formatted logging. Here's an example:
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+Builder builder = Builder.newBuilder()
+        .newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .filter(i -> i % 2 == 0)
+        .consume(i -> {
+            String message = String.format("Even number found: %d", i);
+            System.out.println(message);
+        });
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-scala.md b/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-scala.md
new file mode 100644
index 0000000..d8fec7c
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/topology-development-streamlet-scala.md
@@ -0,0 +1,560 @@
+---
+id: version-0.20.0-topology-development-streamlet-scala
+title: The Heron Streamlet API for Scala
+sidebar_label: The Heron Streamlet API for Scala
+original_id: topology-development-streamlet-scala
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+## Getting started
+
+In order to use the Heron Streamlet API for Scala, you'll need to install the `heron-api` library.
+
+### Maven setup
+
+In order to use the `heron-api` library, add this to the `dependencies` block of your `pom.xml` configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{heron:version}}</version>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Scala topology created using the Heron Streamlet API in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn assembly:assembly
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example topology submission command using a compiled JAR:
+
+```bash
+$ mvn assembly:assembly
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  com.example.Main \
+  MyTopology arg1 arg2
+```
+
+## Streamlet API topology configuration
+
+Every Streamlet API topology needs to be configured using a `Config` object. Here's an example default configuration:
+
+```scala
+import org.apache.heron.streamlet.Config
+import org.apache.heron.streamlet.scala.Runner
+
+val topologyConfig = Config.defaultConfig()
+
+// Apply topology configuration using the topologyConfig object
+val topologyRunner = new Runner()
+topologyRunner.run("name-for-topology", topologyConfig, topologyBuilder)
+```
+
+The table below shows the configurable parameters for Heron topologies:
+
+Parameter | Default
+:---------|:-------
+[Delivery semantics](#delivery-semantics) | At most once
+Serializer | [Kryo](https://github.com/EsotericSoftware/kryo)
+Total number of containers | 2
+Per-container CPU | 1.0
+Per-container RAM | 100 MB
+
+Here's an example non-default configuration:
+
+```scala
+val topologyConfig = Config.newBuilder()
+        .setNumContainers(5)
+        .setPerContainerRamInGigabytes(10)
+        .setPerContainerCpu(3.5f)
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE)
+        .setSerializer(Config.Serializer.JAVA)
+        .setUserConfig("some-key", "some-value")
+        .build()
+```
+
+### Delivery semantics
+
+You can apply [delivery semantics](../../../concepts/delivery-semantics) to a Streamlet API topology like this:
+
+```scala
+topologyConfig
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE)
+```
+
+The other available options in the `DeliverySemantics` enum are `ATMOST_ONCE` and `ATLEAST_ONCE`.
+
+## Streamlets
+
+In the Heron Streamlet API for Scala, processing graphs consist of [streamlets](../../../concepts/topologies#streamlets). One or more supplier streamlets inject data into your graph to be processed by downstream operators.
+
+## Operations
+
+Operation | Description | Example
+:---------|:------------|:-------
+[`map`](#map-operations) | Create a new streamlet by applying the supplied mapping function to each element in the original streamlet | Add 1 to each element in a streamlet of integers
+[`flatMap`](#flatmap-operations) | Like a map operation but with the important difference that each element of the streamlet is flattened | Flatten a sentence into individual words
+[`filter`](#filter-operations) | Create a new streamlet containing only the elements that satisfy the supplied filtering function | Remove all inappropriate words from a streamlet of strings
+[`union`](#union-operations) | Unifies two streamlets into one, without modifying the elements of the two streamlets | Unite two different `Streamlet<String>`s into a single streamlet
+[`clone`](#clone-operations) | Creates any number of identical copies of a streamlet | Create three separate streamlets from the same source
+[`transform`](#transform-operations) | Transform a streamlet using whichever logic you'd like (useful for transformations that don't neatly map onto the available operations) |
+[`join`](#join-operations) | Create a new streamlet by combining two separate key-value streamlets into one on the basis of each element's key. Supported Join Types: Inner (as default), Outer-Left, Outer-Right and Outer | Combine key-value pairs listing current scores (e.g. `("h4x0r", 127)`) for each user into a single per-user stream
+[`keyBy`](#key-by-operations) | Returns a new key-value streamlet by applying the supplied extractors to each element in the original streamlet |
+[`reduceByKey`](#reduce-by-key-operations) |  Produces a streamlet of key-value on each key, and in accordance with a reduce function that you apply to all the accumulated values | Count the number of times a value has been encountered
+[`reduceByKeyAndWindow`](#reduce-by-key-and-window-operations) |  Produces a streamlet of key-value on each key, within a time window, and in accordance with a reduce function that you apply to all the accumulated values | Count the number of times a value has been encountered within a specified time window
+[`countByKey`](#count-by-key-operations) | A special reduce operation of counting number of tuples on each key | Count the number of times a value has been encountered
+[`countByKeyAndWindow`](#count-by-key-and-window-operations) | A special reduce operation of counting number of tuples on each key, within a time window | Count the number of times a value has been encountered within a specified time window
+[`split`](#split-operations) | Split a streamlet into multiple streamlets with different id |
+[`withStream`](#with-stream-operations) | Select a stream with id from a streamlet that contains multiple streams |
+[`applyOperator`](#apply-operator-operations) | Returns a new streamlet by applying an user defined operator to the original streamlet | Apply an existing bolt as an operator
+[`repartition`](#repartition-operations) | Create a new streamlet by applying a new parallelism level to the original streamlet | Increase the parallelism of a streamlet from 5 to 10
+[`toSink`](#sink-operations) | Sink operations terminate the processing graph by storing elements in a database, logging elements to stdout, etc. | Store processing graph results in an AWS Redshift table
+[`log`](#log-operations) | Logs the final results of a processing graph to stdout. This *must* be the last step in the graph. |
+[`consume`](#consume-operations) | Consume operations are like sink operations except they don't require implementing a full sink interface (consume operations are thus suited for simple operations like logging) | Log processing graph results using a custom formatting function
+
+### Map operations
+
+Map operations create a new streamlet by applying the supplied mapping function to each element in the original streamlet. Here's an example:
+
+```scala
+builder.newSource(() => 1)
+    .map[Int]((i: Int) => i + 12) // or .map[Int](_.+(12)) as synthetic function
+```
+
+In this example, a supplier streamlet emits an indefinite series of 1s. The `map` operation then adds 12 to each incoming element, producing a streamlet of 13s.
+
+### FlatMap operations
+
+FlatMap operations are like `map` operations but with the important difference that each element of the streamlet is "flattened" into a collection type. In this example, a supplier streamlet emits the same sentence over and over again; the `flatMap` operation transforms each sentence into a Scala `List` of individual words:
+
+```scala
+builder.newSource(() => "I have nothing to declare but my genius")
+    .flatMap[String](_.split(" "))
+```
+
+The effect of this operation is to transform the `Streamlet[String]` into a `Streamlet[List[String]]`.
+
+> One of the core differences between `map` and `flatMap` operations is that `flatMap` operations typically transform non-collection types into collection types.
+
+### Filter operations
+
+Filter operations retain elements in a streamlet, while potentially excluding some or all elements, on the basis of a provided filtering function. Here's an example:
+
+```scala
+import java.util.concurrent.ThreadLocalRandom
+
+builder.newSource(() => ThreadLocalRandom.current().nextInt(1, 11))
+        .filter(_.<(7))
+```
+
+In this example, a source streamlet consisting of random integers between 1 and 10 is modified by a filter operation that removes all streamlet elements that are lower than 7.
+
+### Union operations
+
+Union operations combine two streamlets of the same type into a single streamlet without modifying the elements. Here's an example:
+
+```scala
+val flowers = builder.newSource(() => "flower")
+val butterflies = builder.newSource(() => "butterfly")
+
+val combinedSpringStreamlet = flowers.union(butterflies)
+```
+
+Here, one streamlet is an endless series of "flowers" while the other is an endless series of "butterflies". The `union` operation combines them into a single streamlet of alternating "flowers" and "butterflies".
+
+### Clone operations
+
+Clone operations enable you to create any number of "copies" of a streamlet. Each of the "copy" streamlets contains all the elements of the original and can be manipulated just like the original streamlet. Here's an example:
+
+```scala
+import scala.util.Random
+
+val integers = builder.newSource(() => Random.nextInt(100))
+
+val copies = integers.clone(5)
+val ints1 = copies.get(0)
+val ints2 = copies.get(1)
+val ints3 = copies.get(2)
+// and so on...
+```
+
+In this example, a streamlet of random integers between 0 and 99 is split into 5 identical streamlets.
+
+### Transform operations
+
+Transform operations are highly flexible operations that are most useful for:
+
+* operations involving state in [stateful topologies](../../../concepts/delivery-semantics#stateful-topologies)
+* operations that don't neatly fit into the other categories or into a lambda-based logic
+
+Transform operations require you to implement three different methods:
+
+* A `setup` function that enables you to pass a context object to the operation and to specify what happens prior to the `transform` step
+* A `transform` operation that performs the desired transformation
+* A `cleanup` function that allows you to specify what happens after the `transform` step
+
+The context object available to a transform operation provides access to:
+
+* the current state of the topology
+* the topology's configuration
+* the name of the stream
+* the stream partition
+* the current task ID
+
+Here's a Scala example of a transform operation in a topology where a stateful record is kept of the number of items processed:
+
+```scala
+import org.apache.heron.streamlet.Context
+import org.apache.heron.streamlet.scala.SerializableTransformer
+
+class CountNumberOfItems extends SerializableTransformer[String, String] {
+    private val numberOfItems = new AtomicLong()
+
+    override def setup(context: Context): Unit = {
+      numberOfItems.incrementAndGet()
+      context.getState().put("number-of-items", numberOfItems)
+    }
+
+    override def transform(i: String, f: String => Unit): Unit = {
+      val transformedString = i.toUpperCase
+      f(transformedString)
+    }
+
+    override def cleanup(): Unit =
+      println(s"Successfully processed new state: $numberOfItems")
+  }
+```
+
+This operation does a few things:
+
+* In the `setup` method, the [`Context`](/api/java/org/apache/heron/streamlet/Context.html) object is used to access the current state (which has the semantics of a Java `Map`). The current number of items processed is incremented by one and then saved as the new state.
+* In the `transform` method, the incoming string is transformed as UpperCase in some way and then "accepted" as the new value.
+* In the `cleanup` step, the current count of items processed is logged.
+
+Here's that operation within the context of a streamlet processing graph:
+
+```scala
+builder.newSource(() => "Some string over and over")
+        .transform(new CountNumberOfItems())
+        .log()
+```
+
+### Join operations
+
+> For a more in-depth conceptual discussion of joins, see the [Heron Streamlet API](../../../concepts/streamlet-api#join-operations) doc.
+
+Join operations unify two streamlets *on a key* (join operations thus require KV streamlets). Each `KeyValue` object in a streamlet has, by definition, a key. When a `join` operation is added to a processing graph,
+
+```scala
+import org.apache.heron.streamlet.{Config, KeyValue, WindowConfig}
+import org.apache.heron.streamlet.scala.Builder
+
+val builder = Builder.newBuilder()
+
+val streamlet1 = builder
+  .newSource(() =>
+    new KeyValue[String, String]("heron-api", "topology-api"))
+  .setName("streamlet1")
+
+val streamlet2 = builder
+  .newSource(() =>
+    new KeyValue[String, String]("heron-api", "streamlet-api"))
+  .setName("streamlet2")
+
+streamlet1.join[KeyValue[String, String], KeyValue[String, String], String](
+  streamlet2,
+  (kv: KeyValue[String, String]) => kv,
+  (kv: KeyValue[String, String]) => kv,
+  WindowConfig.TumblingCountWindow(10),
+  (kv1: KeyValue[String, String], kv2: KeyValue[String, String]) =>
+    kv1.getValue + " - " + kv2.getValue
+)
+```
+
+In this case, the resulting streamlet would consist of an indefinite stream with two `KeyValue` objects with the key `heron-api` but different values (`topology-api` and `streamlet-api`).
+
+> The effect of a `join` operation is to create a new streamlet *for each key*.
+
+### Key by operations
+
+Key by operations convert each item in the original streamlet into a key-value pair and return a new streamlet. Here is an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  .keyBy[String, Int](
+      // Key extractor (in this case, each word acts as the key)
+      (word: String) => word,
+      // Value extractor (get the length of each word)
+      (word: String) => word.length
+  )
+  // The result is logged
+  .log();
+```
+
+### Reduce by key operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value). Here's an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  .reduceByKey[String, Int](
+      // Key extractor (in this case, each word acts as the key)
+      (word: String) => word,
+      // Value extractor (each word appears only once, hence the value is always 1)
+      (word: String) => 1,
+      // Reduce operation (a running sum)
+      (x: Int, y: Int) => x + y)
+  // The result is logged
+  .log();
+```
+
+### Reduce by key and window operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a [time window](../../../concepts/topologies#window-operations) across which the operation will take place
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key and window operations produce a new streamlet of key-value window objects (which include a key-value pair including the extracted key and calculated value, as well as information about the window in which the operation took place). Here's an example:
+
+```scala
+import org.apache.heron.streamlet.WindowConfig;
+
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  .reduceByKeyAndWindow[String, Int](
+      // Key extractor (in this case, each word acts as the key)
+      (word: String) => word,
+      // Value extractor (each word appears only once, hence the value is always 1)
+      (word: String) => 1,
+      // Window configuration
+      WindowConfig.TumblingCountWindow(50),
+      // Reduce operation (a running sum)
+      (x: Int, y: Int) => x + y)
+  // The result is logged
+  .log();
+```
+
+### Count by key operations
+
+Count by key operations extract keys from data in the original streamlet and count the number of times a key has been encountered. Here's an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  // Count the number of occurrences of each word
+  .countByKey[String]((word: String) => word)
+  // The result is logged
+  .log();
+```
+
+### Count by key and window operations
+
+Count by key and window operations extract keys from data in the original streamlet and count the number of times a key has been encountered within each [time window](../../../concepts/topologies#window-operations). Here's an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  // Count the number of occurrences of each word within each time window
+  .countByKeyAndWindow[String](
+      (word: String) => word,
+      WindowConfig.TumblingCountWindow(50))
+  // The result is logged
+  .log();
+```
+
+### Split operations
+
+Split operations split a streamlet into multiple streamlets with different id by getting the corresponding stream ids from each item in the original streamlet. Here is an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  // Split the stream into streams of long and short words
+  .split(Map(
+      "long_word" -> { word: String => word.length >= 4 },
+      "short_word" -> { word: String => word.length < 4 }
+  ))
+  .withStream("short_word")
+  // The result is logged
+  .log();
+```
+
+### With stream operations
+
+With stream operations select a stream with id from a streamlet that contains multiple streams. They are often used with [split](#split-operations).
+
+### Apply operator operations
+
+Apply operator operations apply a user defined operator (like a bolt) to each element of the original streamlet and return a new streamlet. Here is an example:
+
+```scala
+val builder = Builder.newBuilder()
+
+private class MyBoltOperator extends MyBolt
+    with IStreamletOperator[String, String] {
+}
+
+builder
+  .newSource(() => "Paco de Lucia is one of the most popular virtuoso")
+  // Convert each sentence into individual words
+  .flatMap[String](_.split(" "))
+  // Apply user defined operation
+  .applyOperator(new MyBoltOperator())
+  // The result is logged
+  .log();
+```
+
+### Repartition operations
+
+When you assign a number of [partitions](#partitioning-and-parallelism) to a processing step, each step that comes after it inherits that number of partitions. Thus, if you assign 5 partitions to a `map` operation, then any `mapToKV`, `flatMap`, `filter`, etc. operations that come after it will also be assigned 5 partitions. But you can also change the number of partitions for a processing step (as well as the number of partitions for downstream operations) using `repartition`. Here's an example:
+
+```scala
+import java.util.concurrent.ThreadLocalRandom;
+
+val builder = Builder.newBuilder
+
+val numbers = builder
+  .newSource(() => ThreadLocalRandom.current().nextInt(1, 11))
+
+numbers
+  .setNumPartitions(5)
+  .map(i => i + 1)
+  .repartition(2)
+  .filter(i => i > 7 && i < 2)
+  .log()
+```
+
+In this example, the supplier streamlet emits random integers between 1 and 10. That operation is assigned 5 partitions. After the `map` operation, the `repartition` function is used to assign 2 partitions to all downstream operations.
+
+### Sink operations
+
+In processing graphs like the ones you build using the Heron Streamlet API, **sinks** are essentially the terminal points in your graph, where your processing logic comes to an end. A processing graph can end with writing to a database, publishing to a topic in a pub-sub messaging system, and so on. With the Streamlet API, you can implement your own custom sinks. Here's an example:
+
+```scala
+import org.apache.heron.streamlet.Context
+import org.apache.heron.streamlet.scala.Sink
+
+class FormattedLogSink extends Sink[String] {
+    private var streamName: Option[String] = None
+
+    override def setup(context: Context): Unit =
+      streamName = Some(context.getStreamName)
+
+    override def put(tuple: String): Unit =
+      println(s"The current value of tuple is $tuple in stream: $streamName")
+
+    override def cleanup(): Unit = {}
+  }
+```
+
+In this example, the sink fetches the name of the enclosing streamlet from the context passed in the `setup` method. The `put` method specifies how the sink handles each element that is received (in this case, a formatted message is logged to stdout). The `cleanup` method enables you to specify what happens after the element has been processed by the sink.
+
+Here is the `FormattedLogSink` at work in an example processing graph:
+
+```scala
+val builder = Builder.newBuilder
+
+builder.newSource(() => "Here is a string to be passed to the sink")
+        .toSink(new FormattedLogSink)
+```
+
+> [Log operations](#log-operations) rely on a log sink that is provided out of the box. You'll need to implement other sinks yourself.
+
+### Log operations
+
+Log operations are special cases of consume operations that log streamlet elements to stdout.
+
+> Streamlet elements will be logged using their `toString` representations, at the `INFO` level.
+
+### Consume operations
+
+Consume operations are like [sink operations](#sink-operations) except they don't require implementing a full sink interface. Consume operations are thus suited for simple operations like formatted logging. Here's an example:
+
+```scala
+val builder = Builder.newBuilder
+      .newSource(() => Random.nextInt(10))
+      .filter(i => i % 2 == 0)
+      .consume(i => println(s"Even number found: $i"))
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-java.md b/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-java.md
new file mode 100644
index 0000000..109f5a9
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-java.md
@@ -0,0 +1,444 @@
+---
+id: version-0.20.0-topology-development-topology-api-java
+title: The Heron Topology API for Java
+sidebar_label: The Heron Topology API for Java
+original_id: topology-development-topology-api-java
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> This document pertains to the older, Storm-based, Heron Topology API.  Heron now offers two separate APIs for building topologies: the original, [Storm](https://storm.apache.org)-based Topology API, and the newer [Streamlet API](../../../concepts/topologies#the-heron-streamlet-api). Topologies created using the Topology API can still run on Heron and there are currently no plans to deprecate this API. We would, however, recommend that you use the Streamlet API for future work.
+
+
+A topology specifies components like spouts and bolts, as well as the relation
+between components and proper configurations. The
+[`heron-api`](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.apache.heron%22%20AND%20a%3A%22heron-api%22)
+enables you to create topology logic in Java.
+
+> If you're interested in creating stateful topologies with [effectively-once
+> semantics](heron-delivery-semantics) in Java, see [this new
+> guide](guides-effectively-once-java-topologies).
+
+## Getting started
+
+In order to use the Heron API for Java, you'll need to install the `heron-api` library, which is available
+via [Maven Central](http://search.maven.org/).
+
+### Maven setup
+
+To install the `heron-api` library using Maven, add this to the `dependencies` block of your `pom.xml`
+configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{heron:version}}</version>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Java topology in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn assembly:assembly
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example topology submission command using a compiled JAR:
+
+```bash
+$ mvn assembly:assembly
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  com.example.Main \
+  MyTopology arg1 arg2
+```
+
+### Writing your topology logic
+
+Heron [topologies](heron-topology-concepts) are processing graphs consisting
+of spouts that ingest data and bolts that process that data.
+
+> **Don't want to manually create spouts and bolts? Try the Heron Streamlet API.**  If you find manually creating and connecting spouts and bolts to be overly cumbersome, we recommend trying out the [Heron Streamlet API](topology-development-streamlet-api-java) for Java, which enables you to create your topology logic using a highly streamlined logic inspired by functional programming concepts.
+
+Once you've defined the spouts and bolts, a topology can be composed using a
+[`TopologyBuilder`](/api/org/apache/heron/api/topology/TopologyBuilder.html). The
+`TopologyBuilder` has two major methods used to specify topology components:
+
+Method | Description
+:------|:-----------
+`setBolt(String id, IRichBolt bolt, Number parallelismHint)` | `id` is the unique identifier that is assigned to a bolt, `bolt` is the one previously composed, and `parallelismHint` is a number that specifies the number of instances of this bolt.
+`setSpout(String id, IRichSpout spout, Number parallelismHint)` | `id` is the unique identifier that is assigned to a spout, `spout` is the one previously composed, and `parallelismHint` is a number that specifies the number of instances of this spout.
+
+Here's a simple example:
+
+```java
+
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("word", new TestWordSpout(), 5);
+builder.setBolt("exclaim", new ExclamationBolt(), 4);
+```
+
+In addition to the component specification, you also need to specify how tuples
+will be routed between your topology components. There are a few different grouping
+strategies available:
+
+Grouping strategy | Description
+:-----------------|:-----------
+Fields grouping | Tuples are transmitted to bolts based on a given field. Tuples with the same field will always go to the same bolt.
+Global grouping | All tuples are transmitted to a single instance of a bolt with the lowest task id.
+Shuffle Grouping | Tuples are randomly transmitted to different instances of a bolt.
+None grouping | Currently, this is the same as shuffle grouping.
+All grouping | All tuples are transmitted to all instances of a bolt.
+Custom grouping | User-defined grouping strategy.
+
+The following snippet is a simple example of specifying shuffle grouping
+between a `word` spout and an `exclaim` bolt.
+
+```java
+
+builder.setBolt("exclaim", new ExclamationBolt(), 4)
+  .shuffleGrouping("word");
+```
+
+Once the components and the grouping are specified, the topology can be built.
+
+```java
+HeronTopology topology = builder.createTopology();
+```
+
+See the [`ExclamationTopology`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/ExclamationTopology.java) for the complete example. More examples can be found in the  [`examples package`](https://github.com/apache/incubator-heron/tree/master/examples/src/java/org/apache/heron/examples).
+
+## Spouts
+
+A Heron **spout** is a source of streams, responsible for emitting
+[tuples](guides-data-model) into the topology. A spout may, for
+example, read data from a Kestrel queue or read tweets from the Twitter API and
+emit tuples to one or more bolts.
+
+Information on building spouts can be found in [Building
+Spouts](../../developers/java/spouts).
+
+### Implementing a Spout
+
+Spouts must implement the [`ISpout`](/api/org/apache/heron/api/spout/ISpout.html) interface.
+
+```java
+public interface ISpout extends Serializable {
+  void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector);
+  void close();
+  void activate();
+  void deactivate();
+  void nextTuple();
+  void ack(Object msgId);
+  void fail(Object msgId);
+}
+```
+
+* The `open` method is called when the spout is initialized and provides the
+spout with the executing environment.
+
+* The `close` method is called when the spout is shutdown. There's no guarantee
+that this method is called due to how the instance is killed.
+
+* The `activate` method is called when the spout is asked to go back into the
+active state.
+
+* The `deactivate` method is called when the spout is asked to enter the
+deactivated state.
+
+* The `nextTuple` method is used to fetch tuples from the input source and emit
+them to the [`OutputCollector`](/api/org/apache/heron/api/bolt/).
+
+* The `ack` method is called when the `Tuple` with the `msgId` emitted by this
+spout is successfully processed.
+
+* The `fail` method is called when the `Tuple` with the `msgId` emitted by this
+spout is not processed successfully.
+
+See [`TestWordSpout`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/spout/TestWordSpout.java) for a simple spout example.
+
+Instead of implementing the [`ISpout`](/api/org/apache/heron/api/spout/ISpout.html) interface directly, you can also implement [`IRichSpout`](/api/org/apache/heron/api/spout/IRichSpout.html).
+
+
+## Bolts
+
+A Heron **bolt** consumes streams of
+[tuples](guides-data-model) emitted by spouts and performs some
+set of user-defined processing operations on those tuples, which may include
+performing complex stream transformations, performing storage operations,
+aggregating multiple streams into one, emitting tuples to other bolts within the
+topology, and much more.
+
+### Implementing a Bolt
+
+
+Bolts must implement the [`IBolt`](/api/org/apache/heron/api/bolt/IBolt.html) interface.
+
+```java
+public interface IBolt extends Serializable {
+  void prepare(Map<String, Object> heronConf, TopologyContext context, OutputCollector collector);
+  void execute(Tuple input);
+  void cleanup();
+}
+```
+
+* The `prepare` method is called when the bolt is initialized and provides the
+bolt with the executing environment.
+
+* The `execute` method is called to process a single input `Tuple`. Results can
+be emitted downstream using the
+[`OutputCollector`](/api/org/apache/heron/api/bolt/OutputCollector.html)
+provided in the `prepare` method.
+
+* The `cleanup` method is called when the bolt is shutdown. There's no guarantee
+that this method is called due to how the instance is killed.
+
+See [`ExclamationBolt`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/ExclamationTopology.java) for a simple bolt example.
+
+Instead of implementing the [`IBolt`](/api/org/apache/heron/api/bolt/IBolt.html) interface directly, you can also implement [`IRichBolt`](/api/org/apache/heron/api/bolt/IRichBolt.html).
+
+## Applying delivery semantics to topologies
+
+```java
+import org.apache.heron.api.Config;
+
+Config topologyConfig = new Config();
+
+topologyConfig.setTopologyReliabilityMode(Config.TopologyReliabilityMode.EFFECTIVELY_ONCE);
+```
+
+There are three delivery semantics available corresponding to the three delivery semantics that Heron provides:
+
+* `ATMOST_ONCE`
+* `ATLEAST_ONCE`
+* `EFFECTIVELY_ONCE`
+
+## Acking
+
+In distributed systems, an **ack** (short for "acknowledgment") is a message that confirms that some action has been taken. In Heron, you can create [bolts](#acking-bolts) that emit acks when some desired operation has occurred (for example data has been successfully stored in a database or a message has been successfully produced on a topic in a pub-sub messaging system). Those acks can then be received and acted upon by upstream [spouts](#ack-receiving-spouts).
+
+> You can see acking at work in a complete Heron topology in [this topology](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/AckingTopology.java).
+
+Whereas acking a tuple indicates that some operation has succeeded, the opposite can be indicated when a bolt [fails](#failing) a tuple.
+
+### Acking bolts
+
+Each Heron bolt has an `OutputCollector` that can ack tuples using the `ack` method. Tuples can be acked inside the `execute` method that each bolt uses to process incoming tuples. *When* a bolt acks tuples is up to you. Tuples can be acked immediately upon receipt, after data has been saved to a database, after a message has been successfully published to a pub-sub topic, etc.
+
+Here's an example of a bolt that acks tuples when they're successfully processed:
+
+```java
+import org.apache.heron.api.bolt.BaseRichBolt;
+import org.apache.heron.api.bolt.OutputCollector;
+import org.apache.heron.api.topology.TopologyContext;
+
+public class AckingBolt extends BaseRichBolt {
+    private OutputCollector outputCollector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        this.outputCollector = collector;
+    }
+
+    private void applyProcessingOperation(Tuple tuple) throws Exception {
+        // Some processing logic for each tuple received by the bolt
+    }
+
+    public void execute(Tuple tuple) {
+        try {
+            applyProcessingOperation(tuple);
+            outputCollector.ack(tuple);
+        } catch (Exception e) {
+            outputCollector.fail(tuple);
+        }
+    }
+}
+```
+
+In this bolt, there's an `applyProcessingOperation` function that processes each incoming tuple. One of two things can result from this function:
+
+1. The operation succeeds, in which case the bolt sends an ack. Any upstream spouts---such as a spout like the `AckReceivingSpout` below---would then receive that ack, along with the message ID that the bolt provides.
+1. The operation fails and throws an exception, in which case the tuple is failed rather than acked.
+
+### Ack-receiving spouts
+
+Heron spouts don't emit acks, but they can receive acks when downstream bolts have acked a tuple. In order to receive an ack from downstream bolts, spouts need to do two things:
+
+1. [Specify](#specifying-a-message-id) a message ID when they emit tuples using the `nextTuple` method
+1. [Implement](#specifying-ack-reception-logic) an `ack` function that specifies what will happen when an ack is received from downstream bolts
+
+### Specifying a message ID
+
+If you want a spout to receive acks from downstream bolts, the spout needs to specify a message ID every time the spout's `SpoutOutputCollector` emits a tuple to downstream bolts. Here's an example:
+
+```java
+import org.apache.heron.api.spout.BaseRichSpout;
+
+public class AckReceivingSpout extends BaseRichSpout {
+    private Object generateMessageId() {
+        // Some logic to produce a unique ID
+    }
+
+    public void nextTuple() {
+        collector.emit(new Values(someValue), generateMessageId());
+    }
+}
+```
+
+In this example, each tuple emitted by the spout includes a unique message ID. If no ID is specified, as in the example below, then the spout simply *will not receive acks*:
+
+```java
+public class NoAckReceivedSpout extends BaseRichSpout {
+    public void nextTuple() {
+        collector.emit(new Values(someValue));
+    }
+}
+```
+
+> When implementing acking logic---as well as [failing logic](#failing)---each tuple that is acked/failed **must have a unique ID**. Otherwise, the spout receiving the ack will not be able to identify *which* tuple has been acked/failed.
+
+When specifying an ID for the tuple being emitted, the ID is of type `Object`, which means that you can serialize to/deserialize from any data type that you'd like. The message ID could thus be a simple `String` or `long` or something more complex, like a hash, `Map`, or POJO.
+
+### Specifying ack reception logic
+
+In order to specify what your spout does when an ack is received, you need to implement an `ack` function in your spout. That function takes a Java `Object` containing the tuple's ID, which means that you can potentially serialize the message ID to any type you'd like.
+
+In this example, the spout simply logs the message ID:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    private Object generateMessageId() {
+        // Some logic to produce a unique ID
+    }
+
+    public void nextTuple() {
+        collector.emit(new Values(someValue), generateMessageId());
+    }
+
+    public void ack(Object messageId) {
+        // This will simply print the message ID whenever an ack arrives
+        System.out.println((String) messageId);
+    }
+}
+```
+
+In this example, the spout performs a series of actions when receiving the ack:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    public void nextTuple() {
+        if (someCondition) {
+            String randomHash = // Generate a random hash as a message ID
+            collector.emit(new Values(val), randomHash);
+        }
+    }
+
+    public void ack(Object messageId) {
+        saveItemToDatabase(item);
+        publishToPubSubTopic(message);
+    }
+}
+```
+
+### Failing
+
+**Failing** a tuple is essentially the opposite of acking it, i.e. it indicates that some operation has failed. Bolts can fail tuples by calling the `fail` method on the `OutputCollector` rather than `ack`. Here's an example:
+
+
+```java
+public class AckingBolt extends BaseRichBolt {
+    public void execute(Tuple tuple) {
+        try {
+            someProcessingOperation(tuple);
+            collector.ack(tuple);
+        } catch (Exception e) {
+            collector.fail(tuple);
+        }
+    }
+}
+```
+
+In this example, an exception-throwing processing operation is attempted. If it succeeds, the tuple is acked; if it fails and an exception is thrown, the tuple is failed.
+
+As with acks, spouts can be set up to handle failed tuples by implementing the `fail` method, which takes the message ID as the argument (just like the `ack` method). Here's an example:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    public void nextTuple() {
+        collector.emit(new Values(someValue), someMessageId);
+    }
+
+    public void fail(Object messageId) {
+        // Process the messageId
+    }
+}
+```
+
+As with acking, spouts must include a message ID when emitting tuples or else they will not receive fail messages.
+
+### Acking, failing, and timeouts
+
+If you're setting up your spouts and bolts to include an ack/fail logic, you can specify that a tuple will automatically be failed if a timeout threshold is reached before the tuple is acked. In this example, all tuples passing through all bolts will be failed if not acked within 10 seconds:
+
+```java
+import org.apache.heron.api.Config;
+
+Config config = new Config();
+config.setMessageTimeoutSecs(10);
+```
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-python.md b/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-python.md
new file mode 100644
index 0000000..3ba8eca
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/topology-development-topology-api-python.md
@@ -0,0 +1,870 @@
+---
+id: version-0.20.0-topology-development-topology-api-python
+title: The Heron Topology API for Python
+sidebar_label: The Heron Topology API for Python
+original_id: topology-development-topology-api-python
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> The current version of `heronpy` is [{{heronpy:version}}](https://pypi.python.org/pypi/heronpy/{{heronpy:version}}).
+
+Support for developing Heron topologies in Python is provided by a Python library called [`heronpy`](https://pypi.python.org/pypi/heronpy).
+
+> #### Python API docs
+> You can find API docs for the `heronpy` library [here](/api/python).
+
+## Setup
+
+First, you need to install the `heronpy` library using [pip](https://pip.pypa.io/en/stable/), [EasyInstall](https://wiki.python.org/moin/EasyInstall), or an analogous tool:
+
+```shell
+$ pip install heronpy
+$ easy_install heronpy
+```
+
+Then you can include `heronpy` in your project files. Here's an example:
+
+```python
+from heronpy.api.bolt.bolt import Bolt
+from heronpy.api.spout.spout import Spout
+from heronpy.api.topology import Topology
+```
+
+## Writing topologies in Python
+
+Heron [topologies](heron-topology-concepts) are networks of [spouts](heron-topology-concepts#spouts) that pull data into a topology and [bolts](heron-topology-concepts#bolts) that process that ingested data.
+
+> You can see how to create Python spouts in the [Implementing Python Spouts](#spouts) guide and how to create Python bolts in the [Implementing Python Bolts](#bolts) guide.
+
+Once you've defined spouts and bolts for a topology, you can then compose the topology in one of two ways:
+
+* You can use the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class inside of a main function.
+
+    Here's an example:
+
+    ```python
+    #!/usr/bin/env python
+    from heronpy.api.topology import TopologyBuilder
+
+
+    if __name__ == "__main__":
+        builder = TopologyBuilder("MyTopology")
+        # Add spouts and bolts
+        builder.build_and_submit()
+    ```
+
+* You can subclass the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class.
+
+    Here's an example:
+
+    ```python
+    from heronpy.api.stream import Grouping
+    from heronpy.api.topology import Topology
+
+
+    class MyTopology(Topology):
+        my_spout = WordSpout.spec(par=2)
+        my_bolt = CountBolt.spec(par=3, inputs={my_spout: Grouping.fields("word")})
+    ```
+
+## Defining topologies using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class
+
+If you create a Python topology using a [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder), you need to instantiate a `TopologyBuilder` inside of a standard Python main function, like this:
+
+```python
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("MyTopology")
+```
+
+Once you've created a `TopologyBuilder` object, you can add [bolts](#bolts) using the [`add_bolt`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_bolt) method and [spouts](#spouts) using the [`add_spout`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_spout) method. Here's an example:
+
+```python
+builder = TopologyBuilder("MyTopology")
+builder.add_bolt("my_bolt", CountBolt, par=3)
+builder.add_spout("my_spout", WordSpout, par=2)
+```
+
+Both the `add_bolt` and `add_spout` methods return the corresponding [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) object.
+
+The `add_bolt` method takes four arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt | |
+`bolt_cls` | class | The subclass of [`Bolt`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt) that defines this bolt | |
+`par` | `int` | The number of instances of this bolt in the topology | |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used | |
+`config` | `dict` | Specifies the configuration for this bolt | `None`
+
+The `add_spout` method takes three arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout | |
+`spout_cls` | class | The subclass of [`Spout`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout) that defines this spout | |
+`par` | `int` | The number of instances of this spout in the topology | |
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+The following is an example implementation of a word count topology in Python that uses [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder).
+
+```python
+from your_spout import WordSpout
+from your_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("WordCountTopology")
+    # piece together the topology
+    word_spout = builder.add_spout("word_spout", WordSpout, par=2)
+    count_bolt = builder.add_bolt("count_bolt", CountBolt, par=2, inputs={word_spout: Grouping.fields("word")})
+    # submit the topology
+    builder.build_and_submit()
+```
+
+Note that arguments to the main method can be passed by providing them in the
+`heron submit` command.
+
+### Topology-wide configuration
+
+If you're building a Python topology using a `TopologyBuilder`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api import api_constants
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    topology_config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    builder = TopologyBuilder("MyTopology")
+    builder.set_config(topology_config)
+    # Add bolts and spouts, etc.
+```
+
+### Launching the topology
+
+If you want to [submit](../../../operators/heron-cli#submitting-a-topology) Python topologies to a Heron cluster, they need to be packaged as a [PEX](https://pex.readthedocs.io/en/stable/whatispex.html) file. In order to produce PEX files, we recommend using a build tool like [Pants](http://www.pantsbuild.org/python_readme.html) or [Bazel](https://github.com/benley/bazel_rules_pex).
+
+If you defined your topology using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class and built a `word_count.pex` file for that topology in the `~/topology` folder, you can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/word_count.pex \
+  - # No class specified
+```
+
+Note the `-` in this submission command. If you define a topology using `TopologyBuilder`, you do not need to instruct Heron where your main method is located.
+
+> #### Example topologies buildable as PEXs
+> * See [this repo](https://github.com/streamlio/pants-dev-environment) for an example of a Heron topology written in Python and deployable as a Pants-packaged PEX.
+> * See [this repo](https://github.com/streamlio/bazel-dev-environment) for an example of a Heron topology written in Python and deployable as a Bazel-packaged PEX.
+
+## Defining a topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class
+
+If you create a Python topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class, you need to create a new topology class, like this:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class MyTopology(Topology):
+    my_spout = WordSpout.spec(par=2)
+    my_bolt_inputs = {my_spout: Grouping.fields("word")}
+    my_bolt = CountBolt.spec(par=3, inputs=my_bolt_inputs)
+```
+
+All you need to do is place [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s as the class attributes
+of your topology class, which are returned by the `spec()` method of
+your spout or bolt class. You do *not* need to run a `build` method or anything like that; the `Topology` class will automatically detect which spouts and bolts are included in the topology.
+
+> If you use this method to define a new Python topology, you do *not* need to have a main function.
+
+For bolts, the [`spec`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt.spec) method takes four optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt or `None` if you want to use the variable name of the returned `HeronComponentSpec` as the unique identifier for this bolt | `None` |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used | |
+`par` | `int` | The number of instances of this bolt in the topology | `1` |
+`config` | `dict` | Specifies the configuration for this bolt | `None`
+
+
+For spouts, the [`spec`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout.spec) method takes three optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout or `None` if you want to use the variable name of the returned `HeronComponentSpec` as the unique identifier for this spout | `None` |
+`par` | `int` | The number of instances of this spout in the topology | `1` |
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+Here's an example topology definition with one spout and one bolt:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class WordCount(Topology):
+    word_spout = WordSpout.spec(par=2)
+    count_bolt = CountBolt.spec(par=2, inputs={word_spout: Grouping.fields("word")})
+```
+
+### Launching
+
+If you defined your topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class,
+your main Python file should *not* contain a main method. You will, however, need to instruct Heron which class contains your topology definition.
+
+Let's say that you've defined a topology by subclassing `Topology` and built a PEX stored in `~/topology/dist/word_count.pex`. The class containing your topology definition is `topology.word_count.WordCount`. You can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/dist/word_count.pex \
+  topology.word_count.WordCount \ # Specifies the topology class definition
+  WordCountTopology
+```
+
+### Topology-wide configuration
+
+If you're building a Python topology by subclassing `Topology`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api.topology import Topology
+from heronpy.api import api_constants
+
+
+class MyTopology(Topology):
+    config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    # Add bolts and spouts, etc.
+```
+
+## Multiple streams
+
+To specify that a component has multiple output streams, instead of using a list of
+strings for `outputs`, you can specify a list of `Stream` objects, in the following manner.
+
+```python
+class MultiStreamSpout(Spout):
+    outputs = [
+        Stream(fields=["normal", "fields"], name="default"),
+        Stream(fields=["error_message"], name="error_stream"),
+    ]
+```
+
+To select one of these streams as the input for your bolt, you can simply
+use `[]` to specify the stream you want. Without any stream specified, the `default`
+stream will be used.
+
+```python
+class MultiStreamTopology(Topology):
+    spout = MultiStreamSpout.spec()
+    error_bolt = ErrorBolt.spec(inputs={spout["error_stream"]: Grouping.LOWEST})
+    consume_bolt = ConsumeBolt.spec(inputs={spout: Grouping.SHUFFLE})
+```
+
+## Declaring output fields using the `spec()` method
+
+In Python topologies, the output fields of your spouts and bolts
+need to be declared by placing `outputs` class attributes, as there is
+no `declareOutputFields()` method. `heronpy` enables you to dynamically declare output fields as a list using the
+`optional_outputs` argument in the `spec()` method.
+
+This is useful in a situation like below.
+
+```python
+class IdentityBolt(Bolt):
+    # Statically declaring output fields is not allowed
+    def process(self, tup):
+        emit([tup.values])
+
+
+class DynamicOutputField(Topology):
+    spout = WordSpout.spec()
+    bolt = IdentityBolt.spec(inputs={spout: Grouping.ALL}, optional_outputs=["word"])
+```
+
+You can also declare outputs in the `add_spout()` and the `add_bolt()`
+method for the `TopologyBuilder` in the same way.
+
+## Example topologies
+
+There are a number of example topologies that you can peruse in the [`examples/src/python`]({{% githubMaster %}}/examples/src/python) directory of the [Heron repo]({{% githubMaster %}}):
+
+Topology | File | Description
+:--------|:-----|:-----------
+Word count | [`word_count_topology.py`]({{% githubMaster %}}/examples/src/python/word_count_topology.py) | The [`WordSpout`]({{% githubMaster %}}/examples/src/python/spout/word_spout.py) spout emits random words from a list, while the [`CountBolt`]({{% githubMaster %}}/examples/src/python/bolt/count_bolt.py) bolt counts the number of words that have been emitted.
+Multiple streams | [`multi_stream_topology.py`]({{% githubMaster %}}/examples/src/python/multi_stream_topology.py) | The [`MultiStreamSpout`]({{% githubMaster %}}/examples/src/python/spout/multi_stream_spout.py) emits multiple streams to downstream bolts.
+Half acking | [`half_acking_topology.py`]({{% githubMaster %}}/examples/src/python/half_acking_topology.py) | The [`HalfAckBolt`]({{% githubMaster %}}/examples/src/python/bolt/half_ack_bolt.py) acks only half of all received tuples.
+Custom grouping | [`custom_grouping_topology.py`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py) | The [`SampleCustomGrouping`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py#L26) class provides a custom field grouping.
+
+You can build the respective PEXs for these topologies using the following commands:
+
+```shell
+$ bazel build examples/src/python:word_count
+$ bazel build examples/src/python:multi_stream
+$ bazel build examples/src/python:half_acking
+$ bazel build examples/src/python:custom_grouping
+```
+
+All built PEXs will be stored in `bazel-bin/examples/src/python`. You can submit them to Heron like so:
+
+```shell
+$ heron submit local \
+  bazel-bin/examples/src/python/word_count.pex - \
+  WordCount
+$ heron submit local \
+  bazel-bin/examples/src/python/multi_stream.pex \
+  heron.examples.src.python.multi_stream_topology.MultiStream
+$ heron submit local \
+  bazel-bin/examples/src/python/half_acking.pex - \
+  HalfAcking
+$ heron submit local \
+  bazel-bin/examples/src/python/custom_grouping.pex \
+  heron.examples.src.python.custom_grouping_topology.CustomGrouping
+```
+
+By default, the `submit` command also activates topologies. To disable this behavior, set the `--deploy-deactivated` flag.
+
+## Bolts
+
+Bolts must implement the `Bolt` interface, which has the following methods.
+
+```python
+class MyBolt(Bolt):
+    def initialize(self, config, context): pass
+    def process(self, tup): pass
+```
+
+* The `initialize()` method is called when the bolt is first initialized and
+provides the bolt with the executing environment. It is equivalent to `prepare()`
+method of the [`IBolt`](/api/org/apache/heron/api/bolt/IBolt.html) interface in Java.
+Note that you should not override `__init__()` constructor of `Bolt` class
+for initialization of custom variables, since it is used internally by HeronInstance; instead,
+`initialize()` should be used to initialize any custom variables or connections to databases.
+
+* The `process()` method is called to process a single input `tup` of `HeronTuple` type. This method
+is equivalent to `execute()` method of `IBolt` interface in Java. You can use
+`self.emit()` method to emit the result, as described below.
+
+In addition, `BaseBolt` class provides you with the following methods.
+
+```python
+class BaseBolt(BaseComponent):
+    def emit(self, tup, stream="default", anchors=None, direct_task=None, need_task_ids=False): ...
+    def ack(self, tup): ...
+    def fail(self, tup): ...
+    def log(self, message, level=None): ...
+    @staticmethod
+    def is_tick(tup): ...
+    @classmethod
+    def spec(cls, name=None, inputs=None, par=1, config=None): ...
+```
+
+* The `emit()` method is used to emit a given `tup`, which can be a `list` or `tuple` of
+any python objects. Unlike the Java implementation, `OutputCollector`
+doesn't exist in the Python implementation.
+
+* The `ack()` method is used to indicate that processing of a tuple has succeeded.
+
+* The `fail()` method is used to indicate that processing of a tuple has failed.
+
+* The `is_tick()` method returns whether a given `tup` of `HeronTuple` type is a tick tuple.
+
+* The `log()` method is used to log an arbitrary message, and its outputs are redirected
+  to the log file of the component. It accepts an optional argument
+  which specifies the logging level. By default, its logging level is `info`.
+
+    **Warning:** due to internal issue, you should **NOT** output anything to
+    `sys.stdout` or `sys.stderr`; instead, you should use this method to log anything you want.
+
+* In order to declare the output fields of this bolt, you need to place
+a class attribute `outputs` as a list of `str` or `Stream`. Note that unlike Java,
+`declareOutputFields` does not exist in the Python implementation. Moreover, you can
+optionally specify the output fields via the `optional_outputs` argument of the `spec()` method.
+
+
+* You will use the `spec()` method to define a topology and specify the location
+of this bolt within the topology, as well as to give component-specific configurations.
+
+The following is an example implementation of a bolt in Python.
+
+```python
+from collections import Counter
+from heronpy.api.bolt.bolt import Bolt
+
+
+class CountBolt(Bolt):
+    outputs = ["word", "count"]
+
+    def initialize(self, config, context):
+        self.counter = Counter()
+
+    def process(self, tup):
+        word = tup.values[0]
+        self.counter[word] += 1
+        self.emit([word, self.counter[word]])
+```
+
+## Spouts
+
+To create a spout for a Heron topology, you need to subclass the [`Spout`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout) class, which has the following methods.
+
+```python
+class MySpout(Spout):
+    def initialize(self, config, context): pass
+    def next_tuple(self): pass
+    def ack(self, tup_id): pass
+    def fail(self, tup_id): pass
+    def activate(self): pass
+    def deactivate(self): pass
+    def close(self): pass
+```
+
+## `Spout` class methods
+
+The [`Spout`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout) class provides a number of methods that you should implement when subclassing.
+
+* The `initialize()` method is called when the spout is first initialized
+and provides the spout with the executing environment. It is equivalent to
+`open()` method of [`ISpout`](/api/org/apache/heron/api/spout/ISpout.html).
+Note that you should not override `__init__()` constructor of `Spout` class
+for initialization of custom variables, since it is used internally by HeronInstance; instead,
+`initialize()` should be used to initialize any custom variables or connections to databases.
+
+* The `next_tuple()` method is used to fetch tuples from input source. You can
+emit fetched tuples by calling `self.emit()`, as described below.
+
+* The `ack()` method is called when the `HeronTuple` with the `tup_id` emitted
+by this spout is successfully processed.
+
+* The `fail()` method is called when the `HeronTuple` with the `tup_id` emitted
+by this spout is not processed successfully.
+
+* The `activate()` method is called when the spout is asked to go back into
+the active state.
+
+* The `deactivate()` method is called when the spout is asked to enter the
+deactivated state.
+
+* The `close()` method is called when the spout is shut down. There is no
+guarantee that this method is called due to how the instance is killed.
+
+## `BaseSpout` class methods
+
+The `Spout` class inherits from the [`BaseSpout`](/api/python/spout/base_spout.m.html#heronpy.spout.base_spout.BaseSpout) class, which also provides you methods you can use in your spouts.
+
+```python
+class BaseSpout(BaseComponent):
+    def log(self, message, level=None): ...
+    def emit(self, tup, tup_id=None, stream="default", direct_task=None, need_task_ids=False): ...
+    @classmethod
+    def spec(cls, name=None, par=1, config=None): ...
+```
+
+* The `emit()` method is used to emit a given tuple, which can be a `list` or `tuple` of any Python objects. Unlike in the Java implementation, there is no `OutputCollector` in the Python implementation.
+
+* The `log()` method is used to log an arbitrary message, and its outputs are redirected to the log file of the component. It accepts an optional argument which specifies the logging level. By default, its logging level is `info`.
+
+    **Warning:** due to internal issue, you should **NOT** output anything to
+    `sys.stdout` or `sys.stderr`; instead, you should use this method to log anything you want.
+
+* In order to declare the output fields of this spout, you need to place
+a class attribute `outputs` as a list of `str` or `Stream`. Note that unlike Java,
+`declareOutputFields` does not exist in the Python implementation. Moreover, you can
+optionally specify the output fields via the `optional_outputs` argument of the `spec()` method.
+
+* You will use the `spec()` method to define a topology and specify the location
+of this spout within the topology, as well as to give component-specific configurations.
+
+## Example spout
+
+The following is an example implementation of a spout in Python.
+
+```python
+from itertools import cycle
+from heronpy.api.spout.spout import Spout
+
+
+class WordSpout(Spout):
+    outputs = ['word']
+
+    def initialize(self, config, context):
+        self.words = cycle(["hello", "world", "heron", "storm"])
+        self.log("Initializing WordSpout...")
+
+    def next_tuple(self):
+        word = next(self.words)
+        self.emit([word])
+```
+
+## Installing `heronpy`
+
+```shell
+$ pip install heronpy
+$ easy_install heronpy
+```
+
+Then you can include `heronpy` in your project files. Here's an example:
+
+```python
+from heronpy.api.bolt.bolt import Bolt
+from heronpy.api.spout.spout import Spout
+from heronpy.api.topology import Topology
+```
+
+## Writing topologies in Python
+
+Heron [topologies](heron-topologies-concepts) are networks of [spouts](../spouts) that pull data into a topology and [bolts](../bolts) that process that ingested data.
+
+> You can see how to create Python spouts in the [Implementing Python Spouts](../spouts) guide and how to create Python bolts in the [Implementing Python Bolts](../bolts) guide.
+
+Once you've defined spouts and bolts for a topology, you can then compose the topology in one of two ways:
+
+* You can use the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class inside of a main function.
+
+    Here's an example:
+
+    ```python
+    #!/usr/bin/env python
+    from heronpy.api.topology import TopologyBuilder
+
+
+    if __name__ == "__main__":
+        builder = TopologyBuilder("MyTopology")
+        # Add spouts and bolts
+        builder.build_and_submit()
+    ```
+
+* You can subclass the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class.
+
+    Here's an example:
+
+    ```python
+    from heronpy.api.stream import Grouping
+    from heronpy.api.topology import Topology
+
+
+    class MyTopology(Topology):
+        my_spout = WordSpout.spec(par=2)
+        my_bolt = CountBolt.spec(par=3, inputs={my_spout: Grouping.fields("word")})
+    ```
+
+## Defining topologies using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class
+
+If you create a Python topology using a [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder), you need to instantiate a `TopologyBuilder` inside of a standard Python main function, like this:
+
+```python
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("MyTopology")
+```
+
+Once you've created a `TopologyBuilder` object, you can add [bolts](../bolts) using the [`add_bolt`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_bolt) method and [spouts](../spouts) using the [`add_spout`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.add_spout) method. Here's an example:
+
+```python
+builder = TopologyBuilder("MyTopology")
+builder.add_bolt("my_bolt", CountBolt, par=3)
+builder.add_spout("my_spout", WordSpout, par=2)
+```
+
+Both the `add_bolt` and `add_spout` methods return the corresponding [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) object.
+
+The `add_bolt` method takes four arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt | |
+`bolt_cls` | class | The subclass of [`Bolt`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt) that defines this bolt | |
+`par` | `int` | The number of instances of this bolt in the topology | |
+`config` | `dict` | Specifies the configuration for this bolt | `None`
+
+The `add_spout` method takes three arguments and an optional `config` parameter:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout | |
+`spout_cls` | class | The subclass of [`Spout`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout) that defines this spout | |
+`par` | `int` | The number of instances of this spout in the topology | |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+The following is an example implementation of a word count topology in Python that uses [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder).
+
+```python
+from your_spout import WordSpout
+from your_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    builder = TopologyBuilder("WordCountTopology")
+    # piece together the topology
+    word_spout = builder.add_spout("word_spout", WordSpout, par=2)
+    count_bolt = builder.add_bolt("count_bolt", CountBolt, par=2, inputs={word_spout: Grouping.fields("word")})
+    # submit the topology
+    builder.build_and_submit()
+```
+
+Note that arguments to the main method can be passed by providing them in the
+`heron submit` command.
+
+### Topology-wide configuration
+
+If you're building a Python topology using a `TopologyBuilder`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api import api_constants
+from heronpy.api.topology import TopologyBuilder
+
+
+if __name__ == "__main__":
+    topology_config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    builder = TopologyBuilder("MyTopology")
+    builder.set_config(topology_config)
+    # Add bolts and spouts, etc.
+```
+
+### Launching the topology
+
+If you want to [submit](../../../operators/heron-cli#submitting-a-topology) Python topologies to a Heron cluster, they need to be packaged as a [PEX](https://pex.readthedocs.io/en/stable/whatispex.html) file. In order to produce PEX files, we recommend using a build tool like [Pants](http://www.pantsbuild.org/python_readme.html) or [Bazel](https://github.com/benley/bazel_rules_pex).
+
+If you defined your topology using the [`TopologyBuilder`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder) class and built a `word_count.pex` file for that topology in the `~/topology` folder, you can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/word_count.pex \
+  - # No class specified
+```
+
+Note the `-` in this submission command. If you define a topology by subclassing `TopologyBuilder` you do not need to instruct Heron where your main method is located.
+
+> #### Example topologies buildable as PEXs
+> * See [this repo](https://github.com/streamlio/pants-dev-environment) for an example of a Heron topology written in Python and deployable as a Pants-packaged PEX.
+> * See [this repo](https://github.com/streamlio/bazel-dev-environment) for an example of a Heron topology written in Python and deployable as a Bazel-packaged PEX.
+
+## Defining a topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class
+
+If you create a Python topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class, you need to create a new topology class, like this:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class MyTopology(Topology):
+    my_spout = WordSpout.spec(par=2)
+    my_bolt_inputs = {my_spout: Grouping.fields("word")}
+    my_bolt = CountBolt.spec(par=3, inputs=my_bolt_inputs)
+```
+
+All you need to do is place [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s as the class attributes
+of your topology class, which are returned by the `spec()` method of
+your spout or bolt class. You do *not* need to run a `build` method or anything like that; the `Topology` class will automatically detect which spouts and bolts are included in the topology.
+
+> If you use this method to define a new Python topology, you do *not* need to have a main function.
+
+For bolts, the [`spec`](/api/python/bolt/bolt.m.html#heronpy.bolt.bolt.Bolt.spec) method takes three optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this bolt or `None` if you want to use the variable name of the return `HeronComponentSpec` as the unique identifier for this bolt | |
+`par` | `int` | The number of instances of this bolt in the topology | |
+`config` | `dict` | Specifies the configuration for this bolt | `None`
+
+
+For spouts, the [`spec`](/api/python/spout/spout.m.html#heronpy.spout.spout.Spout.spec) method takes four optional arguments:
+
+Argument | Data type | Description | Default
+:--------|:----------|:------------|:-------
+`name` | `str` | The unique identifier assigned to this spout or `None` if you want to use the variable name of the return `HeronComponentSpec` as the unique identifier for this spout | `None` |
+`inputs` | `dict` or `list` | Either a `dict` mapping from [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec) to [`Grouping`](/api/python/stream.m.html#heronpy.stream.Grouping) *or* a list of [`HeronComponentSpec`](/api/python/component/component_spec.m.html#heronpy.component.component_spec.HeronComponentSpec)s, in which case the [`shuffle`](/api/python/stream.m.html#heronpy.stream.Grouping.SHUFFLE) grouping is used
+`par` | `int` | The number of instances of this spout in the topology | `1` |
+`config` | `dict` | Specifies the configuration for this spout | `None`
+
+### Example
+
+Here's an example topology definition with one spout and one bolt:
+
+```python
+from my_spout import WordSpout
+from my_bolt import CountBolt
+
+from heronpy.api.stream import Grouping
+from heronpy.api.topology import Topology
+
+
+class WordCount(Topology):
+    word_spout = WordSpout.spec(par=2)
+    count_bolt = CountBolt.spec(par=2, inputs={word_spout: Grouping.fields("word")})
+```
+
+### Launching
+
+If you defined your topology by subclassing the [`Topology`](/api/python/topology.m.html#heronpy.topology.Topology) class,
+your main Python file should *not* contain a main method. You will, however, need to instruct Heron which class contains your topology definition.
+
+Let's say that you've defined a topology by subclassing `Topology` and built a PEX stored in `~/topology/dist/word_count.pex`. The class containing your topology definition is `topology.word_count.WordCount`. You can submit the topology to a cluster called `local` like this:
+
+```bash
+$ heron submit local \
+  ~/topology/dist/word_count.pex \
+  topology.word_count.WordCount \ # Specifies the topology class definition
+  WordCountTopology
+```
+
+### Topology-wide configuration
+
+If you're building a Python topology by subclassing `Topology`, you can specify configuration for the topology using the [`set_config`](/api/python/topology.m.html#heronpy.topology.TopologyBuilder.set_config) method. A topology's config is a `dict` in which the keys are a series of constants from the [`api_constants`](/api/python/api_constants.m.html) module and values are configuration values for those parameters.
+
+Here's an example:
+
+```python
+from heronpy.api.topology import Topology
+from heronpy.api import api_constants
+
+
+class MyTopology(Topology):
+    config = {
+        api_constants.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS: True
+    }
+    # Add bolts and spouts, etc.
+```
+
+## Multiple streams
+
+To specify that a component has multiple output streams, instead of using a list of
+strings for `outputs`, you can specify a list of `Stream` objects, in the following manner.
+
+```python
+class MultiStreamSpout(Spout):
+    outputs = [
+        Stream(fields=["normal", "fields"], name="default"),
+        Stream(fields=["error_message"], name="error_stream"),
+    ]
+```
+
+To select one of these streams as the input for your bolt, you can simply
+use `[]` to specify the stream you want. Without any stream specified, the `default`
+stream will be used.
+
+```python
+class MultiStreamTopology(Topology):
+    spout = MultiStreamSpout.spec()
+    error_bolt = ErrorBolt.spec(inputs={spout["error_stream"]: Grouping.LOWEST})
+    consume_bolt = ConsumeBolt.spec(inputs={spout: Grouping.SHUFFLE})
+```
+
+## Declaring output fields using the `spec()` method
+
+In Python topologies, the output fields of your spouts and bolts
+need to be declared by placing `outputs` class attributes, as there is
+no `declareOutputFields()` method. `heronpy` enables you to dynamically declare output fields as a list using the
+`optional_outputs` argument in the `spec()` method.
+
+This is useful in a situation like below.
+
+```python
+class IdentityBolt(Bolt):
+    # Statically declaring output fields is not allowed
+    def process(self, tup):
+        emit([tup.values])
+
+
+class DynamicOutputField(Topology):
+    spout = WordSpout.spec()
+    bolt = IdentityBolt.spec(inputs={spout: Grouping.ALL}, optional_outputs=["word"])
+```
+
+You can also declare outputs in the `add_spout()` and the `add_bolt()`
+method for the `TopologyBuilder` in the same way.
+
+## Example topologies
+
+There are a number of example topologies that you can peruse in the [`examples/src/python`]({{% githubMaster %}}/examples/src/python) directory of the [Heron repo]({{% githubMaster %}}):
+
+Topology | File | Description
+:--------|:-----|:-----------
+Word count | [`word_count_topology.py`]({{% githubMaster %}}/examples/src/python/word_count_topology.py) | The [`WordSpout`]({{% githubMaster %}}/examples/src/python/spout/word_spout.py) spout emits random words from a list, while the [`CountBolt`]({{% githubMaster %}}/examples/src/python/bolt/count_bolt.py) bolt counts the number of words that have been emitted.
+Multiple streams | [`multi_stream_topology.py`]({{% githubMaster %}}/examples/src/python/multi_stream_topology.py) | The [`MultiStreamSpout`]({{% githubMaster %}}/examples/src/python/spout/multi_stream_spout.py) emits multiple streams to downstream bolts.
+Half acking | [`half_acking_topology.py`]({{% githubMaster %}}/examples/src/python/half_acking_topology.py) | The [`HalfAckBolt`]({{% githubMaster %}}/examples/src/python/bolt/half_ack_bolt.py) acks only half of all received tuples.
+Custom grouping | [`custom_grouping_topology.py`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py) | The [`SampleCustomGrouping`]({{% githubMaster %}}/examples/src/python/custom_grouping_topology.py#L26) class provides a custom field grouping.
+
+You can build the respective PEXs for these topologies using the following commands:
+
+```shell
+$ bazel build examples/src/python:word_count
+$ bazel build examples/src/python:multi_stream
+$ bazel build examples/src/python:half_acking
+$ bazel build examples/src/python:custom_grouping
+```
+
+All built PEXs will be stored in `bazel-bin/examples/src/python`. You can submit them to Heron like so:
+
+```shell
+$ heron submit local \
+  bazel-bin/examples/src/python/word_count.pex - \
+  WordCount
+$ heron submit local \
+  bazel-bin/examples/src/python/multi_stream.pex \
+  heron.examples.src.python.multi_stream_topology.MultiStream
+$ heron submit local \
+  bazel-bin/examples/src/python/half_acking.pex - \
+  HalfAcking
+$ heron submit local \
+  bazel-bin/examples/src/python/custom_grouping.pex \
+  heron.examples.src.python.custom_grouping_topology.CustomGrouping
+```
+
+By default, the `submit` command also activates topologies. To disable this behavior, set the `--deploy-deactivated` flag.
diff --git a/website2/website/versioned_docs/version-0.20.0/uploaders-amazon-s3.md b/website2/website/versioned_docs/version-0.20.0/uploaders-amazon-s3.md
new file mode 100644
index 0000000..0778042
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/uploaders-amazon-s3.md
@@ -0,0 +1,65 @@
+---
+id: version-0.20.0-uploaders-amazon-s3
+title: Amazon S3
+sidebar_label: Amazon S3
+original_id: uploaders-amazon-s3
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+If you are running in Amazon AWS, Heron provides out of the box uploader for S3,
+the object storage. S3 uploader is useful when the topologies run in a distributed
+cluster of Amazon EC2 compute instances. Since S3 replicates the data, it provides
+a scalable mechanism for distributing the user topology jars.
+
+### S3 Uploader Configuration
+
+You can make Heron use S3 uploader by modifying the `uploader.yaml` config file specific
+for the Heron cluster. You'll need to specify the following for each cluster:
+
+* `heron.class.uploader` --- Indicate the uploader class to be loaded. You should set this
+to `org.apache.heron.uploader.s3.S3Uploader`
+
+* `heron.uploader.s3.bucket` --- Specifies the S3 bucket where the topology jar should be
+uploaded.
+
+* `heron.uploader.s3.access_key` --- Specify the access key of the AWS account that has
+write access to the bucket
+
+* `heron.uploader.s3.secret_key` --- Specify the secret access of the AWS account that has
+write access to the bucket
+
+### Example S3 Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for a S3 uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader: org.apache.heron.uploader.s3.S3Uploader
+
+# S3 region bucket is created.  Must specify.
+heron.uploader.s3.region: us-east-1
+
+# S3 bucket to put the jar file into
+heron.uploader.s3.bucket: heron-topologies-company-com
+
+# AWS access key
+heron.uploader.s3.access_key: access_key
+
+# AWS secret access key
+heron.uploader.s3.secret_key: secret_access_key
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/uploaders-hdfs.md b/website2/website/versioned_docs/version-0.20.0/uploaders-hdfs.md
new file mode 100644
index 0000000..f221c13
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/uploaders-hdfs.md
@@ -0,0 +1,63 @@
+---
+id: version-0.20.0-uploaders-hdfs
+title: HDFS
+sidebar_label: HDFS
+original_id: uploaders-hdfs
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+With Heron, you have the option to use HDFS as stable storage for user submitted
+topology jars. Since HDFS replicates the data, it provides a scalable
+mechanism for distributing the user topology jars. This is desirable when
+the job runs in a distributed cluster and requires several hundred containers to
+run.
+
+There are a few things you should be aware of when using the HDFS uploader:
+
+* It requires the hadoop client to be installed on the machine where the topology is being submitted
+
+### HDFS Uploader Configuration
+
+You can make Heron use HDFS uploader by modifying the `uploader.yaml` config file specific
+for the Heron cluster. You'll need to specify the following for each cluster:
+
+* `heron.class.uploader` --- Indicate the uploader class to be loaded. You should set this
+to `org.apache.heron.uploader.hdfs.HdfsUploader`
+
+* `heron.uploader.hdfs.config.directory` --- Specifies the directory of the config files
+for hadoop. This is used by hadoop client to upload the topology jar
+
+* `heron.uploader.hdfs.topologies.directory.uri` --- URI of the directory name for uploading
+topology jars. The name of the directory should be unique per cluster, if they are sharing the
+storage. In those cases, you could use the Heron environment variable `${CLUSTER}` that will be
+substituted by cluster name for distinction.
+
+### Example HDFS Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for a HDFS uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader: org.apache.heron.uploader.hdfs.HdfsUploader
+
+# Directory of config files for hadoop client to read from
+heron.uploader.hdfs.config.directory: /home/hadoop/hadoop
+
+# name of the directory to upload topologies for HDFS uploader
+heron.uploader.hdfs.topologies.directory.uri: hdfs://heron/topologies/${CLUSTER}
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/uploaders-http.md b/website2/website/versioned_docs/version-0.20.0/uploaders-http.md
new file mode 100644
index 0000000..5871627
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/uploaders-http.md
@@ -0,0 +1,65 @@
+---
+id: version-0.20.0-uploaders-http
+title: HTTP
+sidebar_label: HTTP
+original_id: uploaders-http
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+When a topology is submitted to Heron, the topology jars will be uploaded to a stable location. 
+The submitter will provide this location to the scheduler and it will pass it to each 
+container. Heron can use a Http uploader to upload topology jar distribution to a stable 
+Http location.
+
+### Http Uploader Configuration
+
+You can make Heron aware of the Http uploader by modifying the `uploader.yaml` config file specific 
+for the Heron cluster. You’ll need to specify the following for each cluster:
+
+* `heron.class.uploader` — Indicate the uploader class to be loaded. You should set this 
+to `org.apache.heron.uploader.http.HttpUploader`
+
+* `heron.uploader.http.uri` — Provides the name of the URI where the topology jar should be 
+uploaded.
+
+### Example Http Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for a Http uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader: org.apache.heron.uploader.http.HttpUploader
+
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+Also Heron's API server can be used as a file server for the HttpUploader to upload topology 
+package/jars as follows:
+
+```
+${HOME}/.heron/bin/heron-apiserver 
+--cluster standalone 
+--base-template standalone 
+-D heron.statemgr.connection.string=<zookeeper_host:zookeeper_port> 
+-D heron.nomad.scheduler.uri=<scheduler_uri> 
+-D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader
+--verbose
+```
+
+Also, the HTTP server that topology packages/jars are uploaded to needs to return a URI upon upload
+so that Heron will know the location to download from in the future.
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/uploaders-local-fs.md b/website2/website/versioned_docs/version-0.20.0/uploaders-local-fs.md
new file mode 100644
index 0000000..c32ce7f
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/uploaders-local-fs.md
@@ -0,0 +1,61 @@
+---
+id: version-0.20.0-uploaders-local-fs
+title: Local File System
+sidebar_label: Local File System
+original_id: uploaders-local-fs
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+When you submit a topology to Heron, the topology jars will be uploaded to a
+stable location. The submitter will provide this location to the scheduler and
+it will pass it to the executor in each container. Heron can use a local file
+system as a stable storage for topology jar distribution.
+
+There are a few things you should be aware of when using the local file system uploader:
+
+* Local file system uploader is mainly used in conjunction with local scheduler.
+
+* It is ideal, if you want to run Heron in a single server, laptop or an edge device.
+
+* Useful for Heron developers for local testing of the components.
+
+### Local File System Uploader Configuration
+
+You can make Heron aware of the local file system uploader by modifying the
+`uploader.yaml` config file specific for the Heron cluster. You'll need to specify
+the following for each cluster:
+
+* `heron.class.uploader` --- Indicate the uploader class to be loaded. You should set this
+to `org.apache.heron.uploader.localfs.LocalFileSystemUploader`
+
+* `heron.uploader.localfs.file.system.directory` --- Provides the name of the directory where
+the topology jar should be uploaded. The name of the directory should be unique per cluster.
+You could use the Heron environment variable `${CLUSTER}` that will be substituted by cluster
+name. If this is not set, `${HOME}/.herondata/repository/${CLUSTER}/${ROLE}/${TOPOLOGY}` will be set as default.
+
+### Example Local File System Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for a local file system uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader: org.apache.heron.uploader.localfs.LocalFileSystemUploader
+
+# name of the directory to upload topologies for local file system uploader
+heron.uploader.localfs.file.system.directory: ${HOME}/.herondata/topologies/${CLUSTER}
+```
diff --git a/website2/website/versioned_docs/version-0.20.0/uploaders-scp.md b/website2/website/versioned_docs/version-0.20.0/uploaders-scp.md
new file mode 100644
index 0000000..140a59b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/uploaders-scp.md
@@ -0,0 +1,88 @@
+---
+id: version-0.20.0-uploaders-scp
+title: Secure Copy (SCP)
+sidebar_label: Secure Copy (SCP)
+original_id: uploaders-scp
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+For small clusters with simple setups that don't have an HDFS-like file system, the SCP uploader
+can be used to manage the package files. This uploader uses the `scp` linux command to upload the
+files to a node accessible by all the worker nodes in the cluster. Then a scheduler like Aurora can
+use the `scp` command again to download the content to each of the worker machines.
+
+SCP Uploader requirements
+
+* SCP uploader requires the `scp` and `ssh` linux utilities installed. Also it is better to have
+passwordless ssh configured between the shared node and worker nodes in the cluster.
+
+### SCP Uploader Configuration
+
+You can make Heron use SCP uploader by modifying the `uploader.yaml` config file specific
+for the Heron cluster. You'll need to specify the following for each cluster:
+
+* `heron.class.uploader` --- Indicate the uploader class to be loaded. You should set this to
+org.apache.heron.uploader.scp.ScpUploader
+* `heron.uploader.scp.command.options` --- Part of the SCP command where you specify custom options,
+e.g. "-i ~/.ssh/id_rsa"
+* `heron.uploader.scp.command.connection` --- The user name and host pair to be used by the SCP command,
+e.g. "user@host"
+* `heron.uploader.ssh.command.options` --- Part of the SSH command where you specify custom options,
+e.g. "-i ~/.ssh/id_rsa"
+* `heron.uploader.ssh.command.connection` --- The user name and host pair to be used by the SSH command,
+e.g. "user@host"
+* `heron.uploader.scp.dir.path` --- The directory to be used for uploading the package.
+
+### Example SCP Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for a SCP uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:         org.apache.heron.uploader.scp.ScpUploader
+# This is the scp command options that will be used by the uploader, this can be used to
+# specify custom options such as the location of ssh keys.
+heron.uploader.scp.command.options:   "-i ~/.ssh/id_rsa"
+# The scp connection string sets the remote user name and host used by the uploader.
+heron.uploader.scp.command.connection:   "user@host"
+
+# The ssh command options that will be used when connecting to the uploading host to execute
+# command such as delete files, make directories.
+heron.uploader.ssh.command.options:   "-i ~/.ssh/id_rsa"
+# The ssh connection string sets the remote user name and host used by the uploader.
+heron.uploader.ssh.command.connection:   "user@host"
+
+# the directory where the file will be uploaded, make sure the user has the necessary permissions
+# to upload the file here.
+heron.uploader.scp.dir.path:   ${HOME}/heron/repository/${CLUSTER}/${ROLE}/${TOPOLOGY}
+```
+
+The uploader will use SSH to create the entire directory structure specified in `heron.uploader.scp.dir.path` 
+by running `mkdir -p` before using SCP to upload the topology package.
+
+
+Below is an example `scp` command configuration in the `heron.aurora` file. The cmdline is run by every node
+in the cluster to **download** the topology package.
+
+```bash
+fetch_user_package = Process(
+  name = 'fetch_user_package',
+  cmdline = 'scp -i ~/.ssh/id_rsa user@host:%s %s && tar zxf %s' % (heron_topology_jar_uri, \
+      topology_package_file, topology_package_file)
+)
+```
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.0/user-manuals-heron-cli.md b/website2/website/versioned_docs/version-0.20.0/user-manuals-heron-cli.md
new file mode 100644
index 0000000..5c0f714
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.0/user-manuals-heron-cli.md
@@ -0,0 +1,388 @@
+---
+id: version-0.20.0-user-manuals-heron-cli
+title: Managing Topologies with Heron CLI
+sidebar_label: Heron Client
+original_id: user-manuals-heron-cli
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+The **Heron CLI** is used to manage every aspect of the
+[topology lifecycle](heron-topology-concepts#topology-lifecycle).
+
+## Deploying the `heron` CLI Executable
+
+To use the `heron` CLI, download the `heron-install` script for your platform from the
+[release binaries](https://github.com/apache/incubator-heron/releases) and run the
+installation script. For example, if you have downloaded the version `0.17.6`,
+you invoke the installation script as follows:
+
+```bash
+$ chmod +x heron-install-0.17.6-darwin.sh
+$ ./heron-install-0.17.6-darwin.sh --user
+Heron client installer
+----------------------
+
+Uncompressing......
+
+Heron is now installed!
+
+Make sure you have "/Users/$USER/bin" in your path.
+
+See http://heronstreaming.io/docs/getting-started.html on how to use Heron!
+
+....
+```
+
+Alternatively, generate a full [Heron release](compiling-overview) and
+distribute the resulting `heron` CLI to all machines used to manage topologies.
+
+### Common CLI Args
+
+All topology management commands (`submit`, `activate`, `deactivate`,
+`restart`, `update` and `kill`) take the following required arguments:
+
+* `cluster` --- The name of the cluster where the command needs to be executed.
+
+* `role` --- This represents the user or the group depending on deployment.
+  If not provided, it defaults to the unix user.
+
+* `env` --- This is a tag for including additional information; e.g., a
+   topology can be tagged as PROD or DEVEL to indicate whether it is in production
+   or development. If `env` is not provided, it is given the value `default`
+
+`cluster`, `role` and `env` are specified as a single argument in the form of
+`cluster/role/env` (e.g. `local/ads/PROD`) to refer to the cluster `local` with
+role `ads` and the environment `PROD`. If you just want to specify `cluster`, the
+argument will be simply `local`.
+
+### Optional CLI Flags
+
+CLI supports a common set of optional flags for all topology management commands
+(`submit`, `activate`, `deactivate`, `restart`, `update` and `kill`):
+
+* `--config-path` --- Every heron cluster must provide a few configuration
+  files that are kept under a directory named after the cluster. By default,
+  when a cluster is provided in the command, it searches the `conf` directory
+  for a directory with the cluster name. This flag enables you to specify a
+  non standard directory to search for the cluster directory.
+
+* `--config-property` --- Heron supports several configuration parameters
+  that can be overridden. These parameters are specified in the form of `key=value`.
+
+* `--verbose` --- When this flag is provided, `heron` CLI prints logs
+  that provide detailed information about the execution.
+
+Below is an example topology management command that uses one of these flags:
+
+```bash
+$ heron activate --config-path ~/heronclusters devcluster/ads/PROD AckingTopology
+```
+
+## Submitting a Topology
+
+To run a topology in a Heron cluster, submit it using the `submit` command.
+Topologies can be submitted in either an activated (default) or deactivated state
+(more on [activation](#activating-a-topology) and [deactivation](#deactivating-a-topology)
+below).
+
+Below is the basic syntax:
+
+```bash
+$ heron help submit
+usage: heron submit [options] cluster/[role]/[env] topology-file-name topology-class-name [topology-args]
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and env to run topology
+  topology-file-name    Topology jar/tar/zip file
+  topology-class-name   Topology class name
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "/Users/$USER/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --deploy-deactivated (a boolean; default: "false")
+  --topology-main-jvm-property Define a system property to pass to java -D when running main.
+  --verbose (a boolean; default: "false")
+```
+
+Arguments of the `submit` command:
+
+* **cluster/[role]/[env]** --- The cluster where topology needs to be submitted,
+  optionally taking the role and environment. For example,`local/ads/PROD` or just `local`
+
+* **topology-file-name** --- The path of the file in which you've packaged the
+  topology's code. For Java topologies this will be a `.jar` file; for
+  topologies in other languages (not yet supported), this could be a
+  `.tar` file. For example, `/path/to/topology/my-topology.jar`
+
+* **topology-class-name** --- The name of the class containing the `main` function
+  for the topology. For example, `com.example.topologies.MyTopology`
+
+* **topology-args** (optional) --- Arguments specific to the topology.
+  You will need to supply additional args only if the `main` function for your
+  topology requires them.
+
+### Example Topology Submission Command
+
+Below is an example command that submits a topology to a cluster named `devcluster`
+with a main class named `com.example.topologies.MyTopology` packaged in `my-topology.jar`,
+along with the optional `--config-path` where the config for `devcluster` can be found:
+
+```bash
+$ heron submit --config-path ~/heronclusters devcluster /path/to/topology/my-topology.jar \
+    com.example.topologies.MyTopology my-topology
+```
+
+### Other Topology Submission Options
+
+| Flag                           | Meaning                                                                 |
+|:-------------------------------|:------------------------------------------------------------------------|
+| `--deploy-deactivated`         | If set, the topology is deployed in a deactivated state.                |
+| `--topology-main-jvm-property` | Defines a system property to pass to java -D when running topology main |
+
+
+## Activating a Topology
+
+Topologies are submitted to the cluster in the activated state by default. To
+activate a deactivated topology use the `activate` command. Below is the basic
+syntax:
+
+```bash
+$ heron help activate
+usage: heron activate [options] cluster/[role]/[env] topology-name
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and env to run topology
+  topology-name         Name of the topology
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "/Users/$USER/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+```
+
+Arguments of the `activate` command:
+
+* **cluster/[role]/[env]** --- The cluster where topology needs to be submitted,
+  optionally taking the role and environment. For example, `local/ads/PROD` or just `local`
+
+* **topology-name**  --- The name of the already-submitted topology that you'd
+  like to activate.
+
+### Example Topology Activation Command
+
+```bash
+$ heron activate local/ads/PROD my-topology
+```
+
+## Deactivating a Topology
+
+You can deactivate a running topology at any time using the `deactivate`
+command. Here's the basic syntax:
+
+```bash
+$ heron help deactivate
+usage: heron deactivate [options] cluster/[role]/[env] topology-name
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and env to run topology
+  topology-name         Name of the topology
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "/Users/kramasamy/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --verbose (a boolean; default: "false")
+
+```
+
+Arguments of the `deactivate` command:
+
+* **cluster/[role]/[env]** --- The cluster where topology needs to be submitted,
+  optionally taking the role and environment. For example, `local/ads/PROD` or just `local`
+
+* **topology-name** --- The name of the topology that you'd like to deactivate.
+
+## Restarting a Topology
+
+You can restart a deactivated topology using the `restart` command (assuming
+that the topology has not yet been killed, i.e. removed from the cluster).
+
+```bash
+$ heron help restart
+usage: heron restart [options] cluster/[role]/[env] topology-name [container-id]
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and env to run topology
+  topology-name         Name of the topology
+  container-id          Identifier of the container to be restarted
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "/Users/kramasamy/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --verbose (a boolean; default: "false")
+```
+
+Arguments of the `restart` command:
+
+* **cluster/[role]/[env]** --- The cluster where topology needs to be submitted,
+  optionally taking the role and environment. For example, `local/ads/PROD` or just `local`
+
+* **topology-name** --- The name of the topology that you'd like to restart.
+
+* **container-id** (optional) --- This enables you to specify the container ID to be
+  restarted if you want to restart only a specific container of the topology.
+
+### Example Topology Restart Command
+
+```bash
+$ heron restart local/ads/PROD my-topology
+```
+
+## Updating a Topology
+
+You can update the parallelism of any of the components of a deployed
+topology using the `update` command.
+
+```bash
+$ heron help update
+usage: heron update [options] cluster/[role]/[env] <topology-name> --component-parallelism <name:value>
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and environment to run topology
+  topology-name         Name of the topology
+
+Optional arguments:
+  --component-parallelism COMPONENT_PARALLELISM
+                        Component name and the new parallelism value colon-
+                        delimited: [component_name]:[parallelism]
+  --config-path (a string; path to cluster config; default: "/Users/billg/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --verbose (a boolean; default: "false")
+```
+
+Arguments of the `update` command include **cluster/[role]/[env]** and
+**topology-name** as well as:
+
+* **--component-parallelism** --- This argument can be included multiple
+times to change the parallelism of components in the deployed topology.
+
+### Example Topology Update Command
+
+```bash
+$ heron update local/ads/PROD my-topology \
+  --component-parallelism=my-spout:2 \
+  --component-parallelism=my-bolt:4
+```
+
+## Killing a Topology
+
+If you've submitted a topology to your Heron cluster and would like to remove
+knowledge of the topology entirely, you can remove it using the `kill` command.
+Here's the basic syntax:
+
+```bash
+$ heron kill <killer-overrides> <topology>
+```
+
+Arguments of the `kill` command:
+
+* **cluster/[role]/[env]** --- The cluster where topology needs to be submitted,
+  optionally taking the role and environment.  For example, `local/ads/PROD` or just
+  `local`
+
+* **topology-name** --- The name of the topology that you'd like to kill.
+
+### Example Topology Kill Command
+
+```bash
+$ heron kill local my-topology
+```
+
+## Heron CLI configuration
+
+When using the Heron CLI tool to interact with Heron clusters, there are two ways to provide configuration for the tool:
+
+* Via command-line flags, such as `--service-url`
+* Using the `heron config` interface, which enables you to set, unset, and list configs in the local filesystem
+
+### Available parameters
+
+The following parameters can currently be set using the `heron config` interface:
+
+Parameter | Description | Corresponding CLI flag
+:---------|:------------|:----------------------
+`service_url` | The service URL for the Heron cluster | `--service-url`
+
+### Set configuration
+
+You can set a config using the `set` command. Here's an example:
+
+```bash
+$ heron config us-west-staging set service_url http://us-west.staging.example.com:9000
+```
+
+### Unset configuration
+
+You can remove a parameter using the `unset` command. Here's an example:
+
+```bash
+$ heron config apac-australia unset service_url
+```
+
+### List configuration
+
+You can list all of the CLI configs for a Heron cluster using the `list` command. This will return the configs as a list of `parameter = value` pairs. Here's an example:
+
+```bash
+$ heron config local list
+service_url = http://localhost:9000
+```
+
+### Configuration example
+
+Let's say that you need to interact with a Heron cluster called `apac-japan-staging` which has a service
+URL of http://apac-japan.staging.example.com:9000. If you specified the service URL via CLI flags, you'd need
+to set the flag every time you perform an operation involving that cluster:
+
+```bash
+$ heron deactivate apac-japan-staging MyTopology \
+  --service-