Merge branch 'release/slider-0.40'
diff --git a/.gitignore b/.gitignore
index 9dbb730..6e8763e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,17 +7,9 @@
 .classpath
 .project
 .settings
-/target
+target/
 /conf
-/slider-core/target
 /slider-core/src/test/resources/slider-test.xml
-/slider-agent/target
-/slider-funtest/target
-/slider-assembly/target
-/slider-providers/hbase/slider-hbase-provider/target
-/slider-providers/hbase/hbase-funtests/target
-/slider-providers/accumulo/slider-accumulo-provider/target
-/slider-providers/accumulo/accumulo-funtests/target
 /test-configs
 release.properties
 *.backup
diff --git a/LICENSE.txt b/LICENSE
similarity index 100%
rename from LICENSE.txt
rename to LICENSE
diff --git a/NOTICE.txt b/NOTICE
similarity index 69%
rename from NOTICE.txt
rename to NOTICE
index 86ac395..9b6f594 100644
--- a/NOTICE.txt
+++ b/NOTICE
@@ -1,5 +1,5 @@
 Apache Slider
-Copyright 2007-2014 The Apache Software Foundation
+Copyright 2014 The Apache Software Foundation
 
 This product includes software developed at The Apache Software
 Foundation (http://www.apache.org/).
diff --git a/app-packages/accumulo-v1_5/README.txt b/app-packages/accumulo-v1_5/README.txt
deleted file mode 100644
index c184b0b..0000000
--- a/app-packages/accumulo-v1_5/README.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-How to create a Slider package?
-
-Replace the placeholder tarball for Accumulo.
-  cp ~/Downloads/accumulo-1.5.1-bin.tar.gz package/files/
-  rm package/files/accumulo-1.5.1-bin.tar.gz.REPLACE
-
-Create a zip package at the root of the package (<slider enlistment>/app-packages/accumulo-v1_5/) 
-  zip -r accumulo_v151.zip .
-
-Verify the content using  
-  unzip -l "$@" accumulo_v151.zip
-
-While appConfig.json and resources.json are not required for the package they work
-well as the default configuration for Slider apps. So its advisable that when you
-create an application package for Slider, include sample/default resources.json and
-appConfig.json for a minimal Yarn cluster.
diff --git a/app-packages/accumulo-v1_5/metainfo.xml b/app-packages/accumulo-v1_5/metainfo.xml
deleted file mode 100644
index 79bef1d..0000000
--- a/app-packages/accumulo-v1_5/metainfo.xml
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ACCUMULO</name>
-      <comment>
-        The Apache Accumulo sorted, distributed key/value store is a robust,
-        scalable, high performance data storage system that features cell-based
-        access control and customizable server-side processing. It is based on
-        Google's BigTable design and is built on top of Apache Hadoop,
-        Zookeeper, and Thrift.
-        Requirements:
-        1. Ensure parent dir for path (accumulo-site/instance.dfs.dir) is accessible to the App owner.
-      </comment>
-      <version>1.5.1</version>
-      <exportGroups>
-        <exportGroup>
-          <name>QuickLinks</name>
-          <exports>
-            <export>
-              <name>org.apache.slider.monitor</name>
-              <value>http://${ACCUMULO_MONITOR_HOST}:${site.accumulo-site.monitor.port.client}</value>
-            </export>
-            <export>
-              <name>org.apache.slider.jmx</name>
-              <value>http://${ACCUMULO_MONITOR_HOST}:${site.accumulo-site.monitor.port.client}/xml</value>
-            </export>
-          </exports>
-        </exportGroup>
-      </exportGroups>
-      <commandOrders>
-        <commandOrder>
-          <command>ACCUMULO_MASTER-START</command>
-          <requires>ACCUMULO_MONITOR-INSTALLED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>ACCUMULO_TSERVER-START</command>
-          <requires>ACCUMULO_MASTER-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>ACCUMULO_MONITOR-START</command>
-          <requires>ACCUMULO_MASTER-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>ACCUMULO_GC-START</command>
-          <requires>ACCUMULO_MASTER-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>ACCUMULO_TRACER-START</command>
-          <requires>ACCUMULO_MASTER-STARTED</requires>
-        </commandOrder>
-      </commandOrders>
-      <components>
-        <component>
-          <name>ACCUMULO_MASTER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/accumulo_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ACCUMULO_MONITOR</name>
-          <category>MASTER</category>
-          <publishConfig>true</publishConfig>
-          <commandScript>
-            <script>scripts/accumulo_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ACCUMULO_GC</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/accumulo_gc.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ACCUMULO_TRACER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/accumulo_tracer.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ACCUMULO_TSERVER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/accumulo_tserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ACCUMULO_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/accumulo_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/accumulo-1.5.1-bin.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>
diff --git a/app-packages/accumulo-v1_5/package/files/accumulo-1.5.1-bin.tar.gz.REPLACE b/app-packages/accumulo-v1_5/package/files/accumulo-1.5.1-bin.tar.gz.REPLACE
deleted file mode 100644
index ae1e83e..0000000
--- a/app-packages/accumulo-v1_5/package/files/accumulo-1.5.1-bin.tar.gz.REPLACE
+++ /dev/null
@@ -1,14 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_script.py b/app-packages/accumulo-v1_5/package/scripts/accumulo_script.py
deleted file mode 100644
index 2af7a1a..0000000
--- a/app-packages/accumulo-v1_5/package/scripts/accumulo_script.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from accumulo_configuration import setup_conf_dir
-from accumulo_service import accumulo_service
-
-
-class AccumuloScript(Script):
-  def __init__(self, component):
-    self.component = component
-
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env):
-    setup_conf_dir(name=self.component)
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    if self.component == 'master':
-      Execute( format("{daemon_script} init --instance-name {accumulo_instance_name} --password {accumulo_root_password} --clear-instance-name"),
-               not_if=format("hadoop fs -stat {accumulo_hdfs_root_dir}"),
-               user=params.accumulo_user)
-
-    accumulo_service( self.component,
-      action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    accumulo_service( self.component,
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    component = self.component
-    pid_file = format("{pid_dir}/accumulo-{accumulo_user}-{component}.pid")
-    check_process_status(pid_file)
-
-
-if __name__ == "__main__":
-  self.fail_with_error('component unspecified')
diff --git a/app-packages/accumulo-v1_5/resources.json b/app-packages/accumulo-v1_5/resources.json
deleted file mode 100644
index 54d054b..0000000
--- a/app-packages/accumulo-v1_5/resources.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-  "metadata": {
-  },
-  "global": {
-  },
-  "components": {
-    "ACCUMULO_MASTER": {
-      "yarn.role.priority": "1",
-      "yarn.component.instances": "1"
-    },
-    "slider-appmaster": {
-    },
-    "ACCUMULO_TSERVER": {
-      "yarn.role.priority": "2",
-      "yarn.component.instances": "1"
-    },
-    "ACCUMULO_MONITOR": {
-      "yarn.role.priority": "3",
-      "yarn.component.instances": "1"
-    },
-    "ACCUMULO_GC": {
-      "yarn.role.priority": "4",
-      "yarn.component.instances": "1"
-    },
-    "ACCUMULO_TRACER": {
-      "yarn.role.priority": "5",
-      "yarn.component.instances": "1"
-    }
-  }
-}
diff --git a/app-packages/accumulo/LICENSE.txt b/app-packages/accumulo/LICENSE.txt
new file mode 100644
index 0000000..dc02561
--- /dev/null
+++ b/app-packages/accumulo/LICENSE.txt
@@ -0,0 +1,261 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+The binary distribution of the Apache Slider App Package for Apache Accumulo
+bundles a binary distribution of Apache Accumulo 1.6.0.  This license
+information is applicable for an app package containing version 1.6.0 of
+Accumulo only.
+
+The Apache Accumulo project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+This product bundles software from the European Commission project OneLab
+under contract 034819 (http://www.one-lab.org), which is available under
+a "3-clause BSD" license. For details, see
+  core/src/main/java/org/apache/accumulo/core/bloomfilter/
+
+This product bundles JQuery and Flot, which are each available under
+The MIT License (MIT). For details, see
+  server/monitor/src/main/resources/web/flot/
+
+The binary distribution of this product bundles jline, which is available
+under the following "3-clause BSD" license:
+
+    The BSD License
+
+    Copyright (c) 2002-2006, Marc Prud'hommeaux <mwp1@cornell.edu>
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, are permitted provided that the following
+    conditions are met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with
+    the distribution.
+
+    Neither the name of JLine nor the names of its contributors
+    may be used to endorse or promote products derived from this
+    software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+    BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+    AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+    EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+    OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+    AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+    IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+    OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/app-packages/accumulo/NOTICE.txt b/app-packages/accumulo/NOTICE.txt
new file mode 100644
index 0000000..3a34b4d
--- /dev/null
+++ b/app-packages/accumulo/NOTICE.txt
@@ -0,0 +1,8 @@
+Apache Slider (incubating) -- Apache Accumulo App Package
+Copyright 2001-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This product includes JCommander (https://github.com/cbeust/jcommander),
+Copyright 2010 Cedric Beust cedric@beust.com.
diff --git a/app-packages/accumulo/README.txt b/app-packages/accumulo/README.txt
new file mode 100644
index 0000000..8e8fac2
--- /dev/null
+++ b/app-packages/accumulo/README.txt
@@ -0,0 +1,49 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+How to create a Slider package for Accumulo?
+
+  mvn clean package -DskipTests -Paccumulo-app-package
+
+App package can be found in
+  app-packages/accumulo/target/apache-slider-accumulo-${accumulo.version}-app-package-${slider.version}.zip
+
+Verify the content using
+  zip -Tv apache-slider-accumulo-*.zip
+
+While appConfig.json and resources.json are not required for the package, they
+work well as the default configuration for Slider apps. So it is advisable
+that, when you create an application package for Slider, you include sample or
+default resources.json and appConfig.json for a minimal Yarn cluster.
+
+The version of Accumulo used for the app package can be adjusted by adding a
+flag such as
+  -Daccumulo.version=1.5.1
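+so a full build command might look like
+  mvn clean package -DskipTests -Paccumulo-app-package -Daccumulo.version=1.5.1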
+
+Note that the LICENSE.txt and NOTICE.txt bundled with the app package are
+designed for Accumulo 1.6.0 only and may need to be modified to be applicable
+to other versions of the app package.
+
+Note also that the provided sample appConfig.json only works with Accumulo 1.6;
+for Accumulo 1.5 the instance.volumes property must be replaced with
+instance.dfs.dir (which cannot use the provided ${DEFAULT_DATA_DIR} variable,
+as that variable is an HDFS URI).
+
+A less descriptive file name can be specified with
+-Dapp.package.name=accumulo_160, which would create a file accumulo_160.zip.
diff --git a/app-packages/accumulo-v1_5/appConfig.json b/app-packages/accumulo/appConfig.json
similarity index 91%
rename from app-packages/accumulo-v1_5/appConfig.json
rename to app-packages/accumulo/appConfig.json
index 52f8624..8fe9a18 100644
--- a/app-packages/accumulo-v1_5/appConfig.json
+++ b/app-packages/accumulo/appConfig.json
@@ -3,15 +3,14 @@
   "metadata": {
   },
   "global": {
-    "agent.conf": "/slider/agent/conf/agent.ini",
-    "application.def": "/slider/accumulo_v151.zip",
+    "application.def": "${app.package.name}.zip",
     "config_types": "accumulo-site",
     "java_home": "/usr/jdk64/jdk1.7.0_45",
-    "package_list": "files/accumulo-1.5.1-bin.tar.gz",
+    "package_list": "files/accumulo-${accumulo.version}-bin.tar.gz",
     "site.global.app_user": "yarn",
     "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
     "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-1.5.1",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-${accumulo.version}",
     "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
     "site.global.tserver_heapsize": "128m",
     "site.global.master_heapsize": "128m",
@@ -25,7 +24,8 @@
     "site.global.accumulo_root_password": "secret",
     "site.global.user_group": "hadoop",
     "site.global.security_enabled": "false",
-    "site.accumulo-site.instance.dfs.dir": "/apps/accumulo/data",
+    "site.global.monitor_protocol": "http",
+    "site.accumulo-site.instance.volumes": "${DEFAULT_DATA_DIR}/data",
     "site.accumulo-site.instance.zookeeper.host": "${ZK_HOST}",
     "site.accumulo-site.instance.secret": "DEFAULT",
     "site.accumulo-site.tserver.memory.maps.max": "80M",
diff --git a/app-packages/accumulo-v1_5/configuration/accumulo-site.xml b/app-packages/accumulo/configuration/accumulo-site.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/configuration/accumulo-site.xml
rename to app-packages/accumulo/configuration/accumulo-site.xml
diff --git a/app-packages/accumulo-v1_5/configuration/global.xml b/app-packages/accumulo/configuration/global.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/configuration/global.xml
rename to app-packages/accumulo/configuration/global.xml
diff --git a/app-packages/accumulo-v1_5/jmx_metrics.json b/app-packages/accumulo/jmx_metrics.json
similarity index 81%
rename from app-packages/accumulo-v1_5/jmx_metrics.json
rename to app-packages/accumulo/jmx_metrics.json
index 05a1bd9..cf410d8 100644
--- a/app-packages/accumulo-v1_5/jmx_metrics.json
+++ b/app-packages/accumulo/jmx_metrics.json
@@ -1,41 +1,41 @@
 {
     "Component": {
         "ACCUMULO_MASTER": {
-            "masterGoalState": {
+            "MasterGoalState": {
                 "metric": "/stats/masterGoalState",
                 "pointInTime": true,
                 "temporal": false
             },
-            "masterState": {
+            "MasterState": {
                 "metric": "/stats/masterState",
                 "pointInTime": true,
                 "temporal": false
             },
-            "deadTabletServers": {
+            "DeadTabletServers": {
                 "metric": "/stats/deadTabletServers",
                 "pointInTime": true,
                 "temporal": false
             },
-            "totals_ingestrate": {
+            "TotalIngestRate": {
                 "metric": "/stats/totals/ingestrate",
                 "pointInTime": true,
                 "temporal": false
             },
-            "totals_queryrate": {
+            "TotalQueryRate": {
                 "metric": "/stats/totals/queryrate",
                 "pointInTime": true,
                 "temporal": false
             },
-            "totals_diskrate": {
+            "TotalDiskRate": {
                 "metric": "/stats/totals/diskrate",
                 "pointInTime": true,
                 "temporal": false
             },
-            "badTabletServers": {
+            "BadTabletServers": {
                 "metric": "/stats/badTabletServers",
                 "pointInTime": true,
                 "temporal": false
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/app-packages/accumulo/metainfo.xml b/app-packages/accumulo/metainfo.xml
new file mode 100644
index 0000000..4cf6c79
--- /dev/null
+++ b/app-packages/accumulo/metainfo.xml
@@ -0,0 +1,147 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>ACCUMULO</name>
+    <comment>
+      The Apache Accumulo sorted, distributed key/value store is a robust,
+      scalable, high performance data storage system that features cell-based
+      access control and customizable server-side processing. It is based on
+      Google's BigTable design and is built on top of Apache Hadoop,
+      Zookeeper, and Thrift.
+      Requirements:
+      1. Ensure parent dir for path (accumulo-site/instance.volumes) is accessible to the App owner.
+    </comment>
+    <version>${accumulo.version}</version>
+    <exportGroups>
+      <exportGroup>
+        <name>QuickLinks</name>
+        <exports>
+          <export>
+            <name>org.apache.slider.monitor</name>
+            <value>
+              ${site.global.monitor_protocol}://${ACCUMULO_MONITOR_HOST}:${site.accumulo-site.monitor.port.client}
+            </value>
+          </export>
+          <export>
+            <name>org.apache.slider.jmx</name>
+            <value>
+              ${site.global.monitor_protocol}://${ACCUMULO_MONITOR_HOST}:${site.accumulo-site.monitor.port.client}/xml
+            </value>
+          </export>
+        </exports>
+      </exportGroup>
+    </exportGroups>
+    <commandOrders>
+      <commandOrder>
+        <command>ACCUMULO_MASTER-START</command>
+        <requires>ACCUMULO_MONITOR-INSTALLED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>ACCUMULO_TSERVER-START</command>
+        <requires>ACCUMULO_MASTER-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>ACCUMULO_MONITOR-START</command>
+        <requires>ACCUMULO_MASTER-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>ACCUMULO_GC-START</command>
+        <requires>ACCUMULO_MASTER-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>ACCUMULO_TRACER-START</command>
+        <requires>ACCUMULO_MASTER-STARTED</requires>
+      </commandOrder>
+    </commandOrders>
+    <components>
+      <component>
+        <name>ACCUMULO_MASTER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/accumulo_master.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>ACCUMULO_MONITOR</name>
+        <category>MASTER</category>
+        <publishConfig>true</publishConfig>
+        <commandScript>
+          <script>scripts/accumulo_monitor.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>ACCUMULO_GC</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/accumulo_gc.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>ACCUMULO_TRACER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/accumulo_tracer.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>ACCUMULO_TSERVER</name>
+        <category>SLAVE</category>
+        <commandScript>
+          <script>scripts/accumulo_tserver.py</script>
+          <scriptType>PYTHON</scriptType>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>ACCUMULO_CLIENT</name>
+        <category>CLIENT</category>
+        <commandScript>
+          <script>scripts/accumulo_client.py</script>
+          <scriptType>PYTHON</scriptType>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/accumulo-${accumulo.version}-bin.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+  </application>
+</metainfo>
diff --git a/app-packages/accumulo-v1_5/package/files/accumulo-metrics.xml b/app-packages/accumulo/package/files/accumulo-metrics.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/accumulo-metrics.xml
rename to app-packages/accumulo/package/files/accumulo-metrics.xml
diff --git a/app-packages/accumulo-v1_5/package/files/auditLog.xml b/app-packages/accumulo/package/files/auditLog.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/auditLog.xml
rename to app-packages/accumulo/package/files/auditLog.xml
diff --git a/app-packages/accumulo-v1_5/package/files/gc b/app-packages/accumulo/package/files/gc
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/gc
rename to app-packages/accumulo/package/files/gc
diff --git a/app-packages/accumulo-v1_5/package/files/generic_logger.xml b/app-packages/accumulo/package/files/generic_logger.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/generic_logger.xml
rename to app-packages/accumulo/package/files/generic_logger.xml
diff --git a/app-packages/accumulo-v1_5/package/files/log4j.properties b/app-packages/accumulo/package/files/log4j.properties
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/log4j.properties
rename to app-packages/accumulo/package/files/log4j.properties
diff --git a/app-packages/accumulo-v1_5/package/files/masters b/app-packages/accumulo/package/files/masters
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/masters
rename to app-packages/accumulo/package/files/masters
diff --git a/app-packages/accumulo-v1_5/package/files/monitor b/app-packages/accumulo/package/files/monitor
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/monitor
rename to app-packages/accumulo/package/files/monitor
diff --git a/app-packages/accumulo-v1_5/package/files/monitor_logger.xml b/app-packages/accumulo/package/files/monitor_logger.xml
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/monitor_logger.xml
rename to app-packages/accumulo/package/files/monitor_logger.xml
diff --git a/app-packages/accumulo-v1_5/package/files/slaves b/app-packages/accumulo/package/files/slaves
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/slaves
rename to app-packages/accumulo/package/files/slaves
diff --git a/app-packages/accumulo-v1_5/package/files/tracers b/app-packages/accumulo/package/files/tracers
similarity index 100%
rename from app-packages/accumulo-v1_5/package/files/tracers
rename to app-packages/accumulo/package/files/tracers
diff --git a/app-packages/accumulo-v1_5/package/scripts/__init__.py b/app-packages/accumulo/package/scripts/__init__.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/__init__.py
rename to app-packages/accumulo/package/scripts/__init__.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_client.py b/app-packages/accumulo/package/scripts/accumulo_client.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_client.py
rename to app-packages/accumulo/package/scripts/accumulo_client.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_configuration.py b/app-packages/accumulo/package/scripts/accumulo_configuration.py
similarity index 87%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_configuration.py
rename to app-packages/accumulo/package/scripts/accumulo_configuration.py
index 4e6bb5c..8299c36 100644
--- a/app-packages/accumulo-v1_5/package/scripts/accumulo_configuration.py
+++ b/app-packages/accumulo/package/scripts/accumulo_configuration.py
@@ -20,8 +20,8 @@
 
 from resource_management import *
 
-def setup_conf_dir(name=None # 'master' or 'tserver' or 'monitor' or 'gc' or 'tracer' or 'client'
-              ):
+def setup_conf_dir(name=None, # 'master' or 'tserver' or 'monitor' or 'gc' or 'tracer' or 'client'
+              extra_params=None):
   import params
 
   # create the conf directory
@@ -46,10 +46,19 @@
       recursive = True
     )
 
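+    # build the server site configs, overlaying any extra_params (e.g. monitor SSL settings) on accumulo-site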
+    configs = {}
+    if extra_params is None:
+      configs = params.config['configurations']['accumulo-site']
+    else:
+      configs.update(params.config['configurations']['accumulo-site'])
+      for k in extra_params:
+        configs[k] = extra_params[k]
+
     # create a site file for server processes
     XmlConfig( "accumulo-site.xml",
             conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['accumulo-site'],
+            configurations = configs,
             owner = params.accumulo_user,
             group = params.user_group,
             mode=0600
@@ -59,6 +68,7 @@
     client_configurations = {}
     client_configurations['instance.zookeeper.host'] = params.config['configurations']['accumulo-site']['instance.zookeeper.host']
     client_configurations['instance.dfs.dir'] = params.config['configurations']['accumulo-site']['instance.dfs.dir']
+    client_configurations['instance.volumes'] = params.config['configurations']['accumulo-site']['instance.volumes']
     client_configurations['general.classpaths'] = params.config['configurations']['accumulo-site']['general.classpaths']
     XmlConfig( "accumulo-site.xml",
             conf_dir = params.conf_dir,
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_gc.py b/app-packages/accumulo/package/scripts/accumulo_gc.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_gc.py
rename to app-packages/accumulo/package/scripts/accumulo_gc.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_master.py b/app-packages/accumulo/package/scripts/accumulo_master.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_master.py
rename to app-packages/accumulo/package/scripts/accumulo_master.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_monitor.py b/app-packages/accumulo/package/scripts/accumulo_monitor.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_monitor.py
rename to app-packages/accumulo/package/scripts/accumulo_monitor.py
diff --git a/app-packages/accumulo/package/scripts/accumulo_script.py b/app-packages/accumulo/package/scripts/accumulo_script.py
new file mode 100644
index 0000000..5e2ceba
--- /dev/null
+++ b/app-packages/accumulo/package/scripts/accumulo_script.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.environment import Environment
+
+from accumulo_configuration import setup_conf_dir
+from accumulo_configuration import accumulo_StaticFile
+from accumulo_service import accumulo_service
+
+
+class AccumuloScript(Script):
+  def __init__(self, component):
+    self.component = component
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    if params.monitor_security_enabled and self.component == 'monitor':
+      import os
+      import random
+      import string
+
+      basedir = Environment.get_instance().config.basedir
+      keystore_file = os.path.join(basedir, "files", "keystore.jks")
+      truststore_file = os.path.join(basedir, "files", "cacerts.jks")
+      cert_file = os.path.join(basedir, "files", "server.cer")
+
+      if os.path.exists(keystore_file) or os.path.exists(truststore_file) or os.path.exists(cert_file):
+        self.fail_with_error("trying to create monitor certs but they already existed")
+
+      goodchars = string.lowercase + string.uppercase + string.digits + '#%+,-./:=?@^_'
+      keypass = ''.join(random.choice(goodchars) for x in range(20))
+      storepass = ''.join(random.choice(goodchars) for x in range(20))
+
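+      # hand the keystore/truststore locations and the generated passwords to accumulo-site via extra_params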
+      https_params = {}
+      https_params[params.keystore_property] = params.keystore_path
+      https_params[params.truststore_property] = params.truststore_path
+      https_params[params.keystore_password_property] = keypass
+      https_params[params.truststore_password_property] = storepass
+
+      setup_conf_dir(name=self.component, extra_params=https_params)
+
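+      # keytool: generate a self-signed key pair, export its certificate, and import it into the monitor truststore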
+      Execute( format("{java64_home}/bin/keytool -genkey -alias \"default\" -keyalg RSA -keypass {keypass} -storepass {storepass} -keystore {keystore_file} -dname \"CN=Unknown, OU=Unknown, O=Unknown, L=Unknown, ST=Unknown, C=Unknown\""),
+               user=params.accumulo_user)
+      Execute( format("{java64_home}/bin/keytool -export -alias \"default\" -storepass {storepass} -file {cert_file} -keystore {keystore_file}"),
+               user=params.accumulo_user)
+      Execute( format("echo \"yes\" | {java64_home}/bin/keytool -import -v -trustcacerts -alias \"default\" -file {cert_file} -keystore {truststore_file} -keypass {keypass} -storepass {storepass}"),
+               user=params.accumulo_user)
+
+      accumulo_StaticFile("keystore.jks")
+      accumulo_StaticFile("cacerts.jks")
+
+    else:
+      setup_conf_dir(name=self.component)
+
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    if self.component == 'master':
+      Execute( format("{daemon_script} init --instance-name {accumulo_instance_name} --password {accumulo_root_password} --clear-instance-name"),
+               not_if=format("hadoop fs -stat {accumulo_hdfs_root_dir}"),
+               user=params.accumulo_user)
+
+    accumulo_service( self.component,
+      action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    accumulo_service( self.component,
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    component = self.component
+    pid_file = format("{pid_dir}/accumulo-{accumulo_user}-{component}.pid")
+    check_process_status(pid_file)
+
+
+if __name__ == "__main__":
+  raise Fail('component unspecified')  # executed directly with no component; Fail is provided by resource_management
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_service.py b/app-packages/accumulo/package/scripts/accumulo_service.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_service.py
rename to app-packages/accumulo/package/scripts/accumulo_service.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_tracer.py b/app-packages/accumulo/package/scripts/accumulo_tracer.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_tracer.py
rename to app-packages/accumulo/package/scripts/accumulo_tracer.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/accumulo_tserver.py b/app-packages/accumulo/package/scripts/accumulo_tserver.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/accumulo_tserver.py
rename to app-packages/accumulo/package/scripts/accumulo_tserver.py
diff --git a/app-packages/accumulo-v1_5/package/scripts/params.py b/app-packages/accumulo/package/scripts/params.py
similarity index 75%
rename from app-packages/accumulo-v1_5/package/scripts/params.py
rename to app-packages/accumulo/package/scripts/params.py
index 556c8d2..3eaa1ab 100644
--- a/app-packages/accumulo-v1_5/package/scripts/params.py
+++ b/app-packages/accumulo/package/scripts/params.py
@@ -50,10 +50,25 @@
 log_dir = config['configurations']['global']['app_log_dir']
 daemon_script = format("{accumulo_root}/bin/accumulo")
 
+# accumulo monitor certificate properties
+monitor_security_enabled = config['configurations']['global']['monitor_protocol'] == "https"
+keystore_path = format("{accumulo_root}/conf/keystore.jks")
+truststore_path = format("{accumulo_root}/conf/cacerts.jks")
+cert_path = format("{accumulo_root}/conf/server.cer")
+keystore_property = "monitor.ssl.keyStore"
+keystore_password_property = "monitor.ssl.keyStorePassword"
+truststore_property = "monitor.ssl.trustStore"
+truststore_password_property = "monitor.ssl.trustStorePassword"
+
 # accumulo initialization parameters
 accumulo_instance_name = config['configurations']['global']['accumulo_instance_name']
 accumulo_root_password = config['configurations']['global']['accumulo_root_password']
-accumulo_hdfs_root_dir = config['configurations']['accumulo-site']['instance.dfs.dir']
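+# instance.dfs.dir (Accumulo 1.5) was replaced by instance.volumes in 1.6; support either one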
+accumulo_hdfs_root_dir = None
+if ('instance.dfs.dir' in config['configurations']['accumulo-site']):
+  accumulo_hdfs_root_dir = config['configurations']['accumulo-site']['instance.dfs.dir']
+else:
+  accumulo_hdfs_root_dir = config['configurations']['accumulo-site']['instance.volumes'].split(",")[0]
 
 #log4j.properties
 if (('accumulo-log4j' in config['configurations']) and ('content' in config['configurations']['accumulo-log4j'])):
diff --git a/app-packages/accumulo-v1_5/package/scripts/status_params.py b/app-packages/accumulo/package/scripts/status_params.py
similarity index 100%
rename from app-packages/accumulo-v1_5/package/scripts/status_params.py
rename to app-packages/accumulo/package/scripts/status_params.py
diff --git a/app-packages/accumulo-v1_5/package/templates/accumulo-env.sh.j2 b/app-packages/accumulo/package/templates/accumulo-env.sh.j2
similarity index 100%
rename from app-packages/accumulo-v1_5/package/templates/accumulo-env.sh.j2
rename to app-packages/accumulo/package/templates/accumulo-env.sh.j2
diff --git a/app-packages/accumulo/pom.xml b/app-packages/accumulo/pom.xml
new file mode 100644
index 0000000..45dfd87
--- /dev/null
+++ b/app-packages/accumulo/pom.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.40</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>slider-accumulo-app-package</artifactId>
+  <packaging>jar</packaging>
+  <name>Slider Accumulo App Package</name>
+  <description>Slider Accumulo App Package</description>
+
+  <properties>
+    <work.dir>package-tmp</work.dir>
+    <app.package.name>apache-slider-accumulo-${accumulo.version}-app-package-${project.version}</app.package.name>
+  </properties>
+
+  <profiles>
+    <profile>
+      <id>accumulo-app-package</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <configuration>
+              <descriptor>src/assembly/accumulo.xml</descriptor>
+              <appendAssemblyId>false</appendAssemblyId>
+              <finalName>${app.package.name}</finalName>
+            </configuration>
+            <executions>
+              <execution>
+                <id>build-app-package</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <version>${maven-dependency-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>copy-dependencies</id>
+                <phase>process-resources</phase>
+                <goals>
+                  <goal>copy-dependencies</goal>
+                </goals>
+                <configuration>
+                  <includeArtifactIds>accumulo</includeArtifactIds>
+                  <includeTypes>tar.gz</includeTypes>
+                  <excludeTransitive>true</excludeTransitive>
+                  <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>run-integration-tests</id>
+                <goals>
+                  <goal>integration-test</goal>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <systemPropertyVariables>
+                <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
+                <java.awt.headless>true</java.awt.headless>
+                <!-- this property must be supplied -->
+                <slider.conf.dir>${slider.conf.dir}</slider.conf.dir>
+                <slider.bin.dir>../../slider-assembly/target/slider-${project.version}-all/slider-${project.version}</slider.bin.dir>
+                <test.app.pkg.dir>target</test.app.pkg.dir>
+                <test.app.pkg.file>${app.package.name}.zip</test.app.pkg.file>
+                <test.app.resource>target/test-config/resources.json</test.app.resource>
+                <test.app.template>target/${app.package.name}/appConfig.json</test.app.template>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+  <build>
+    <!-- resources are filtered for dynamic updates; this substitutes build info into the test configs -->
+    <resources>
+      <resource>
+        <directory>src/test/resources</directory>
+        <filtering>true</filtering>
+        <targetPath>${project.build.directory}/test-config</targetPath>
+      </resource>
+    </resources>
+
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>${maven-compiler-plugin.version}</version>
+        <configuration>
+          <compilerId>groovy-eclipse-compiler</compilerId>
+          <!-- set verbose to be true if you want lots of uninteresting messages -->
+          <!-- <verbose>true</verbose> -->
+          <source>${project.java.src.version}</source>
+          <target>${project.java.src.version}</target>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>org.codehaus.groovy</groupId>
+            <artifactId>groovy-eclipse-compiler</artifactId>
+            <version>${groovy-eclipse-compiler.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.codehaus.groovy</groupId>
+            <artifactId>groovy-eclipse-batch</artifactId>
+            <version>${groovy-eclipse-batch.version}</version>
+          </dependency>
+        </dependencies>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <!-- can't figure out how to get the surefire plugin not to pick up the ITs, so skip it entirely -->
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo</artifactId>
+      <version>${accumulo.version}</version>
+      <classifier>bin</classifier>
+      <type>tar.gz</type>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-test</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-funtest</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.groovy</groupId>
+      <artifactId>groovy-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+</project>
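
The build above is gated on the accumulo-app-package profile and pulls the
Accumulo binary tarball from the local maven repository as an
org.apache.accumulo:accumulo artifact (classifier bin, type tar.gz). A minimal
build sketch, assuming a downloaded tarball and 1.5.1 standing in for
${accumulo.version}:
  mvn install:install-file -Dfile=accumulo-1.5.1-bin.tar.gz \
    -DgroupId=org.apache.accumulo -DartifactId=accumulo \
    -Dversion=1.5.1 -Dclassifier=bin -Dpackaging=tar.gz
  mvn clean package -Paccumulo-app-package -DskipTests
Running the integration tests additionally requires -Dslider.conf.dir, as noted
in the failsafe configuration above.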
diff --git a/app-packages/accumulo/resources.json b/app-packages/accumulo/resources.json
new file mode 100644
index 0000000..f876901
--- /dev/null
+++ b/app-packages/accumulo/resources.json
@@ -0,0 +1,36 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+  "metadata": {
+  },
+  "global": {
+  },
+  "components": {
+    "ACCUMULO_MASTER": {
+      "yarn.role.priority": "1",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    },
+    "slider-appmaster": {
+    },
+    "ACCUMULO_TSERVER": {
+      "yarn.role.priority": "2",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    },
+    "ACCUMULO_MONITOR": {
+      "yarn.role.priority": "3",
+      "yarn.component.instances": "1",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_GC": {
+      "yarn.role.priority": "4",
+      "yarn.component.instances": "1",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_TRACER": {
+      "yarn.role.priority": "5",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    }
+  }
+}
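
Each component entry maps onto a YARN container request: yarn.component.instances
is the number of containers, yarn.memory the MB requested per container, and
yarn.role.priority distinguishes the roles. A hedged example of rescaling a
running instance (assuming the slider flex syntax of component name plus target
count):
  slider flex <cluster-name> --component ACCUMULO_TSERVER 3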
diff --git a/app-packages/accumulo/src/assembly/accumulo.xml b/app-packages/accumulo/src/assembly/accumulo.xml
new file mode 100644
index 0000000..a8f9578
--- /dev/null
+++ b/app-packages/accumulo/src/assembly/accumulo.xml
@@ -0,0 +1,72 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>accumulo_v${accumulo.version}</id>
+  <formats>
+    <format>zip</format>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <files>
+    <file>
+      <source>appConfig.json</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>metainfo.xml</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+  </files>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <outputDirectory>/</outputDirectory>
+      <excludes>
+        <exclude>pom.xml</exclude>
+        <exclude>src/**</exclude>
+        <exclude>target/**</exclude>
+        <exclude>appConfig.json</exclude>
+        <exclude>metainfo.xml</exclude>
+      </excludes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.build.directory}/${work.dir}</directory>
+      <outputDirectory>package/files</outputDirectory>
+      <includes>
+        <include>accumulo-${accumulo.version}-bin.tar.gz</include>
+      </includes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+  </fileSets>
+</assembly>
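
The assembly places the filtered appConfig.json and metainfo.xml at the zip
root, copies the rest of the project tree (minus pom.xml, src and target), and
drops the Accumulo tarball under package/files. An illustrative, non-exhaustive
sketch of the resulting package:
  apache-slider-accumulo-<accumulo.version>-app-package-<slider.version>.zip
    appConfig.json
    metainfo.xml
    resources.json
    package/files/accumulo-<accumulo.version>-bin.tar.gz
    package/scripts/... and package/templates/... from the project tree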
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloAgentCommandTestBase.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloAgentCommandTestBase.groovy
new file mode 100644
index 0000000..50ecfcd
--- /dev/null
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloAgentCommandTestBase.groovy
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.accumulo
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.funtest.framework.AgentCommandTestBase
+import org.junit.After
+import org.junit.Before
+
+@Slf4j
+abstract class AccumuloAgentCommandTestBase extends AgentCommandTestBase {
+  protected static final int ACCUMULO_LAUNCH_WAIT_TIME
+  protected static final int ACCUMULO_GO_LIVE_TIME = 60000
+
+  // parameters must match those found in the default appConfig.json
+  protected static final String INSTANCE_NAME = "instancename"
+  protected static final String USER = "root"
+  protected static final String PASSWORD = "secret"
+
+  static {
+    ACCUMULO_LAUNCH_WAIT_TIME = getTimeOptionMillis(SLIDER_CONFIG,
+      KEY_ACCUMULO_LAUNCH_TIME,
+      1000 * DEFAULT_ACCUMULO_LAUNCH_TIME_SECONDS)
+  }
+
+  public abstract String getClusterName();
+
+  @Before
+  public void prepareCluster() {
+    setupCluster(getClusterName())
+  }
+
+  @After
+  public void destroyCluster() {
+    cleanup(getClusterName())
+  }
+}
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
new file mode 100644
index 0000000..bcb952b
--- /dev/null
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.accumulo
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.api.ClusterDescription
+import org.apache.slider.client.SliderClient
+import org.apache.slider.common.SliderKeys
+import org.apache.slider.core.registry.docstore.PublishedConfiguration
+import org.apache.slider.core.registry.info.ServiceInstanceData
+import org.apache.slider.core.registry.retrieve.RegistryRetriever
+import org.apache.slider.funtest.framework.SliderShell
+import org.apache.slider.server.services.curator.CuratorServiceInstance
+import org.junit.Test
+
+@Slf4j
+class AccumuloBasicIT extends AccumuloAgentCommandTestBase {
+
+  @Override
+  public String getClusterName() {
+    return "test_accumulo_basic"
+  }
+
+  @Test
+  public void testAccumuloClusterCreate() throws Throwable {
+
+    describe getDescription()
+
+    def path = buildClusterPath(getClusterName())
+    assert !clusterFS.exists(path)
+
+    SliderShell shell = slider(EXIT_SUCCESS,
+      [
+        ACTION_CREATE, getClusterName(),
+        ARG_IMAGE, agentTarballPath.toString(),
+        ARG_TEMPLATE, APP_TEMPLATE,
+        ARG_RESOURCES, APP_RESOURCE
+      ])
+
+    logShell(shell)
+
+    ensureApplicationIsUp(getClusterName())
+
+    // must match the values in src/test/resources/resources.json
+    Map<String, Integer> roleMap = [
+      "ACCUMULO_MASTER" : 1,
+      "ACCUMULO_TSERVER" : 2,
+      "ACCUMULO_MONITOR": 1,
+      "ACCUMULO_GC": 0,
+      "ACCUMULO_TRACER" : 0
+    ];
+
+    //get a slider client against the cluster
+    SliderClient sliderClient = bondToCluster(SLIDER_CONFIG, getClusterName())
+    ClusterDescription cd = sliderClient.clusterDescription
+    assert getClusterName() == cd.name
+
+    log.info("Connected via Client {}", sliderClient.toString())
+
+    //wait for the role counts to be reached
+    waitForRoleCount(sliderClient, roleMap, ACCUMULO_LAUNCH_WAIT_TIME)
+
+    sleep(ACCUMULO_GO_LIVE_TIME)
+
+    clusterLoadOperations(cd, sliderClient)
+  }
+
+
+  public String getDescription() {
+    return "Create a working Accumulo cluster $clusterName"
+  }
+
+  public static String getMonitorUrl(SliderClient sliderClient, String clusterName) {
+    CuratorServiceInstance<ServiceInstanceData> instance =
+      sliderClient.getRegistry().queryForInstance(SliderKeys.APP_TYPE, clusterName)
+    ServiceInstanceData serviceInstanceData = instance.payload
+    RegistryRetriever retriever = new RegistryRetriever(serviceInstanceData)
+    PublishedConfiguration configuration = retriever.retrieveConfiguration(
+      retriever.getConfigurations(true), "quicklinks", true)
+
+    // must match name set in metainfo.xml
+    String monitorUrl = configuration.entries.get("org.apache.slider.monitor")
+
+    assertNotNull monitorUrl
+    return monitorUrl
+  }
+
+  public static void checkMonitorPage(String monitorUrl) {
+    String monitor = fetchWebPageWithoutError(monitorUrl);
+    assume monitor != null, "Monitor page null"
+    assume monitor.length() > 100, "Monitor page too short"
+    assume monitor.contains("Accumulo Overview"), "Monitor page didn't contain expected text"
+  }
+
+  /**
+   * Override point for any cluster load operations
+   */
+  public void clusterLoadOperations(ClusterDescription cd, SliderClient sliderClient) {
+    String monitorUrl = getMonitorUrl(sliderClient, getClusterName())
+    assert monitorUrl.startsWith("http://"), "Monitor URL didn't have expected protocol"
+    checkMonitorPage(monitorUrl)
+  }
+}
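
The test drives the same code path as the command line; an equivalent manual
invocation would look roughly like the following (flag names assumed from the
ARG_ constants used above):
  slider create test_accumulo_basic --image <agent tarball> \
    --template appConfig.json --resources resources.json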
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloMonitorSSLIT.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloMonitorSSLIT.groovy
new file mode 100644
index 0000000..6f68e13
--- /dev/null
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloMonitorSSLIT.groovy
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.accumulo
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.api.ClusterDescription
+import org.apache.slider.client.SliderClient
+
+import javax.net.ssl.KeyManager
+import javax.net.ssl.SSLContext
+import javax.net.ssl.TrustManager
+import javax.net.ssl.X509TrustManager
+import java.security.SecureRandom
+import java.security.cert.CertificateException
+import java.security.cert.X509Certificate
+
+@Slf4j
+class AccumuloMonitorSSLIT extends AccumuloBasicIT {
+  AccumuloMonitorSSLIT() {
+    APP_TEMPLATE = "target/test-config/appConfig_monitor_ssl.json"
+  }
+
+  @Override
+  public String getClusterName() {
+    return "test_monitor_ssl";
+  }
+
+  @Override
+  public String getDescription() {
+    return "Test enable monitor SSL $clusterName"
+  }
+
+  @Override
+  public void clusterLoadOperations(ClusterDescription cd, SliderClient sliderClient) {
+    String monitorUrl = getMonitorUrl(sliderClient, getClusterName())
+    assert monitorUrl.startsWith("https://"), "Monitor URL didn't have expected protocol"
+
+    SSLContext ctx = SSLContext.getInstance("SSL");
+    TrustManager[] t = new TrustManager[1];
+    t[0] = new DefaultTrustManager();
+    ctx.init(new KeyManager[0], t, new SecureRandom());
+    SSLContext.setDefault(ctx);
+    checkMonitorPage(monitorUrl)
+  }
+
+  private static class DefaultTrustManager implements X509TrustManager {
+    @Override
+    public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+    @Override
+    public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+    @Override
+    public X509Certificate[] getAcceptedIssuers() {
+      return null;
+    }
+  }
+}
\ No newline at end of file
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloReadWriteIT.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloReadWriteIT.groovy
new file mode 100644
index 0000000..cdbbcce
--- /dev/null
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloReadWriteIT.groovy
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.accumulo
+
+import groovy.util.logging.Slf4j
+import org.apache.accumulo.core.cli.BatchWriterOpts
+import org.apache.accumulo.core.cli.ScannerOpts
+import org.apache.accumulo.core.client.Connector
+import org.apache.accumulo.core.client.ZooKeeperInstance
+import org.apache.accumulo.core.client.security.tokens.PasswordToken
+import org.apache.accumulo.test.TestIngest
+import org.apache.accumulo.test.VerifyIngest
+import org.apache.slider.api.ClusterDescription
+import org.apache.slider.client.SliderClient
+import org.apache.slider.common.SliderXmlConfKeys
+import org.apache.slider.funtest.framework.FuntestProperties
+
+import java.util.concurrent.atomic.AtomicBoolean
+
+@Slf4j
+class AccumuloReadWriteIT extends AccumuloBasicIT {
+
+  @Override
+  public String getClusterName() {
+    return "test_read_write";
+  }
+
+  @Override
+  public String getDescription() {
+    return "Test reading and writing to Accumulo cluster $clusterName"
+  }
+
+  @Override
+  public void clusterLoadOperations(ClusterDescription cd, SliderClient sliderClient) {
+    try {
+      String zookeepers = SLIDER_CONFIG.get(SliderXmlConfKeys.REGISTRY_ZK_QUORUM,
+        FuntestProperties.DEFAULT_SLIDER_ZK_HOSTS)
+
+      ZooKeeperInstance instance = new ZooKeeperInstance(INSTANCE_NAME, zookeepers)
+      Connector connector = instance.getConnector(USER, new PasswordToken(PASSWORD))
+
+      ingest(connector, 200000, 1, 50, 0);
+      verify(connector, 200000, 1, 50, 0);
+
+      ingest(connector, 2, 1, 500000, 0);
+      verify(connector, 2, 1, 500000, 0);
+
+      interleaveTest(connector);
+    } catch (Exception e) {
+      fail("Got exception connecting/reading/writing: " + e)
+    }
+  }
+
+  public static void ingest(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.rows = rows;
+    opts.cols = cols;
+    opts.dataSize = width;
+    opts.startRow = offset;
+    opts.columnFamily = "colf";
+    opts.createTable = true;
+    TestIngest.ingest(connector, opts, new BatchWriterOpts());
+  }
+
+  private static void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+    ScannerOpts scannerOpts = new ScannerOpts();
+    VerifyIngest.Opts opts = new VerifyIngest.Opts();
+    opts.rows = rows;
+    opts.cols = cols;
+    opts.dataSize = width;
+    opts.startRow = offset;
+    opts.columnFamily = "colf";
+    VerifyIngest.verifyIngest(connector, opts, scannerOpts);
+  }
+
+  static void interleaveTest(final Connector connector) throws Exception {
+    final int ROWS = 200000;
+    final AtomicBoolean fail = new AtomicBoolean(false);
+    final int CHUNKSIZE = ROWS / 10;
+    ingest(connector, CHUNKSIZE, 1, 50, 0);
+    int i;
+    for (i = 0; i < ROWS; i += CHUNKSIZE) {
+      final int start = i;
+      Thread verify = new Thread() {
+        @Override
+        public void run() {
+          try {
+            verify(connector, CHUNKSIZE, 1, 50, start);
+          } catch (Exception ex) {
+            fail.set(true);
+          }
+        }
+      };
+      ingest(connector, CHUNKSIZE, 1, 50, i + CHUNKSIZE);
+      verify.join();
+      assertFalse(fail.get());
+    }
+    verify(connector, CHUNKSIZE, 1, 50, i);
+  }
+
+}
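
A single integration test can be run on its own through failsafe's standard
it.test property, assuming a valid client configuration is supplied:
  mvn verify -Paccumulo-app-package -Dit.test=AccumuloReadWriteIT \
    -Dslider.conf.dir=/path/to/slider/conf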
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/app-packages/accumulo/src/test/java/org/apache/slider/funtest/accumulo/StubToForceGroovyTestsToCompile.java
similarity index 85%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
rename to app-packages/accumulo/src/test/java/org/apache/slider/funtest/accumulo/StubToForceGroovyTestsToCompile.java
index 7af463d..7d289e0 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/app-packages/accumulo/src/test/java/org/apache/slider/funtest/accumulo/StubToForceGroovyTestsToCompile.java
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.funtest.accumulo;
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+public class StubToForceGroovyTestsToCompile {
 }
diff --git a/app-packages/accumulo-v1_5/appConfig.json b/app-packages/accumulo/src/test/resources/appConfig_monitor_ssl.json
similarity index 90%
copy from app-packages/accumulo-v1_5/appConfig.json
copy to app-packages/accumulo/src/test/resources/appConfig_monitor_ssl.json
index 52f8624..8b63d06 100644
--- a/app-packages/accumulo-v1_5/appConfig.json
+++ b/app-packages/accumulo/src/test/resources/appConfig_monitor_ssl.json
@@ -3,15 +3,15 @@
   "metadata": {
   },
   "global": {
-    "agent.conf": "/slider/agent/conf/agent.ini",
-    "application.def": "/slider/accumulo_v151.zip",
+    "agent.conf": "agent.ini",
+    "application.def": "${app.package.name}.zip",
     "config_types": "accumulo-site",
     "java_home": "/usr/jdk64/jdk1.7.0_45",
-    "package_list": "files/accumulo-1.5.1-bin.tar.gz",
+    "package_list": "files/accumulo-${accumulo.version}-bin.tar.gz",
     "site.global.app_user": "yarn",
     "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
     "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-1.5.1",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-${accumulo.version}",
     "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
     "site.global.tserver_heapsize": "128m",
     "site.global.master_heapsize": "128m",
@@ -25,7 +25,8 @@
     "site.global.accumulo_root_password": "secret",
     "site.global.user_group": "hadoop",
     "site.global.security_enabled": "false",
-    "site.accumulo-site.instance.dfs.dir": "/apps/accumulo/data",
+    "site.global.monitor_protocol": "https",
+    "site.accumulo-site.instance.volumes": "${DEFAULT_DATA_DIR}/data",
     "site.accumulo-site.instance.zookeeper.host": "${ZK_HOST}",
     "site.accumulo-site.instance.secret": "DEFAULT",
     "site.accumulo-site.tserver.memory.maps.max": "80M",
diff --git a/app-packages/accumulo/src/test/resources/resources.json b/app-packages/accumulo/src/test/resources/resources.json
new file mode 100644
index 0000000..0d536aa
--- /dev/null
+++ b/app-packages/accumulo/src/test/resources/resources.json
@@ -0,0 +1,36 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+  "metadata": {
+  },
+  "global": {
+  },
+  "components": {
+    "ACCUMULO_MASTER": {
+      "yarn.role.priority": "1",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    },
+    "slider-appmaster": {
+    },
+    "ACCUMULO_TSERVER": {
+      "yarn.role.priority": "2",
+      "yarn.component.instances": "2",
+      "yarn.memory": "256"
+    },
+    "ACCUMULO_MONITOR": {
+      "yarn.role.priority": "3",
+      "yarn.component.instances": "1",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_GC": {
+      "yarn.role.priority": "4",
+      "yarn.component.instances": "0",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_TRACER": {
+      "yarn.role.priority": "5",
+      "yarn.component.instances": "0",
+      "yarn.memory": "256"
+    }
+  }
+}
diff --git a/src/site/markdown/architecture/index.md b/app-packages/command-logger/README.txt
similarity index 70%
copy from src/site/markdown/architecture/index.md
copy to app-packages/command-logger/README.txt
index d77a58e..56a8a33 100644
--- a/src/site/markdown/architecture/index.md
+++ b/app-packages/command-logger/README.txt
@@ -14,14 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-  
-# Architecture
+Command logger is a test application used by Slider for functional tests.
 
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+Like any typical Slider Application, it has a native application package, a
+tarball built by the project under application-pkg, and a project under
+slider-pkg that builds the Slider Application Package consumed by Slider.
+A sketch of that layout is shown below.
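
The layout this describes (module names taken from the poms added below):
  app-packages/command-logger/
    application-pkg/   builds command-logger.tar, the native application tarball
    slider-pkg/        builds apache-slider-command-logger.zip for Slider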
diff --git a/src/site/markdown/architecture/index.md b/app-packages/command-logger/application-pkg/README.txt
similarity index 67%
copy from src/site/markdown/architecture/index.md
copy to app-packages/command-logger/application-pkg/README.txt
index d77a58e..5596f30 100644
--- a/src/site/markdown/architecture/index.md
+++ b/app-packages/command-logger/application-pkg/README.txt
@@ -14,14 +14,11 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-  
-# Architecture
+Command logger is a simple application that does not have any moving parts yet.
+Its sole purpose is to be packaged as a tarball that includes some static files
+suggesting how commands may be logged.
 
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+TODO:
+* Add a daemon that gets activated when the application is STARTED
+* Provide hooks to validate that the daemon is behaving as expected
+* Provide hooks to inject failures
diff --git a/app-packages/command-logger/application-pkg/pom.xml b/app-packages/command-logger/application-pkg/pom.xml
new file mode 100644
index 0000000..53f7fd2
--- /dev/null
+++ b/app-packages/command-logger/application-pkg/pom.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.40</version>
+    <relativePath>../../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>command-logger</artifactId>
+  <packaging>pom</packaging>
+  <name>Command Logger</name>
+  <description>Command Logger</description>
+  <properties>
+    <app.package.name>command-logger</app.package.name>
+  </properties>
+
+  <build>
+    <plugins>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptor>src/packages/tarball/all.xml</descriptor>
+          <appendAssemblyId>false</appendAssemblyId>
+          <finalName>${app.package.name}</finalName>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <version>${apache-rat-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>check-licenses</id>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+    <extensions>
+      <extension>
+        <groupId>org.apache.maven.wagon</groupId>
+        <artifactId>wagon-ssh-external</artifactId>
+      </extension>
+    </extensions>
+  </build>
+</project>
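
This module has to be installed, not merely packaged, so that the slider-pkg
module can later resolve the tar from the local repository:
  cd app-packages/command-logger/application-pkg
  mvn clean install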
diff --git a/src/site/markdown/architecture/index.md b/app-packages/command-logger/application-pkg/src/command_logger/README.txt
similarity index 77%
rename from src/site/markdown/architecture/index.md
rename to app-packages/command-logger/application-pkg/src/command_logger/README.txt
index d77a58e..19d2e26 100644
--- a/src/site/markdown/architecture/index.md
+++ b/app-packages/command-logger/application-pkg/src/command_logger/README.txt
@@ -14,14 +14,5 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-  
-# Architecture
 
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+All this project contains is a tarball that has a sample log file.
diff --git a/src/site/markdown/architecture/index.md b/app-packages/command-logger/application-pkg/src/command_logger/operations.log
similarity index 77%
copy from src/site/markdown/architecture/index.md
copy to app-packages/command-logger/application-pkg/src/command_logger/operations.log
index d77a58e..4604cdd 100644
--- a/src/site/markdown/architecture/index.md
+++ b/app-packages/command-logger/application-pkg/src/command_logger/operations.log
@@ -14,14 +14,11 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-  
-# Architecture
 
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+This is a log for all operations on a Yarn container.
+Container Id: Container001
+Application id: Application001
+---------------
+Time:
+Log:
+---------------
diff --git a/app-packages/command-logger/application-pkg/src/packages/tarball/all.xml b/app-packages/command-logger/application-pkg/src/packages/tarball/all.xml
new file mode 100644
index 0000000..5c6080b
--- /dev/null
+++ b/app-packages/command-logger/application-pkg/src/packages/tarball/all.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
+  <!-- This id is not appended to the produced bundle because we do this:
+    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+  -->
+  <id>command-logger</id>
+  <formats>
+    <format>tar</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>src/command_logger</directory>
+      <outputDirectory>command-logger-app</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>
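
Given the files added in this patch, the resulting command-logger.tar would
contain:
  command-logger-app/README.txt
  command-logger-app/operations.log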
diff --git a/app-packages/command-logger/slider-pkg/appConfig.json b/app-packages/command-logger/slider-pkg/appConfig.json
new file mode 100644
index 0000000..1d92c59
--- /dev/null
+++ b/app-packages/command-logger/slider-pkg/appConfig.json
@@ -0,0 +1,26 @@
+{
+    "schema": "http://example.org/specification/v2.0.0",
+    "metadata": {
+    },
+    "global": {
+        "application.def": "apache-slider-command-logger.zip",
+        "config_types": "cl-site",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "package_list": "files/command-logger.tar",
+        "site.global.app_user": "yarn",
+        "site.global.application_id": "CommandLogger",
+        "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+        "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
+        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/command-logger",
+        "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
+        "site.cl-site.logfile.location": "${AGENT_LOG_ROOT}/app/log/operations.log",
+        "site.cl-site.datetime.format": "%A, %d. %B %Y %I:%M%p"
+    },
+    "components": {
+        "COMMAND_LOGGER": {
+        },
+        "slider-appmaster": {
+            "jvm.heapsize": "256M"
+        }
+    }
+}
diff --git a/slider-core/src/test/app_packages/test_command_log/configuration/cl-site.xml b/app-packages/command-logger/slider-pkg/configuration/cl-site.xml
similarity index 100%
rename from slider-core/src/test/app_packages/test_command_log/configuration/cl-site.xml
rename to app-packages/command-logger/slider-pkg/configuration/cl-site.xml
diff --git a/app-packages/command-logger/slider-pkg/metainfo.xml b/app-packages/command-logger/slider-pkg/metainfo.xml
new file mode 100644
index 0000000..e17413d
--- /dev/null
+++ b/app-packages/command-logger/slider-pkg/metainfo.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>TEST_COMMAND_LOG</name>
+    <comment>
+      When started it creates a new log file and stores all commands in the
+      log file. When stopped it renames the file.
+    </comment>
+    <version>0.1.0</version>
+    <components>
+      <component>
+        <name>COMMAND_LOGGER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/cl.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/command-logger.tar</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+  </application>
+</metainfo>
diff --git a/slider-core/src/test/app_packages/test_command_log/package/scripts/cl.py b/app-packages/command-logger/slider-pkg/package/scripts/cl.py
similarity index 100%
rename from slider-core/src/test/app_packages/test_command_log/package/scripts/cl.py
rename to app-packages/command-logger/slider-pkg/package/scripts/cl.py
diff --git a/slider-core/src/test/app_packages/test_command_log/package/scripts/params.py b/app-packages/command-logger/slider-pkg/package/scripts/params.py
similarity index 100%
rename from slider-core/src/test/app_packages/test_command_log/package/scripts/params.py
rename to app-packages/command-logger/slider-pkg/package/scripts/params.py
diff --git a/slider-core/src/test/app_packages/test_command_log/package/templates/operations.log.j2 b/app-packages/command-logger/slider-pkg/package/templates/operations.log.j2
similarity index 100%
rename from slider-core/src/test/app_packages/test_command_log/package/templates/operations.log.j2
rename to app-packages/command-logger/slider-pkg/package/templates/operations.log.j2
diff --git a/app-packages/command-logger/slider-pkg/pom.xml b/app-packages/command-logger/slider-pkg/pom.xml
new file mode 100644
index 0000000..0971868
--- /dev/null
+++ b/app-packages/command-logger/slider-pkg/pom.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.40</version>
+    <relativePath>../../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>apache-slider-command-logger</artifactId>
+  <packaging>pom</packaging>
+  <name>Slider Command Logger App Package</name>
+  <description>Slider Command Logger App Package</description>
+  <properties>
+    <work.dir>package-tmp</work.dir>
+    <app.package.name>apache-slider-command-logger</app.package.name>
+  </properties>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptor>src/assembly/command-logger.xml</descriptor>
+          <appendAssemblyId>false</appendAssemblyId>
+          <finalName>${app.package.name}</finalName>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-app-package</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>${maven-dependency-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeArtifactIds>command-logger</includeArtifactIds>
+              <includeTypes>tar</includeTypes>
+              <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+            </configuration>
+          </execution>
+          <execution>
+            <id>copy</id>
+            <phase>test</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.slider</groupId>
+                  <artifactId>command-logger</artifactId>
+                  <type>tar</type>
+                  <overWrite>false</overWrite>
+                  <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+                  <destFileName>command-logger.tar</destFileName>
+                </artifactItem>
+              </artifactItems>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <version>${apache-rat-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>check-licenses</id>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>command-logger</artifactId>
+      <version>${project.version}</version>
+      <type>tar</type>
+    </dependency>
+  </dependencies>
+
+</project>
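
A sketch of the two-step build, assuming the native tarball has not been
installed yet:
  (cd ../application-pkg && mvn clean install)
  mvn clean package
The app package is then produced as target/apache-slider-command-logger.zip.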
diff --git a/app-packages/command-logger/slider-pkg/resources.json b/app-packages/command-logger/slider-pkg/resources.json
new file mode 100644
index 0000000..8345661
--- /dev/null
+++ b/app-packages/command-logger/slider-pkg/resources.json
@@ -0,0 +1,15 @@
+{
+    "schema": "http://example.org/specification/v2.0.0",
+    "metadata": {
+    },
+    "global": {
+    },
+    "components": {
+        "COMMAND_LOGGER": {
+            "yarn.role.priority": "1",
+            "yarn.component.instances": "1"
+        },
+        "slider-appmaster": {
+        }
+    }
+}
diff --git a/app-packages/command-logger/slider-pkg/src/assembly/command-logger.xml b/app-packages/command-logger/slider-pkg/src/assembly/command-logger.xml
new file mode 100644
index 0000000..cad468a
--- /dev/null
+++ b/app-packages/command-logger/slider-pkg/src/assembly/command-logger.xml
@@ -0,0 +1,78 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>command_logger_app_pkg</id>
+  <formats>
+    <format>zip</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <files>
+    <file>
+      <source>appConfig.json</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>resources.json</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>metainfo.xml</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+  </files>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.build.directory}/${work.dir}</directory>
+      <outputDirectory>package/files</outputDirectory>
+      <includes>
+        <include>command-logger.tar</include>
+      </includes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <outputDirectory>/</outputDirectory>
+      <excludes>
+        <exclude>pom.xml</exclude>
+        <exclude>src/**</exclude>
+        <exclude>target/**</exclude>
+        <exclude>appConfig.json</exclude>
+        <exclude>resources.json</exclude>
+        <exclude>metainfo.xml</exclude>
+      </excludes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+  </fileSets>
+</assembly>
diff --git a/app-packages/hbase-v0_96/README.txt b/app-packages/hbase-v0_96/README.txt
deleted file mode 100644
index 33c93df..0000000
--- a/app-packages/hbase-v0_96/README.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-How to create a Slider package?
-
-Replace the placeholder tarball for HBase.
-  cp ~/Downloads/hbase-0.96.1-hadoop2-bin.tar.gz package/files/
-  rm package/files/hbase-0.96.1-hadoop2-bin.tar.gz.REPLACE
-
-Create a zip package at the root of the package (<slider enlistment>/app-packages/hbase-v0_96/) 
-  zip -r hbase_v096.zip .
-
-Verify the content using  
-  unzip -l "$@" hbase_v096.zip
-
-While appConfig.json and resources.json are not required for the package they work
-well as the default configuration for Slider apps. So its advisable that when you
-create an application package for Slider, include sample/default resources.json and
-appConfig.json for a minimal Yarn cluster.
diff --git a/app-packages/hbase-v0_96/ganglia_metrics.json b/app-packages/hbase-v0_96/ganglia_metrics.json
deleted file mode 100644
index da73d48..0000000
--- a/app-packages/hbase-v0_96/ganglia_metrics.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
-    "Component": {
-        "HBASE_REGIONSERVER": {
-            "readRequestsCount": {
-                "metric": "regionserver.Server.readRequestCount",
-                "pointInTime": false,
-                "temporal": true
-            },
-            "regions": {
-                "metric": "regionserver.Server.regionCount",
-                "pointInTime": false,
-                "temporal": true
-            },
-            "flushQueueSize": {
-                "metric": "regionserver.Server.flushQueueLength",
-                "pointInTime": false,
-                "temporal": true
-            }
-        },
-        "HBASE_MASTER": {
-            "cluster_requests": {
-                "metric": "master.Server.clusterRequests",
-                "pointInTime": false,
-                "temporal": true
-            },
-            "splitTime_avg_time": {
-                "metric": "master.FileSystem.HlogSplitTime_mean",
-                "pointInTime": false,
-                "temporal": true
-            },
-            "splitSize_avg_time": {
-                "metric": "master.FileSystem.HlogSplitSize_mean",
-                "pointInTime": false,
-                "temporal": true
-            }
-        }
-    }
-}
diff --git a/app-packages/hbase-v0_96/metainfo.xml b/app-packages/hbase-v0_96/metainfo.xml
deleted file mode 100644
index e5bcdc9..0000000
--- a/app-packages/hbase-v0_96/metainfo.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <comment>
-        Apache HBase is the Hadoop database, a distributed, scalable, big data store.
-        Requirements:
-        1. Ensure parent dir for path (hbase-site/hbase.rootdir) is accessible to the App owner.
-        2. Ensure ZK root (hbase-site/zookeeper.znode.parent) is unique for the App instance.
-      </comment>
-      <version>0.96.0.2.1.1</version>
-      <type>YARN-APP</type>
-      <minHadoopVersion>2.1.0</minHadoopVersion>
-      <exportGroups>
-        <exportGroup>
-          <name>QuickLinks</name>
-          <exports>
-            <export>
-              <name>org.apache.slider.jmx</name>
-              <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/jmx</value>
-            </export>
-            <export>
-              <name>org.apache.slider.monitor</name>
-              <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/master-status</value>
-            </export>
-            <export>
-              <name>org.apache.slider.metrics</name>
-              <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
-            </export>
-          </exports>
-        </exportGroup>
-      </exportGroups>
-      <commandOrders>
-        <commandOrder>
-          <command>HBASE_REGIONSERVER-START</command>
-          <requires>HBASE_MASTER-STARTED</requires>
-        </commandOrder>
-      </commandOrders>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <category>MASTER</category>
-          <minInstanceCount>1</minInstanceCount>
-          <maxInstanceCount>2</maxInstanceCount>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <minInstanceCount>1</minInstanceCount>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <category>CLIENT</category>
-          <minInstanceCount>0</minInstanceCount>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/hbase-0.96.1-hadoop2-bin.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>
diff --git a/app-packages/hbase-v0_96/package/files/hbase-0.96.1-hadoop2-bin.tar.gz.REPLACE b/app-packages/hbase-v0_96/package/files/hbase-0.96.1-hadoop2-bin.tar.gz.REPLACE
deleted file mode 100644
index 5d03caa..0000000
--- a/app-packages/hbase-v0_96/package/files/hbase-0.96.1-hadoop2-bin.tar.gz.REPLACE
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Replace with actual hbase tarball.
diff --git a/app-packages/hbase/README.txt b/app-packages/hbase/README.txt
new file mode 100644
index 0000000..b4e4ccd
--- /dev/null
+++ b/app-packages/hbase/README.txt
@@ -0,0 +1,75 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+Create Slider App Package for HBase
+
+While appConfig.json and resources.json are not required for the package, they
+work well as the default configuration for Slider apps. So when you create an
+application package for Slider, it is advisable to include a sample/default
+resources.json and appConfig.json for a minimal Yarn cluster.
+
+OPTION-I: Use mvn command
+OPTION-II: Manual
+
+****** OPTION - I (use mvn command) ******
+You need the HBase tarball installed in your local maven repo to create the Slider App Package for HBase.
+
+The version of HBase used for the app package can be adjusted by adding a
+flag such as
+  -Dhbase.version=0.98.3
+
+Download the tarball for HBase:
+  e.g. path to tarball ~/Downloads/hbase-0.98.3-hadoop2-bin.tar.gz
+
+Use the following command to install HBase tarball locally:
+  mvn install:install-file -Dfile=<path-to-tarball> -DgroupId=org.apache.hbase -DartifactId=hbase -Dversion=0.98.3-hadoop2 -Dclassifier=bin -Dpackaging=tar.gz
+
+You may need to copy the hbase tarball to the following location if the above step doesn't publish the tarball:
+~/.m2/repository/org/apache/hbase/hbase/0.98.3-hadoop2/
+
+After the HBase tarball is published to the local maven repository, you can use the following command:
+  mvn clean package -DskipTests -Phbase-app-package
+
+App package can be found in
+  app-packages/hbase/target/apache-slider-hbase-${hbase.version}-app-package-${slider.version}.zip
+
+Verify the content using
+  zip -Tv apache-slider-hbase-*.zip
+
+If an HBase version older than 0.98.3 is desired, it must be installed in the local maven repo.
+
+A shorter, less descriptive file name can be specified with
+  -Dapp.package.name=HBase_98dot3
+which would create the file HBase_98dot3.zip.
+
+****** OPTION - II (manual) ******
+The Slider App Package for HBase can also be created manually.
+
+Download the tarball for HBase:
+  e.g. path to tarball ~/Downloads/hbase-0.98.3-hadoop2-bin.tar.gz
+
+Copy the hbase tarball to package/files
+  cp ~/Downloads/hbase-0.98.3-hadoop2-bin.tar.gz package/files
+
+Edit appConfig.json/metainfo.xml
+  Replace 4 occurrences of "${hbase.version}" with the HBase version value, such as "0.98.3-hadoop2"
+  Replace 1 occurrence of "${app.package.name}" with the desired app package name, e.g. "hbase-v098"
+
+Create a zip package at the root of the package (<slider enlistment>/app-packages/hbase/)
+  zip -r hbase-v098.zip .
+
+Verify the content using
+  zip -Tv hbase-v098.zip
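+
+A complete Option I session might look like the following sketch (the paths
+and the 0.98.3-hadoop2 version string are examples; adjust them to match the
+tarball you actually downloaded):
+  mvn install:install-file -Dfile=~/Downloads/hbase-0.98.3-hadoop2-bin.tar.gz \
+    -DgroupId=org.apache.hbase -DartifactId=hbase -Dversion=0.98.3-hadoop2 \
+    -Dclassifier=bin -Dpackaging=tar.gz
+  mvn clean package -DskipTests -Phbase-app-package -Dhbase.version=0.98.3-hadoop2
+  zip -Tv target/apache-slider-hbase-*-app-package-*.zip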
diff --git a/app-packages/hbase-v0_96/appConfig.json b/app-packages/hbase/appConfig.json
similarity index 81%
rename from app-packages/hbase-v0_96/appConfig.json
rename to app-packages/hbase/appConfig.json
index fd884cb..20cd436 100644
--- a/app-packages/hbase-v0_96/appConfig.json
+++ b/app-packages/hbase/appConfig.json
@@ -3,28 +3,31 @@
   "metadata": {
   },
   "global": {
-    "agent.conf": "/slider/agent/conf/agent.ini",
-    "application.def": "/slider/hbase_v096.zip",
+    "application.def": "${app.package.name}.zip",
+    "create.default.zookeeper.node": "true",
     "config_types": "core-site,hdfs-site,hbase-site",
     "java_home": "/usr/jdk64/jdk1.7.0_45",
-    "package_list": "files/hbase-0.96.1-hadoop2-bin.tar.gz",
+    "package_list": "files/hbase-${hbase.version}-bin.tar.gz",
     "site.global.app_user": "yarn",
     "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
     "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-0.96.1-hadoop2",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-${hbase.version}",
     "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
     "site.global.hbase_master_heapsize": "1024m",
     "site.global.hbase_regionserver_heapsize": "1024m",
+    "site.global.hbase_instance_name": "instancename",
+    "site.global.hbase_root_password": "secret",
     "site.global.user_group": "hadoop",
     "site.global.security_enabled": "false",
+    "site.global.monitor_protocol": "http",
     "site.global.ganglia_server_host": "${NN_HOST}",
     "site.global.ganglia_server_port": "8667",
     "site.global.ganglia_server_id": "Application1",
     "site.hbase-site.hbase.hstore.flush.retries.number": "120",
     "site.hbase-site.hbase.client.keyvalue.maxsize": "10485760",
     "site.hbase-site.hbase.hstore.compactionThreshold": "3",
-    "site.hbase-site.hbase.rootdir": "${NN_URI}/apps/hbase/data",
-    "site.hbase-site.hbase.stagingdir": "${NN_URI}/apps/hbase/staging",
+    "site.hbase-site.hbase.rootdir": "${DEFAULT_DATA_DIR}/data",
+    "site.hbase-site.hbase.stagingdir": "${DEFAULT_DATA_DIR}/staging",
     "site.hbase-site.hbase.regionserver.handler.count": "60",
     "site.hbase-site.hbase.regionserver.global.memstore.lowerLimit": "0.38",
     "site.hbase-site.hbase.hregion.memstore.block.multiplier": "2",
@@ -40,7 +43,7 @@
     "site.hbase-site.hbase.security.authentication": "simple",
     "site.hbase-site.hbase.defaults.for.version.skip": "true",
     "site.hbase-site.hbase.zookeeper.quorum": "${ZK_HOST}",
-    "site.hbase-site.zookeeper.znode.parent": "/hbase-unsecure",
+    "site.hbase-site.zookeeper.znode.parent": "${DEF_ZK_PATH}",
     "site.hbase-site.hbase.hstore.blockingStoreFiles": "10",
     "site.hbase-site.hbase.hregion.majorcompaction": "86400000",
     "site.hbase-site.hbase.security.authorization": "false",
@@ -50,10 +53,7 @@
     "site.hbase-site.hbase.zookeeper.useMulti": "true",
     "site.hbase-site.hbase.regionserver.info.port": "0",
     "site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}",
-    "site.hbase-site.hbase.regionserver.port": "0",
-    "site.core-site.fs.defaultFS": "${NN_URI}",
-    "site.hdfs-site.dfs.namenode.https-address": "${NN_HOST}:50470",
-    "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070"
+    "site.hbase-site.hbase.regionserver.port": "0"
   },
   "components": {
     "HBASE_MASTER": {
diff --git a/app-packages/hbase-v0_96/configuration/global.xml b/app-packages/hbase/configuration/global.xml
similarity index 100%
rename from app-packages/hbase-v0_96/configuration/global.xml
rename to app-packages/hbase/configuration/global.xml
diff --git a/app-packages/hbase-v0_96/configuration/hbase-log4j.xml b/app-packages/hbase/configuration/hbase-log4j.xml
similarity index 100%
rename from app-packages/hbase-v0_96/configuration/hbase-log4j.xml
rename to app-packages/hbase/configuration/hbase-log4j.xml
diff --git a/app-packages/hbase-v0_96/configuration/hbase-policy.xml b/app-packages/hbase/configuration/hbase-policy.xml
similarity index 100%
rename from app-packages/hbase-v0_96/configuration/hbase-policy.xml
rename to app-packages/hbase/configuration/hbase-policy.xml
diff --git a/app-packages/hbase-v0_96/configuration/hbase-site.xml b/app-packages/hbase/configuration/hbase-site.xml
similarity index 100%
rename from app-packages/hbase-v0_96/configuration/hbase-site.xml
rename to app-packages/hbase/configuration/hbase-site.xml
diff --git a/app-packages/hbase/get-hbase-site.sh b/app-packages/hbase/get-hbase-site.sh
new file mode 100755
index 0000000..0edac30
--- /dev/null
+++ b/app-packages/hbase/get-hbase-site.sh
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Usage: get-hbase-site.sh <app instance name>
+# Reads the application master web URL from "slider status" output and
+# downloads the hbase-site.xml published by the running app instance.
+tuple=`slider status $1 | grep "info.am.web.url"`
+url=`echo $tuple | awk '{split($0,array,": ")} END{print array[2]}'`
+url="${url%,}"
+url="${url%\"}"
+url="${url#\"}"
+url="${url}ws/v1/slider/publisher/slider/hbase-site.xml"
+curl -k -o hbase-site.xml $url
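+
+# Example (hypothetical instance name "hbase1"):
+#   ./get-hbase-site.sh hbase1
+# which fetches something like
+#   http://<am-host>:<am-port>/ws/v1/slider/publisher/slider/hbase-site.xml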
diff --git a/app-packages/hbase-v0_96/jmx_metrics.json b/app-packages/hbase/jmx_metrics.json
similarity index 96%
rename from app-packages/hbase-v0_96/jmx_metrics.json
rename to app-packages/hbase/jmx_metrics.json
index ca980a9..ac0640e 100644
--- a/app-packages/hbase-v0_96/jmx_metrics.json
+++ b/app-packages/hbase/jmx_metrics.json
@@ -1,7 +1,7 @@
 {
     "Component": {
         "HBASE_MASTER": {
-            "metricAverageLoad": {
+            "MetricAverageLoad": {
                 "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
                 "pointInTime": true,
                 "temporal": false
@@ -46,11 +46,11 @@
                 "pointInTime": true,
                 "temporal": false
             },
-            "cluster_requests": {
+            "ClusterRequests": {
                 "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
                 "pointInTime": true,
                 "temporal": false
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/app-packages/hbase/metainfo.xml b/app-packages/hbase/metainfo.xml
new file mode 100644
index 0000000..99413f6
--- /dev/null
+++ b/app-packages/hbase/metainfo.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>HBASE</name>
+    <comment>
+      Apache HBase is the Hadoop database, a distributed, scalable, big data store.
+      Requirements:
+      1. Ensure parent dir for path (hbase-site/hbase.rootdir) is accessible to the App owner.
+      2. Ensure ZK root (hbase-site/zookeeper.znode.parent) is unique for the App instance.
+    </comment>
+    <version>${hbase.version}</version>
+    <type>YARN-APP</type>
+    <minHadoopVersion>2.1.0</minHadoopVersion>
+    <exportGroups>
+      <exportGroup>
+        <name>QuickLinks</name>
+        <exports>
+          <export>
+            <name>org.apache.slider.jmx</name>
+            <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/jmx</value>
+          </export>
+          <export>
+            <name>org.apache.slider.monitor</name>
+            <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/master-status</value>
+          </export>
+          <export>
+            <name>org.apache.slider.metrics</name>
+            <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
+          </export>
+          <export>
+            <name>org.apache.slider.ganglia</name>
+            <value>http://${site.global.ganglia_server_host}/ganglia?c=${site.global.ganglia_server_id}</value>
+          </export>
+        </exports>
+      </exportGroup>
+    </exportGroups>
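+    <!-- The ${...} placeholders above are resolved at runtime from the
+         allocated component hosts and the published site configuration; a
+         resolved monitor quicklink might look like (hypothetical values)
+         http://node1.example.com:41235/master-status -->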
+    <commandOrders>
+      <commandOrder>
+        <command>HBASE_REGIONSERVER-START</command>
+        <requires>HBASE_MASTER-STARTED</requires>
+      </commandOrder>
+    </commandOrders>
+    <components>
+      <component>
+        <name>HBASE_MASTER</name>
+        <category>MASTER</category>
+        <minInstanceCount>1</minInstanceCount>
+        <maxInstanceCount>2</maxInstanceCount>
+        <commandScript>
+          <script>scripts/hbase_master.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>HBASE_REGIONSERVER</name>
+        <category>SLAVE</category>
+        <minInstanceCount>1</minInstanceCount>
+        <commandScript>
+          <script>scripts/hbase_regionserver.py</script>
+          <scriptType>PYTHON</scriptType>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>HBASE_CLIENT</name>
+        <category>CLIENT</category>
+        <minInstanceCount>0</minInstanceCount>
+        <commandScript>
+          <script>scripts/hbase_client.py</script>
+          <scriptType>PYTHON</scriptType>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/hbase-${hbase.version}-bin.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+  </application>
+</metainfo>
diff --git a/app-packages/hbase-v0_96/package/scripts/__init__.py b/app-packages/hbase/package/scripts/__init__.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/__init__.py
rename to app-packages/hbase/package/scripts/__init__.py
diff --git a/app-packages/hbase-v0_96/package/scripts/functions.py b/app-packages/hbase/package/scripts/functions.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/functions.py
rename to app-packages/hbase/package/scripts/functions.py
diff --git a/app-packages/hbase-v0_96/package/scripts/hbase.py b/app-packages/hbase/package/scripts/hbase.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/hbase.py
rename to app-packages/hbase/package/scripts/hbase.py
diff --git a/app-packages/hbase-v0_96/package/scripts/hbase_client.py b/app-packages/hbase/package/scripts/hbase_client.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/hbase_client.py
rename to app-packages/hbase/package/scripts/hbase_client.py
diff --git a/app-packages/hbase-v0_96/package/scripts/hbase_master.py b/app-packages/hbase/package/scripts/hbase_master.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/hbase_master.py
rename to app-packages/hbase/package/scripts/hbase_master.py
diff --git a/app-packages/hbase-v0_96/package/scripts/hbase_regionserver.py b/app-packages/hbase/package/scripts/hbase_regionserver.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/hbase_regionserver.py
rename to app-packages/hbase/package/scripts/hbase_regionserver.py
diff --git a/app-packages/hbase-v0_96/package/scripts/hbase_service.py b/app-packages/hbase/package/scripts/hbase_service.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/hbase_service.py
rename to app-packages/hbase/package/scripts/hbase_service.py
diff --git a/app-packages/hbase-v0_96/package/scripts/params.py b/app-packages/hbase/package/scripts/params.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/params.py
rename to app-packages/hbase/package/scripts/params.py
diff --git a/app-packages/hbase-v0_96/package/scripts/status_params.py b/app-packages/hbase/package/scripts/status_params.py
similarity index 100%
rename from app-packages/hbase-v0_96/package/scripts/status_params.py
rename to app-packages/hbase/package/scripts/status_params.py
diff --git a/app-packages/hbase-v0_96/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/app-packages/hbase/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
rename to app-packages/hbase/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
diff --git a/app-packages/hbase-v0_96/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/app-packages/hbase/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
rename to app-packages/hbase/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
diff --git a/app-packages/hbase-v0_96/package/templates/hbase-env.sh.j2 b/app-packages/hbase/package/templates/hbase-env.sh.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hbase-env.sh.j2
rename to app-packages/hbase/package/templates/hbase-env.sh.j2
diff --git a/app-packages/hbase-v0_96/package/templates/hbase_client_jaas.conf.j2 b/app-packages/hbase/package/templates/hbase_client_jaas.conf.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hbase_client_jaas.conf.j2
rename to app-packages/hbase/package/templates/hbase_client_jaas.conf.j2
diff --git a/app-packages/hbase-v0_96/package/templates/hbase_master_jaas.conf.j2 b/app-packages/hbase/package/templates/hbase_master_jaas.conf.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hbase_master_jaas.conf.j2
rename to app-packages/hbase/package/templates/hbase_master_jaas.conf.j2
diff --git a/app-packages/hbase-v0_96/package/templates/hbase_regionserver_jaas.conf.j2 b/app-packages/hbase/package/templates/hbase_regionserver_jaas.conf.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/hbase_regionserver_jaas.conf.j2
rename to app-packages/hbase/package/templates/hbase_regionserver_jaas.conf.j2
diff --git a/app-packages/hbase-v0_96/package/templates/regionservers.j2 b/app-packages/hbase/package/templates/regionservers.j2
similarity index 100%
rename from app-packages/hbase-v0_96/package/templates/regionservers.j2
rename to app-packages/hbase/package/templates/regionservers.j2
diff --git a/app-packages/hbase/pom.xml b/app-packages/hbase/pom.xml
new file mode 100644
index 0000000..3854496
--- /dev/null
+++ b/app-packages/hbase/pom.xml
@@ -0,0 +1,253 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.31.0-incubating-SNAPSHOT</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>slider-hbase-app-package</artifactId>
+  <packaging>jar</packaging>
+  <name>Slider HBase App Package</name>
+  <description>Slider HBase App Package</description>
+  <properties>
+    <work.dir>package-tmp</work.dir>
+    <app.package.name>apache-slider-hbase-${hbase.version}-app-package-${project.version}</app.package.name>
+  </properties>
+
+  <profiles>
+    <profile>
+      <id>hbase-app-package</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <configuration>
+              <descriptor>src/assembly/hbase.xml</descriptor>
+              <appendAssemblyId>false</appendAssemblyId>
+              <finalName>${app.package.name}</finalName>
+            </configuration>
+            <executions>
+              <execution>
+                <id>build-app-package</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <version>${maven-dependency-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>copy-dependencies</id>
+                <phase>process-resources</phase>
+                <goals>
+                  <goal>copy-dependencies</goal>
+                </goals>
+                <configuration>
+                  <includeArtifactIds>hbase</includeArtifactIds>
+                  <includeTypes>tar.gz</includeTypes>
+                  <excludeTransitive>true</excludeTransitive>
+                  <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>run-integration-tests</id>
+                <goals>
+                  <goal>integration-test</goal>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <systemPropertyVariables>
+                <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
+                <java.awt.headless>true</java.awt.headless>
+                <!-- this property must be supplied-->
+                <slider.conf.dir>${slider.conf.dir}</slider.conf.dir>
+                <slider.bin.dir>../../slider-assembly/target/slider-${project.version}-all/slider-${project.version}</slider.bin.dir>
+                <test.app.pkg.dir>target</test.app.pkg.dir>
+                <test.app.pkg.file>${app.package.name}.zip</test.app.pkg.file>
+                <test.app.resource>target/test-config/resources.json</test.app.resource>
+                <test.app.template>target/${app.package.name}/appConfig.json</test.app.template>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+  <build>
+    <!-- resources are filtered for dynamic updates. This gets build info in-->
+    <resources>
+      <resource>
+        <directory>src/test/resources</directory>
+        <filtering>true</filtering>
+        <targetPath>${project.build.directory}/test-config</targetPath>
+      </resource>
+    </resources>
+
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>${maven-compiler-plugin.version}</version>
+        <configuration>
+          <compilerId>groovy-eclipse-compiler</compilerId>
+          <!-- set verbose to be true if you want lots of uninteresting messages -->
+          <!-- <verbose>true</verbose> -->
+          <source>${project.java.src.version}</source>
+          <target>${project.java.src.version}</target>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>org.codehaus.groovy</groupId>
+            <artifactId>groovy-eclipse-compiler</artifactId>
+            <version>${groovy-eclipse-compiler.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.codehaus.groovy</groupId>
+            <artifactId>groovy-eclipse-batch</artifactId>
+            <version>${groovy-eclipse-batch.version}</version>
+          </dependency>
+        </dependencies>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <!-- can't figure out how to get the surefire plugin not to pick up the ITs, so skip it entirely -->
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase</artifactId>
+      <version>${hbase.version}</version>
+      <classifier>bin</classifier>
+      <type>tar.gz</type>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+       <groupId>org.apache.hbase</groupId>
+       <artifactId>hbase-client</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-it</artifactId>
+      <classifier>tests</classifier>
+        <exclusions>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+          </exclusion>
+        </exclusions>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-funtest</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.groovy</groupId>
+      <artifactId>groovy-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
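+  <!-- Sketch of a build-and-test invocation for this profile (the version
+       and path values are examples; slider.conf.dir must be supplied, per
+       the failsafe configuration above):
+       mvn clean verify -Phbase-app-package \
+         -Dhbase.version=0.98.3-hadoop2 -Dslider.conf.dir=/etc/slider/conf
+  -->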
+</project>
diff --git a/app-packages/hbase-v0_96/resources.json b/app-packages/hbase/resources.json
similarity index 67%
rename from app-packages/hbase-v0_96/resources.json
rename to app-packages/hbase/resources.json
index 9cc1b47..e0ff26f 100644
--- a/app-packages/hbase-v0_96/resources.json
+++ b/app-packages/hbase/resources.json
@@ -7,13 +7,15 @@
   "components": {
     "HBASE_MASTER": {
       "yarn.role.priority": "1",
-      "yarn.component.instances": "1"
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
     },
     "slider-appmaster": {
     },
     "HBASE_REGIONSERVER": {
       "yarn.role.priority": "2",
-      "yarn.component.instances": "1"
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
     }
   }
-}
\ No newline at end of file
+}
diff --git a/app-packages/hbase/src/assembly/hbase.xml b/app-packages/hbase/src/assembly/hbase.xml
new file mode 100644
index 0000000..ff1c395
--- /dev/null
+++ b/app-packages/hbase/src/assembly/hbase.xml
@@ -0,0 +1,72 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>hbase_v${hbase.version}</id>
+  <formats>
+    <format>zip</format>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <files>
+    <file>
+      <source>appConfig.json</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>metainfo.xml</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+  </files>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <outputDirectory>/</outputDirectory>
+      <excludes>
+        <exclude>pom.xml</exclude>
+        <exclude>src/**</exclude>
+        <exclude>target/**</exclude>
+        <exclude>appConfig.json</exclude>
+        <exclude>metainfo.xml</exclude>
+      </excludes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.build.directory}/${work.dir}</directory>
+      <outputDirectory>package/files</outputDirectory>
+      <includes>
+        <include>hbase-${hbase.version}-bin.tar.gz</include>
+      </includes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+  </fileSets>
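+  <!-- Resulting package layout (a sketch): appConfig.json and metainfo.xml
+       at the zip root with maven property filtering applied, the remaining
+       package/ tree copied as-is, and the hbase tarball placed under
+       package/files/. -->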
+</assembly>
diff --git a/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseAgentCommandTestBase.groovy b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseAgentCommandTestBase.groovy
new file mode 100644
index 0000000..757b7fa
--- /dev/null
+++ b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseAgentCommandTestBase.groovy
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.hbase
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.funtest.framework.AgentCommandTestBase
+import org.junit.After
+import org.junit.Before
+
+@Slf4j
+abstract class HBaseAgentCommandTestBase extends AgentCommandTestBase {
+  protected static final int HBASE_LAUNCH_WAIT_TIME
+  protected static final int HBASE_GO_LIVE_TIME = 60000
+
+  // parameters must match those found in the default appConfig.json
+  protected static final String INSTANCE_NAME = "instancename"
+  protected static final String USER = "root"
+  protected static final String PASSWORD = "secret"
+
+  static {
+    HBASE_LAUNCH_WAIT_TIME = getTimeOptionMillis(SLIDER_CONFIG,
+      KEY_TEST_HBASE_LAUNCH_TIME,
+      1000 * DEFAULT_HBASE_LAUNCH_TIME_SECONDS)
+  }
+
+  abstract public String getClusterName();
+
+  @Before
+  public void prepareCluster() {
+    setupCluster(getClusterName())
+  }
+
+  @After
+  public void destroyCluster() {
+    cleanup(getClusterName())
+  }
+}
diff --git a/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseBasicIT.groovy b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseBasicIT.groovy
new file mode 100644
index 0000000..52e19cd
--- /dev/null
+++ b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseBasicIT.groovy
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.hbase
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.api.ClusterDescription
+import org.apache.slider.client.SliderClient
+import org.apache.slider.common.SliderKeys
+import org.apache.slider.core.registry.docstore.PublishedConfiguration
+import org.apache.slider.core.registry.info.ServiceInstanceData
+import org.apache.slider.core.registry.retrieve.RegistryRetriever
+import org.apache.slider.funtest.framework.SliderShell
+import org.apache.slider.server.services.curator.CuratorServiceInstance
+import org.junit.Test
+
+@Slf4j
+class HBaseBasicIT extends HBaseAgentCommandTestBase {
+
+  @Override
+  public String getClusterName() {
+    return "test_hbase_basic"
+  }
+
+  @Test
+  public void testHBaseClusterCreate() throws Throwable {
+
+    describe getDescription()
+
+    def path = buildClusterPath(getClusterName())
+    assert !clusterFS.exists(path)
+
+    SliderShell shell = slider(EXIT_SUCCESS,
+      [
+        ACTION_CREATE, getClusterName(),
+        ARG_IMAGE, agentTarballPath.toString(),
+        ARG_TEMPLATE, APP_TEMPLATE,
+        ARG_RESOURCES, APP_RESOURCE
+      ])
+
+    logShell(shell)
+
+    ensureApplicationIsUp(getClusterName())
+
+    // must match the values in src/test/resources/resources.json
+    Map<String, Integer> roleMap = [
+      "HBASE_MASTER" : 1,
+      "HBASE_REGIONSERVER" : 1
+    ];
+
+    //get a slider client against the cluster
+    SliderClient sliderClient = bondToCluster(SLIDER_CONFIG, getClusterName())
+    ClusterDescription cd = sliderClient.clusterDescription
+    assert getClusterName() == cd.name
+
+    log.info("Connected via Client {}", sliderClient.toString())
+
+    //wait for the role counts to be reached
+    waitForRoleCount(sliderClient, roleMap, HBASE_LAUNCH_WAIT_TIME)
+
+    sleep(HBASE_GO_LIVE_TIME)
+
+    clusterLoadOperations(cd, sliderClient)
+  }
+
+
+  public String getDescription() {
+    return "Create a working HBase cluster $clusterName"
+  }
+
+  public static String getMonitorUrl(SliderClient sliderClient, String clusterName) {
+    CuratorServiceInstance<ServiceInstanceData> instance =
+      sliderClient.getRegistry().queryForInstance(SliderKeys.APP_TYPE, clusterName)
+    ServiceInstanceData serviceInstanceData = instance.payload
+    RegistryRetriever retriever = new RegistryRetriever(serviceInstanceData)
+    PublishedConfiguration configuration = retriever.retrieveConfiguration(
+      retriever.getConfigurations(true), "quicklinks", true)
+
+    // must match name set in metainfo.xml
+    String monitorUrl = configuration.entries.get("org.apache.slider.monitor")
+
+    assertNotNull monitorUrl
+    return monitorUrl
+  }
+
+  public static void checkMonitorPage(String monitorUrl) {
+    String monitor = fetchWebPageWithoutError(monitorUrl);
+    assume monitor != null, "Monitor page null"
+    assume monitor.length() > 100, "Monitor page too short"
+    assume monitor.contains("Table Name"), "Monitor page didn't contain expected text"
+  }
+
+  /**
+   * Override point for any cluster load operations
+   */
+  public void clusterLoadOperations(ClusterDescription cd, SliderClient sliderClient) {
+    String monitorUrl = getMonitorUrl(sliderClient, getClusterName())
+    assert monitorUrl.startsWith("http://"), "Monitor URL didn't have expected protocol"
+    checkMonitorPage(monitorUrl)
+  }
+}
diff --git a/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseMonitorSSLIT.groovy b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseMonitorSSLIT.groovy
new file mode 100644
index 0000000..12bf7ea
--- /dev/null
+++ b/app-packages/hbase/src/test/groovy/org/apache/slider/funtest/hbase/HBaseMonitorSSLIT.groovy
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.funtest.hbase
+
+import groovy.util.logging.Slf4j
+import org.apache.slider.api.ClusterDescription
+import org.apache.slider.client.SliderClient
+
+import javax.net.ssl.KeyManager
+import javax.net.ssl.SSLContext
+import javax.net.ssl.TrustManager
+import javax.net.ssl.X509TrustManager
+import java.security.SecureRandom
+import java.security.cert.CertificateException
+import java.security.cert.X509Certificate
+
+@Slf4j
+class HBaseMonitorSSLIT extends HBaseBasicIT {
+  HBaseMonitorSSLIT() {
+    APP_TEMPLATE = "target/test-config/appConfig_monitor_ssl.json"
+  }
+
+  @Override
+  public String getClusterName() {
+    return "test_monitor_ssl";
+  }
+
+  @Override
+  public String getDescription() {
+    return "Test enable monitor SSL $clusterName"
+  }
+
+  @Override
+  public void clusterLoadOperations(ClusterDescription cd, SliderClient sliderClient) {
+    String monitorUrl = getMonitorUrl(sliderClient, getClusterName())
+    assert monitorUrl.startsWith("https://"), "Monitor URL didn't have expected protocol"
+
+    SSLContext ctx = SSLContext.getInstance("SSL");
+    TrustManager[] t = new TrustManager[1];
+    t[0] = new DefaultTrustManager();
+    ctx.init(new KeyManager[0], t, new SecureRandom());
+    SSLContext.setDefault(ctx);
+    checkMonitorPage(monitorUrl)
+  }
+
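+  // Trust-all manager so the test can fetch the self-signed HTTPS monitor
+  // page; suitable only for this integration test, never for production use.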
+  private static class DefaultTrustManager implements X509TrustManager {
+    @Override
+    public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+    @Override
+    public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+    @Override
+    public X509Certificate[] getAcceptedIssuers() {
+      return null;
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/app-packages/hbase/src/test/java/org/apache/slider/funtest/hbase/StubToForceGroovyTestsToCompile.java
similarity index 85%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to app-packages/hbase/src/test/java/org/apache/slider/funtest/hbase/StubToForceGroovyTestsToCompile.java
index 7af463d..8143fcf 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/app-packages/hbase/src/test/java/org/apache/slider/funtest/hbase/StubToForceGroovyTestsToCompile.java
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.funtest.hbase;
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+public class StubToForceGroovyTestsToCompile {
 }
diff --git a/app-packages/hbase-v0_96/appConfig.json b/app-packages/hbase/src/test/resources/appConfig_monitor_ssl.json
similarity index 80%
copy from app-packages/hbase-v0_96/appConfig.json
copy to app-packages/hbase/src/test/resources/appConfig_monitor_ssl.json
index fd884cb..37d72d0 100644
--- a/app-packages/hbase-v0_96/appConfig.json
+++ b/app-packages/hbase/src/test/resources/appConfig_monitor_ssl.json
@@ -3,28 +3,32 @@
   "metadata": {
   },
   "global": {
-    "agent.conf": "/slider/agent/conf/agent.ini",
-    "application.def": "/slider/hbase_v096.zip",
+    "agent.conf": "agent.ini",
+    "application.def": "${app.package.name}.zip",
+    "create.default.zookeeper.node": "true",
     "config_types": "core-site,hdfs-site,hbase-site",
     "java_home": "/usr/jdk64/jdk1.7.0_45",
-    "package_list": "files/hbase-0.96.1-hadoop2-bin.tar.gz",
+    "package_list": "files/hbase-${hbase.version}-bin.tar.gz",
     "site.global.app_user": "yarn",
-    "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+    "site.global.app_log_dir": "app/log",
     "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-0.96.1-hadoop2",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-${hbase.version}",
     "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
     "site.global.hbase_master_heapsize": "1024m",
     "site.global.hbase_regionserver_heapsize": "1024m",
+    "site.global.hbase_instance_name": "instancename",
+    "site.global.hbase_root_password": "secret",
     "site.global.user_group": "hadoop",
     "site.global.security_enabled": "false",
+    "site.global.monitor_protocol": "https",
     "site.global.ganglia_server_host": "${NN_HOST}",
     "site.global.ganglia_server_port": "8667",
     "site.global.ganglia_server_id": "Application1",
     "site.hbase-site.hbase.hstore.flush.retries.number": "120",
     "site.hbase-site.hbase.client.keyvalue.maxsize": "10485760",
     "site.hbase-site.hbase.hstore.compactionThreshold": "3",
-    "site.hbase-site.hbase.rootdir": "${NN_URI}/apps/hbase/data",
-    "site.hbase-site.hbase.stagingdir": "${NN_URI}/apps/hbase/staging",
+    "site.hbase-site.hbase.rootdir": "${DEFAULT_DATA_DIR}/data",
+    "site.hbase-site.hbase.stagingdir": "${DEFAULT_DATA_DIR}/staging",
     "site.hbase-site.hbase.regionserver.handler.count": "60",
     "site.hbase-site.hbase.regionserver.global.memstore.lowerLimit": "0.38",
     "site.hbase-site.hbase.hregion.memstore.block.multiplier": "2",
@@ -50,10 +54,7 @@
     "site.hbase-site.hbase.zookeeper.useMulti": "true",
     "site.hbase-site.hbase.regionserver.info.port": "0",
     "site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}",
-    "site.hbase-site.hbase.regionserver.port": "0",
-    "site.core-site.fs.defaultFS": "${NN_URI}",
-    "site.hdfs-site.dfs.namenode.https-address": "${NN_HOST}:50470",
-    "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070"
+    "site.hbase-site.hbase.regionserver.port": "0"
   },
   "components": {
     "HBASE_MASTER": {
diff --git a/app-packages/hbase-v0_96/resources.json b/app-packages/hbase/src/test/resources/resources.json
similarity index 67%
copy from app-packages/hbase-v0_96/resources.json
copy to app-packages/hbase/src/test/resources/resources.json
index 9cc1b47..e0ff26f 100644
--- a/app-packages/hbase-v0_96/resources.json
+++ b/app-packages/hbase/src/test/resources/resources.json
@@ -7,13 +7,15 @@
   "components": {
     "HBASE_MASTER": {
       "yarn.role.priority": "1",
-      "yarn.component.instances": "1"
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
     },
     "slider-appmaster": {
     },
     "HBASE_REGIONSERVER": {
       "yarn.role.priority": "2",
-      "yarn.component.instances": "1"
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
     }
   }
-}
\ No newline at end of file
+}
diff --git a/app-packages/storm-v0_91/metainfo.xml b/app-packages/storm-v0_91/metainfo.xml
deleted file mode 100644
index 9913d8c..0000000
--- a/app-packages/storm-v0_91/metainfo.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>STORM</name>
-      <comment>Apache Hadoop Stream processing framework</comment>
-      <version>0.9.1.2.1</version>
-
-      <exportGroups>
-        <exportGroup>
-          <name>QuickLinks</name>
-          <exports>
-            <export>
-              <name>org.apache.slider.jmx</name>
-              <value>http://${STORM_REST_API_HOST}:${site.global.rest_api_port}/api/cluster/summary</value>
-            </export>
-            <export>
-              <name>org.apache.slider.monitor</name>
-              <value>http://${STORM_UI_SERVER_HOST}:${site.storm-site.ui.port}</value>
-            </export>
-            <export>
-              <name>org.apache.slider.metrics</name>
-              <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
-            </export>
-          </exports>
-        </exportGroup>
-      </exportGroups>
-
-      <commandOrders>
-        <commandOrder>
-          <command>NIMBUS-START</command>
-          <requires>SUPERVISOR-INSTALLED,STORM_UI_SERVER-INSTALLED,DRPC_SERVER-INSTALLED,STORM_REST_API-INSTALLED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>SUPERVISOR-START</command>
-          <requires>NIMBUS-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>DRPC_SERVER-START</command>
-          <requires>NIMBUS-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>STORM_REST_API-START</command>
-          <requires>NIMBUS-STARTED,DRPC_SERVER-STARTED,STORM_UI_SERVER-STARTED</requires>
-        </commandOrder>
-        <commandOrder>
-          <command>STORM_UI_SERVER-START</command>
-          <requires>NIMBUS-STARTED</requires>
-        </commandOrder>
-      </commandOrders>
-
-      <components>
-
-        <component>
-          <name>NIMBUS</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/nimbus.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>STORM_REST_API</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/rest_api.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SUPERVISOR</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/supervisor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>STORM_UI_SERVER</name>
-          <category>MASTER</category>
-          <publishConfig>true</publishConfig>
-          <commandScript>
-            <script>scripts/ui_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>DRPC_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/drpc_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <configuration-dependencies>
-        <config-type>storm-site</config-type>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
diff --git a/app-packages/storm-v0_91/README.txt b/app-packages/storm/README.txt
similarity index 81%
rename from app-packages/storm-v0_91/README.txt
rename to app-packages/storm/README.txt
index b547424..971cf14 100644
--- a/app-packages/storm-v0_91/README.txt
+++ b/app-packages/storm/README.txt
@@ -15,7 +15,11 @@
    limitations under the License.
 -->
 
-How to create a Slider package?
+How to create a Slider app package for Storm?
+
+To create the app package, you will need the Storm tarball copied to a specific location.
+The configurations provided in this sample are customized for
+apache-storm-0.9.1.2.1.1.0-237.tar.gz, so if you use a different version you
+may need to edit a few config values.
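+For example (a sketch; replace <your-storm-version> with the version string
+from your tarball):
+  grep -l "0.9.1.2.1.1.0-237" appConfig.json metainfo.xml
+  sed -i 's/0.9.1.2.1.1.0-237/<your-storm-version>/g' appConfig.json metainfo.xml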
 
 Replace the placeholder tarball for Storm.
   cp ~/Downloads/apache-storm-0.9.1.2.1.1.0-237.tar.gz package/files/
diff --git a/app-packages/storm-v0_91/appConfig.json b/app-packages/storm/appConfig.json
similarity index 97%
rename from app-packages/storm-v0_91/appConfig.json
rename to app-packages/storm/appConfig.json
index d6a6679..6d6aa3a 100644
--- a/app-packages/storm-v0_91/appConfig.json
+++ b/app-packages/storm/appConfig.json
@@ -3,11 +3,11 @@
   "metadata": {
   },
   "global": {
-    "agent.conf": "/slider/agent/conf/agent.ini",
     "application.def": "/slider/storm_v091.zip",
     "config_types": "storm-site",
     "java_home": "/usr/jdk64/jdk1.7.0_45",
     "package_list": "files/apache-storm-0.9.1.2.1.1.0-237.tar.gz",
+    "create.default.zookeeper.node": "true",
     "site.global.app_user": "yarn",
     "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/apache-storm-0.9.1.2.1.1.0-237",
     "site.global.user_group": "hadoop",
@@ -52,7 +52,7 @@
     "site.storm-site.storm.cluster.mode": "distributed",
     "site.storm-site.dev.zookeeper.path": "${AGENT_WORK_ROOT}/app/tmp/dev-storm-zookeeper",
     "site.storm-site.drpc.invocations.port": "0",
-    "site.storm-site.storm.zookeeper.root": "/storm",
+    "site.storm-site.storm.zookeeper.root": "${DEF_ZK_PATH}",
     "site.storm-site.logviewer.childopts": "-Xmx128m",
     "site.storm-site.transactional.zookeeper.port": "null",
     "site.storm-site.topology.worker.childopts": "null",
@@ -68,7 +68,7 @@
     "site.storm-site.logviewer.appender.name": "A1",
     "site.storm-site.nimbus.host": "${NIMBUS_HOST}",
     "site.storm-site.ui.port": "${STORM_UI_SERVER.ALLOCATED_PORT}",
-    "site.storm-site.supervisor.slots.ports": "[0, 0]",
+    "site.storm-site.supervisor.slots.ports": "[${SUPERVISOR.ALLOCATED_PORT}]",
     "site.storm-site.nimbus.file.copy.expiration.secs": "600",
     "site.storm-site.supervisor.monitor.frequency.secs": "3",
     "site.storm-site.transactional.zookeeper.servers": "null",
diff --git a/app-packages/storm-v0_91/configuration/global.xml b/app-packages/storm/configuration/global.xml
similarity index 100%
rename from app-packages/storm-v0_91/configuration/global.xml
rename to app-packages/storm/configuration/global.xml
diff --git a/app-packages/storm-v0_91/configuration/storm-site.xml b/app-packages/storm/configuration/storm-site.xml
similarity index 100%
rename from app-packages/storm-v0_91/configuration/storm-site.xml
rename to app-packages/storm/configuration/storm-site.xml
diff --git a/app-packages/storm-v0_91/ganglia_metrics.json b/app-packages/storm/ganglia_metrics.json
similarity index 96%
rename from app-packages/storm-v0_91/ganglia_metrics.json
rename to app-packages/storm/ganglia_metrics.json
index 478649b..861c4fa 100644
--- a/app-packages/storm-v0_91/ganglia_metrics.json
+++ b/app-packages/storm/ganglia_metrics.json
@@ -1,6 +1,6 @@
 {
     "Component": {
-        "NIMBUS_SERVER": {
+        "NIMBUS": {
             "totalslots": {
                 "metric": "Total Slots",
                 "pointInTime": false,
diff --git a/app-packages/storm-v0_91/jmx_metrics.json b/app-packages/storm/jmx_metrics.json
similarity index 96%
rename from app-packages/storm-v0_91/jmx_metrics.json
rename to app-packages/storm/jmx_metrics.json
index fa97527..f7d4e60 100644
--- a/app-packages/storm-v0_91/jmx_metrics.json
+++ b/app-packages/storm/jmx_metrics.json
@@ -1,6 +1,6 @@
 {
     "Component": {
-        "NIMBUS_SERVER": {
+        "NIMBUS": {
             "FreeSlots": {
                 "metric": "$['slots.free']",
                 "pointInTime": true,
@@ -28,4 +28,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/app-packages/storm/metainfo.xml b/app-packages/storm/metainfo.xml
new file mode 100644
index 0000000..7edd794
--- /dev/null
+++ b/app-packages/storm/metainfo.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>STORM</name>
+    <comment>Apache Storm stream processing framework for Hadoop</comment>
+    <version>0.9.1.2.1</version>
+
+    <exportGroups>
+      <exportGroup>
+        <name>QuickLinks</name>
+        <exports>
+          <export>
+            <name>org.apache.slider.jmx</name>
+            <value>http://${STORM_REST_API_HOST}:${site.global.rest_api_port}/api/cluster/summary</value>
+          </export>
+          <export>
+            <name>org.apache.slider.monitor</name>
+            <value>http://${STORM_UI_SERVER_HOST}:${site.storm-site.ui.port}</value>
+          </export>
+          <export>
+            <name>org.apache.slider.metrics</name>
+            <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
+          </export>
+          <export>
+            <name>org.apache.slider.ganglia</name>
+            <value>http://${site.global.ganglia_server_host}/ganglia?c=${site.global.ganglia_server_id}</value>
+          </export>
+        </exports>
+      </exportGroup>
+    </exportGroups>
+
+    <commandOrders>
+      <commandOrder>
+        <command>NIMBUS-START</command>
+        <requires>SUPERVISOR-INSTALLED,STORM_UI_SERVER-INSTALLED,DRPC_SERVER-INSTALLED,STORM_REST_API-INSTALLED
+        </requires>
+      </commandOrder>
+      <commandOrder>
+        <command>SUPERVISOR-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>DRPC_SERVER-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>STORM_REST_API-START</command>
+        <requires>NIMBUS-STARTED,DRPC_SERVER-STARTED,STORM_UI_SERVER-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>STORM_UI_SERVER-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+    </commandOrders>
+
+    <components>
+
+      <component>
+        <name>NIMBUS</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/nimbus.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>STORM_REST_API</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/rest_api.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>SUPERVISOR</name>
+        <category>SLAVE</category>
+        <commandScript>
+          <script>scripts/supervisor.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>STORM_UI_SERVER</name>
+        <category>MASTER</category>
+        <publishConfig>true</publishConfig>
+        <commandScript>
+          <script>scripts/ui_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>DRPC_SERVER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/drpc_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+    <configuration-dependencies>
+      <config-type>storm-site</config-type>
+      <config-type>global</config-type>
+    </configuration-dependencies>
+  </application>
+</metainfo>
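The new metainfo.xml drives the agent: exports become published quick links, commandOrders constrain start sequencing, and each component maps to one of the command scripts renamed below. A small sketch, standard library only, of how the <commandOrder> entries could be read back (the file path is assumed):

  import xml.etree.ElementTree as ET

  tree = ET.parse("app-packages/storm/metainfo.xml")
  orders = {}
  for co in tree.getroot().iter("commandOrder"):
      command = co.findtext("command").strip()
      requires = [r.strip() for r in co.findtext("requires").split(",")]
      orders[command] = requires

  print(orders["NIMBUS-START"])
  # ['SUPERVISOR-INSTALLED', 'STORM_UI_SERVER-INSTALLED',
  #  'DRPC_SERVER-INSTALLED', 'STORM_REST_API-INSTALLED']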
diff --git a/app-packages/storm-v0_91/package/files/apache-storm-0.9.1.2.1.1.0-237.tar.gz.REPLACE b/app-packages/storm/package/files/apache-storm-0.9.1.2.1.1.0-237.tar.gz.REPLACE
similarity index 100%
rename from app-packages/storm-v0_91/package/files/apache-storm-0.9.1.2.1.1.0-237.tar.gz.REPLACE
rename to app-packages/storm/package/files/apache-storm-0.9.1.2.1.1.0-237.tar.gz.REPLACE
diff --git a/app-packages/storm-v0_91/package/scripts/drpc_server.py b/app-packages/storm/package/scripts/drpc_server.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/drpc_server.py
rename to app-packages/storm/package/scripts/drpc_server.py
diff --git a/app-packages/storm-v0_91/package/scripts/nimbus.py b/app-packages/storm/package/scripts/nimbus.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/nimbus.py
rename to app-packages/storm/package/scripts/nimbus.py
diff --git a/app-packages/storm-v0_91/package/scripts/params.py b/app-packages/storm/package/scripts/params.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/params.py
rename to app-packages/storm/package/scripts/params.py
diff --git a/app-packages/storm-v0_91/package/scripts/rest_api.py b/app-packages/storm/package/scripts/rest_api.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/rest_api.py
rename to app-packages/storm/package/scripts/rest_api.py
diff --git a/app-packages/storm-v0_91/package/scripts/service.py b/app-packages/storm/package/scripts/service.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/service.py
rename to app-packages/storm/package/scripts/service.py
diff --git a/app-packages/storm-v0_91/package/scripts/status_params.py b/app-packages/storm/package/scripts/status_params.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/status_params.py
rename to app-packages/storm/package/scripts/status_params.py
diff --git a/app-packages/storm-v0_91/package/scripts/storm.py b/app-packages/storm/package/scripts/storm.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/storm.py
rename to app-packages/storm/package/scripts/storm.py
diff --git a/app-packages/storm-v0_91/package/scripts/supervisor.py b/app-packages/storm/package/scripts/supervisor.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/supervisor.py
rename to app-packages/storm/package/scripts/supervisor.py
diff --git a/app-packages/storm-v0_91/package/scripts/ui_server.py b/app-packages/storm/package/scripts/ui_server.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/ui_server.py
rename to app-packages/storm/package/scripts/ui_server.py
diff --git a/app-packages/storm-v0_91/package/scripts/yaml_config.py b/app-packages/storm/package/scripts/yaml_config.py
similarity index 100%
rename from app-packages/storm-v0_91/package/scripts/yaml_config.py
rename to app-packages/storm/package/scripts/yaml_config.py
diff --git a/app-packages/storm-v0_91/package/templates/config.yaml.j2 b/app-packages/storm/package/templates/config.yaml.j2
similarity index 100%
rename from app-packages/storm-v0_91/package/templates/config.yaml.j2
rename to app-packages/storm/package/templates/config.yaml.j2
diff --git a/app-packages/storm-v0_91/package/templates/storm_jaas.conf.j2 b/app-packages/storm/package/templates/storm_jaas.conf.j2
similarity index 100%
rename from app-packages/storm-v0_91/package/templates/storm_jaas.conf.j2
rename to app-packages/storm/package/templates/storm_jaas.conf.j2
diff --git a/app-packages/storm-v0_91/resources.json b/app-packages/storm/resources.json
similarity index 100%
rename from app-packages/storm-v0_91/resources.json
rename to app-packages/storm/resources.json
diff --git a/pom.xml b/pom.xml
index 51f0a10..a9a90ab 100644
--- a/pom.xml
+++ b/pom.xml
@@ -19,23 +19,33 @@
   <groupId>org.apache.slider</groupId>
   <artifactId>slider</artifactId>
   <name>Slider</name>
-  <version>0.30</version>
+  <version>0.40</version>
   <packaging>pom</packaging>
 
   <description>
     Slider is designed to deploy existing applications onto a YARN cluster without
     rewriting the application to be YARN-ready.
   </description>
-  <url>http://incubator.apache.org/slider/</url>
+  <inceptionYear>2014</inceptionYear>
+  <url>http://slider.incubator.apache.org/</url>
+  <organization>
+    <name>The Apache Software Foundation</name>
+    <url>http://www.apache.org/</url>
+  </organization>
+
   <modules>
+    <module>app-packages/command-logger/application-pkg</module>
+    <module>app-packages/command-logger/slider-pkg</module>
     <module>slider-core</module>
     <module>slider-agent</module>
+    <module>app-packages/accumulo</module>
     <module>slider-assembly</module>
     <module>slider-funtest</module>
     <module>slider-providers/hbase/slider-hbase-provider</module>
     <module>slider-providers/hbase/hbase-funtests</module>
     <module>slider-providers/accumulo/slider-accumulo-provider</module>
     <module>slider-providers/accumulo/accumulo-funtests</module>
+    <module>slider-install</module>
   </modules>
 
   <licenses>
@@ -47,73 +57,14 @@
 
 
   <scm>
-    <url>https://svn.apache.org/repos/asf/incubator/slider/</url>
-    <connection>scm:svn:https://svn.apache.org/repos/asf/incubator/slider/
+    <url>http://git-wip-us.apache.org/repos/asf/incubator-slider.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-slider.git
     </connection>
     <developerConnection>
-      scm:svn:https://svn.apache.org/repos/asf/incubator/slider/
+      scm:git:http://git-wip-us.apache.org/repos/asf/incubator-slider.git
     </developerConnection>
   </scm>
   
-  <developers>
-    <developer>
-      <id>steveloughran</id>
-      <name>Steve Loughran</name>
-      <timezone>0</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>billierinaldi</id>
-      <name>Billie Rinaldi</name>
-      <timezone>-5</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>ddraj</id>
-      <name>Devaraj Das</name>
-      <timezone>-8</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>tyu</id>
-      <name>Ted Yu</name>
-      <timezone>-8</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>joshelser</id>
-      <name>Josh Elser</name>
-      <timezone>-5</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>sumitmohanty</id>
-      <name>Sumit Mohanty</name>
-      <timezone>-8</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-    <developer>
-      <id>jmaron</id>
-      <name>Jon Maron</name>
-      <timezone>-5</timezone>
-      <organization>Hortonworks</organization>
-      <organizationUrl>http://www.hortonworks.com</organizationUrl>
-    </developer>
-
-  </developers>
-
 
   <distributionManagement>
     <site>
@@ -121,7 +72,7 @@
       <name>Slider Website</name>
       <url>http://slider.incubator.apache.org/</url>
     </site>
-    <downloadUrl>https://svn.apache.org/repos/asf/incubator/slider</downloadUrl>
+    <downloadUrl>http://git-wip-us.apache.org/repos/asf/incubator-slider.git</downloadUrl>
   </distributionManagement>
   
   <mailingLists>
@@ -159,6 +110,7 @@
     <test.argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</test.argLine>
     <test.reuseForks>false</test.reuseForks>
     <test.failIfNoTests>true</test.failIfNoTests>
+    <test.funtests.failIfNoTests>false</test.funtests.failIfNoTests>
     <test.forkMode>always</test.forkMode>
     
     <!--
@@ -166,8 +118,8 @@
     -->
     <hadoop.version>2.4.0</hadoop.version>
 
-    <hbase.version>0.98.2-hadoop2</hbase.version>
-    <accumulo.version>1.5.1</accumulo.version>
+    <hbase.version>0.98.3-hadoop2</hbase.version>
+    <accumulo.version>1.6.0</accumulo.version>
     
     <!--
      artifact versions
@@ -178,22 +130,24 @@
     <commons-digester.version>1.8</commons-digester.version>
     <commons-configuration.version>1.6</commons-configuration.version>
     <commons-lang.version>2.6</commons-lang.version>
+    <commons-compress.version>1.4.1</commons-compress.version>
+    <commons-logging.version>1.1.3</commons-logging.version>
+    <commons-io.version>2.4</commons-io.version>
     <curator.version>2.4.1</curator.version>
     <easymock.version>3.1</easymock.version>
     <guava.version>11.0.2</guava.version>
     <gson.version>2.2.2</gson.version>
     <guice.version>3.0</guice.version>
-    <httpclient.version>4.2.5</httpclient.version>
+    <httpclient.version>3.1</httpclient.version>
 
-
-<!--    <jackson.version>1.8.8</jackson.version>-->
     <jackson.version>1.9.13</jackson.version>
     <jcommander.version>1.30</jcommander.version>
     <jersey.version>1.9</jersey.version>
+    <servlet-api.version>2.5</servlet-api.version>
+    <jsr311-api.version>1.1.1</jsr311-api.version>
     <junit.version>4.11</junit.version>
     <log4j.version>1.2.17</log4j.version>
     <mockito.version>1.8.5</mockito.version>
-    <powermock.version>1.5</powermock.version>
 
     <!-- ProtocolBuffer version, used to verify the protoc version and -->
     <!-- define the protobuf JAR version                               -->
@@ -226,6 +180,7 @@
     <maven.project.version>2.4</maven.project.version>
     <maven.properties.version>1.0-alpha-2</maven.properties.version>
     <maven-project-info-reports-plugin.version>2.7</maven-project-info-reports-plugin.version>
+    <maven-rpm-plugin.version>2.1-alpha-4</maven-rpm-plugin.version>
     <maven-site-plugin.version>3.3</maven-site-plugin.version>
     <maven-source-plugin.version>2.2.1</maven-source-plugin.version>
     <maven-surefire-plugin.version>2.16</maven-surefire-plugin.version>
@@ -347,7 +302,10 @@
             <exclude>**/httpfs-signature.secret</exclude>
             <exclude>**/dfs.exclude</exclude>
             <exclude>**/*.iml</exclude>
+            <exclude>**/rat.txt</exclude>
+            <exclude>**/get-hbase-site.sh</exclude>
             <exclude>DISCLAIMER</exclude>
+            <exclude>app-packages/hbase/target/**</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -399,6 +357,54 @@
 
   <dependencyManagement>
     <dependencies>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-core</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-core</artifactId>
+        <version>${project.version}</version>
+        <type>test-jar</type>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-agent</artifactId>
+        <version>${project.version}</version>
+        <type>tar.gz</type>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-funtest</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-assembly</artifactId>
+        <classifier>all</classifier>
+        <version>${project.version}</version>
+        <type>tar.gz</type>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-hbase-provider</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.slider</groupId>
+        <artifactId>slider-hbase-provider</artifactId>
+        <version>${project.version}</version>
+        <type>test-jar</type>
+      </dependency>
+
       <dependency>
         <groupId>org.codehaus.groovy</groupId>
         <artifactId>groovy-all</artifactId>
@@ -417,9 +423,15 @@
         </exclusions>
       </dependency>
 
+      <!-- hadoop-client includes the following jars, so they do not need to be
+        included separately:
+        hadoop-common, hadoop-hdfs, hadoop-mapreduce-client-app,
+        hadoop-yarn-api, hadoop-mapreduce-client-core,
+        hadoop-mapreduce-client-jobclient, and hadoop-annotations
+      -->
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common</artifactId>
+        <artifactId>hadoop-client</artifactId>
         <version>${hadoop.version}</version>
         <exclusions>
           <exclusion>
@@ -427,30 +439,10 @@
             <artifactId>jackson-core-asl</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>net.java.dev.jets3t</groupId>
-            <artifactId>jets3t</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-          </exclusion>
-          <exclusion>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>commons-net</groupId>
-            <artifactId>commons-net</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>tomcat</groupId>
-            <artifactId>jasper-runtime</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>net.java.dev.jets3t</groupId>
-            <artifactId>jets3t</artifactId>
-          </exclusion>
-          <exclusion>
             <groupId>org.apache.httpcomponents</groupId>
             <artifactId>httpclient</artifactId>
           </exclusion>
@@ -461,20 +453,11 @@
         </exclusions>
       </dependency>
 
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-client</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-      </dependency>
-      
+      <!-- hadoop-minicluster includes the following test-jars, so they do not
+        need to be included separately:
+        hadoop-common, hadoop-hdfs, hadoop-yarn-server-tests,
+        hadoop-mapreduce-client-jobclient
+      -->
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
@@ -489,37 +472,6 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>tomcat</groupId>
-            <artifactId>jasper-runtime</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common</artifactId>
-        <version>${hadoop.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-common</artifactId>
-        <version>${hadoop.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>com.sun.jersey.jersey-test-framework</groupId>
-            <artifactId>jersey-test-framework-grizzly2</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-client</artifactId>
         <version>${hadoop.version}</version>
         <exclusions>
@@ -532,30 +484,14 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-common</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-web-proxy</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-mapreduce-client</artifactId>
-        <version>${hadoop.version}</version>
-        <type>pom</type>
-        <exclusions>
-        </exclusions>
-      </dependency>
-      
-      <dependency>
         <groupId>org.apache.avro</groupId>
         <artifactId>avro</artifactId>
         <version>${avro.version}</version>
-
         <exclusions>
           <exclusion>
             <groupId>org.mortbay.jetty</groupId>
@@ -595,16 +531,13 @@
           </exclusion>
         </exclusions>
       </dependency>
+
       <dependency>
         <groupId>commons-configuration</groupId>
         <artifactId>commons-configuration</artifactId>
         <version>${commons-configuration.version}</version>
         <exclusions>
           <exclusion>
-            <groupId>commons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-          </exclusion>
-          <exclusion>
             <groupId>commons-lang</groupId>
             <artifactId>commons-lang</artifactId>
           </exclusion>
@@ -614,19 +547,17 @@
           </exclusion>
         </exclusions>
       </dependency>
+
       <dependency>
         <groupId>commons-lang</groupId>
         <artifactId>commons-lang</artifactId>
         <version>${commons-lang.version}</version>
-        <exclusions>
-        </exclusions>
       </dependency>
+
       <dependency>
-        <groupId>org.apache.httpcomponents</groupId>
-        <artifactId>httpclient</artifactId>
+        <groupId>commons-httpclient</groupId>
+        <artifactId>commons-httpclient</artifactId>
         <version>${httpclient.version}</version>
-        <exclusions>
-        </exclusions>
       </dependency>
       
       <!-- ======================================================== -->
@@ -708,6 +639,12 @@
 
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-protocol</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-it</artifactId>
         <version>${hbase.version}</version>
         <classifier>tests</classifier>
@@ -962,32 +899,6 @@
       <!-- Accumulo -->
       <!-- ======================================================== -->
 
-      <!--
-          <dependency>
-            <groupId>org.apache.accumulo</groupId>
-            <artifactId>accumulo</artifactId>
-            <version>${accumulo.version}</version>
-            <exclusions>
-              <exclusion>
-                <groupId>org.slf4j</groupId>
-                <artifactId>slf4j-api</artifactId>
-              </exclusion>
-            </exclusions>
-          </dependency>
-      -->
-
-      <dependency>
-        <groupId>org.apache.accumulo</groupId>
-        <artifactId>accumulo-server</artifactId>
-        <version>${accumulo.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
@@ -1034,12 +945,6 @@
 
       <dependency>
         <groupId>org.apache.accumulo</groupId>
-        <artifactId>accumulo-start</artifactId>
-        <version>${accumulo.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-trace</artifactId>
         <version>${accumulo.version}</version>
       </dependency>
@@ -1057,18 +962,30 @@
 
 
       <dependency>
-        <groupId>commons-codec</groupId>
-        <artifactId>commons-codec</artifactId>
-        <version>${commons-codec.version}</version>
-      </dependency>
-
-      <dependency>
         <groupId>commons-digester</groupId>
         <artifactId>commons-digester</artifactId>
         <version>${commons-digester.version}</version>
       </dependency>
 
       <dependency>
+        <groupId>org.apache.commons</groupId>
+        <artifactId>commons-compress</artifactId>
+        <version>${commons-compress.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>commons-io</groupId>
+        <artifactId>commons-io</artifactId>
+        <version>${commons-io.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>commons-logging</groupId>
+        <artifactId>commons-logging</artifactId>
+        <version>${commons-logging.version}</version>
+      </dependency>
+
+      <dependency>
         <groupId>org.apache.curator</groupId>
         <artifactId>curator-client</artifactId>
         <version>${curator.version}</version>
@@ -1134,18 +1051,6 @@
         <version>${slf4j.version}</version>
       </dependency>
 
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-log4j12</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-<!--
-      <dependency>
-        <groupId>org.antlr</groupId>
-        <artifactId>stringtemplate</artifactId>
-        <version>${stringtemplate.version}</version>
-      </dependency>
-      -->
       <!-- Used for unit testing -->
       <dependency>
         <groupId>junit</groupId>
@@ -1158,51 +1063,30 @@
         <artifactId>protobuf-java</artifactId>
         <version>${protobuf.version}</version>
       </dependency>
-      <!--
-          <dependency>
-            <groupId>net.sourceforge.htmlunit</groupId>
-            <artifactId>htmlunit</artifactId>
-            <version>2.12</version>
-            
-            <exclusions>
-              <exclusion>
-                <groupId>xalan</groupId>
-                <artifactId>xalan</artifactId>
-              </exclusion>
-              <exclusion>
-                <groupId>xerces</groupId>
-                <artifactId>xercesImpl</artifactId>
-              </exclusion>
-            </exclusions>
-          </dependency>
-      
-          <dependency>
-            <groupId>xerces</groupId>
-            <artifactId>xercesImpl</artifactId>
-            <version>2.11.0</version>
-            
-          </dependency>
-      
-          <dependency>
-            <groupId>xalan</groupId>
-            <artifactId>xalan</artifactId>
-            <version>2.7.1</version>
-            
-          </dependency>
-          -->
-
 
       <!-- ======================================================== -->
       <!-- Jersey and webapp support -->
       <!-- ======================================================== -->
 
       <dependency>
+        <groupId>javax.servlet</groupId>
+        <artifactId>servlet-api</artifactId>
+        <version>${servlet-api.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>javax.ws.rs</groupId>
+        <artifactId>jsr311-api</artifactId>
+        <version>${jsr311-api.version}</version>
+      </dependency>
+
+      <dependency>
         <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-core</artifactId>
+        <artifactId>jersey-client</artifactId>
         <version>${jersey.version}</version>
       </dependency>
+
       <dependency>
-        
         <groupId>com.sun.jersey</groupId>
         <artifactId>jersey-json</artifactId>
         <version>${jersey.version}</version>
@@ -1213,8 +1097,8 @@
           </exclusion>
         </exclusions>
       </dependency>
+
       <dependency>
-        
         <groupId>com.sun.jersey</groupId>
         <artifactId>jersey-server</artifactId>
         <version>${jersey.version}</version>
@@ -1257,9 +1141,9 @@
       </dependency>
 
 
-  <!-- ======================================================== -->
-  <!-- Mocking -->
-  <!-- ======================================================== -->
+      <!-- ======================================================== -->
+      <!-- Mocking -->
+      <!-- ======================================================== -->
 
       <dependency>
         <groupId>org.mockito</groupId>
@@ -1272,49 +1156,89 @@
         <artifactId>easymock</artifactId>
         <version>${easymock.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.powermock</groupId>
-        <artifactId>powermock-core</artifactId>
-        <version>${powermock.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.powermock</groupId>
-        <artifactId>powermock-reflect</artifactId>
-        <version>${powermock.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.powermock</groupId>
-        <artifactId>powermock-api-easymock</artifactId>
-        <version>${powermock.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.powermock</groupId>
-        <artifactId>powermock-module-junit4</artifactId>
-        <version>${powermock.version}</version>
-      </dependency>
 
+      <!-- ======================================================== -->
+      <!-- Jetty -->
+      <!-- ======================================================== -->
+
+      <dependency>
+        <groupId>org.mortbay.jetty</groupId>
+        <artifactId>jetty-sslengine</artifactId>
+        <version>6.1.26</version>
+      </dependency>
 
     </dependencies>
   </dependencyManagement>
 
   <profiles>
 
+    <profile>
+      <id>apache-release</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <version>${maven-assembly-plugin.version}</version>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.apache.resources</groupId>
+                <artifactId>apache-source-release-assembly-descriptor</artifactId>
+                <version>1.0.4</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <id>source-release-assembly</id>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+                <phase>package</phase>
+                <configuration>
+                  <runOnlyAtExecutionRoot>true</runOnlyAtExecutionRoot>
+                  <finalName>apache-slider-${project.version}</finalName>
+                  <descriptorRefs>
+                    <descriptorRef>source-release-zip-tar</descriptorRef>
+                  </descriptorRefs>
+                  <tarLongFileMode>gnu</tarLongFileMode>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
 
     <profile>
       <!-- local builds of everything -->
       <id>local</id>
       <properties>
         <hadoop.version>2.4.1-SNAPSHOT</hadoop.version>
-        <hbase.version>0.98.2-SNAPSHOT</hbase.version>
+        <hbase.version>0.98.3-SNAPSHOT</hbase.version>
         <accumulo.version>1.6.0-SNAPSHOT</accumulo.version>
       </properties>
     </profile>
 
     <profile>
-      <!-- hadoop 2.4 builds of everything -->
+      <!-- hadoop 2.4.1 release builds -->
+      <id>hadoop-2.4.1</id>
+      <properties>
+        <hadoop.version>2.4.1</hadoop.version>
+      </properties>
+    </profile>
+    <profile>
+      <!-- hadoop branch-2 builds  -->
       <id>branch-2</id>
       <properties>
-        <hadoop.version>2.4.0</hadoop.version>
+        <hadoop.version>2.5.0-SNAPSHOT</hadoop.version>
+      </properties>
+    </profile>
+    
+    <profile>
+      <!-- hadoop trunk builds -->
+      <id>hadoop-trunk</id>
+      <properties>
+        <hadoop.version>3.0.0-SNAPSHOT</hadoop.version>
       </properties>
     </profile>
     
diff --git a/slider-agent/conf/agent.ini b/slider-agent/conf/agent.ini
index 87d73a7..b52bec9 100644
--- a/slider-agent/conf/agent.ini
+++ b/slider-agent/conf/agent.ini
@@ -25,6 +25,8 @@
 app_pkg_dir=app/definition
 app_install_dir=app/install
 app_run_dir=app/run
+app_dbg_cmd=
+debug_mode_enabled=true
 
 app_task_dir=app/command-log
 app_log_dir=app/log
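agent.ini gains two debug hooks: app_dbg_cmd, a command hint that is empty by default, and debug_mode_enabled, which controls whether the agent honours hints at all. A minimal read-back sketch in the agent's own Python 2 style; the [agent] section name is an assumption based on AgentConfig.AGENT_SECTION:

  import ConfigParser

  cfg = ConfigParser.RawConfigParser()
  cfg.read("slider-agent/conf/agent.ini")
  if cfg.get("agent", "debug_mode_enabled") == "true":
      print("agent debug hint: %r" % cfg.get("agent", "app_dbg_cmd"))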
diff --git a/slider-agent/pom.xml b/slider-agent/pom.xml
index c0477d3..7a3b447 100644
--- a/slider-agent/pom.xml
+++ b/slider-agent/pom.xml
@@ -19,12 +19,11 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-agent</artifactId>
   <packaging>pom</packaging>
-  <version>0.30</version>
   <name>Slider Agent</name>
   <description>Slider Agent</description>
   <properties>
@@ -44,6 +43,7 @@
         <configuration>
           <tarLongFileMode>gnu</tarLongFileMode>
           <descriptor>src/packages/tarball/all.xml</descriptor>
+          <appendAssemblyId>false</appendAssemblyId>
         </configuration>
         <executions>
           <execution>
@@ -67,7 +67,7 @@
         <executions>
           <execution>
             <configuration>
-              <executable>python2.6</executable>
+              <executable>${project.basedir}/src/test/python/python-wrap</executable>
               <workingDirectory>src/test/python</workingDirectory>
               <arguments>
                 <argument>unitTests.py</argument>
diff --git a/slider-agent/src/main/python/agent/AgentConfig.py b/slider-agent/src/main/python/agent/AgentConfig.py
index e0981f6..16b924c 100644
--- a/slider-agent/src/main/python/agent/AgentConfig.py
+++ b/slider-agent/src/main/python/agent/AgentConfig.py
@@ -21,6 +21,9 @@
 import ConfigParser
 import StringIO
 import os
+import logging
+
+logger = logging.getLogger()
 
 config = ConfigParser.RawConfigParser()
 content = """
@@ -37,6 +40,8 @@
 app_pkg_dir=app/definition
 app_install_dir=app/install
 app_run_dir=app/run
+app_dbg_cmd=
+debug_mode_enabled=true
 
 app_task_dir=app/command-log
 app_log_dir=app/log
@@ -55,6 +60,9 @@
 sleep_between_retries=1
 
 [security]
+keysdir=security/keys
+server_crt=ca.crt
+passphrase_env_var_name=SLIDER_PASSPHRASE
 
 [heartbeat]
 state_interval=6
@@ -79,6 +87,10 @@
   APP_INSTALL_DIR = "app_install_dir"
   # the location to store component instance PID directories
   APP_RUN_DIR = "app_run_dir"
+  # debug hint for agents
+  APP_DBG_CMD = "app_dbg_cmd"
+  # allow agent to operate in debug mode
+  DEBUG_MODE_ENABLED = "debug_mode_enabled"
 
   # run time dir for command executions
   APP_TASK_DIR = "app_task_dir"
@@ -137,6 +149,18 @@
     global config
     return config.get(category, name)
 
+  def isDebugEnabled(self):
+    global config
+    enabled = config.get(AgentConfig.AGENT_SECTION, AgentConfig.DEBUG_MODE_ENABLED)
+    return enabled == "true";
+
+  def debugCommand(self):
+    global config
+    command = config.get(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD)
+    if command is None:
+      return ""
+    return command
+
   def set(self, category, name, value):
     global config
     return config.set(category, name, value)
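A hedged usage sketch for the two accessors added above; how AgentConfig is constructed is assumed, and debugCommand() normalises a missing value to the empty string:

  from AgentConfig import AgentConfig

  config = AgentConfig("/tmp/agent-work")  # constructor arguments assumed
  if config.isDebugEnabled() and config.debugCommand() == "DO_NOT_REGISTER":
      print("registration will be held back for testing")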
diff --git a/slider-agent/src/main/python/agent/Constants.py b/slider-agent/src/main/python/agent/Constants.py
index b937cd2..88cd564 100644
--- a/slider-agent/src/main/python/agent/Constants.py
+++ b/slider-agent/src/main/python/agent/Constants.py
@@ -27,3 +27,6 @@
 FOLDERS = "folders"
 AGENT_WORK_ROOT = "AGENT_WORK_ROOT"
 AGENT_LOG_ROOT = "AGENT_LOG_ROOT"
+DO_NOT_REGISTER = "DO_NOT_REGISTER"
+DO_NOT_HEARTBEAT = "DO_NOT_HEARTBEAT"
+DO_NOT_HEARTBEAT_AFTER_ = "DO_NOT_HEARTBEAT_AFTER_"
\ No newline at end of file
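The trailing underscore on DO_NOT_HEARTBEAT_AFTER_ suggests parametrised hints such as DO_NOT_HEARTBEAT_AFTER_10. The parsing below is an assumption for illustration, not code from this patch:

  import Constants

  def parse_heartbeat_hint(hint):
      # Returns the heartbeat count after which to pause, or None.
      if hint.startswith(Constants.DO_NOT_HEARTBEAT_AFTER_):
          return int(hint[len(Constants.DO_NOT_HEARTBEAT_AFTER_):])
      return None

  print(parse_heartbeat_hint("DO_NOT_HEARTBEAT_AFTER_10"))  # 10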
diff --git a/slider-agent/src/main/python/agent/Controller.py b/slider-agent/src/main/python/agent/Controller.py
index fe5760d..92e9086 100644
--- a/slider-agent/src/main/python/agent/Controller.py
+++ b/slider-agent/src/main/python/agent/Controller.py
@@ -36,6 +36,8 @@
 from NetUtil import NetUtil
 import ssl
 import ProcessHelper
+import Constants
+import security
 
 
 logger = logging.getLogger()
@@ -56,10 +58,10 @@
     self.credential = None
     self.config = config
     self.hostname = config.getLabel()
-    server_url = 'http://' + config.get(AgentConfig.SERVER_SECTION,
+    server_url = 'https://' + config.get(AgentConfig.SERVER_SECTION,
                                         'hostname') + \
                  ':' + config.get(AgentConfig.SERVER_SECTION,
-                                  'port')
+                                  'secured_port')
     self.registerUrl = server_url + '/ws/v1/slider/agents/' + self.hostname + '/register'
     self.heartbeatUrl = server_url + '/ws/v1/slider/agents/' + self.hostname + '/heartbeat'
     self.netutil = NetUtil()
@@ -84,14 +86,34 @@
     logger.info("Server connection disconnected.")
     pass
 
+  def processDebugCommandForRegister(self):
+    self.processDebugCommand(Constants.DO_NOT_REGISTER)
+    pass
+
+  def processDebugCommandForHeartbeat(self):
+    self.processDebugCommand(Constants.DO_NOT_HEARTBEAT)
+    pass
+
+  def processDebugCommand(self, command):
+    if self.config.isDebugEnabled() and self.config.debugCommand() == command:
+      ## Test support - sleep for 10 minutes
+      logger.info("Received debug command: "
+                  + self.config.debugCommand() + " Sleeping for 10 minutes")
+      time.sleep(60*10)
+      pass
+    pass
+
   def registerWithServer(self):
     id = -1
     ret = {}
 
+    self.processDebugCommandForRegister()
+
     while not self.isRegistered:
       try:
         data = json.dumps(self.register.build(id))
-        logger.info("Registering with the server " + pprint.pformat(data))
+        logger.info("Registering with the server at " + self.registerUrl +
+                    " with data " + pprint.pformat(data))
         response = self.sendRequest(self.registerUrl, data)
         ret = json.loads(response)
         exitstatus = 0
@@ -170,7 +192,8 @@
     retry = False
     certVerifFailed = False
 
-    id = 0
+    self.processDebugCommandForHeartbeat()
+
     while not self.DEBUG_STOP_HEARTBEATING:
 
       if self.shouldStopAgent():
@@ -374,11 +397,25 @@
     pass
 
   def sendRequest(self, url, data):
-    req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
-    f = urllib2.urlopen(req)
-    response = f.read()
-    f.close()
-    return response
+    response = None
+    try:
+        if self.cachedconnect is None: # Lazy initialization
+            self.cachedconnect = security.CachedHTTPSConnection(self.config)
+        req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
+        response = self.cachedconnect.request(req)
+        return response
+    except Exception:
+        exc_type, exc_value, exc_traceback = sys.exc_info()
+        logger.error("Exception raised", exc_info=(exc_type, exc_value, exc_traceback))
+        if response is None:
+            err_msg = 'Request failed! Data: ' + str(data)
+            logger.warn(err_msg)
+            return {'exitstatus': 1, 'log': err_msg}
+        else:
+            err_msg = ('Response parsing failed! Request data: ' + str(data)
+                       + '; Response: ' + str(response))
+            logger.warn(err_msg)
+            return {'exitstatus': 1, 'log': err_msg}
 
 
 def main(argv=None):
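Note the asymmetric return types of the new sendRequest(): the success path returns the response body (JSON text), while both failure paths return a dict carrying 'exitstatus' and 'log'. A hedged sketch of a caller that tolerates both shapes:

  import json

  def parse_response(response):
      if isinstance(response, dict):   # error path from sendRequest
          return response
      return json.loads(response)      # normal path: JSON text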
diff --git a/slider-agent/src/main/python/agent/CustomServiceOrchestrator.py b/slider-agent/src/main/python/agent/CustomServiceOrchestrator.py
index 328084d..6296033 100644
--- a/slider-agent/src/main/python/agent/CustomServiceOrchestrator.py
+++ b/slider-agent/src/main/python/agent/CustomServiceOrchestrator.py
@@ -88,6 +88,7 @@
       py_file_list = [script_tuple]
       # filter None values
       filtered_py_file_list = [i for i in py_file_list if i]
+      logger_level = logging.getLevelName(logger.level)
 
       # Executing hooks and script
       ret = None
@@ -101,6 +102,7 @@
         ret = self.python_executor.run_file(py_file, script_params,
                                             tmpoutfile, tmperrfile, timeout,
                                             tmpstrucoutfile,
+                                            logger_level,
                                             override_output_files,
                                             environment_vars)
         # Next run_file() invocations should always append to current output
diff --git a/slider-agent/src/main/python/agent/NetUtil.py b/slider-agent/src/main/python/agent/NetUtil.py
index ed8e687..eb658f7 100644
--- a/slider-agent/src/main/python/agent/NetUtil.py
+++ b/slider-agent/src/main/python/agent/NetUtil.py
@@ -18,6 +18,7 @@
 import time
 import logging
 import httplib
+from ssl import SSLError
 
 logger = logging.getLogger()
 
@@ -37,7 +38,7 @@
     logger.info("Connecting to the following url " + url);
     try:
       parsedurl = urlparse(url)
-      ca_connection = httplib.HTTPConnection(parsedurl[1])
+      ca_connection = httplib.HTTPSConnection(parsedurl[1])
       ca_connection.request("GET", parsedurl[2])
       response = ca_connection.getresponse()  
       status = response.status    
@@ -47,6 +48,11 @@
         return True
       else: 
         return False
+    except SSLError as slerror:
+        logger.error(str(slerror))
+        logger.error("SSLError: Failed to connect. Please check openssl library versions. \n" +
+                     "Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.")
+        return False
     except Exception, e:
       logger.info("Failed to connect to " + str(url) + " due to " + str(e))
       return False
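A self-contained sketch of the HTTPS probe above, Python 2 stdlib as in NetUtil.py; the set of statuses treated as reachable is an assumption, since the original condition sits outside this hunk:

  import httplib
  from ssl import SSLError
  from urlparse import urlparse

  def https_reachable(url):
      try:
          parsed = urlparse(url)
          conn = httplib.HTTPSConnection(parsed[1])
          conn.request("GET", parsed[2])
          return conn.getresponse().status == 200  # accepted status assumed
      except SSLError:
          # Older openssl builds are a known cause; see the bugzilla link above.
          return False
      except Exception:
          return False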
diff --git a/slider-agent/src/main/python/agent/PythonExecutor.py b/slider-agent/src/main/python/agent/PythonExecutor.py
index 142fcdd..5f29e5e 100644
--- a/slider-agent/src/main/python/agent/PythonExecutor.py
+++ b/slider-agent/src/main/python/agent/PythonExecutor.py
@@ -52,7 +52,7 @@
     pass
 
   def run_file(self, script, script_params, tmpoutfile, tmperrfile, timeout,
-               tmpstructedoutfile, override_output_files=True,
+               tmpstructedoutfile, logger_level, override_output_files=True,
                environment_vars=None):
     """
     Executes the specified python file in a separate subprocess.
@@ -69,7 +69,16 @@
     else: # Append to files
       tmpout = open(tmpoutfile, 'a')
       tmperr = open(tmperrfile, 'a')
-    script_params += [tmpstructedoutfile]
+
+    # need to remove this file for the following case:
+    # status call 1 does not write to file; call 2 writes to file;
+    # call 3 does not write to file, so contents are still call 2's result
+    try:
+      os.unlink(tmpstructedoutfile)
+    except OSError:
+      pass  # the file did not exist; nothing to remove
+
+    script_params += [tmpstructedoutfile, logger_level]
     pythonCommand = self.python_command(script, script_params)
     logger.info("Running command " + pprint.pformat(pythonCommand))
     process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr,
@@ -100,7 +109,7 @@
         }
         logger.warn(structured_out)
       else:
-        structured_out = '{}'
+        structured_out = {}
 
     if self.python_process_has_been_killed:
       error = str(error) + "\n Python script has been killed due to timeout"
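The unlink above fixes a stale-status bug: without it, a status call that writes nothing would surface the previous call's JSON. A toy reader showing the failure mode it prevents (path made up), which also matches the change from the string '{}' to a real dict default:

  import json

  tmpstructedoutfile = "/tmp/structured-out-status.json"

  def read_structured_out():
      try:
          with open(tmpstructedoutfile) as f:
              return json.load(f)
      except (IOError, ValueError):
          return {}  # a dict, as in the corrected default above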
diff --git a/slider-agent/src/main/python/agent/main.py b/slider-agent/src/main/python/agent/main.py
index afe3595..12e07ba 100644
--- a/slider-agent/src/main/python/agent/main.py
+++ b/slider-agent/src/main/python/agent/main.py
@@ -27,7 +27,6 @@
 import os
 import time
 import errno
-import ConfigParser
 import ProcessHelper
 from Controller import Controller
 from AgentConfig import AgentConfig
@@ -40,7 +39,7 @@
 configFileRelPath = "infra/conf/agent.ini"
 logFileName = "agent.log"
 
-SERVER_STATUS_URL="http://{0}:{1}{2}"
+SERVER_STATUS_URL="https://{0}:{1}{2}"
 
 
 def signal_handler(signum, frame):
@@ -176,6 +175,8 @@
   parser.add_option("-l", "--label", dest="label", help="label of the agent", default=None)
   parser.add_option("--host", dest="host", help="AppMaster host", default=None)
   parser.add_option("--port", dest="port", help="AppMaster port", default=None)
+  parser.add_option("--secured_port", dest="secured_port", help="AppMaster 2 Way port", default=None)
+  parser.add_option("--debug", dest="debug", help="Agent debug hint", default="")
   (options, args) = parser.parse_args()
 
   if not 'AGENT_WORK_ROOT' in os.environ:
@@ -200,9 +201,23 @@
   if options.port:
       agentConfig.set(AgentConfig.SERVER_SECTION, "port", options.port)
 
+  if options.secured_port:
+      agentConfig.set(AgentConfig.SERVER_SECTION, "secured_port", options.secured_port)
+
+  if options.debug:
+    agentConfig.set(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD, options.debug)
+
+  # set the security directory to a subdirectory of the run dir
+  secDir = os.path.join(agentConfig.getResolvedPath(AgentConfig.RUN_DIR), "security")
+  logger.info("Security/Keys directory: " + secDir)
+  agentConfig.set(AgentConfig.SECURITY_SECTION, "keysdir", secDir)
+
   logFile = os.path.join(agentConfig.getResolvedPath(AgentConfig.LOG_DIR), logFileName)
+
   perform_prestart_checks(agentConfig)
   ensure_folder_layout(agentConfig)
+  # create security dir if necessary
+  ensure_path_exists(secDir)
 
   setup_logging(options.verbose, logFile)
   update_log_level(agentConfig, logFile)
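The two new flags feed the HTTPS and debug plumbing above. Exercised in isolation with optparse, as main.py does (the values are illustrative):

  from optparse import OptionParser

  parser = OptionParser()
  parser.add_option("--secured_port", dest="secured_port", default=None)
  parser.add_option("--debug", dest="debug", default="")
  (options, args) = parser.parse_args(
      ["--secured_port", "46000", "--debug", "DO_NOT_REGISTER"])
  print("%s %s" % (options.secured_port, options.debug))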
diff --git a/slider-agent/src/main/python/agent/security.py b/slider-agent/src/main/python/agent/security.py
index 4037733..76671dc 100644
--- a/slider-agent/src/main/python/agent/security.py
+++ b/slider-agent/src/main/python/agent/security.py
@@ -140,7 +140,7 @@
     self.keysdir = self.config.get('security', 'keysdir')
     self.server_crt=self.config.get('security', 'server_crt')
     self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' \
-       + self.config.get('server', 'url_port')
+       + self.config.get('server', 'port')
     
   def getAgentKeyName(self):
     keysdir = self.config.get('security', 'keysdir')
@@ -187,7 +187,7 @@
       logger.info("Agent certificate exists, ok")
             
   def loadSrvrCrt(self):
-    get_ca_url = self.server_url + '/cert/ca/'
+    get_ca_url = self.server_url + '/ws/v1/slider/agents/cert/ca/'
     logger.info("Downloading server cert from " + get_ca_url)
     stream = urllib2.urlopen(get_ca_url)
     response = stream.read()
@@ -196,7 +196,8 @@
     srvr_crt_f.write(response)
       
   def reqSignCrt(self):
-    sign_crt_req_url = self.server_url + '/certs/' + hostname.hostname()
+    sign_crt_req_url = self.server_url + '/ws/v1/slider/agents/certs/' + \
+                       hostname.hostname()
     agent_crt_req_f = open(self.getAgentCrtReqName())
     agent_crt_req_content = agent_crt_req_f.read()
     passphrase_env_var = self.config.get('security', 'passphrase_env_var_name')
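Both certificate endpoints now live under the agent web service rather than at the server root. The resulting URLs, with example hostname and port:

  server_url = "https://appmaster.example.com:46000"
  get_ca_url = server_url + "/ws/v1/slider/agents/cert/ca/"
  sign_crt_req_url = server_url + "/ws/v1/slider/agents/certs/" + "host1.example.com"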
diff --git a/slider-agent/src/main/python/resource_management/core/logger.py b/slider-agent/src/main/python/resource_management/core/logger.py
index 7124ef0..7370c97 100644
--- a/slider-agent/src/main/python/resource_management/core/logger.py
+++ b/slider-agent/src/main/python/resource_management/core/logger.py
@@ -59,6 +59,8 @@
   @staticmethod  
   def _get_resource_repr(resource):
     MESSAGE_MAX_LEN = 256
+    logger_level = logging._levelNames[Logger.logger.level]
+    
     arguments_str = ""
     for x,y in resource.arguments.iteritems():
       
@@ -70,7 +72,7 @@
         val = repr(y).lstrip('u')
       # don't show dicts of configurations
       # usually too long  
-      elif isinstance(y, dict):
+      elif logger_level != 'DEBUG' and isinstance(y, dict):
         val = "..."
       # for configs which didn't come
       elif isinstance(y, UnknownConfiguration):
diff --git a/slider-agent/src/main/python/resource_management/core/providers/package/apt.py b/slider-agent/src/main/python/resource_management/core/providers/package/apt.py
index 8d53f96..acfc212 100644
--- a/slider-agent/src/main/python/resource_management/core/providers/package/apt.py
+++ b/slider-agent/src/main/python/resource_management/core/providers/package/apt.py
@@ -15,7 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 
-Ambari Agent
+Slider Agent
 
 """
 
@@ -23,7 +23,7 @@
 from resource_management.core import shell
 from resource_management.core.logger import Logger
 
-INSTALL_CMD = "/usr/bin/apt-get --assume-yes install %s"
+INSTALL_CMD = "/usr/bin/apt-get --force-yes --assume-yes install %s"
 REMOVE_CMD = "/usr/bin/apt-get -y -q remove %s"
 CHECK_CMD = "dpkg --get-selections %s | grep -v deinstall"
 
diff --git a/slider-agent/src/main/python/resource_management/core/shell.py b/slider-agent/src/main/python/resource_management/core/shell.py
index 32e917c..92312d5 100644
--- a/slider-agent/src/main/python/resource_management/core/shell.py
+++ b/slider-agent/src/main/python/resource_management/core/shell.py
@@ -20,9 +20,9 @@
 
 """
 
-__all__ = ["checked_call", "call"]
+__all__ = ["checked_call", "call", "quote_bash_args"]
 
-import pipes
+import string
 import subprocess
 import threading
 from multiprocessing import Queue
@@ -52,7 +52,7 @@
   """
   # convert to string and escape
   if isinstance(command, (list, tuple)):
-    command = ' '.join(pipes.quote(x) for x in command)
+    command = ' '.join(quote_bash_args(x) for x in command)
 
   """
   Do not su to the supplied user (need to differentiate between when to call su and when not to)
@@ -89,7 +89,7 @@
     Logger.info(out)
   
   if throw_on_failure and code:
-    err_msg = ("Execution of '%s' returned %d. %s") % (command[-1], code, out)
+    err_msg = Logger.get_protected_text(("Execution of '%s' returned %d. %s") % (command[-1], code, out))
     raise Fail(err_msg)
   
   return code, out
@@ -100,4 +100,13 @@
     try:
       proc.terminate()
     except:
-      pass
\ No newline at end of file
+      pass
+
+def quote_bash_args(command):
+  if not command:
+    return "''"
+  valid = set(string.ascii_letters + string.digits + '@%_-+=:,./')
+  for char in command:
+    if char not in valid:
+      return "'" + command.replace("'", "'\"'\"'") + "'"
+  return command
\ No newline at end of file
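quote_bash_args replaces the dropped pipes.quote with an equivalent shell-quoting rule. A few illustrative calls (expected output in comments):

  from resource_management.core.shell import quote_bash_args

  print(quote_bash_args(""))            # ''
  print(quote_bash_args("plain-arg"))   # plain-arg (no unsafe characters)
  print(quote_bash_args("a b"))         # 'a b'
  print(quote_bash_args("it's"))        # 'it'"'"'s'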
diff --git a/slider-agent/src/main/python/resource_management/libraries/functions/__init__.py b/slider-agent/src/main/python/resource_management/libraries/functions/__init__.py
index 41cf666..ad30707 100644
--- a/slider-agent/src/main/python/resource_management/libraries/functions/__init__.py
+++ b/slider-agent/src/main/python/resource_management/libraries/functions/__init__.py
@@ -28,3 +28,4 @@
 from resource_management.libraries.functions.is_empty import *
 from resource_management.libraries.functions.substitute_vars import *
 from resource_management.libraries.functions.os_check import *
+from resource_management.libraries.functions.get_port_from_url import *
\ No newline at end of file
diff --git a/slider-agent/src/main/python/resource_management/libraries/functions/format.py b/slider-agent/src/main/python/resource_management/libraries/functions/format.py
index 9594791..a4ea111 100644
--- a/slider-agent/src/main/python/resource_management/libraries/functions/format.py
+++ b/slider-agent/src/main/python/resource_management/libraries/functions/format.py
@@ -22,12 +22,12 @@
 
 __all__ = ["format"]
 import sys
-import pipes
 from string import Formatter
 from resource_management.core.exceptions import Fail
 from resource_management.core.utils import checked_unite
 from resource_management.core.environment import Environment
 from resource_management.core.logger import Logger
+from resource_management.core.shell import quote_bash_args
 
 
 class ConfigurationFormatter(Formatter):
@@ -66,7 +66,7 @@
   
   def _convert_field(self, value, conversion, is_protected):
     if conversion == 'e':
-      return pipes.quote(str(value))
+      return quote_bash_args(str(value))
     elif conversion == 'h':
       return "[PROTECTED]" if is_protected else value
     elif conversion == 'p':
diff --git a/slider-agent/src/main/python/resource_management/libraries/functions/get_port_from_url.py b/slider-agent/src/main/python/resource_management/libraries/functions/get_port_from_url.py
new file mode 100644
index 0000000..15131f5
--- /dev/null
+++ b/slider-agent/src/main/python/resource_management/libraries/functions/get_port_from_url.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Slider Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.is_empty import *
+from resource_management.core.exceptions import Fail
+import re
+
+def get_port_from_url(address):
+  """
+  Return port from URL. If address is UnknownConfiguration,
+  UnknownConfiguration will be returned. If no port was found, Fail will be
+  raised.
+  """
+  if not is_empty(address):
+    port = re.findall(":([\d]{1,5})(?=/|$)", address)
+    if port:
+      return port[0]
+    raise Fail("No port in URL:{0}".format(address))
+  else:
+    return address
\ No newline at end of file
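Illustrative calls against get_port_from_url's documented behaviour:

  from resource_management.libraries.functions.get_port_from_url import get_port_from_url

  print(get_port_from_url("hdfs://namenode.example.com:8020"))    # 8020
  print(get_port_from_url("http://host.example.com:8088/ws/v1"))  # 8088
  # get_port_from_url("http://host.example.com/no-port")  -> raises Fail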
diff --git a/slider-agent/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py b/slider-agent/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
index d23d097..431b56c 100644
--- a/slider-agent/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
+++ b/slider-agent/src/main/python/resource_management/libraries/functions/get_unique_id_and_date.py
@@ -25,7 +25,7 @@
 from resource_management.core import shell
 
 def get_unique_id_and_date():
-    out = shell.checked_call("hostid")[1]
+    out = shell.checked_call("hostid")[1].split('\n')[-1]  # bugfix: keep only the last line (drops any "stdin: is not a tty" prefix)
     id = out.strip()
 
     now = datetime.datetime.now()
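What the split('\n')[-1] guards against, with the noisy output shape assumed:

  out = "stdin: is not a tty\n007f0101"
  print(out.split('\n')[-1].strip())  # 007f0101 -- only the real hostid survives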
diff --git a/slider-agent/src/main/python/resource_management/libraries/functions/os_check.py b/slider-agent/src/main/python/resource_management/libraries/functions/os_check.py
index 7a72bc8..abfceb8 100644
--- a/slider-agent/src/main/python/resource_management/libraries/functions/os_check.py
+++ b/slider-agent/src/main/python/resource_management/libraries/functions/os_check.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
 
 '''
 Licensed to the Apache Software Foundation (ASF) under one
@@ -26,11 +26,70 @@
     'OSCheck',
     ]
 
-class OSCheck(object):
-  def __init__(self):
-    pass
 
-  def get_os_type(self):
+def linux_distribution():
+  PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
+
+  if PYTHON_VER < 26:
+    linux_distribution = platform.dist()
+  else:
+    linux_distribution = platform.linux_distribution()
+
+  return linux_distribution
+
+
+class OS_CONST_TYPE(type):
+  # os families
+  REDHAT_FAMILY = 'redhat'
+  DEBIAN_FAMILY = 'debian'
+  SUSE_FAMILY = 'suse'
+
+  # Declare here os type mapping
+  OS_FAMILY_COLLECTION = [
+    {'name': REDHAT_FAMILY,
+     'os_list':
+       ['redhat', 'fedora', 'centos', 'oraclelinux',
+        'ascendos', 'amazon', 'xenserver', 'oel', 'ovs',
+        'cloudlinux', 'slc', 'scientific', 'psbm',
+        'centos linux']
+    },
+    {'name': DEBIAN_FAMILY,
+     'os_list': ['ubuntu', 'debian']
+    },
+    {'name': SUSE_FAMILY,
+     'os_list': ['sles', 'sled', 'opensuse', 'suse']
+    }
+  ]
+  # Would be generated from Family collection definition
+  OS_COLLECTION = []
+
+  def __init__(cls, name, bases, dct):
+    for item in cls.OS_FAMILY_COLLECTION:
+      cls.OS_COLLECTION += item['os_list']
+
+  def __getattr__(cls, name):
+    """
+      Added support of class.OS_<os_type> properties defined in OS_COLLECTION
+      Example:
+              OSConst.OS_CENTOS would return centos
+              OSConst.OS_OTHEROS would triger an error, coz
+               that os is not present in OS_FAMILY_COLLECTION map
+    """
+    name = name.lower()
+    if "os_" in name and name[3:] in cls.OS_COLLECTION:
+      return name[3:]
+    else:
+      raise Exception("Unknown class property '%s'" % name)
+
+
+class OSConst:
+  __metaclass__ = OS_CONST_TYPE
+
+
+class OSCheck:
+
+  @staticmethod
+  def get_os_type():
     """
     Return values:
     redhat, fedora, centos, oraclelinux, ascendos,
@@ -41,7 +100,7 @@
     """
     # Read content from /etc/*-release file
     # Full release name
-    dist = platform.linux_distribution()
+    dist = linux_distribution()
     operatingSystem = dist[0].lower()
 
     # special cases
@@ -55,31 +114,26 @@
     if operatingSystem != '':
       return operatingSystem
     else:
-      print "Cannot detect os type. Exiting..."
-      sys.exit(1)
+      raise Exception("Cannot detect os type. Exiting...")
 
-
-  def get_os_family(self):
+  @staticmethod
+  def get_os_family():
     """
     Return values:
     redhat, debian, suse ... and others
 
     In case cannot detect raises exception( from self.get_operating_system_type() ).
     """
-    os_family = self.get_os_type()
-    if os_family in ['redhat', 'fedora', 'centos', 'oraclelinux', 'ascendos',
-                     'amazon', 'xenserver', 'oel', 'ovs', 'cloudlinux',
-                     'slc', 'scientific', 'psbm', 'centos linux']:
-      os_family = 'RedHat'
-    elif os_family in ['ubuntu', 'debian']:
-      os_family = 'Debian'
-    elif os_family in ['sles', 'sled', 'opensuse', 'suse']:
-      os_family = 'Suse'
-    #else:  os_family = self.get_os_type()
+    os_family = OSCheck.get_os_type()
+    for os_family_item in OSConst.OS_FAMILY_COLLECTION:
+      if os_family in os_family_item['os_list']:
+        os_family = os_family_item['name']
+        break
+
     return os_family.lower()
 
-
-  def get_os_version(self):
+  @staticmethod
+  def get_os_version():
     """
     Returns the OS version
 
@@ -87,57 +141,81 @@
     """
     # Read content from /etc/*-release file
     # Full release name
-    dist = platform.linux_distribution()
+    dist = linux_distribution()
     dist = dist[1]
 
     if dist:
       return dist
     else:
-      print "Cannot detect os version. Exiting..."
-      sys.exit(1)
+      raise Exception("Cannot detect os version. Exiting...")
 
-  def get_os_major_version(self):
+  @staticmethod
+  def get_os_major_version():
     """
     Returns the main OS version like
     Centos 6.5 --> 6
     RedHat 1.2.3 --> 1
     """
-    return self.get_os_version().split('.')[0]
+    return OSCheck.get_os_version().split('.')[0]
 
-  def get_os_release_name(self):
+  @staticmethod
+  def get_os_release_name():
     """
     Returns the OS release name
 
     In case cannot detect raises exception.
     """
-    dist = platform.linux_distribution()
+    dist = linux_distribution()
     dist = dist[2].lower()
 
     if dist:
       return dist
     else:
-      print "Cannot detect os release name. Exiting..."
-      sys.exit(1)
+      raise Exception("Cannot detect os release name. Exiting...")
 
+  # Exception-safe family check functions
 
-def main(argv=None):
-  # Same logic that was in "os_type_check.sh"
-  if len(sys.argv) != 2:
-    print "Usage: <cluster_os>"
-    sys.exit(2)
-    pass
+  @staticmethod
+  def is_debian_family():
+    """
+     Return True if the current OS belongs to the Debian family, False otherwise.
 
-  cluster_os = sys.argv[1]
-  current_os = OSCheck().get_os_family() + OSCheck().get_os_major_version()
+     This check is exception-safe for the Debian family: it never raises.
+    """
+    try:
+      if OSCheck.get_os_family() == OSConst.DEBIAN_FAMILY:
+        return True
+    except Exception:
+      pass
+    return False
 
-  # If agent/server have the same {"family","main_version"} - then ok.
-  print "Cluster primary/cluster OS type is %s and local/current OS type is %s" % (
-    cluster_os, current_os)
-  if current_os == cluster_os:
-    sys.exit(0)
-  else:
-    print "Local OS is not compatible with cluster primary OS. Please perform manual bootstrap on this host."
-    sys.exit(1)
+  @staticmethod
+  def is_suse_family():
+    """
+     Return True if the current OS belongs to the SUSE family, False otherwise.
+
+     This check is exception-safe: it never raises.
+    """
+    try:
+      if OSCheck.get_os_family() == OSConst.SUSE_FAMILY:
+        return True
+    except Exception:
+      pass
+    return False
+
+  @staticmethod
+  def is_redhat_family():
+    """
+     Return True if the current OS belongs to the RedHat family, False otherwise.
+
+     This check is exception-safe: it never raises.
+    """
+    try:
+      if OSCheck.get_os_family() == OSConst.REDHAT_FAMILY:
+        return True
+    except Exception:
+      pass
+    return False
 
 
 if __name__ == "__main__":
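The OS_<name> constants above are never declared individually: the metaclass __getattr__ resolves any OS_* attribute against the flattened OS_COLLECTION list built from OS_FAMILY_COLLECTION. A cut-down Python 2 sketch of the same pattern, with a shortened collection for illustration:

    # Sketch mirroring OS_CONST_TYPE / OSConst above (not the real classes).
    class ConstType(type):
        COLLECTION = ['centos', 'ubuntu', 'sles']

        def __getattr__(cls, name):
            name = name.lower()
            if name.startswith("os_") and name[3:] in cls.COLLECTION:
                return name[3:]
            raise Exception("Unknown class property '%s'" % name)

    class Const(object):
        __metaclass__ = ConstType

    print Const.OS_CENTOS    # -> centos
    print Const.OS_UBUNTU    # -> ubuntu
    # Const.OS_BEOS          # would raise: not in the collection
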
diff --git a/slider-agent/src/main/python/resource_management/libraries/providers/copy_from_local.py b/slider-agent/src/main/python/resource_management/libraries/providers/copy_from_local.py
index 0dfecd7..f6cd496 100644
--- a/slider-agent/src/main/python/resource_management/libraries/providers/copy_from_local.py
+++ b/slider-agent/src/main/python/resource_management/libraries/providers/copy_from_local.py
@@ -63,7 +63,7 @@
     pass
 
     if mode:
-      dir_mode = oct(mode)
+      dir_mode = oct(mode)[1:]
       chmod_cmd = format('fs -chmod {dir_mode} {dest_path}')
 
       ExecuteHadoop(chmod_cmd,
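The [1:] slice is needed because Python 2's oct() renders the value with a leading zero, which is not the form hadoop fs -chmod expects. For example:

    mode = 0755           # Python 2 octal literal (493 decimal)
    print oct(mode)       # '0755'
    print oct(mode)[1:]   # '755' -- the form passed to 'fs -chmod'
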
diff --git a/slider-agent/src/main/python/resource_management/libraries/providers/execute_hadoop.py b/slider-agent/src/main/python/resource_management/libraries/providers/execute_hadoop.py
index 61b2ddb..cbc6668 100644
--- a/slider-agent/src/main/python/resource_management/libraries/providers/execute_hadoop.py
+++ b/slider-agent/src/main/python/resource_management/libraries/providers/execute_hadoop.py
@@ -32,7 +32,7 @@
     principal = self.resource.principal
     
     if isinstance(command, (list, tuple)):
-      command = ' '.join(pipes.quote(x) for x in command)
+      command = ' '.join(quote_bash_args(x) for x in command)
     
     with Environment.get_instance_copy() as env:
       if self.resource.security_enabled and not self.resource.kinit_override:
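This swaps the stdlib pipes.quote for the package's own quote_bash_args helper; both protect arguments containing shell metacharacters before they are joined into a single command line. The stdlib behaviour, for comparison:

    import pipes

    args = ["fs", "-put", "my file.txt", "/dest dir"]
    print ' '.join(pipes.quote(x) for x in args)
    # fs -put 'my file.txt' '/dest dir'
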
diff --git a/slider-agent/src/main/python/resource_management/libraries/script/hook.py b/slider-agent/src/main/python/resource_management/libraries/script/hook.py
index 19b5204..acf4872 100644
--- a/slider-agent/src/main/python/resource_management/libraries/script/hook.py
+++ b/slider-agent/src/main/python/resource_management/libraries/script/hook.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+#!/usr/bin/env python
 
 '''
 Licensed to the Apache Software Foundation (ASF) under one
@@ -45,10 +45,16 @@
     Runs custom hook
     """
     args = sys.argv
+    
     #Hook script to run
-    args[0] = args[0].replace(args[1], command)
+    args[0] = args[0].replace('before-'+args[1], command)
+    args[0] = args[0].replace('after-'+args[1], command)
+    
     #Hook script base directory
-    args[3] = args[3].replace(args[1], command)
+    args[3] = args[3].replace('before-'+args[1], command)
+    args[3] = args[3].replace('after-'+args[1], command)
+    
+    args[1] = command.split("-")[1]
 
 
     cmd = [sys.executable]
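The rewritten logic substitutes the full hook name (e.g. before-START) into both the script path and the hook base directory, then strips args[1] down to the bare command. A hypothetical walk-through with made-up paths:

    # Hypothetical inputs: argv was built for the before-INSTALL hook and
    # run_custom_hook() is asked to run before-START instead.
    command = "before-START"
    args = ["/hooks/before-INSTALL/scripts/hook.py", "INSTALL",
            "cmd.json", "/hooks/before-INSTALL", "out.txt", "INFO"]

    args[0] = args[0].replace('before-' + args[1], command)
    args[3] = args[3].replace('before-' + args[1], command)
    args[1] = command.split("-")[1]

    # args[0] -> /hooks/before-START/scripts/hook.py
    # args[3] -> /hooks/before-START
    # args[1] -> START
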
diff --git a/slider-agent/src/main/python/resource_management/libraries/script/script.py b/slider-agent/src/main/python/resource_management/libraries/script/script.py
index 12fda80..624d65e 100644
--- a/slider-agent/src/main/python/resource_management/libraries/script/script.py
+++ b/slider-agent/src/main/python/resource_management/libraries/script/script.py
@@ -33,6 +33,15 @@
 from resource_management.libraries.script.config_dictionary import ConfigDictionary
 from resource_management.libraries.script.repo_installer import RepoInstaller
 
+USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEVEL>
+
+<COMMAND> command type (INSTALL/CONFIGURE/START/STOP/SERVICE_CHECK...)
+<JSON_CONFIG> path to command json file. Ex: /var/lib/ambari-agent/data/command-2.json
+<BASEDIR> path to service metadata dir. Ex: /var/lib/ambari-agent/cache/stacks/HDP/2.0.6/services/HDFS
+<STROUTPUT> path to file with structured command output (file will be created). Ex: /tmp/my.txt
+<LOGGING_LEVEL> log level for stdout. Ex: DEBUG, INFO
+"""
+
 class Script(object):
   """
   Executes a command for custom service. stdout and stderr are written to
@@ -52,9 +61,8 @@
   def put_structured_out(self, sout):
     Script.structuredOut.update(sout)
     try:
-      structuredOut = json.dumps(Script.structuredOut)
       with open(self.stroutfile, 'w') as fp:
-        json.dump(structuredOut, fp)
+        json.dump(Script.structuredOut, fp)
     except IOError:
       Script.structuredOut.update({"errMsg" : "Unable to write to " + self.stroutfile})
 
@@ -75,15 +83,23 @@
     cherr.setFormatter(formatter)
     logger.addHandler(cherr)
     logger.addHandler(chout)
+    
     # parse arguments
-    if len(sys.argv) < 5:
-      logger.error("Script expects at least 4 arguments")
-      sys.exit(1)
+    if len(sys.argv) < 6:
+      logger.error("Script expects at least 5 arguments")
+      print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
+      sys.exit(1)
+    
     command_name = str.lower(sys.argv[1])
-    # parse command parameters
     command_data_file = sys.argv[2]
     basedir = sys.argv[3]
     self.stroutfile = sys.argv[4]
+    logging_level = sys.argv[5]
+    
+    logging_level_str = logging._levelNames[logging_level] # maps a level name such as "INFO" to its numeric value
+    chout.setLevel(logging_level_str)
+    logger.setLevel(logging_level_str)
+      
     try:
       with open(command_data_file, "r") as f:
         pass
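The new fifth argument is resolved through logging._levelNames, a private Python 2 map that translates in both directions between level names and numeric values, so the result can be handed straight to setLevel():

    import logging

    # Python 2 only; removed in Python 3 in favour of getLevelName().
    print logging._levelNames["INFO"]          # -> 20
    print logging._levelNames[logging.DEBUG]   # -> 'DEBUG'
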
diff --git a/slider-agent/src/main/python/setup.py b/slider-agent/src/main/python/setup.py
index d024dbe..421b5f9 100644
--- a/slider-agent/src/main/python/setup.py
+++ b/slider-agent/src/main/python/setup.py
@@ -17,15 +17,15 @@
 
 setup(
     name = "slider-agent",
-    version = "0.13.0-SNAPSHOT",
+    version = "0.31.0-incubating-SNAPSHOT",
     packages = ['agent'],
     # metadata for upload to PyPI
     author = "Apache Software Foundation",
-    author_email = "slider-dev@incubator.apache.org",
+    author_email = "dev@slider.incubator.apache.org",
     description = "Slider agent",
     license = "Apache License v2.0",
     keywords = "hadoop, slider",
-    url = "http://incubator.apache.org/slider",
+    url = "http://slider.incubator.apache.org/",
     long_description = "This package implements Slider for deploying and managing Apps on Yarn.",
     platforms=["any"],
     entry_points = {
diff --git a/slider-agent/src/packages/tarball/all.xml b/slider-agent/src/packages/tarball/all.xml
index a5b6ba0..72afb75 100644
--- a/slider-agent/src/packages/tarball/all.xml
+++ b/slider-agent/src/packages/tarball/all.xml
@@ -18,9 +18,10 @@
 <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
-  <!--This 'all' id is not appended to the produced bundle because we do this:
+  <!--This 'agent-all' id is not appended to the produced bundle because we do this:
     http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
   -->
+  <id>agent-all</id>
   <formats>
     <format>tar</format>
     <format>tar.gz</format>
diff --git a/slider-agent/src/test/python/agent/TestActionQueue.py b/slider-agent/src/test/python/agent/TestActionQueue.py
index 2e1e4cf..b3a840c 100644
--- a/slider-agent/src/test/python/agent/TestActionQueue.py
+++ b/slider-agent/src/test/python/agent/TestActionQueue.py
@@ -304,6 +304,7 @@
     def side_effect(py_file, script_params,
                     tmpoutfile, tmperrfile, timeout,
                     tmpstrucoutfile,
+                    loglevel,
                     override_output_files,
                     environment_vars):
       unfreeze_flag.wait()
diff --git a/slider-agent/src/test/python/agent/TestController.py b/slider-agent/src/test/python/agent/TestController.py
index 8dc7458..939e63f 100644
--- a/slider-agent/src/test/python/agent/TestController.py
+++ b/slider-agent/src/test/python/agent/TestController.py
@@ -262,7 +262,9 @@
     data = "data"
     requestMock.return_value = conMock
 
-    self.assertEqual("response", self.controller.sendRequest(url, data))
+    expected = {'exitstatus': 1, 'log': 'Request failed! Data: ' + data}
+
+    self.assertEqual(expected, self.controller.sendRequest(url, data))
     requestMock.called_once_with(url, data,
       {'Content-Type': 'application/json'})
 
@@ -557,6 +559,36 @@
     #Controller thread and the agent stop if the repeatRegistration flag is False
     self.assertFalse(self.controller.repeatRegistration)
 
+  @patch("time.sleep")
+  def test_debugSetupForRegister(self, sleepMock):
+    original_value = self.controller.config
+    self.controller.config = AgentConfig("", "")
+    self.controller.config.set(AgentConfig.AGENT_SECTION, AgentConfig.DEBUG_MODE_ENABLED, "true")
+    self.controller.processDebugCommandForRegister()
+    self.controller.processDebugCommandForHeartbeat()
+    assert not sleepMock.called, 'sleep should not have been called'
+
+    self.controller.config.set(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD, "DO_NOT_RERISTER")
+    self.controller.config.set(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD, "DO_NOT_HEARTBEET")
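+    # The two command values above are intentionally misspelled: unrecognized
+    # debug commands must be ignored, so sleep must still not be called.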
+    self.controller.processDebugCommandForRegister()
+    self.controller.processDebugCommandForHeartbeat()
+    assert not sleepMock.called, 'sleep should not have been called'
+
+    self.controller.config.set(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD, "DO_NOT_REGISTER")
+    self.controller.processDebugCommandForRegister()
+    assert sleepMock.called, 'sleep should have been called'
+
+    self.controller.processDebugCommandForHeartbeat()
+    assert sleepMock.call_count == 1, 'sleep should have been called once'
+
+    self.controller.config.set(AgentConfig.AGENT_SECTION, AgentConfig.APP_DBG_CMD, "DO_NOT_HEARTBEAT")
+    self.controller.processDebugCommandForHeartbeat()
+    assert sleepMock.call_count == 2, 'sleep should have been called twice'
+
+    self.controller.config = original_value
+    pass
+
+
 if __name__ == "__main__":
   logging.basicConfig(format='%(asctime)s %(message)s',level=logging.DEBUG)
   unittest.main()
diff --git a/slider-agent/src/test/python/agent/TestCustomServiceOrchestrator.py b/slider-agent/src/test/python/agent/TestCustomServiceOrchestrator.py
index 6f20db9..d2439b1 100644
--- a/slider-agent/src/test/python/agent/TestCustomServiceOrchestrator.py
+++ b/slider-agent/src/test/python/agent/TestCustomServiceOrchestrator.py
@@ -152,7 +152,7 @@
     }
     ret = orchestrator.runCommand(command, "out.txt", "err.txt")
     ## Check that override_output_files was true only during first call
-    self.assertEquals(run_file_mock.call_args_list[0][0][6], True)
+    self.assertEquals(run_file_mock.call_args_list[0][0][7], True)
 
     run_file_mock.reset_mock()
     # Case when we force another command
diff --git a/slider-agent/src/test/python/agent/TestMain.py b/slider-agent/src/test/python/agent/TestMain.py
index 5273623..9ef1cad 100644
--- a/slider-agent/src/test/python/agent/TestMain.py
+++ b/slider-agent/src/test/python/agent/TestMain.py
@@ -259,11 +259,13 @@
     self.assertTrue(start_mock.called)
 
   class AgentOptions:
-      def __init__(self, label, host, port, verbose):
+      def __init__(self, label, host, port, secured_port, verbose, debug):
           self.label = label
           self.host = host
           self.port = port
+          self.secured_port = secured_port
           self.verbose = verbose
+          self.debug = debug
 
   @patch.object(main, "setup_logging")
   @patch.object(main, "bind_signal_handlers")
@@ -289,16 +291,17 @@
       Controller_init_mock.return_value = None
       isAlive_mock.return_value = False
       parse_args_mock.return_value = (
-          TestMain.AgentOptions("agent", "host1", "8080", True), [])
+          TestMain.AgentOptions("agent", "host1", "8080", "8081", True, ""), [])
       tmpdir = tempfile.gettempdir()
 
       #testing call without command-line arguments
       os.environ["AGENT_WORK_ROOT"] = os.path.join(tmpdir, "work")
       os.environ["AGENT_LOG_ROOT"] = os.path.join(tmpdir, "log")
       main.main()
-      self.assertTrue(AgentConfig_set_mock.call_count == 2)
+      self.assertTrue(AgentConfig_set_mock.call_count == 4)
       AgentConfig_set_mock.assert_any_call("server", "hostname", "host1")
       AgentConfig_set_mock.assert_any_call("server", "port", "8080")
+      AgentConfig_set_mock.assert_any_call("server", "secured_port", "8081")
 
 
 if __name__ == "__main__":
diff --git a/slider-agent/src/test/python/agent/TestNetUtil.py b/slider-agent/src/test/python/agent/TestNetUtil.py
index c19ec19..550e148 100644
--- a/slider-agent/src/test/python/agent/TestNetUtil.py
+++ b/slider-agent/src/test/python/agent/TestNetUtil.py
@@ -25,7 +25,7 @@
 class TestNetUtil(unittest.TestCase):
 
   @patch("urlparse.urlparse")
-  @patch("httplib.HTTPConnection")
+  @patch("httplib.HTTPSConnection")
   def test_checkURL(self, httpsConMock, parseMock):
 
     NetUtil.logger = MagicMock()
diff --git a/slider-agent/src/test/python/agent/TestPythonExecutor.py b/slider-agent/src/test/python/agent/TestPythonExecutor.py
index 7cec409..1b12a0a 100644
--- a/slider-agent/src/test/python/agent/TestPythonExecutor.py
+++ b/slider-agent/src/test/python/agent/TestPythonExecutor.py
@@ -57,7 +57,7 @@
     executor.runShellKillPgrp = runShellKillPgrp_method
     subproc_mock.returncode = None
     thread = Thread(target =  executor.run_file, args = ("fake_puppetFile",
-      ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstrucout))
+      ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstrucout,"INFO"))
     thread.start()
     time.sleep(0.1)
     subproc_mock.finished_event.wait()
@@ -87,7 +87,7 @@
     subproc_mock.returncode = 0
     thread = Thread(target =  executor.run_file, args = ("fake_puppetFile", ["arg1", "arg2"],
                                                       tmpoutfile, tmperrfile,
-                                                      PYTHON_TIMEOUT_SECONDS, tmpstrucout))
+                                                      PYTHON_TIMEOUT_SECONDS, tmpstrucout, "INFO"))
     thread.start()
     time.sleep(0.1)
     subproc_mock.should_finish_event.set()
@@ -103,7 +103,7 @@
     executor = PythonExecutor("/tmp", AgentConfig("", ""))
     environment_vars = [("PYTHONPATH", "a:b")]
     os_env_copy_mock.return_value = actual_vars
-    executor.run_file("script.pynot", ["a","b"], "", "", 10, "", True, environment_vars)
+    executor.run_file("script.pynot", ["a","b"], "", "", 10, "", "INFO", True, environment_vars)
     self.assertEquals(2, len(os_env_copy_mock.return_value))
 
   def test_execution_results(self):
@@ -124,9 +124,9 @@
     executor.runShellKillPgrp = runShellKillPgrp_method
     subproc_mock.returncode = 0
     subproc_mock.should_finish_event.set()
-    result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstroutfile)
+    result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstroutfile, "INFO")
     self.assertEquals(result, {'exitcode': 0, 'stderr': 'Dummy err', 'stdout': 'Dummy output',
-                               'structuredOut': {'msg': 'Unable to read structured output from ' + tmpstroutfile}})
+                               'structuredOut': {}})
 
 
   def test_is_successfull(self):
@@ -145,7 +145,7 @@
     executor = PythonExecutor("/tmp", AgentConfig("", ""))
     command = executor.python_command("script", ["script_param1"])
     self.assertEqual(4, len(command))
-    self.assertTrue("python" in command[0])
+    self.assertTrue("python" in command[0].lower(), "Looking for python in %s" % (command[0].lower()))
     self.assertEquals("-S", command[1])
     self.assertEquals("script", command[2])
     self.assertEquals("script_param1", command[3])
diff --git a/slider-agent/src/test/python/python-wrap b/slider-agent/src/test/python/python-wrap
new file mode 100755
index 0000000..40dc785
--- /dev/null
+++ b/slider-agent/src/test/python/python-wrap
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export PYTHONPATH=/usr/lib/python2.6/site-packages/common_functions:$PYTHONPATH
+
+# reset settings
+unset PYTHON
+
+# checking for preferable python versions
+if [ -a /usr/bin/python2.7 ] && [ -z "$PYTHON" ]; then
+  PYTHON=/usr/bin/python2.7
+fi
+
+if [ -a /usr/bin/python2.6 ] && [ -z "$PYTHON" ]; then
+  PYTHON=/usr/bin/python2.6
+fi
+
+# if no preferable python versions found, try to use system one
+if [[ -z "$PYTHON" ]]; then
+  PYTHON=/usr/bin/python
+fi
+
+# execute script
+$PYTHON "$@"
diff --git a/slider-agent/src/test/python/resource_management/TestLibraryFunctions.py b/slider-agent/src/test/python/resource_management/TestLibraryFunctions.py
new file mode 100644
index 0000000..4e6b6c3
--- /dev/null
+++ b/slider-agent/src/test/python/resource_management/TestLibraryFunctions.py
@@ -0,0 +1,33 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.core.exceptions import Fail
+class TestLibraryFunctions(TestCase):
+
+  def test_get_port_from_url(self):
+    self.assertEqual("8080",get_port_from_url("protocol://host:8080"))
+    self.assertEqual("8080",get_port_from_url("protocol://host:8080/"))
+    self.assertEqual("8080",get_port_from_url("host:8080"))
+    self.assertEqual("8080",get_port_from_url("host:8080/"))
+    self.assertEqual("8080",get_port_from_url("host:8080/dots_in_url8888:"))
+    self.assertEqual("8080",get_port_from_url("protocol://host:8080/dots_in_url8888:"))
+    self.assertEqual("8080",get_port_from_url("127.0.0.1:8080"))
+    self.assertRaises(Fail, get_port_from_url, "http://host/no_port")
+    self.assertRaises(Fail, get_port_from_url, "127.0.0.1:808080")
diff --git a/slider-agent/src/test/python/resource_management/TestPackageResource.py b/slider-agent/src/test/python/resource_management/TestPackageResource.py
index 02aa3fb..af1f9cf 100644
--- a/slider-agent/src/test/python/resource_management/TestPackageResource.py
+++ b/slider-agent/src/test/python/resource_management/TestPackageResource.py
@@ -36,7 +36,7 @@
       Package("some_package",
       )
     call_mock.assert_called_with('dpkg --get-selections some_package | grep -v deinstall')    
-    shell_mock.assert_called_with("/usr/bin/apt-get --assume-yes install some_package")
+    shell_mock.assert_called_with("/usr/bin/apt-get --force-yes --assume-yes install some_package")
 
 
   @patch.object(shell, "call")
diff --git a/slider-agent/src/test/python/resource_management/TestPropertiesFileResource.py b/slider-agent/src/test/python/resource_management/TestPropertiesFileResource.py
index 79aef58..6eb01cf 100644
--- a/slider-agent/src/test/python/resource_management/TestPropertiesFileResource.py
+++ b/slider-agent/src/test/python/resource_management/TestPropertiesFileResource.py
@@ -29,7 +29,7 @@
 from resource_management.libraries import PropertiesFile
 
 @patch.object(System, "os_family", new='redhat')
-class TestPropertiesFIleResource(TestCase):
+class TestPropertiesFileResource(TestCase):
   """
   PropertiesFile="resource_management.libraries.providers.properties_file.PropertiesFileProvider"
   Testing PropertiesFile(PropertiesFileProvider) with different 'properties dictionary'
diff --git a/slider-assembly/pom.xml b/slider-assembly/pom.xml
index b1beea2..0ec91dd 100644
--- a/slider-assembly/pom.xml
+++ b/slider-assembly/pom.xml
@@ -17,20 +17,29 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-assembly</artifactId>
-  <version>0.30</version>
   <name>Slider Assembly</name>
   <packaging>pom</packaging>
-  <description>
-    
-    This is the module that does the packaging and shell entry
-    points for Slider
-  </description>
+  <description>Apache Slider is a tool for dynamically deploying distributed applications to an Apache YARN cluster</description>
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
   </parent>
 
+
+  <properties>
+    <rpm.basedir>/usr/lib/slider</rpm.basedir>
+    <rpm.confdir>${rpm.basedir}/conf</rpm.confdir>
+    <rpm.bindir>${rpm.basedir}/bin</rpm.bindir>
+    <rpm.libdir>${rpm.basedir}/lib</rpm.libdir>
+    <rpm.agentdir>${rpm.basedir}/agent</rpm.agentdir>
+    <rpm.username>mapred</rpm.username>
+    <rpm.groupname>hadoop</rpm.groupname>
+    <src.confdir>src/conf-hdp</src.confdir>
+    <src.libdir>${project.build.directory}/lib</src.libdir>
+    <src.agent.ini.dir>${project.build.directory}/../../slider-agent/conf</src.agent.ini.dir>
+  </properties>
+
   <build>
     <plugins>
       <!--read in a build.properties file if defined-->
@@ -75,16 +84,8 @@
   <!--            <tarLongFileFormat>gnu</tarLongFileFormat>-->
             </configuration>
           </execution>
-        </executions>
-      </plugin>
-
-      <!-- copy in the agent tar file -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>${maven-dependency-plugin.version}</version>
-        <executions>
           <execution>
+            <!-- copy in the agent tar file -->
             <id>copy</id>
             <phase>package</phase>
             <goals>
@@ -126,6 +127,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-enforcer-plugin</artifactId>
+        <version>${maven-enforcer-plugin.version}</version>
         <inherited>false</inherited>
         <configuration>
           <rules>
@@ -185,6 +187,7 @@
         </executions>
       </plugin>
 
+ 
     </plugins>
     
     
@@ -198,6 +201,168 @@
     </plugins>
   </reporting>
 
+  <profiles>
+    <profile>
+      <id>rpm</id>
+      <build>
+        <plugins>
+          <!-- RPM -->
+
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>rpm-maven-plugin</artifactId>
+            <version>${maven-rpm-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>generate-rpm</id>
+                <goals>
+                  <goal>rpm</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <license>http://www.apache.org/licenses/LICENSE-2.0</license>
+              <name>slider</name>
+              <version>${project.version}</version>
+<!--
+              <release>${project.version}</release>
+-->
+              <!---->
+              <distribution></distribution>
+              <group>Applications/Engineering</group>
+              <!--<icon>src/main/resources/icon.gif</icon>-->
+              <!--<packager>ASF</packager>-->
+              <prefix>${rpm.basedir}</prefix>
+              <!--
+                        <changelogFile>src/changelog</changelogFile>
+              -->
+              <!--          <defineStatements>
+                          <defineStatement>_unpackaged_files_terminate_build 0</defineStatement>
+                        </defineStatements>-->
+              <provides>
+                <provide>apache-slider</provide>
+              </provides>
+              <mappings>
+                <mapping>
+                  <directory>${rpm.basedir}</directory>
+                  <!-- RW.R..R.. -->
+                  <filemode>644</filemode>
+
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+
+                  </sources>
+                </mapping>
+
+                <!-- binaries -->
+                <mapping>
+                  <directory>${rpm.bindir}</directory>
+                  <!-- RWXR.XR.X -->
+                  <filemode>0755</filemode>
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+                    <source>
+                      <location>src/main/scripts</location>
+                    </source>
+                  </sources>
+                </mapping>
+
+                <!-- library -->
+                <mapping>
+                  <directory>${rpm.libdir}</directory>
+                  <!-- RW.R..R.. -->
+                  <filemode>644</filemode>
+
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+                    <source>
+                      <location>${src.libdir}</location>
+                    </source>
+                  </sources>
+                </mapping>
+
+                <!-- configuration -->
+                <mapping>
+                  <directory>${rpm.confdir}</directory>
+                  <configuration>true</configuration>
+                  <filemode>0755</filemode>
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+                    <source>
+                      <location>${src.confdir}</location>
+                    </source>
+                  </sources>
+                </mapping>
+
+                <!-- agent -->
+                <mapping>
+                  <directory>${rpm.agentdir}</directory>
+                  <configuration>true</configuration>
+                  <filemode>0755</filemode>
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+                    <source>
+                      <location>${project.build.directory}/agent</location>
+                      <includes>
+                        <include>slider-agent.tar.gz</include>
+                      </includes>
+                    </source>
+                  </sources>
+                </mapping>
+
+                <!-- agent.ini -->
+                <mapping>
+                  <directory>${rpm.agentdir}/conf</directory>
+                  <configuration>true</configuration>
+                  <filemode>0755</filemode>
+                  <username>${rpm.username}</username>
+                  <groupname>${rpm.groupname}</groupname>
+                  <sources>
+                    <source>
+                      <location>${src.agent.ini.dir}</location>
+                      <includes>
+                        <include>*.ini</include>
+                      </includes>
+                    </source>
+                  </sources>
+                </mapping>
+              </mappings>
+              <!--
+              Scripts. Very dangerous in RPMs unless you know exactly what you are doing.
+              It's very easy to break the uninstall process, in particular.
+              -->
+<!--
+              <preinstallScriptlet>
+                <script>echo "installing slider"</script>
+              </preinstallScriptlet>
+-->
+              <!--
+                        <postinstallScriptlet>
+                          <scriptFile>src/main/scripts/postinstall</scriptFile>
+                          <fileEncoding>utf-8</fileEncoding>
+                        </postinstallScriptlet>
+              -->
+              <!--
+                        <preremoveScriptlet>
+                          <scriptFile>src/main/scripts/preremove</scriptFile>
+                          <fileEncoding>utf-8</fileEncoding>
+                        </preremoveScriptlet>
+              -->
+            </configuration>
+          </plugin>
+
+        </plugins>
+      </build>
+    </profile>
+
+  </profiles>
+
   <dependencies>
 
     <dependency>
@@ -227,7 +392,6 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
-      <type>pom</type>
     </dependency>
  
   </dependencies>
diff --git a/slider-assembly/src/conf-hdp/log4j.properties b/slider-assembly/src/conf-hdp/log4j.properties
new file mode 100644
index 0000000..3c0d08c
--- /dev/null
+++ b/slider-assembly/src/conf-hdp/log4j.properties
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshhold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+
+# log layout skips stack-trace creation operations by avoiding line numbers and method
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+# debug edition is much more expensive
+#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess
+
+# for debugging Slider
+#log4j.logger.org.apache.slider=DEBUG
+#log4j.logger.org.apache.slider=DEBUG
+
+# uncomment to debug service lifecycle issues
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+
+# uncomment for YARN operations
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+# uncomment this to debug security problems
+#log4j.logger.org.apache.hadoop.security=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
diff --git a/slider-assembly/src/conf-hdp/slider-client.xml b/slider-assembly/src/conf-hdp/slider-client.xml
new file mode 100644
index 0000000..f844106
--- /dev/null
+++ b/slider-assembly/src/conf-hdp/slider-client.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!--
+  Properties set here are picked up in the client.
+  They are not passed to the AM -though the filesystem
+  binding details (URL And principal) are added to the
+  hbase-site.xml file when a cluster is created.
+-->
+<configuration>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+  
+
+  <property>
+    <name>slider.yarn.queue</name>
+    <value>default</value>
+    <description>YARN queue for the Application Master</description>
+  </property>
+
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>
+      /etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
+    </value>
+  </property>
+  
+<!--
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>master:8032</value>
+  </property>
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://master:9090</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.principal</name>
+    <value>yarn/master@MINICLUSTER</value>
+  </property>
+
+  <property>
+    <name>slider.security.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>hdfs/master@MINICLUSTER</value>
+  </property>
+-->
+
+
+</configuration>
diff --git a/slider-assembly/src/conf/slider-client.xml b/slider-assembly/src/conf/slider-client.xml
index d0c8f69..bd17254 100644
--- a/slider-assembly/src/conf/slider-client.xml
+++ b/slider-assembly/src/conf/slider-client.xml
@@ -19,9 +19,6 @@
 
 <!--
   Properties set here are picked up in the client.
-  They are not passed to the AM -though the filesystem
-  binding details (URL And principal) are added to the
-  hbase-site.xml file when a cluster is created.
 -->
 <configuration>
   <property>
diff --git a/slider-assembly/src/main/scripts/slider b/slider-assembly/src/main/scripts/slider
index e9522cf..caf275b 100755
--- a/slider-assembly/src/main/scripts/slider
+++ b/slider-assembly/src/main/scripts/slider
@@ -32,7 +32,6 @@
 this="${BASH_SOURCE-$0}"
 bindir=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
 script="$(basename -- "$this")"
-this="$bin/$script"
 
 # lib directory is one up; it is expected to contain 
 # slider.jar and any other dependencies that are not in the
diff --git a/slider-assembly/src/main/scripts/slider.py b/slider-assembly/src/main/scripts/slider.py
new file mode 100644
index 0000000..e60bed3
--- /dev/null
+++ b/slider-assembly/src/main/scripts/slider.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+import os
+import subprocess
+
+CONF = "conf"
+
+LIB = "lib"
+
+SLIDER_CONF_DIR = "SLIDER_CONF_DIR"
+SLIDER_JVM_OPTS = "SLIDER_JVM_OPTS"
+SLIDER_CLASSPATH_EXTRA = "SLIDER_CLASSPATH_EXTRA"
+
+SLIDER_CLASSNAME = "org.apache.slider.Slider"
+DEFAULT_JVM__OPTS = "-Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Xmx256m -Djava.confdir=%s"
+
+"""
+Launches slider
+
+
+"""
+
+
+
+def scriptDir():
+  """ 
+  get the script path
+  """
+  return os.path.dirname(os.path.realpath(__file__))
+
+def sliderDir():
+  return os.path.dirname(scriptDir())
+
+def libDir(sliderdir) :
+  return os.path.join(sliderdir, LIB)
+
+def confDir(sliderdir):
+  """
+  determine the active configuration directory 
+  :param sliderdir: slider directory 
+  :return: the configuration directory; the SLIDER_CONF_DIR environment
+  variable overrides the relative path
+  """
+  localconf = os.path.join(sliderdir, CONF)
+  return os.environ.get(SLIDER_CONF_DIR,localconf) 
+
+def dirMustExist(dirname):
+  if not os.path.exists(dirname):
+    raise Exception("Directory does not exist: %s " % dirname)
+  return dirname
+
+def read(pipe, line):
+  """
+  read a char, append to the listing if there is a char that is not \n
+  :param pipe: pipe to read from 
+  :param line: line being built up
+  :return: (the potentially updated line, flag indicating newline reached)
+  """
+
+  c = pipe.read(1)
+  if c != "":
+    o = c.decode('utf-8')
+    if o != '\n':
+      line += o
+      return line, False
+    else:
+      return line, True
+  else:
+    return line, False
+
+
+def runProcess(commandline):
+  """
+  Run a process
+  :param commandline: command line 
+  :return:the return code
+  """
+  print "ready to exec : %s" % commandline
+  exe = subprocess.Popen(commandline,
+                         stdin=None,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         shell=False)
+  stdout = exe.stdout
+  stderr = exe.stderr
+  outline = ""
+  errline = ""
+  while exe.poll() is None:
+    # process is running; grab output and echo every line
+    outline, done = read(stdout, outline)
+    if done:
+      print outline
+      outline = ""
+    errline, done = read(stderr, errline)
+    if done:
+      print errline
+      errline = ""
+
+  # get tail
+  out, err = exe.communicate()
+  print outline + out.decode()
+  print errline + err.decode()
+  return exe.returncode
+
+
+def java(classname, args, classpath, jvm_opts_list):
+  """
+  Execute a java process, hooking up stdout and stderr
+  and printing them a line at a time as they come in
+  :param classname: classname
+  :param args:  arguments to the java program
+  :param classpath: classpath
+  :param jvm_opts_list: list of JVM options
+  :return: the exit code.
+  """
+  # split the JVM opts by space
+  # java = "/usr/bin/java"
+  commandline = ["java"]
+  commandline.extend(jvm_opts_list)
+  commandline.append("-classpath")
+  commandline.append(classpath)
+  commandline.append(classname)
+  commandline.extend(args)
+  return runProcess(commandline)
+
+
+def usage():
+  print "Usage: slider <action> <arguments>"
+  return 1
+
+
+def main():
+  """
+  Slider main method
+  :return: exit code of the process
+  """
+  if len(sys.argv) == 1:
+    return usage()
+  # print "stdout encoding: "+ sys.stdout.encoding
+  args = sys.argv[1:]
+  slider_home = sliderDir()
+  libdir = dirMustExist(libDir(slider_home))
+  confdir = dirMustExist(confDir(slider_home))
+  default_jvm_opts = DEFAULT_JVM__OPTS % confdir
+  slider_jvm_opts = os.environ.get(SLIDER_JVM_OPTS, default_jvm_opts)
+  jvm_opts_split = slider_jvm_opts.split()
+  slider_classpath_extra = os.environ.get(SLIDER_CLASSPATH_EXTRA, "")
+  p = os.pathsep    # path separator
+  d = os.sep        # dir separator
+  slider_classpath = libdir + d + "*" + p \
+                     + confdir + p \
+                     + slider_classpath_extra 
+                     
+
+  print "slider_home = \"%s\"" % slider_home
+  print "slider_jvm_opts = \"%s\"" % slider_jvm_opts
+  print "slider_classpath = \"%s\"" % slider_classpath
+
+  return java(SLIDER_CLASSNAME,
+              args,
+              slider_classpath,
+              jvm_opts_split)
+
+if __name__ == '__main__':
+  """
+  Entry point
+  """
+  try:
+    returncode = main()
+  except Exception as e:
+    print "Exception: %s " % e.message
+    returncode = -1
+  
+  sys.exit(returncode)
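For reference, a hypothetical call to the java() helper above; the classpath and JVM options are placeholders, since the real values are assembled in main() from the installation layout and environment:

    # Hypothetical usage of java(); paths and options are illustrative only.
    rc = java("org.apache.slider.Slider",
              ["version"],
              "/usr/lib/slider/lib/*" + os.pathsep + "/usr/lib/slider/conf",
              ["-Xmx256m", "-Djava.net.preferIPv4Stack=true"])
    print "exit code: %d" % rc
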
diff --git a/slider-core/pom.xml b/slider-core/pom.xml
index 925eb44..95a7e71 100644
--- a/slider-core/pom.xml
+++ b/slider-core/pom.xml
@@ -18,13 +18,12 @@
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-core</artifactId>
   <name>Slider Core</name>
-  <version>0.30</version>
   <packaging>jar</packaging>
   <description>Core Slider Module</description>
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
   </parent>
 
   <build>
@@ -159,77 +158,6 @@
       </plugin>
 
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <version>${hadoop.version}</version>
-
-        <executions>
-          <execution>
-            <id>version-info</id>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>version-info</goal>
-            </goals>
-            <configuration>
-              <source>
-                <directory>${basedir}/src/main</directory>
-                <includes>
-                  <include>java/**/*.java</include>
-                  <include>proto/**/*.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-protoc</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>protoc</protocCommand>
-              <imports>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>SliderClusterMessages.proto</include>
-                  <include>SliderClusterProtocol.proto</include>
-                </includes>
-              </source>
-              <output>${project.build.directory}/generated-sources/java</output>
-            </configuration>
-          </execution>
-          <!--
-                    <execution>
-                      <id>compile-test-protoc</id>
-                      <phase>generate-test-sources</phase>
-                      <goals>
-                        <goal>protoc</goal>
-                      </goals>
-                      <configuration>
-                        <protocVersion>${protobuf.version}</protocVersion>
-                        <protocCommand>${protoc.path}</protocCommand>
-                        <imports>
-                          <param>${basedir}/src/test/proto</param>
-                        </imports>
-                        <source>
-                          <directory>${basedir}/src/test/proto</directory>
-                          <includes>
-                            <include>test.proto</include>
-                            <include>test_rpc_service.proto</include>
-                          </includes>
-                        </source>
-                        <output>${project.build.directory}/generated-test-sources/java
-                        </output>
-                      </configuration>
-                    </execution>
-          -->
-        </executions>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-site-plugin</artifactId>
         <version>${maven-site-plugin.version}</version>
@@ -287,6 +215,7 @@
             <exclude>src/test/python/agent.ini</exclude>
             <exclude>src/test/python/version</exclude>
             <exclude>src/main/resources/webapps/slideram/.keep</exclude>
+            <exclude>src/main/resources/webapps/slideragent/.keep</exclude>
             <exclude>src/main/resources/webapps/static/yarn.dt.plugins.js</exclude>
             <!-- jQuery DataTables files (BSD license) -->
             <exclude>src/main/resources/webapps/static/dt-1.9.4/**</exclude>
@@ -297,6 +226,9 @@
             <exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
             <!-- jQuery jsTree (MIT license) -->
             <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
+            <!-- protobuf generated classes -->
+            <exclude>src/main/java/org/apache/slider/api/proto/Messages.java</exclude>
+            <exclude>src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -322,11 +254,6 @@
     </dependency>
 
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
-
-    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
       <scope>runtime</scope>
@@ -369,7 +296,20 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
+      <artifactId>hadoop-client</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-web-proxy</artifactId>
+      <scope>compile</scope>
     </dependency>
 
     <dependency>
@@ -377,48 +317,6 @@
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>
     </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-common</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-common</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-client</artifactId>
-      <type>pom</type>
-        <exclusions>
-          <exclusion>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-          </exclusion>
-        </exclusions>
-    </dependency>
 <!--
 
     <dependency>
@@ -474,8 +372,13 @@
     </dependency>
 
     <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-compress</artifactId>
     </dependency>
 
     <dependency>
@@ -483,6 +386,26 @@
       <artifactId>commons-digester</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+
     <!-- ======================================================== -->
     <!-- service registry -->
     <!-- ======================================================== -->
@@ -490,25 +413,21 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-client</artifactId>
-      <version>${curator.version}</version>
     </dependency>
 
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-framework</artifactId>
-      <version>${curator.version}</version>
     </dependency>
 
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-x-discovery</artifactId>
-      <version>${curator.version}</version>
     </dependency>
 
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-x-discovery-server</artifactId>
-      <version>${curator.version}</version>
     </dependency>
 
     <dependency>
@@ -521,10 +440,20 @@
     <!-- ======================================================== -->
 
     <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-core</artifactId>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
     </dependency>
-    
+
+    <dependency>
+      <groupId>javax.ws.rs</groupId>
+      <artifactId>jsr311-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-client</artifactId>
+    </dependency>
+
     <dependency>
       <groupId>com.sun.jersey</groupId>
       <artifactId>jersey-json</artifactId>
@@ -560,10 +489,11 @@
         <artifactId>jersey-test-framework-core</artifactId>
         <scope>test</scope>
     </dependency>
-    
+
     <dependency>
-        <groupId>com.sun.jersey.jersey-test-framework</groupId>
-        <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
     </dependency>
 
     <dependency>
@@ -577,25 +507,11 @@
       <artifactId>easymock</artifactId>
       <scope>test</scope>
     </dependency>
+
     <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-reflect</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-api-easymock</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-module-junit4</artifactId>
-      <scope>test</scope>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-sslengine</artifactId>
+      <scope>compile</scope>
     </dependency>
 
   </dependencies>
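(The Curator entries above now omit their explicit ${curator.version}; presumably those versions are pinned once in a <dependencyManagement> section of this POM or its parent, so each <dependency> inherits a single consistent version instead of repeating it. The same cleanup gives jersey-test-framework-grizzly2 the test scope it was missing.)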
@@ -603,6 +519,43 @@
 
   <profiles>
 
+    <profile>
+      <id>compile-protobuf</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <version>${hadoop.version}</version>
+
+            <executions>
+              <execution>
+                <id>compile-protoc</id>
+                <phase>generate-sources</phase>
+                <goals>
+                  <goal>protoc</goal>
+                </goals>
+                <configuration>
+                  <protocVersion>${protobuf.version}</protocVersion>
+                  <protocCommand>protoc</protocCommand>
+                  <imports>
+                    <param>${basedir}/src/main/proto</param>
+                  </imports>
+                  <source>
+                    <directory>${basedir}/src/main/proto</directory>
+                    <includes>
+                      <include>SliderClusterMessages.proto</include>
+                      <include>SliderClusterProtocol.proto</include>
+                    </includes>
+                  </source>
+                  <output>${basedir}/src/main/java</output>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
 
     <!--
     a test run, currently hard-coded for stevel's secure
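(The compile-protobuf profile added above hooks the hadoop-maven-plugins protoc goal into the generate-sources phase, regenerating the Java sources from SliderClusterMessages.proto and SliderClusterProtocol.proto into src/main/java. Because protocCommand is plain "protoc", a binary matching ${protobuf.version} presumably has to be on the PATH; regenerating is then a matter of running something like mvn generate-sources -Pcompile-protobuf. The generated output is checked in -- see Messages.java below -- so ordinary builds never need protoc.)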
diff --git a/slider-core/src/main/java/org/apache/slider/api/StatusKeys.java b/slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
index 709c137..4bfcf41 100644
--- a/slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
+++ b/slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
@@ -67,4 +67,7 @@
   String INFO_AM_RPC_PORT = "info.am.rpc.port";
   String INFO_AM_WEB_PORT = "info.am.web.port";
   String INFO_AM_WEB_URL = "info.am.web.url";
+  String INFO_AM_AGENT_PORT = "info.am.agent.port";
+  String INFO_AM_AGENT_URL = "info.am.agent.url";
+  String INFO_AM_SECURED_AGENT_PORT = "info.am.agent.secured.port";
 }
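(The three new keys extend the existing info.am.* family so that clients can find the agent endpoints in the same status document that already carries the RPC and web ports. A minimal sketch of reading them, assuming the "info" section is exposed as a plain Map<String, String> -- the map, the class name, and the port values here are invented for illustration, not Slider API:

  import java.util.HashMap;
  import java.util.Map;

  public class AgentInfoSketch {
    public static void main(String[] args) {
      // Hypothetical stand-in for the "info" section of a cluster status
      // document; the keys match the StatusKeys constants added above.
      Map<String, String> info = new HashMap<String, String>();
      info.put("info.am.agent.url", "http://am.example.com:8670/");
      info.put("info.am.agent.port", "8670");
      info.put("info.am.agent.secured.port", "8671");

      // Older AMs may not publish these keys, so probe before parsing.
      String url = info.get("info.am.agent.url");
      if (url != null) {
        int securedPort =
            Integer.parseInt(info.get("info.am.agent.secured.port"));
        System.out.println("agent at " + url
            + ", secured port " + securedPort);
      }
    }
  }
)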
diff --git a/slider-core/src/main/java/org/apache/slider/api/proto/Messages.java b/slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
new file mode 100644
index 0000000..250e5ff
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
@@ -0,0 +1,13186 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SliderClusterMessages.proto
+
+package org.apache.slider.api.proto;
+
+public final class Messages {
+  private Messages() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RoleInstanceStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string name = 1;
+    /**
+     * <code>required string name = 1;</code>
+     */
+    boolean hasName();
+    /**
+     * <code>required string name = 1;</code>
+     */
+    java.lang.String getName();
+    /**
+     * <code>required string name = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getNameBytes();
+
+    // optional string role = 2;
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    boolean hasRole();
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    java.lang.String getRole();
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getRoleBytes();
+
+    // required uint32 state = 4;
+    /**
+     * <code>required uint32 state = 4;</code>
+     */
+    boolean hasState();
+    /**
+     * <code>required uint32 state = 4;</code>
+     */
+    int getState();
+
+    // required uint32 exitCode = 5;
+    /**
+     * <code>required uint32 exitCode = 5;</code>
+     */
+    boolean hasExitCode();
+    /**
+     * <code>required uint32 exitCode = 5;</code>
+     */
+    int getExitCode();
+
+    // optional string command = 6;
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    boolean hasCommand();
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    java.lang.String getCommand();
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    com.google.protobuf.ByteString
+        getCommandBytes();
+
+    // optional string diagnostics = 7;
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    boolean hasDiagnostics();
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    java.lang.String getDiagnostics();
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    com.google.protobuf.ByteString
+        getDiagnosticsBytes();
+
+    // repeated string output = 8;
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    java.util.List<java.lang.String>
+    getOutputList();
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    int getOutputCount();
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    java.lang.String getOutput(int index);
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    com.google.protobuf.ByteString
+        getOutputBytes(int index);
+
+    // repeated string environment = 9;
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    java.util.List<java.lang.String>
+    getEnvironmentList();
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    int getEnvironmentCount();
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    java.lang.String getEnvironment(int index);
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    com.google.protobuf.ByteString
+        getEnvironmentBytes(int index);
+
+    // required uint32 roleId = 10;
+    /**
+     * <code>required uint32 roleId = 10;</code>
+     */
+    boolean hasRoleId();
+    /**
+     * <code>required uint32 roleId = 10;</code>
+     */
+    int getRoleId();
+
+    // required bool released = 11;
+    /**
+     * <code>required bool released = 11;</code>
+     */
+    boolean hasReleased();
+    /**
+     * <code>required bool released = 11;</code>
+     */
+    boolean getReleased();
+
+    // required int64 createTime = 12;
+    /**
+     * <code>required int64 createTime = 12;</code>
+     */
+    boolean hasCreateTime();
+    /**
+     * <code>required int64 createTime = 12;</code>
+     */
+    long getCreateTime();
+
+    // required int64 startTime = 13;
+    /**
+     * <code>required int64 startTime = 13;</code>
+     */
+    boolean hasStartTime();
+    /**
+     * <code>required int64 startTime = 13;</code>
+     */
+    long getStartTime();
+
+    // required string host = 14;
+    /**
+     * <code>required string host = 14;</code>
+     */
+    boolean hasHost();
+    /**
+     * <code>required string host = 14;</code>
+     */
+    java.lang.String getHost();
+    /**
+     * <code>required string host = 14;</code>
+     */
+    com.google.protobuf.ByteString
+        getHostBytes();
+
+    // required string hostURL = 15;
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    boolean hasHostURL();
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    java.lang.String getHostURL();
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    com.google.protobuf.ByteString
+        getHostURLBytes();
+  }
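(Everything in this file is protoc output in the protobuf-java 2.5 shape: an immutable message class backed by the interface above, plus a nested Builder whose build() enforces every field the .proto marks required. A hedged sketch of constructing an instance -- all field values are invented placeholders:

  import org.apache.slider.api.proto.Messages;

  public class BuildRoleInstanceState {
    public static void main(String[] args) {
      // Every required field must be set before build(), or the builder
      // throws an UninitializedMessageException; role and command are
      // optional and may be left unset.
      Messages.RoleInstanceState state =
          Messages.RoleInstanceState.newBuilder()
              .setName("container_1400000000000_0001_01_000002")
              .setRole("worker")
              .setState(3)
              .setExitCode(0)
              .setRoleId(1)
              .setReleased(false)
              .setCreateTime(System.currentTimeMillis())
              .setStartTime(System.currentTimeMillis())
              .setHost("node1.example.com")
              .setHostURL("http://node1.example.com:8042")
              .build();
      System.out.println(state.getName() + " on " + state.getHost());
    }
  }
)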
+  /**
+   * Protobuf type {@code org.apache.slider.api.RoleInstanceState}
+   */
+  public static final class RoleInstanceState extends
+      com.google.protobuf.GeneratedMessage
+      implements RoleInstanceStateOrBuilder {
+    // Use RoleInstanceState.newBuilder() to construct.
+    private RoleInstanceState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RoleInstanceState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RoleInstanceState defaultInstance;
+    public static RoleInstanceState getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RoleInstanceState getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RoleInstanceState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              name_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              role_ = input.readBytes();
+              break;
+            }
+            case 32: {
+              bitField0_ |= 0x00000004;
+              state_ = input.readUInt32();
+              break;
+            }
+            case 40: {
+              bitField0_ |= 0x00000008;
+              exitCode_ = input.readUInt32();
+              break;
+            }
+            case 50: {
+              bitField0_ |= 0x00000010;
+              command_ = input.readBytes();
+              break;
+            }
+            case 58: {
+              bitField0_ |= 0x00000020;
+              diagnostics_ = input.readBytes();
+              break;
+            }
+            case 66: {
+              if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+                output_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000040;
+              }
+              output_.add(input.readBytes());
+              break;
+            }
+            case 74: {
+              if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
+                environment_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000080;
+              }
+              environment_.add(input.readBytes());
+              break;
+            }
+            case 80: {
+              bitField0_ |= 0x00000040;
+              roleId_ = input.readUInt32();
+              break;
+            }
+            case 88: {
+              bitField0_ |= 0x00000080;
+              released_ = input.readBool();
+              break;
+            }
+            case 96: {
+              bitField0_ |= 0x00000100;
+              createTime_ = input.readInt64();
+              break;
+            }
+            case 104: {
+              bitField0_ |= 0x00000200;
+              startTime_ = input.readInt64();
+              break;
+            }
+            case 114: {
+              bitField0_ |= 0x00000400;
+              host_ = input.readBytes();
+              break;
+            }
+            case 122: {
+              bitField0_ |= 0x00000800;
+              hostURL_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+          output_ = new com.google.protobuf.UnmodifiableLazyStringList(output_);
+        }
+        if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
+          environment_ = new com.google.protobuf.UnmodifiableLazyStringList(environment_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_RoleInstanceState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_RoleInstanceState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.RoleInstanceState.class, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RoleInstanceState> PARSER =
+        new com.google.protobuf.AbstractParser<RoleInstanceState>() {
+      public RoleInstanceState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RoleInstanceState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RoleInstanceState> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string name = 1;
+    public static final int NAME_FIELD_NUMBER = 1;
+    private java.lang.Object name_;
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public boolean hasName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          name_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // optional string role = 2;
+    public static final int ROLE_FIELD_NUMBER = 2;
+    private java.lang.Object role_;
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    public boolean hasRole() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    public java.lang.String getRole() {
+      java.lang.Object ref = role_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          role_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string role = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getRoleBytes() {
+      java.lang.Object ref = role_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        role_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required uint32 state = 4;
+    public static final int STATE_FIELD_NUMBER = 4;
+    private int state_;
+    /**
+     * <code>required uint32 state = 4;</code>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required uint32 state = 4;</code>
+     */
+    public int getState() {
+      return state_;
+    }
+
+    // required uint32 exitCode = 5;
+    public static final int EXITCODE_FIELD_NUMBER = 5;
+    private int exitCode_;
+    /**
+     * <code>required uint32 exitCode = 5;</code>
+     */
+    public boolean hasExitCode() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>required uint32 exitCode = 5;</code>
+     */
+    public int getExitCode() {
+      return exitCode_;
+    }
+
+    // optional string command = 6;
+    public static final int COMMAND_FIELD_NUMBER = 6;
+    private java.lang.Object command_;
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    public boolean hasCommand() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    public java.lang.String getCommand() {
+      java.lang.Object ref = command_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          command_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string command = 6;</code>
+     */
+    public com.google.protobuf.ByteString
+        getCommandBytes() {
+      java.lang.Object ref = command_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        command_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // optional string diagnostics = 7;
+    public static final int DIAGNOSTICS_FIELD_NUMBER = 7;
+    private java.lang.Object diagnostics_;
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    public boolean hasDiagnostics() {
+      return ((bitField0_ & 0x00000020) == 0x00000020);
+    }
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    public java.lang.String getDiagnostics() {
+      java.lang.Object ref = diagnostics_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          diagnostics_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string diagnostics = 7;</code>
+     */
+    public com.google.protobuf.ByteString
+        getDiagnosticsBytes() {
+      java.lang.Object ref = diagnostics_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        diagnostics_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // repeated string output = 8;
+    public static final int OUTPUT_FIELD_NUMBER = 8;
+    private com.google.protobuf.LazyStringList output_;
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    public java.util.List<java.lang.String>
+        getOutputList() {
+      return output_;
+    }
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    public int getOutputCount() {
+      return output_.size();
+    }
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    public java.lang.String getOutput(int index) {
+      return output_.get(index);
+    }
+    /**
+     * <code>repeated string output = 8;</code>
+     */
+    public com.google.protobuf.ByteString
+        getOutputBytes(int index) {
+      return output_.getByteString(index);
+    }
+
+    // repeated string environment = 9;
+    public static final int ENVIRONMENT_FIELD_NUMBER = 9;
+    private com.google.protobuf.LazyStringList environment_;
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    public java.util.List<java.lang.String>
+        getEnvironmentList() {
+      return environment_;
+    }
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    public int getEnvironmentCount() {
+      return environment_.size();
+    }
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    public java.lang.String getEnvironment(int index) {
+      return environment_.get(index);
+    }
+    /**
+     * <code>repeated string environment = 9;</code>
+     */
+    public com.google.protobuf.ByteString
+        getEnvironmentBytes(int index) {
+      return environment_.getByteString(index);
+    }
+
+    // required uint32 roleId = 10;
+    public static final int ROLEID_FIELD_NUMBER = 10;
+    private int roleId_;
+    /**
+     * <code>required uint32 roleId = 10;</code>
+     */
+    public boolean hasRoleId() {
+      return ((bitField0_ & 0x00000040) == 0x00000040);
+    }
+    /**
+     * <code>required uint32 roleId = 10;</code>
+     */
+    public int getRoleId() {
+      return roleId_;
+    }
+
+    // required bool released = 11;
+    public static final int RELEASED_FIELD_NUMBER = 11;
+    private boolean released_;
+    /**
+     * <code>required bool released = 11;</code>
+     */
+    public boolean hasReleased() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>required bool released = 11;</code>
+     */
+    public boolean getReleased() {
+      return released_;
+    }
+
+    // required int64 createTime = 12;
+    public static final int CREATETIME_FIELD_NUMBER = 12;
+    private long createTime_;
+    /**
+     * <code>required int64 createTime = 12;</code>
+     */
+    public boolean hasCreateTime() {
+      return ((bitField0_ & 0x00000100) == 0x00000100);
+    }
+    /**
+     * <code>required int64 createTime = 12;</code>
+     */
+    public long getCreateTime() {
+      return createTime_;
+    }
+
+    // required int64 startTime = 13;
+    public static final int STARTTIME_FIELD_NUMBER = 13;
+    private long startTime_;
+    /**
+     * <code>required int64 startTime = 13;</code>
+     */
+    public boolean hasStartTime() {
+      return ((bitField0_ & 0x00000200) == 0x00000200);
+    }
+    /**
+     * <code>required int64 startTime = 13;</code>
+     */
+    public long getStartTime() {
+      return startTime_;
+    }
+
+    // required string host = 14;
+    public static final int HOST_FIELD_NUMBER = 14;
+    private java.lang.Object host_;
+    /**
+     * <code>required string host = 14;</code>
+     */
+    public boolean hasHost() {
+      return ((bitField0_ & 0x00000400) == 0x00000400);
+    }
+    /**
+     * <code>required string host = 14;</code>
+     */
+    public java.lang.String getHost() {
+      java.lang.Object ref = host_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          host_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string host = 14;</code>
+     */
+    public com.google.protobuf.ByteString
+        getHostBytes() {
+      java.lang.Object ref = host_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        host_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string hostURL = 15;
+    public static final int HOSTURL_FIELD_NUMBER = 15;
+    private java.lang.Object hostURL_;
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    public boolean hasHostURL() {
+      return ((bitField0_ & 0x00000800) == 0x00000800);
+    }
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    public java.lang.String getHostURL() {
+      java.lang.Object ref = hostURL_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          hostURL_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string hostURL = 15;</code>
+     */
+    public com.google.protobuf.ByteString
+        getHostURLBytes() {
+      java.lang.Object ref = hostURL_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        hostURL_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      name_ = "";
+      role_ = "";
+      state_ = 0;
+      exitCode_ = 0;
+      command_ = "";
+      diagnostics_ = "";
+      output_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      environment_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      roleId_ = 0;
+      released_ = false;
+      createTime_ = 0L;
+      startTime_ = 0L;
+      host_ = "";
+      hostURL_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasExitCode()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasRoleId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasReleased()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasCreateTime()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasStartTime()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasHost()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasHostURL()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getRoleBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt32(4, state_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeUInt32(5, exitCode_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeBytes(6, getCommandBytes());
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        output.writeBytes(7, getDiagnosticsBytes());
+      }
+      for (int i = 0; i < output_.size(); i++) {
+        output.writeBytes(8, output_.getByteString(i));
+      }
+      for (int i = 0; i < environment_.size(); i++) {
+        output.writeBytes(9, environment_.getByteString(i));
+      }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        output.writeUInt32(10, roleId_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeBool(11, released_);
+      }
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+        output.writeInt64(12, createTime_);
+      }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        output.writeInt64(13, startTime_);
+      }
+      if (((bitField0_ & 0x00000400) == 0x00000400)) {
+        output.writeBytes(14, getHostBytes());
+      }
+      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+        output.writeBytes(15, getHostURLBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getRoleBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(4, state_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(5, exitCode_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(6, getCommandBytes());
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(7, getDiagnosticsBytes());
+      }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < output_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(output_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getOutputList().size();
+      }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < environment_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(environment_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getEnvironmentList().size();
+      }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(10, roleId_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(11, released_);
+      }
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(12, createTime_);
+      }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(13, startTime_);
+      }
+      if (((bitField0_ & 0x00000400) == 0x00000400)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(14, getHostBytes());
+      }
+      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(15, getHostURLBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.RoleInstanceState)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.RoleInstanceState other = (org.apache.slider.api.proto.Messages.RoleInstanceState) obj;
+
+      boolean result = true;
+      result = result && (hasName() == other.hasName());
+      if (hasName()) {
+        result = result && getName()
+            .equals(other.getName());
+      }
+      result = result && (hasRole() == other.hasRole());
+      if (hasRole()) {
+        result = result && getRole()
+            .equals(other.getRole());
+      }
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result && (getState()
+            == other.getState());
+      }
+      result = result && (hasExitCode() == other.hasExitCode());
+      if (hasExitCode()) {
+        result = result && (getExitCode()
+            == other.getExitCode());
+      }
+      result = result && (hasCommand() == other.hasCommand());
+      if (hasCommand()) {
+        result = result && getCommand()
+            .equals(other.getCommand());
+      }
+      result = result && (hasDiagnostics() == other.hasDiagnostics());
+      if (hasDiagnostics()) {
+        result = result && getDiagnostics()
+            .equals(other.getDiagnostics());
+      }
+      result = result && getOutputList()
+          .equals(other.getOutputList());
+      result = result && getEnvironmentList()
+          .equals(other.getEnvironmentList());
+      result = result && (hasRoleId() == other.hasRoleId());
+      if (hasRoleId()) {
+        result = result && (getRoleId()
+            == other.getRoleId());
+      }
+      result = result && (hasReleased() == other.hasReleased());
+      if (hasReleased()) {
+        result = result && (getReleased()
+            == other.getReleased());
+      }
+      result = result && (hasCreateTime() == other.hasCreateTime());
+      if (hasCreateTime()) {
+        result = result && (getCreateTime()
+            == other.getCreateTime());
+      }
+      result = result && (hasStartTime() == other.hasStartTime());
+      if (hasStartTime()) {
+        result = result && (getStartTime()
+            == other.getStartTime());
+      }
+      result = result && (hasHost() == other.hasHost());
+      if (hasHost()) {
+        result = result && getHost()
+            .equals(other.getHost());
+      }
+      result = result && (hasHostURL() == other.hasHostURL());
+      if (hasHostURL()) {
+        result = result && getHostURL()
+            .equals(other.getHostURL());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasName()) {
+        hash = (37 * hash) + NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getName().hashCode();
+      }
+      if (hasRole()) {
+        hash = (37 * hash) + ROLE_FIELD_NUMBER;
+        hash = (53 * hash) + getRole().hashCode();
+      }
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + getState();
+      }
+      if (hasExitCode()) {
+        hash = (37 * hash) + EXITCODE_FIELD_NUMBER;
+        hash = (53 * hash) + getExitCode();
+      }
+      if (hasCommand()) {
+        hash = (37 * hash) + COMMAND_FIELD_NUMBER;
+        hash = (53 * hash) + getCommand().hashCode();
+      }
+      if (hasDiagnostics()) {
+        hash = (37 * hash) + DIAGNOSTICS_FIELD_NUMBER;
+        hash = (53 * hash) + getDiagnostics().hashCode();
+      }
+      if (getOutputCount() > 0) {
+        hash = (37 * hash) + OUTPUT_FIELD_NUMBER;
+        hash = (53 * hash) + getOutputList().hashCode();
+      }
+      if (getEnvironmentCount() > 0) {
+        hash = (37 * hash) + ENVIRONMENT_FIELD_NUMBER;
+        hash = (53 * hash) + getEnvironmentList().hashCode();
+      }
+      if (hasRoleId()) {
+        hash = (37 * hash) + ROLEID_FIELD_NUMBER;
+        hash = (53 * hash) + getRoleId();
+      }
+      if (hasReleased()) {
+        hash = (37 * hash) + RELEASED_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getReleased());
+      }
+      if (hasCreateTime()) {
+        hash = (37 * hash) + CREATETIME_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getCreateTime());
+      }
+      if (hasStartTime()) {
+        hash = (37 * hash) + STARTTIME_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getStartTime());
+      }
+      if (hasHost()) {
+        hash = (37 * hash) + HOST_FIELD_NUMBER;
+        hash = (53 * hash) + getHost().hashCode();
+      }
+      if (hasHostURL()) {
+        hash = (37 * hash) + HOSTURL_FIELD_NUMBER;
+        hash = (53 * hash) + getHostURL().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.RoleInstanceState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.RoleInstanceState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
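(The parseFrom overloads above pair with the serialization methods the message inherits from the protobuf runtime: toByteArray, writeTo, writeDelimitedTo. A small round-trip sketch, reusing the invented field values from the builder example:

  import com.google.protobuf.InvalidProtocolBufferException;
  import org.apache.slider.api.proto.Messages;

  public class RoleInstanceStateRoundTrip {
    public static void main(String[] args)
        throws InvalidProtocolBufferException {
      Messages.RoleInstanceState original =
          Messages.RoleInstanceState.newBuilder()
              .setName("container_1400000000000_0001_01_000002")
              .setState(3).setExitCode(0).setRoleId(1).setReleased(false)
              .setCreateTime(1L).setStartTime(2L)
              .setHost("node1.example.com")
              .setHostURL("http://node1.example.com:8042")
              .build();

      // toByteArray() comes from the runtime; parseFrom(byte[]) is the
      // generated static defined above.
      byte[] wire = original.toByteArray();
      Messages.RoleInstanceState copy =
          Messages.RoleInstanceState.parseFrom(wire);

      // The generated equals() compares field by field: prints true.
      System.out.println(copy.equals(original));
    }
  }
)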
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.RoleInstanceState}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_RoleInstanceState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_RoleInstanceState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.RoleInstanceState.class, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.RoleInstanceState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        name_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        role_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        state_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        exitCode_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        command_ = "";
+        bitField0_ = (bitField0_ & ~0x00000010);
+        diagnostics_ = "";
+        bitField0_ = (bitField0_ & ~0x00000020);
+        output_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000040);
+        environment_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000080);
+        roleId_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000100);
+        released_ = false;
+        bitField0_ = (bitField0_ & ~0x00000200);
+        createTime_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000400);
+        startTime_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000800);
+        host_ = "";
+        bitField0_ = (bitField0_ & ~0x00001000);
+        hostURL_ = "";
+        bitField0_ = (bitField0_ & ~0x00002000);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_RoleInstanceState_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.RoleInstanceState getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.RoleInstanceState build() {
+        org.apache.slider.api.proto.Messages.RoleInstanceState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.RoleInstanceState buildPartial() {
+        org.apache.slider.api.proto.Messages.RoleInstanceState result = new org.apache.slider.api.proto.Messages.RoleInstanceState(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.name_ = name_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.role_ = role_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.exitCode_ = exitCode_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.command_ = command_;
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000020;
+        }
+        result.diagnostics_ = diagnostics_;
+        if (((bitField0_ & 0x00000040) == 0x00000040)) {
+          output_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              output_);
+          bitField0_ = (bitField0_ & ~0x00000040);
+        }
+        result.output_ = output_;
+        if (((bitField0_ & 0x00000080) == 0x00000080)) {
+          environment_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              environment_);
+          bitField0_ = (bitField0_ & ~0x00000080);
+        }
+        result.environment_ = environment_;
+        if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        result.roleId_ = roleId_;
+        if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        result.released_ = released_;
+        if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+          to_bitField0_ |= 0x00000100;
+        }
+        result.createTime_ = createTime_;
+        if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+          to_bitField0_ |= 0x00000200;
+        }
+        result.startTime_ = startTime_;
+        if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+          to_bitField0_ |= 0x00000400;
+        }
+        result.host_ = host_;
+        if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
+          to_bitField0_ |= 0x00000800;
+        }
+        result.hostURL_ = hostURL_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.RoleInstanceState) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.RoleInstanceState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.RoleInstanceState other) {
+        if (other == org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance()) return this;
+        if (other.hasName()) {
+          bitField0_ |= 0x00000001;
+          name_ = other.name_;
+          onChanged();
+        }
+        if (other.hasRole()) {
+          bitField0_ |= 0x00000002;
+          role_ = other.role_;
+          onChanged();
+        }
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasExitCode()) {
+          setExitCode(other.getExitCode());
+        }
+        if (other.hasCommand()) {
+          bitField0_ |= 0x00000010;
+          command_ = other.command_;
+          onChanged();
+        }
+        if (other.hasDiagnostics()) {
+          bitField0_ |= 0x00000020;
+          diagnostics_ = other.diagnostics_;
+          onChanged();
+        }
+        if (!other.output_.isEmpty()) {
+          if (output_.isEmpty()) {
+            output_ = other.output_;
+            bitField0_ = (bitField0_ & ~0x00000040);
+          } else {
+            ensureOutputIsMutable();
+            output_.addAll(other.output_);
+          }
+          onChanged();
+        }
+        if (!other.environment_.isEmpty()) {
+          if (environment_.isEmpty()) {
+            environment_ = other.environment_;
+            bitField0_ = (bitField0_ & ~0x00000080);
+          } else {
+            ensureEnvironmentIsMutable();
+            environment_.addAll(other.environment_);
+          }
+          onChanged();
+        }
+        if (other.hasRoleId()) {
+          setRoleId(other.getRoleId());
+        }
+        if (other.hasReleased()) {
+          setReleased(other.getReleased());
+        }
+        if (other.hasCreateTime()) {
+          setCreateTime(other.getCreateTime());
+        }
+        if (other.hasStartTime()) {
+          setStartTime(other.getStartTime());
+        }
+        if (other.hasHost()) {
+          bitField0_ |= 0x00001000;
+          host_ = other.host_;
+          onChanged();
+        }
+        if (other.hasHostURL()) {
+          bitField0_ |= 0x00002000;
+          hostURL_ = other.hostURL_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasName()) {
+          return false;
+        }
+        if (!hasState()) {
+          return false;
+        }
+        if (!hasExitCode()) {
+          return false;
+        }
+        if (!hasRoleId()) {
+          return false;
+        }
+        if (!hasReleased()) {
+          return false;
+        }
+        if (!hasCreateTime()) {
+          return false;
+        }
+        if (!hasStartTime()) {
+          return false;
+        }
+        if (!hasHost()) {
+          return false;
+        }
+        if (!hasHostURL()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.RoleInstanceState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.RoleInstanceState) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string name = 1;
+      private java.lang.Object name_ = "";
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public java.lang.String getName() {
+        java.lang.Object ref = name_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          name_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getNameBytes() {
+        java.lang.Object ref = name_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          name_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder setName(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder clearName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        name_ = getDefaultInstance().getName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder setNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+
+      // optional string role = 2;
+      private java.lang.Object role_ = "";
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public boolean hasRole() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public java.lang.String getRole() {
+        java.lang.Object ref = role_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          role_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getRoleBytes() {
+        java.lang.Object ref = role_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          role_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public Builder setRole(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        role_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public Builder clearRole() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        role_ = getDefaultInstance().getRole();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string role = 2;</code>
+       */
+      public Builder setRoleBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        role_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required uint32 state = 4;
+      private int state_;
+      /**
+       * <code>required uint32 state = 4;</code>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required uint32 state = 4;</code>
+       */
+      public int getState() {
+        return state_;
+      }
+      /**
+       * <code>required uint32 state = 4;</code>
+       */
+      public Builder setState(int value) {
+        bitField0_ |= 0x00000004;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint32 state = 4;</code>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        state_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required uint32 exitCode = 5;
+      private int exitCode_;
+      /**
+       * <code>required uint32 exitCode = 5;</code>
+       */
+      public boolean hasExitCode() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>required uint32 exitCode = 5;</code>
+       */
+      public int getExitCode() {
+        return exitCode_;
+      }
+      /**
+       * <code>required uint32 exitCode = 5;</code>
+       */
+      public Builder setExitCode(int value) {
+        bitField0_ |= 0x00000008;
+        exitCode_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint32 exitCode = 5;</code>
+       */
+      public Builder clearExitCode() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        exitCode_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // optional string command = 6;
+      private java.lang.Object command_ = "";
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public boolean hasCommand() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public java.lang.String getCommand() {
+        java.lang.Object ref = command_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          command_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public com.google.protobuf.ByteString
+          getCommandBytes() {
+        java.lang.Object ref = command_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          command_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public Builder setCommand(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000010;
+        command_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public Builder clearCommand() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        command_ = getDefaultInstance().getCommand();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string command = 6;</code>
+       */
+      public Builder setCommandBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000010;
+        command_ = value;
+        onChanged();
+        return this;
+      }
+
+      // optional string diagnostics = 7;
+      private java.lang.Object diagnostics_ = "";
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public boolean hasDiagnostics() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public java.lang.String getDiagnostics() {
+        java.lang.Object ref = diagnostics_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          diagnostics_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public com.google.protobuf.ByteString
+          getDiagnosticsBytes() {
+        java.lang.Object ref = diagnostics_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          diagnostics_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public Builder setDiagnostics(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000020;
+        diagnostics_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public Builder clearDiagnostics() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        diagnostics_ = getDefaultInstance().getDiagnostics();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string diagnostics = 7;</code>
+       */
+      public Builder setDiagnosticsBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000020;
+        diagnostics_ = value;
+        onChanged();
+        return this;
+      }
+
+      // repeated string output = 8;
+      private com.google.protobuf.LazyStringList output_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      private void ensureOutputIsMutable() {
+        if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+          output_ = new com.google.protobuf.LazyStringArrayList(output_);
+          bitField0_ |= 0x00000040;
+        }
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public java.util.List<java.lang.String>
+          getOutputList() {
+        return java.util.Collections.unmodifiableList(output_);
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public int getOutputCount() {
+        return output_.size();
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public java.lang.String getOutput(int index) {
+        return output_.get(index);
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public com.google.protobuf.ByteString
+          getOutputBytes(int index) {
+        return output_.getByteString(index);
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public Builder setOutput(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureOutputIsMutable();
+        output_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public Builder addOutput(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureOutputIsMutable();
+        output_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public Builder addAllOutput(
+          java.lang.Iterable<java.lang.String> values) {
+        ensureOutputIsMutable();
+        super.addAll(values, output_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public Builder clearOutput() {
+        output_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000040);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string output = 8;</code>
+       */
+      public Builder addOutputBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureOutputIsMutable();
+        output_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // repeated string environment = 9;
+      private com.google.protobuf.LazyStringList environment_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      private void ensureEnvironmentIsMutable() {
+        if (!((bitField0_ & 0x00000080) == 0x00000080)) {
+          environment_ = new com.google.protobuf.LazyStringArrayList(environment_);
+          bitField0_ |= 0x00000080;
+        }
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public java.util.List<java.lang.String>
+          getEnvironmentList() {
+        return java.util.Collections.unmodifiableList(environment_);
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public int getEnvironmentCount() {
+        return environment_.size();
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public java.lang.String getEnvironment(int index) {
+        return environment_.get(index);
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public com.google.protobuf.ByteString
+          getEnvironmentBytes(int index) {
+        return environment_.getByteString(index);
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public Builder setEnvironment(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureEnvironmentIsMutable();
+        environment_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public Builder addEnvironment(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureEnvironmentIsMutable();
+        environment_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public Builder addAllEnvironment(
+          java.lang.Iterable<java.lang.String> values) {
+        ensureEnvironmentIsMutable();
+        super.addAll(values, environment_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public Builder clearEnvironment() {
+        environment_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000080);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string environment = 9;</code>
+       */
+      public Builder addEnvironmentBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureEnvironmentIsMutable();
+        environment_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // required uint32 roleId = 10;
+      private int roleId_;
+      /**
+       * <code>required uint32 roleId = 10;</code>
+       */
+      public boolean hasRoleId() {
+        return ((bitField0_ & 0x00000100) == 0x00000100);
+      }
+      /**
+       * <code>required uint32 roleId = 10;</code>
+       */
+      public int getRoleId() {
+        return roleId_;
+      }
+      /**
+       * <code>required uint32 roleId = 10;</code>
+       */
+      public Builder setRoleId(int value) {
+        bitField0_ |= 0x00000100;
+        roleId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint32 roleId = 10;</code>
+       */
+      public Builder clearRoleId() {
+        bitField0_ = (bitField0_ & ~0x00000100);
+        roleId_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required bool released = 11;
+      private boolean released_;
+      /**
+       * <code>required bool released = 11;</code>
+       */
+      public boolean hasReleased() {
+        return ((bitField0_ & 0x00000200) == 0x00000200);
+      }
+      /**
+       * <code>required bool released = 11;</code>
+       */
+      public boolean getReleased() {
+        return released_;
+      }
+      /**
+       * <code>required bool released = 11;</code>
+       */
+      public Builder setReleased(boolean value) {
+        bitField0_ |= 0x00000200;
+        released_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool released = 11;</code>
+       */
+      public Builder clearReleased() {
+        bitField0_ = (bitField0_ & ~0x00000200);
+        released_ = false;
+        onChanged();
+        return this;
+      }
+
+      // required int64 createTime = 12;
+      private long createTime_;
+      /**
+       * <code>required int64 createTime = 12;</code>
+       */
+      public boolean hasCreateTime() {
+        return ((bitField0_ & 0x00000400) == 0x00000400);
+      }
+      /**
+       * <code>required int64 createTime = 12;</code>
+       */
+      public long getCreateTime() {
+        return createTime_;
+      }
+      /**
+       * <code>required int64 createTime = 12;</code>
+       */
+      public Builder setCreateTime(long value) {
+        bitField0_ |= 0x00000400;
+        createTime_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 createTime = 12;</code>
+       */
+      public Builder clearCreateTime() {
+        bitField0_ = (bitField0_ & ~0x00000400);
+        createTime_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required int64 startTime = 13;
+      private long startTime_;
+      /**
+       * <code>required int64 startTime = 13;</code>
+       */
+      public boolean hasStartTime() {
+        return ((bitField0_ & 0x00000800) == 0x00000800);
+      }
+      /**
+       * <code>required int64 startTime = 13;</code>
+       */
+      public long getStartTime() {
+        return startTime_;
+      }
+      /**
+       * <code>required int64 startTime = 13;</code>
+       */
+      public Builder setStartTime(long value) {
+        bitField0_ |= 0x00000800;
+        startTime_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 startTime = 13;</code>
+       */
+      public Builder clearStartTime() {
+        bitField0_ = (bitField0_ & ~0x00000800);
+        startTime_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required string host = 14;
+      private java.lang.Object host_ = "";
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public boolean hasHost() {
+        return ((bitField0_ & 0x00001000) == 0x00001000);
+      }
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public java.lang.String getHost() {
+        java.lang.Object ref = host_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          host_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public com.google.protobuf.ByteString
+          getHostBytes() {
+        java.lang.Object ref = host_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          host_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public Builder setHost(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00001000;
+        host_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public Builder clearHost() {
+        bitField0_ = (bitField0_ & ~0x00001000);
+        host_ = getDefaultInstance().getHost();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string host = 14;</code>
+       */
+      public Builder setHostBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00001000;
+        host_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string hostURL = 15;
+      private java.lang.Object hostURL_ = "";
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public boolean hasHostURL() {
+        return ((bitField0_ & 0x00002000) == 0x00002000);
+      }
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public java.lang.String getHostURL() {
+        java.lang.Object ref = hostURL_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          hostURL_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public com.google.protobuf.ByteString
+          getHostURLBytes() {
+        java.lang.Object ref = hostURL_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          hostURL_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public Builder setHostURL(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00002000;
+        hostURL_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public Builder clearHostURL() {
+        bitField0_ = (bitField0_ & ~0x00002000);
+        hostURL_ = getDefaultInstance().getHostURL();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string hostURL = 15;</code>
+       */
+      public Builder setHostURLBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00002000;
+        hostURL_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.RoleInstanceState)
+    }
+
+    static {
+      defaultInstance = new RoleInstanceState(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.RoleInstanceState)
+  }
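+
+  // Illustrative usage sketch (editorial; not part of the protoc output).
+  // Building a RoleInstanceState requires setting every 'required' field
+  // declared above (name, state, exitCode, roleId, released, createTime,
+  // startTime, host, hostURL) before build(); otherwise build() throws via
+  // newUninitializedMessageException. Values below are placeholders, not
+  // real Slider data:
+  //
+  //   Messages.RoleInstanceState instance =
+  //       Messages.RoleInstanceState.newBuilder()
+  //           .setName("container_0001")
+  //           .setState(3)
+  //           .setExitCode(0)
+  //           .setRoleId(1)
+  //           .setReleased(false)
+  //           .setCreateTime(System.currentTimeMillis())
+  //           .setStartTime(System.currentTimeMillis())
+  //           .setHost("node-1.example.com")
+  //           .setHostURL("http://node-1.example.com:8042")
+  //           .addOutput("started")   // repeated fields use add*/addAll*
+  //           .build();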
+
+  public interface StopClusterRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string message = 1;
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    boolean hasMessage();
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    java.lang.String getMessage();
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    com.google.protobuf.ByteString
+        getMessageBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.StopClusterRequestProto}
+   *
+   * <pre>
+   **
+   * stop the cluster
+   * </pre>
+   */
+  public static final class StopClusterRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements StopClusterRequestProtoOrBuilder {
+    // Use StopClusterRequestProto.newBuilder() to construct.
+    private StopClusterRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StopClusterRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StopClusterRequestProto defaultInstance;
+    public static StopClusterRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StopClusterRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StopClusterRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              message_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.StopClusterRequestProto.class, org.apache.slider.api.proto.Messages.StopClusterRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StopClusterRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<StopClusterRequestProto>() {
+      public StopClusterRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StopClusterRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StopClusterRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string message = 1;
+    public static final int MESSAGE_FIELD_NUMBER = 1;
+    private java.lang.Object message_;
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    public boolean hasMessage() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    public java.lang.String getMessage() {
+      java.lang.Object ref = message_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          message_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string message = 1;</code>
+     *
+     * <pre>
+     **
+     *message to include
+     * </pre>
+     */
+    public com.google.protobuf.ByteString
+        getMessageBytes() {
+      java.lang.Object ref = message_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        message_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      message_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasMessage()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getMessageBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getMessageBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.StopClusterRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.StopClusterRequestProto other = (org.apache.slider.api.proto.Messages.StopClusterRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasMessage() == other.hasMessage());
+      if (hasMessage()) {
+        result = result && getMessage()
+            .equals(other.getMessage());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasMessage()) {
+        hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
+        hash = (53 * hash) + getMessage().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.StopClusterRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.StopClusterRequestProto}
+     *
+     * <pre>
+     **
+     * stop the cluster
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.StopClusterRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.StopClusterRequestProto.class, org.apache.slider.api.proto.Messages.StopClusterRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.StopClusterRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        message_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.StopClusterRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterRequestProto build() {
+        org.apache.slider.api.proto.Messages.StopClusterRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.StopClusterRequestProto result = new org.apache.slider.api.proto.Messages.StopClusterRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.message_ = message_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.StopClusterRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.StopClusterRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.StopClusterRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.StopClusterRequestProto.getDefaultInstance()) return this;
+        if (other.hasMessage()) {
+          bitField0_ |= 0x00000001;
+          message_ = other.message_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasMessage()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.StopClusterRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.StopClusterRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string message = 1;
+      private java.lang.Object message_ = "";
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public boolean hasMessage() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public java.lang.String getMessage() {
+        java.lang.Object ref = message_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          message_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public com.google.protobuf.ByteString
+          getMessageBytes() {
+        java.lang.Object ref = message_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          message_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public Builder setMessage(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        message_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public Builder clearMessage() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        message_ = getDefaultInstance().getMessage();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string message = 1;</code>
+       *
+       * <pre>
+       **
+       *message to include
+       * </pre>
+       */
+      public Builder setMessageBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        message_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.StopClusterRequestProto)
+    }
+
+    static {
+      defaultInstance = new StopClusterRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.StopClusterRequestProto)
+  }
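+
+  // Illustrative round-trip sketch (editorial; not part of the protoc
+  // output). 'message' is the only required field, so a minimal request can
+  // be built, serialized, and re-parsed as follows (placeholder text, not
+  // real Slider data):
+  //
+  //   Messages.StopClusterRequestProto req =
+  //       Messages.StopClusterRequestProto.newBuilder()
+  //           .setMessage("stopping for maintenance")
+  //           .build();
+  //   byte[] wire = req.toByteArray();
+  //   Messages.StopClusterRequestProto parsed =
+  //       Messages.StopClusterRequestProto.parseFrom(wire);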
+
+  public interface StopClusterResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.StopClusterResponseProto}
+   *
+   * <pre>
+   **
+   * stop the cluster
+   * </pre>
+   */
+  public static final class StopClusterResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements StopClusterResponseProtoOrBuilder {
+    // Use StopClusterResponseProto.newBuilder() to construct.
+    private StopClusterResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StopClusterResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StopClusterResponseProto defaultInstance;
+    public static StopClusterResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StopClusterResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StopClusterResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.StopClusterResponseProto.class, org.apache.slider.api.proto.Messages.StopClusterResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StopClusterResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<StopClusterResponseProto>() {
+      public StopClusterResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StopClusterResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StopClusterResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.StopClusterResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.StopClusterResponseProto other = (org.apache.slider.api.proto.Messages.StopClusterResponseProto) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.StopClusterResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.StopClusterResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.StopClusterResponseProto}
+     *
+     * <pre>
+     **
+     * stop the cluster
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.StopClusterResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.StopClusterResponseProto.class, org.apache.slider.api.proto.Messages.StopClusterResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.StopClusterResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterResponseProto build() {
+        org.apache.slider.api.proto.Messages.StopClusterResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.StopClusterResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.StopClusterResponseProto result = new org.apache.slider.api.proto.Messages.StopClusterResponseProto(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.StopClusterResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.StopClusterResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.StopClusterResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.StopClusterResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.StopClusterResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.StopClusterResponseProto)
+    }
+
+    static {
+      defaultInstance = new StopClusterResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.StopClusterResponseProto)
+  }
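+
+  // Note (editorial): StopClusterResponseProto declares no fields; an empty
+  // response message like this serves purely as an acknowledgement in the
+  // request/response pairs used throughout this file.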
+
+  public interface FlexClusterRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string clusterSpec = 1;
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    boolean hasClusterSpec();
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    java.lang.String getClusterSpec();
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getClusterSpecBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.FlexClusterRequestProto}
+   *
+   * <pre>
+   **
+   * flex the cluster
+   * </pre>
+   */
+  public static final class FlexClusterRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements FlexClusterRequestProtoOrBuilder {
+    // Use FlexClusterRequestProto.newBuilder() to construct.
+    private FlexClusterRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private FlexClusterRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final FlexClusterRequestProto defaultInstance;
+    public static FlexClusterRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public FlexClusterRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private FlexClusterRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              clusterSpec_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.FlexClusterRequestProto.class, org.apache.slider.api.proto.Messages.FlexClusterRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<FlexClusterRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<FlexClusterRequestProto>() {
+      public FlexClusterRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new FlexClusterRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<FlexClusterRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string clusterSpec = 1;
+    public static final int CLUSTERSPEC_FIELD_NUMBER = 1;
+    private java.lang.Object clusterSpec_;
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public boolean hasClusterSpec() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public java.lang.String getClusterSpec() {
+      java.lang.Object ref = clusterSpec_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          clusterSpec_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getClusterSpecBytes() {
+      java.lang.Object ref = clusterSpec_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        clusterSpec_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      clusterSpec_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasClusterSpec()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getClusterSpecBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getClusterSpecBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.FlexClusterRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.FlexClusterRequestProto other = (org.apache.slider.api.proto.Messages.FlexClusterRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasClusterSpec() == other.hasClusterSpec());
+      if (hasClusterSpec()) {
+        result = result && getClusterSpec()
+            .equals(other.getClusterSpec());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasClusterSpec()) {
+        hash = (37 * hash) + CLUSTERSPEC_FIELD_NUMBER;
+        hash = (53 * hash) + getClusterSpec().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.FlexClusterRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.FlexClusterRequestProto}
+     *
+     * <pre>
+     **
+     * flex the cluster
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.FlexClusterRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.FlexClusterRequestProto.class, org.apache.slider.api.proto.Messages.FlexClusterRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.FlexClusterRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        clusterSpec_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.FlexClusterRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterRequestProto build() {
+        org.apache.slider.api.proto.Messages.FlexClusterRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.FlexClusterRequestProto result = new org.apache.slider.api.proto.Messages.FlexClusterRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.clusterSpec_ = clusterSpec_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.FlexClusterRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.FlexClusterRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.FlexClusterRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.FlexClusterRequestProto.getDefaultInstance()) return this;
+        if (other.hasClusterSpec()) {
+          bitField0_ |= 0x00000001;
+          clusterSpec_ = other.clusterSpec_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasClusterSpec()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.FlexClusterRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.FlexClusterRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string clusterSpec = 1;
+      private java.lang.Object clusterSpec_ = "";
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public boolean hasClusterSpec() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public java.lang.String getClusterSpec() {
+        java.lang.Object ref = clusterSpec_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          clusterSpec_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getClusterSpecBytes() {
+        java.lang.Object ref = clusterSpec_;
+        if (ref instanceof java.lang.String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          clusterSpec_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder setClusterSpec(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterSpec_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder clearClusterSpec() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        clusterSpec_ = getDefaultInstance().getClusterSpec();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder setClusterSpecBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterSpec_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.FlexClusterRequestProto)
+    }
+
+    static {
+      defaultInstance = new FlexClusterRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.FlexClusterRequestProto)
+  }
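+
+  // Usage sketch (illustrative, not part of the generated output): building,
+  // serializing and re-parsing a flex request. `specJson` is a hypothetical
+  // variable holding the JSON cluster specification string.
+  //
+  //   Messages.FlexClusterRequestProto request =
+  //       Messages.FlexClusterRequestProto.newBuilder()
+  //           .setClusterSpec(specJson)   // required field; build() fails if unset
+  //           .build();
+  //   byte[] wire = request.toByteArray();
+  //   Messages.FlexClusterRequestProto parsed =
+  //       Messages.FlexClusterRequestProto.parseFrom(wire);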
+
+  public interface FlexClusterResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool response = 1;
+    /**
+     * <code>required bool response = 1;</code>
+     */
+    boolean hasResponse();
+    /**
+     * <code>required bool response = 1;</code>
+     */
+    boolean getResponse();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.FlexClusterResponseProto}
+   *
+   * <pre>
+   **
+   * flex the cluster: response
+   * </pre>
+   */
+  public static final class FlexClusterResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements FlexClusterResponseProtoOrBuilder {
+    // Use FlexClusterResponseProto.newBuilder() to construct.
+    private FlexClusterResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private FlexClusterResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final FlexClusterResponseProto defaultInstance;
+    public static FlexClusterResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public FlexClusterResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private FlexClusterResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              response_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.FlexClusterResponseProto.class, org.apache.slider.api.proto.Messages.FlexClusterResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<FlexClusterResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<FlexClusterResponseProto>() {
+      public FlexClusterResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new FlexClusterResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<FlexClusterResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool response = 1;
+    public static final int RESPONSE_FIELD_NUMBER = 1;
+    private boolean response_;
+    /**
+     * <code>required bool response = 1;</code>
+     */
+    public boolean hasResponse() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool response = 1;</code>
+     */
+    public boolean getResponse() {
+      return response_;
+    }
+
+    private void initFields() {
+      response_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasResponse()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, response_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, response_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.FlexClusterResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.FlexClusterResponseProto other = (org.apache.slider.api.proto.Messages.FlexClusterResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasResponse() == other.hasResponse());
+      if (hasResponse()) {
+        result = result && (getResponse()
+            == other.getResponse());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasResponse()) {
+        hash = (37 * hash) + RESPONSE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getResponse());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.FlexClusterResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.FlexClusterResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.FlexClusterResponseProto}
+     *
+     * <pre>
+     **
+     * flex the cluster: response
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.FlexClusterResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.FlexClusterResponseProto.class, org.apache.slider.api.proto.Messages.FlexClusterResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.FlexClusterResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        response_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterResponseProto build() {
+        org.apache.slider.api.proto.Messages.FlexClusterResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.FlexClusterResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.FlexClusterResponseProto result = new org.apache.slider.api.proto.Messages.FlexClusterResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.response_ = response_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.FlexClusterResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.FlexClusterResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.FlexClusterResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance()) return this;
+        if (other.hasResponse()) {
+          setResponse(other.getResponse());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasResponse()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.FlexClusterResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.FlexClusterResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool response = 1;
+      private boolean response_ ;
+      /**
+       * <code>required bool response = 1;</code>
+       */
+      public boolean hasResponse() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool response = 1;</code>
+       */
+      public boolean getResponse() {
+        return response_;
+      }
+      /**
+       * <code>required bool response = 1;</code>
+       */
+      public Builder setResponse(boolean value) {
+        bitField0_ |= 0x00000001;
+        response_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool response = 1;</code>
+       */
+      public Builder clearResponse() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        response_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.FlexClusterResponseProto)
+    }
+
+    static {
+      defaultInstance = new FlexClusterResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.FlexClusterResponseProto)
+  }
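+
+  // Usage sketch (illustrative): reading the boolean outcome of a flex call
+  // from a serialized response. hasResponse() and getResponse() are the
+  // generated accessors above; `wire` is a hypothetical byte[] payload.
+  //
+  //   Messages.FlexClusterResponseProto response =
+  //       Messages.FlexClusterResponseProto.parseFrom(wire);
+  //   if (response.hasResponse() && response.getResponse()) {
+  //     // the cluster accepted the new resource specification
+  //   }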
+
+  public interface GetJSONClusterStatusRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetJSONClusterStatusRequestProto}
+   *
+   * <pre>
+   **
+   * void request
+   * </pre>
+   */
+  public static final class GetJSONClusterStatusRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetJSONClusterStatusRequestProtoOrBuilder {
+    // Use GetJSONClusterStatusRequestProto.newBuilder() to construct.
+    private GetJSONClusterStatusRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetJSONClusterStatusRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetJSONClusterStatusRequestProto defaultInstance;
+    public static GetJSONClusterStatusRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetJSONClusterStatusRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetJSONClusterStatusRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.class, org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetJSONClusterStatusRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetJSONClusterStatusRequestProto>() {
+      public GetJSONClusterStatusRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetJSONClusterStatusRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetJSONClusterStatusRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto other = (org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetJSONClusterStatusRequestProto}
+     *
+     * <pre>
+     **
+     * void request
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.class, org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto build() {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto result = new org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetJSONClusterStatusRequestProto)
+    }
+
+    static {
+      defaultInstance = new GetJSONClusterStatusRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetJSONClusterStatusRequestProto)
+  }
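+
+  // Usage sketch (illustrative): the status request is a void message with no
+  // fields, so an empty build() -- or the shared default instance -- is all
+  // that is needed as the RPC argument.
+  //
+  //   Messages.GetJSONClusterStatusRequestProto statusReq =
+  //       Messages.GetJSONClusterStatusRequestProto.newBuilder().build();
+  //   // equivalently:
+  //   //   Messages.GetJSONClusterStatusRequestProto.getDefaultInstance()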
+
+  public interface GetJSONClusterStatusResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string clusterSpec = 1;
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    boolean hasClusterSpec();
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    java.lang.String getClusterSpec();
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getClusterSpecBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetJSONClusterStatusResponseProto}
+   *
+   * <pre>
+   **
+   * response
+   * </pre>
+   */
+  public static final class GetJSONClusterStatusResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetJSONClusterStatusResponseProtoOrBuilder {
+    // Use GetJSONClusterStatusResponseProto.newBuilder() to construct.
+    private GetJSONClusterStatusResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetJSONClusterStatusResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetJSONClusterStatusResponseProto defaultInstance;
+    public static GetJSONClusterStatusResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetJSONClusterStatusResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetJSONClusterStatusResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              clusterSpec_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.class, org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetJSONClusterStatusResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetJSONClusterStatusResponseProto>() {
+      public GetJSONClusterStatusResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetJSONClusterStatusResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetJSONClusterStatusResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string clusterSpec = 1;
+    public static final int CLUSTERSPEC_FIELD_NUMBER = 1;
+    private java.lang.Object clusterSpec_;
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public boolean hasClusterSpec() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public java.lang.String getClusterSpec() {
+      java.lang.Object ref = clusterSpec_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          clusterSpec_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string clusterSpec = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getClusterSpecBytes() {
+      java.lang.Object ref = clusterSpec_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        clusterSpec_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      clusterSpec_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasClusterSpec()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getClusterSpecBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getClusterSpecBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto other = (org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasClusterSpec() == other.hasClusterSpec());
+      if (hasClusterSpec()) {
+        result = result && getClusterSpec()
+            .equals(other.getClusterSpec());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasClusterSpec()) {
+        hash = (37 * hash) + CLUSTERSPEC_FIELD_NUMBER;
+        hash = (53 * hash) + getClusterSpec().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetJSONClusterStatusResponseProto}
+     *
+     * <pre>
+     * response
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.class, org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        clusterSpec_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto build() {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto result = new org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.clusterSpec_ = clusterSpec_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance()) return this;
+        if (other.hasClusterSpec()) {
+          bitField0_ |= 0x00000001;
+          clusterSpec_ = other.clusterSpec_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasClusterSpec()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string clusterSpec = 1;
+      private java.lang.Object clusterSpec_ = "";
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public boolean hasClusterSpec() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public java.lang.String getClusterSpec() {
+        java.lang.Object ref = clusterSpec_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          clusterSpec_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getClusterSpecBytes() {
+        java.lang.Object ref = clusterSpec_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          clusterSpec_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder setClusterSpec(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterSpec_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder clearClusterSpec() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        clusterSpec_ = getDefaultInstance().getClusterSpec();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string clusterSpec = 1;</code>
+       */
+      public Builder setClusterSpecBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterSpec_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetJSONClusterStatusResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetJSONClusterStatusResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetJSONClusterStatusResponseProto)
+  }
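+
+  /* Illustrative usage sketch, not generated output: the response's single
+   * required field carries the cluster status as a JSON string, so a round
+   * trip through the wire format looks like this (statusJson is a
+   * hypothetical caller-supplied String).
+   *
+   *   Messages.GetJSONClusterStatusResponseProto response =
+   *       Messages.GetJSONClusterStatusResponseProto.newBuilder()
+   *           .setClusterSpec(statusJson)  // required; build() throws if unset
+   *           .build();
+   *   byte[] wire = response.toByteArray();
+   *   Messages.GetJSONClusterStatusResponseProto parsed =
+   *       Messages.GetJSONClusterStatusResponseProto.parseFrom(wire);
+   *   assert parsed.getClusterSpec().equals(statusJson);
+   */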
+
+  public interface ListNodeUUIDsByRoleRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string role = 1;
+    /**
+     * <code>required string role = 1;</code>
+     */
+    boolean hasRole();
+    /**
+     * <code>required string role = 1;</code>
+     */
+    java.lang.String getRole();
+    /**
+     * <code>required string role = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getRoleBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.ListNodeUUIDsByRoleRequestProto}
+   *
+   * <pre>
+   * list the nodes in a role
+   * </pre>
+   */
+  public static final class ListNodeUUIDsByRoleRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements ListNodeUUIDsByRoleRequestProtoOrBuilder {
+    // Use ListNodeUUIDsByRoleRequestProto.newBuilder() to construct.
+    private ListNodeUUIDsByRoleRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ListNodeUUIDsByRoleRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ListNodeUUIDsByRoleRequestProto defaultInstance;
+    public static ListNodeUUIDsByRoleRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public ListNodeUUIDsByRoleRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListNodeUUIDsByRoleRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              role_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.class, org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<ListNodeUUIDsByRoleRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<ListNodeUUIDsByRoleRequestProto>() {
+      public ListNodeUUIDsByRoleRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new ListNodeUUIDsByRoleRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<ListNodeUUIDsByRoleRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string role = 1;
+    public static final int ROLE_FIELD_NUMBER = 1;
+    private java.lang.Object role_;
+    /**
+     * <code>required string role = 1;</code>
+     */
+    public boolean hasRole() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string role = 1;</code>
+     */
+    public java.lang.String getRole() {
+      java.lang.Object ref = role_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          role_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string role = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getRoleBytes() {
+      java.lang.Object ref = role_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        role_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      role_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasRole()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getRoleBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getRoleBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto other = (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasRole() == other.hasRole());
+      if (hasRole()) {
+        result = result && getRole()
+            .equals(other.getRole());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasRole()) {
+        hash = (37 * hash) + ROLE_FIELD_NUMBER;
+        hash = (53 * hash) + getRole().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.ListNodeUUIDsByRoleRequestProto}
+     *
+     * <pre>
+     * list the nodes in a role
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.class, org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        role_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto build() {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto result = new org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.role_ = role_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.getDefaultInstance()) return this;
+        if (other.hasRole()) {
+          bitField0_ |= 0x00000001;
+          role_ = other.role_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasRole()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string role = 1;
+      private java.lang.Object role_ = "";
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public boolean hasRole() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public java.lang.String getRole() {
+        java.lang.Object ref = role_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          role_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getRoleBytes() {
+        java.lang.Object ref = role_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          role_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public Builder setRole(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        role_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public Builder clearRole() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        role_ = getDefaultInstance().getRole();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string role = 1;</code>
+       */
+      public Builder setRoleBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        role_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.ListNodeUUIDsByRoleRequestProto)
+    }
+
+    static {
+      defaultInstance = new ListNodeUUIDsByRoleRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.ListNodeUUIDsByRoleRequestProto)
+  }
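+
+  /* Illustrative usage sketch, not generated output: a request for the node
+   * UUIDs of one role names that role in its single required field ("worker"
+   * is a hypothetical role name).
+   *
+   *   Messages.ListNodeUUIDsByRoleRequestProto request =
+   *       Messages.ListNodeUUIDsByRoleRequestProto.newBuilder()
+   *           .setRole("worker")
+   *           .build();
+   *   // Omitting setRole() leaves isInitialized() false and makes build() throw.
+   */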
+
+  public interface ListNodeUUIDsByRoleResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated string uuid = 1;
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    java.util.List<java.lang.String>
+    getUuidList();
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    int getUuidCount();
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    java.lang.String getUuid(int index);
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getUuidBytes(int index);
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.ListNodeUUIDsByRoleResponseProto}
+   *
+   * <pre>
+   * list the nodes in a role
+   * </pre>
+   */
+  public static final class ListNodeUUIDsByRoleResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements ListNodeUUIDsByRoleResponseProtoOrBuilder {
+    // Use ListNodeUUIDsByRoleResponseProto.newBuilder() to construct.
+    private ListNodeUUIDsByRoleResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ListNodeUUIDsByRoleResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ListNodeUUIDsByRoleResponseProto defaultInstance;
+    public static ListNodeUUIDsByRoleResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public ListNodeUUIDsByRoleResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListNodeUUIDsByRoleResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                uuid_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              uuid_.add(input.readBytes());
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.UnmodifiableLazyStringList(uuid_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.class, org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<ListNodeUUIDsByRoleResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<ListNodeUUIDsByRoleResponseProto>() {
+      public ListNodeUUIDsByRoleResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new ListNodeUUIDsByRoleResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<ListNodeUUIDsByRoleResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    // repeated string uuid = 1;
+    public static final int UUID_FIELD_NUMBER = 1;
+    private com.google.protobuf.LazyStringList uuid_;
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public java.util.List<java.lang.String>
+        getUuidList() {
+      return uuid_;
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public int getUuidCount() {
+      return uuid_.size();
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public java.lang.String getUuid(int index) {
+      return uuid_.get(index);
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getUuidBytes(int index) {
+      return uuid_.getByteString(index);
+    }
+
+    private void initFields() {
+      uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < uuid_.size(); i++) {
+        output.writeBytes(1, uuid_.getByteString(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      {
+        int dataSize = 0;
+        for (int i = 0; i < uuid_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(uuid_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getUuidList().size();
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto other = (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto) obj;
+
+      boolean result = true;
+      result = result && getUuidList()
+          .equals(other.getUuidList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getUuidCount() > 0) {
+        hash = (37 * hash) + UUID_FIELD_NUMBER;
+        hash = (53 * hash) + getUuidList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.ListNodeUUIDsByRoleResponseProto}
+     *
+     * <pre>
+     * list the nodes in a role
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.class, org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto build() {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto result = new org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              uuid_);
+          bitField0_ = (bitField0_ & ~0x00000001);
+        }
+        result.uuid_ = uuid_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance()) return this;
+        if (!other.uuid_.isEmpty()) {
+          if (uuid_.isEmpty()) {
+            uuid_ = other.uuid_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+          } else {
+            ensureUuidIsMutable();
+            uuid_.addAll(other.uuid_);
+          }
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // repeated string uuid = 1;
+      private com.google.protobuf.LazyStringList uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      private void ensureUuidIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.LazyStringArrayList(uuid_);
+          bitField0_ |= 0x00000001;
+        }
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public java.util.List<java.lang.String>
+          getUuidList() {
+        return java.util.Collections.unmodifiableList(uuid_);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public int getUuidCount() {
+        return uuid_.size();
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public java.lang.String getUuid(int index) {
+        return uuid_.get(index);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getUuidBytes(int index) {
+        return uuid_.getByteString(index);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder setUuid(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addUuid(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addAllUuid(
+          java.lang.Iterable<java.lang.String> values) {
+        ensureUuidIsMutable();
+        super.addAll(values, uuid_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder clearUuid() {
+        uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addUuidBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.ListNodeUUIDsByRoleResponseProto)
+    }
+
+    static {
+      defaultInstance = new ListNodeUUIDsByRoleResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.ListNodeUUIDsByRoleResponseProto)
+  }
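+
+  /* Illustrative usage sketch, not generated output: the response's repeated
+   * uuid field is populated through the add/addAll builder methods; repeated
+   * fields are never required, so an empty response is still valid (the UUID
+   * literals below are hypothetical).
+   *
+   *   Messages.ListNodeUUIDsByRoleResponseProto response =
+   *       Messages.ListNodeUUIDsByRoleResponseProto.newBuilder()
+   *           .addUuid("container-uuid-1")
+   *           .addAllUuid(java.util.Arrays.asList("container-uuid-2", "container-uuid-3"))
+   *           .build();
+   *   java.util.List<String> uuids =
+   *       Messages.ListNodeUUIDsByRoleResponseProto
+   *           .parseFrom(response.toByteArray())
+   *           .getUuidList();
+   */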
+
+  public interface GetNodeRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string uuid = 1;
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    boolean hasUuid();
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    java.lang.String getUuid();
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getUuidBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetNodeRequestProto}
+   *
+   * <pre>
+   * get a node
+   * </pre>
+   */
+  public static final class GetNodeRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetNodeRequestProtoOrBuilder {
+    // Use GetNodeRequestProto.newBuilder() to construct.
+    private GetNodeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetNodeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetNodeRequestProto defaultInstance;
+    public static GetNodeRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetNodeRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetNodeRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              uuid_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetNodeRequestProto.class, org.apache.slider.api.proto.Messages.GetNodeRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetNodeRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetNodeRequestProto>() {
+      public GetNodeRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetNodeRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetNodeRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string uuid = 1;
+    public static final int UUID_FIELD_NUMBER = 1;
+    private java.lang.Object uuid_;
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    public boolean hasUuid() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    public java.lang.String getUuid() {
+      java.lang.Object ref = uuid_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          uuid_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string uuid = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getUuidBytes() {
+      java.lang.Object ref = uuid_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        uuid_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      uuid_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasUuid()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getUuidBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getUuidBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetNodeRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetNodeRequestProto other = (org.apache.slider.api.proto.Messages.GetNodeRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasUuid() == other.hasUuid());
+      if (hasUuid()) {
+        result = result && getUuid()
+            .equals(other.getUuid());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
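+    // 0 doubles as the "not yet computed" sentinel; a genuine hash of 0 would
+    // simply be recomputed on every call.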
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasUuid()) {
+        hash = (37 * hash) + UUID_FIELD_NUMBER;
+        hash = (53 * hash) + getUuid().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetNodeRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetNodeRequestProto}
+     *
+     * <pre>
+     **
+     * get a node
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetNodeRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetNodeRequestProto.class, org.apache.slider.api.proto.Messages.GetNodeRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetNodeRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
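+          // Intentionally empty: this message has no nested-message fields,
+          // so there are no field builders to pre-create.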
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        uuid_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetNodeRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeRequestProto build() {
+        org.apache.slider.api.proto.Messages.GetNodeRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetNodeRequestProto result = new org.apache.slider.api.proto.Messages.GetNodeRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.uuid_ = uuid_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetNodeRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetNodeRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetNodeRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetNodeRequestProto.getDefaultInstance()) return this;
+        if (other.hasUuid()) {
+          bitField0_ |= 0x00000001;
+          uuid_ = other.uuid_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasUuid()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetNodeRequestProto parsedMessage = null;
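+        // If parsing fails midway, the partially parsed message recovered
+        // from the exception is still merged in (via the finally block)
+        // before the exception is rethrown.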
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetNodeRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string uuid = 1;
+      private java.lang.Object uuid_ = "";
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public boolean hasUuid() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public java.lang.String getUuid() {
+        java.lang.Object ref = uuid_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          uuid_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getUuidBytes() {
+        java.lang.Object ref = uuid_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          uuid_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public Builder setUuid(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        uuid_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public Builder clearUuid() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        uuid_ = getDefaultInstance().getUuid();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string uuid = 1;</code>
+       */
+      public Builder setUuidBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        uuid_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetNodeRequestProto)
+    }
+
+    static {
+      defaultInstance = new GetNodeRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetNodeRequestProto)
+  }
+
+  public interface GetNodeResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    boolean hasClusterNode();
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode();
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetNodeResponseProto}
+   *
+   * <pre>
+   **
+   * response on a node
+   * </pre>
+   */
+  public static final class GetNodeResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetNodeResponseProtoOrBuilder {
+    // Use GetNodeResponseProto.newBuilder() to construct.
+    private GetNodeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetNodeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetNodeResponseProto defaultInstance;
+    public static GetNodeResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetNodeResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetNodeResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
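+            // protoc emits the default branch ahead of the field cases; Java
+            // switch dispatch matches on the tag value, so ordering is harmless.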
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
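+              // tag 10 == (field number 1 << 3) | wire type 2 (length-delimited).
+              // A repeated occurrence of this field on the wire merges into the
+              // previously parsed message instead of replacing it.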
+              org.apache.slider.api.proto.Messages.RoleInstanceState.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = clusterNode_.toBuilder();
+              }
+              clusterNode_ = input.readMessage(org.apache.slider.api.proto.Messages.RoleInstanceState.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(clusterNode_);
+                clusterNode_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetNodeResponseProto.class, org.apache.slider.api.proto.Messages.GetNodeResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetNodeResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetNodeResponseProto>() {
+      public GetNodeResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetNodeResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetNodeResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+    public static final int CLUSTERNODE_FIELD_NUMBER = 1;
+    private org.apache.slider.api.proto.Messages.RoleInstanceState clusterNode_;
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public boolean hasClusterNode() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode() {
+      return clusterNode_;
+    }
+    /**
+     * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder() {
+      return clusterNode_;
+    }
+
+    private void initFields() {
+      clusterNode_ = org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasClusterNode()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getClusterNode().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, clusterNode_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, clusterNode_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetNodeResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetNodeResponseProto other = (org.apache.slider.api.proto.Messages.GetNodeResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasClusterNode() == other.hasClusterNode());
+      if (hasClusterNode()) {
+        result = result && getClusterNode()
+            .equals(other.getClusterNode());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasClusterNode()) {
+        hash = (37 * hash) + CLUSTERNODE_FIELD_NUMBER;
+        hash = (53 * hash) + getClusterNode().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetNodeResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetNodeResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetNodeResponseProto}
+     *
+     * <pre>
+     **
+     * response on a node
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetNodeResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetNodeResponseProto.class, org.apache.slider.api.proto.Messages.GetNodeResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetNodeResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getClusterNodeFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (clusterNodeBuilder_ == null) {
+          clusterNode_ = org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance();
+        } else {
+          clusterNodeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeResponseProto build() {
+        org.apache.slider.api.proto.Messages.GetNodeResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetNodeResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetNodeResponseProto result = new org.apache.slider.api.proto.Messages.GetNodeResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (clusterNodeBuilder_ == null) {
+          result.clusterNode_ = clusterNode_;
+        } else {
+          result.clusterNode_ = clusterNodeBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetNodeResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetNodeResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetNodeResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance()) return this;
+        if (other.hasClusterNode()) {
+          mergeClusterNode(other.getClusterNode());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasClusterNode()) {
+          return false;
+        }
+        if (!getClusterNode().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetNodeResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetNodeResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+      private org.apache.slider.api.proto.Messages.RoleInstanceState clusterNode_ = org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> clusterNodeBuilder_;
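+      // The SingleFieldBuilder is created lazily in getClusterNodeFieldBuilder();
+      // once it exists it owns the field's state and clusterNode_ is nulled out.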
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public boolean hasClusterNode() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode() {
+        if (clusterNodeBuilder_ == null) {
+          return clusterNode_;
+        } else {
+          return clusterNodeBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder setClusterNode(org.apache.slider.api.proto.Messages.RoleInstanceState value) {
+        if (clusterNodeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          clusterNode_ = value;
+          onChanged();
+        } else {
+          clusterNodeBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder setClusterNode(
+          org.apache.slider.api.proto.Messages.RoleInstanceState.Builder builderForValue) {
+        if (clusterNodeBuilder_ == null) {
+          clusterNode_ = builderForValue.build();
+          onChanged();
+        } else {
+          clusterNodeBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder mergeClusterNode(org.apache.slider.api.proto.Messages.RoleInstanceState value) {
+        if (clusterNodeBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              clusterNode_ != org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance()) {
+            clusterNode_ =
+              org.apache.slider.api.proto.Messages.RoleInstanceState.newBuilder(clusterNode_).mergeFrom(value).buildPartial();
+          } else {
+            clusterNode_ = value;
+          }
+          onChanged();
+        } else {
+          clusterNodeBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder clearClusterNode() {
+        if (clusterNodeBuilder_ == null) {
+          clusterNode_ = org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance();
+          onChanged();
+        } else {
+          clusterNodeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState.Builder getClusterNodeBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getClusterNodeFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder() {
+        if (clusterNodeBuilder_ != null) {
+          return clusterNodeBuilder_.getMessageOrBuilder();
+        } else {
+          return clusterNode_;
+        }
+      }
+      /**
+       * <code>required .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> 
+          getClusterNodeFieldBuilder() {
+        if (clusterNodeBuilder_ == null) {
+          clusterNodeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder>(
+                  clusterNode_,
+                  getParentForChildren(),
+                  isClean());
+          clusterNode_ = null;
+        }
+        return clusterNodeBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetNodeResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetNodeResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetNodeResponseProto)
+  }
+
+  public interface GetClusterNodesRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated string uuid = 1;
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    java.util.List<java.lang.String>
+    getUuidList();
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    int getUuidCount();
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    java.lang.String getUuid(int index);
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getUuidBytes(int index);
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetClusterNodesRequestProto}
+   *
+   * <pre>
+   **
+   * list the nodes for the UUIDs
+   * </pre>
+   */
+  public static final class GetClusterNodesRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetClusterNodesRequestProtoOrBuilder {
+    // Use GetClusterNodesRequestProto.newBuilder() to construct.
+    private GetClusterNodesRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetClusterNodesRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetClusterNodesRequestProto defaultInstance;
+    public static GetClusterNodesRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetClusterNodesRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetClusterNodesRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
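+              // The backing list is allocated lazily on the first element and
+              // frozen into an unmodifiable view in the finally block below.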
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                uuid_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              uuid_.add(input.readBytes());
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.UnmodifiableLazyStringList(uuid_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.class, org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetClusterNodesRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetClusterNodesRequestProto>() {
+      public GetClusterNodesRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetClusterNodesRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetClusterNodesRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    // repeated string uuid = 1;
+    public static final int UUID_FIELD_NUMBER = 1;
+    private com.google.protobuf.LazyStringList uuid_;
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public java.util.List<java.lang.String>
+        getUuidList() {
+      return uuid_;
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public int getUuidCount() {
+      return uuid_.size();
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public java.lang.String getUuid(int index) {
+      return uuid_.get(index);
+    }
+    /**
+     * <code>repeated string uuid = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getUuidBytes(int index) {
+      return uuid_.getByteString(index);
+    }
+
+    private void initFields() {
+      uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < uuid_.size(); i++) {
+        output.writeBytes(1, uuid_.getByteString(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      {
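+        // Each element costs its length-delimited payload plus one tag byte
+        // (field number 1 fits in a single-byte tag, hence the "1 *" factor).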
+        int dataSize = 0;
+        for (int i = 0; i < uuid_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(uuid_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getUuidList().size();
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto other = (org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto) obj;
+
+      boolean result = true;
+      result = result && getUuidList()
+          .equals(other.getUuidList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getUuidCount() > 0) {
+        hash = (37 * hash) + UUID_FIELD_NUMBER;
+        hash = (53 * hash) + getUuidList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetClusterNodesRequestProto}
+     *
+     * <pre>
+     **
+     * list the nodes for the UUIDs
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetClusterNodesRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.class, org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto build() {
+        org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto result = new org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto(this);
+        int from_bitField0_ = bitField0_;
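+        // Freeze the list before handing it to the message; clearing the bit
+        // forces a later mutation on this builder to take a fresh copy (see
+        // ensureUuidIsMutable()).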
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              uuid_);
+          bitField0_ = (bitField0_ & ~0x00000001);
+        }
+        result.uuid_ = uuid_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.getDefaultInstance()) return this;
+        if (!other.uuid_.isEmpty()) {
+          if (uuid_.isEmpty()) {
+            uuid_ = other.uuid_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+          } else {
+            ensureUuidIsMutable();
+            uuid_.addAll(other.uuid_);
+          }
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // repeated string uuid = 1;
+      private com.google.protobuf.LazyStringList uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
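+      // Copy-on-write: the builder shares an immutable list until a mutation
+      // forces ensureUuidIsMutable() to make a private copy.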
+      private void ensureUuidIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          uuid_ = new com.google.protobuf.LazyStringArrayList(uuid_);
+          bitField0_ |= 0x00000001;
+        }
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public java.util.List<java.lang.String>
+          getUuidList() {
+        return java.util.Collections.unmodifiableList(uuid_);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public int getUuidCount() {
+        return uuid_.size();
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public java.lang.String getUuid(int index) {
+        return uuid_.get(index);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getUuidBytes(int index) {
+        return uuid_.getByteString(index);
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder setUuid(
+          int index, java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addUuid(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addAllUuid(
+          java.lang.Iterable<java.lang.String> values) {
+        ensureUuidIsMutable();
+        super.addAll(values, uuid_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder clearUuid() {
+        uuid_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated string uuid = 1;</code>
+       */
+      public Builder addUuidBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureUuidIsMutable();
+        uuid_.add(value);
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetClusterNodesRequestProto)
+    }
+
+    static {
+      defaultInstance = new GetClusterNodesRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetClusterNodesRequestProto)
+  }
+
+  public interface GetClusterNodesResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState> 
+        getClusterNodeList();
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode(int index);
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    int getClusterNodeCount();
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    java.util.List<? extends org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> 
+        getClusterNodeOrBuilderList();
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetClusterNodesResponseProto}
+   *
+   * <pre>
+   **
+   * list the nodes in a role
+   * </pre>
+   */
+  public static final class GetClusterNodesResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetClusterNodesResponseProtoOrBuilder {
+    // Use GetClusterNodesResponseProto.newBuilder() to construct.
+    private GetClusterNodesResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetClusterNodesResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetClusterNodesResponseProto defaultInstance;
+    public static GetClusterNodesResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetClusterNodesResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetClusterNodesResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                clusterNode_ = new java.util.ArrayList<org.apache.slider.api.proto.Messages.RoleInstanceState>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              clusterNode_.add(input.readMessage(org.apache.slider.api.proto.Messages.RoleInstanceState.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          clusterNode_ = java.util.Collections.unmodifiableList(clusterNode_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.class, org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetClusterNodesResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetClusterNodesResponseProto>() {
+      public GetClusterNodesResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetClusterNodesResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetClusterNodesResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    // repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+    public static final int CLUSTERNODE_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState> clusterNode_;
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState> getClusterNodeList() {
+      return clusterNode_;
+    }
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public java.util.List<? extends org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> 
+        getClusterNodeOrBuilderList() {
+      return clusterNode_;
+    }
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public int getClusterNodeCount() {
+      return clusterNode_.size();
+    }
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode(int index) {
+      return clusterNode_.get(index);
+    }
+    /**
+     * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+     */
+    public org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder(
+        int index) {
+      return clusterNode_.get(index);
+    }
+
+    private void initFields() {
+      clusterNode_ = java.util.Collections.emptyList();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      for (int i = 0; i < getClusterNodeCount(); i++) {
+        if (!getClusterNode(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < clusterNode_.size(); i++) {
+        output.writeMessage(1, clusterNode_.get(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < clusterNode_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, clusterNode_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto other = (org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto) obj;
+
+      boolean result = true;
+      result = result && getClusterNodeList()
+          .equals(other.getClusterNodeList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getClusterNodeCount() > 0) {
+        hash = (37 * hash) + CLUSTERNODE_FIELD_NUMBER;
+        hash = (53 * hash) + getClusterNodeList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetClusterNodesResponseProto}
+     *
+     * <pre>
+     **
+     * list the nodes in a role
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetClusterNodesResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.class, org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getClusterNodeFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (clusterNodeBuilder_ == null) {
+          clusterNode_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          clusterNodeBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto build() {
+        org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto result = new org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        if (clusterNodeBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            clusterNode_ = java.util.Collections.unmodifiableList(clusterNode_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.clusterNode_ = clusterNode_;
+        } else {
+          result.clusterNode_ = clusterNodeBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance()) return this;
+        if (clusterNodeBuilder_ == null) {
+          if (!other.clusterNode_.isEmpty()) {
+            if (clusterNode_.isEmpty()) {
+              clusterNode_ = other.clusterNode_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureClusterNodeIsMutable();
+              clusterNode_.addAll(other.clusterNode_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.clusterNode_.isEmpty()) {
+            if (clusterNodeBuilder_.isEmpty()) {
+              clusterNodeBuilder_.dispose();
+              clusterNodeBuilder_ = null;
+              clusterNode_ = other.clusterNode_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              clusterNodeBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getClusterNodeFieldBuilder() : null;
+            } else {
+              clusterNodeBuilder_.addAllMessages(other.clusterNode_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getClusterNodeCount(); i++) {
+          if (!getClusterNode(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;
+      private java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState> clusterNode_ =
+        java.util.Collections.emptyList();
+      private void ensureClusterNodeIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          clusterNode_ = new java.util.ArrayList<org.apache.slider.api.proto.Messages.RoleInstanceState>(clusterNode_);
+          bitField0_ |= 0x00000001;
+        }
+      }
+
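+      // Generated builders keep two representations of this repeated field:
+      // a plain java.util.List until sub-builders are requested, after which
+      // a RepeatedFieldBuilder is created lazily by getClusterNodeFieldBuilder().
+      // Every accessor below branches on whichever representation is active
+      // (clusterNodeBuilder_ == null vs. non-null).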
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> clusterNodeBuilder_;
+
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState> getClusterNodeList() {
+        if (clusterNodeBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(clusterNode_);
+        } else {
+          return clusterNodeBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public int getClusterNodeCount() {
+        if (clusterNodeBuilder_ == null) {
+          return clusterNode_.size();
+        } else {
+          return clusterNodeBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState getClusterNode(int index) {
+        if (clusterNodeBuilder_ == null) {
+          return clusterNode_.get(index);
+        } else {
+          return clusterNodeBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder setClusterNode(
+          int index, org.apache.slider.api.proto.Messages.RoleInstanceState value) {
+        if (clusterNodeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureClusterNodeIsMutable();
+          clusterNode_.set(index, value);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder setClusterNode(
+          int index, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder builderForValue) {
+        if (clusterNodeBuilder_ == null) {
+          ensureClusterNodeIsMutable();
+          clusterNode_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          clusterNodeBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder addClusterNode(org.apache.slider.api.proto.Messages.RoleInstanceState value) {
+        if (clusterNodeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureClusterNodeIsMutable();
+          clusterNode_.add(value);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder addClusterNode(
+          int index, org.apache.slider.api.proto.Messages.RoleInstanceState value) {
+        if (clusterNodeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureClusterNodeIsMutable();
+          clusterNode_.add(index, value);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder addClusterNode(
+          org.apache.slider.api.proto.Messages.RoleInstanceState.Builder builderForValue) {
+        if (clusterNodeBuilder_ == null) {
+          ensureClusterNodeIsMutable();
+          clusterNode_.add(builderForValue.build());
+          onChanged();
+        } else {
+          clusterNodeBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder addClusterNode(
+          int index, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder builderForValue) {
+        if (clusterNodeBuilder_ == null) {
+          ensureClusterNodeIsMutable();
+          clusterNode_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          clusterNodeBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder addAllClusterNode(
+          java.lang.Iterable<? extends org.apache.slider.api.proto.Messages.RoleInstanceState> values) {
+        if (clusterNodeBuilder_ == null) {
+          ensureClusterNodeIsMutable();
+          super.addAll(values, clusterNode_);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder clearClusterNode() {
+        if (clusterNodeBuilder_ == null) {
+          clusterNode_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public Builder removeClusterNode(int index) {
+        if (clusterNodeBuilder_ == null) {
+          ensureClusterNodeIsMutable();
+          clusterNode_.remove(index);
+          onChanged();
+        } else {
+          clusterNodeBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState.Builder getClusterNodeBuilder(
+          int index) {
+        return getClusterNodeFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder getClusterNodeOrBuilder(
+          int index) {
+        if (clusterNodeBuilder_ == null) {
+          return clusterNode_.get(index);
+        } else {
+          return clusterNodeBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public java.util.List<? extends org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> 
+           getClusterNodeOrBuilderList() {
+        if (clusterNodeBuilder_ != null) {
+          return clusterNodeBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(clusterNode_);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState.Builder addClusterNodeBuilder() {
+        return getClusterNodeFieldBuilder().addBuilder(
+            org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public org.apache.slider.api.proto.Messages.RoleInstanceState.Builder addClusterNodeBuilder(
+          int index) {
+        return getClusterNodeFieldBuilder().addBuilder(
+            index, org.apache.slider.api.proto.Messages.RoleInstanceState.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .org.apache.slider.api.RoleInstanceState clusterNode = 1;</code>
+       */
+      public java.util.List<org.apache.slider.api.proto.Messages.RoleInstanceState.Builder> 
+           getClusterNodeBuilderList() {
+        return getClusterNodeFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder> 
+          getClusterNodeFieldBuilder() {
+        if (clusterNodeBuilder_ == null) {
+          clusterNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.slider.api.proto.Messages.RoleInstanceState, org.apache.slider.api.proto.Messages.RoleInstanceState.Builder, org.apache.slider.api.proto.Messages.RoleInstanceStateOrBuilder>(
+                  clusterNode_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          clusterNode_ = null;
+        }
+        return clusterNodeBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetClusterNodesResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetClusterNodesResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetClusterNodesResponseProto)
+  }
+
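+  // Usage sketch (illustrative; "response" is a hypothetical variable holding
+  // an RPC result): iterating the repeated clusterNode field of a
+  // GetClusterNodesResponseProto.
+  //
+  //   for (Messages.RoleInstanceState node : response.getClusterNodeList()) {
+  //     System.out.println(node.toString());
+  //   }
+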
+  public interface EchoRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string text = 1;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    boolean hasText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    java.lang.String getText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTextBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.EchoRequestProto}
+   *
+   * <pre>
+   **
+   * Echo
+   * </pre>
+   */
+  public static final class EchoRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements EchoRequestProtoOrBuilder {
+    // Use EchoRequestProto.newBuilder() to construct.
+    private EchoRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private EchoRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final EchoRequestProto defaultInstance;
+    public static EchoRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public EchoRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EchoRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              text_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.EchoRequestProto.class, org.apache.slider.api.proto.Messages.EchoRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<EchoRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<EchoRequestProto>() {
+      public EchoRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new EchoRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<EchoRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string text = 1;
+    public static final int TEXT_FIELD_NUMBER = 1;
+    private java.lang.Object text_;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public boolean hasText() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public java.lang.String getText() {
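+      // text_ holds either a String or the raw ByteString read off the wire;
+      // a successful UTF-8 decode is cached back into text_ so later calls
+      // return the String directly.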
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          text_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTextBytes() {
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        text_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      text_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasText()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getTextBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getTextBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.EchoRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.EchoRequestProto other = (org.apache.slider.api.proto.Messages.EchoRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasText() == other.hasText());
+      if (hasText()) {
+        result = result && getText()
+            .equals(other.getText());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasText()) {
+        hash = (37 * hash) + TEXT_FIELD_NUMBER;
+        hash = (53 * hash) + getText().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.EchoRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.EchoRequestProto}
+     *
+     * <pre>
+     **
+     * Echo
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.EchoRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.EchoRequestProto.class, org.apache.slider.api.proto.Messages.EchoRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.EchoRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        text_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.EchoRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoRequestProto build() {
+        org.apache.slider.api.proto.Messages.EchoRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.EchoRequestProto result = new org.apache.slider.api.proto.Messages.EchoRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.text_ = text_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.EchoRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.EchoRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.EchoRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.EchoRequestProto.getDefaultInstance()) return this;
+        if (other.hasText()) {
+          bitField0_ |= 0x00000001;
+          text_ = other.text_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasText()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.EchoRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.EchoRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string text = 1;
+      private java.lang.Object text_ = "";
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public boolean hasText() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public java.lang.String getText() {
+        java.lang.Object ref = text_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          text_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTextBytes() {
+        java.lang.Object ref = text_;
+        if (ref instanceof java.lang.String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          text_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setText(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder clearText() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        text_ = getDefaultInstance().getText();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setTextBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.EchoRequestProto)
+    }
+
+    static {
+      defaultInstance = new EchoRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.EchoRequestProto)
+  }
+
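+  // Usage sketch (illustrative; the payload string is hypothetical): a
+  // build/serialize/parse round trip for EchoRequestProto. "text" is the
+  // message's only field and is required, so build() throws if it is unset.
+  //
+  //   Messages.EchoRequestProto echo =
+  //       Messages.EchoRequestProto.newBuilder().setText("ping").build();
+  //   byte[] wire = echo.toByteArray();
+  //   Messages.EchoRequestProto parsed =
+  //       Messages.EchoRequestProto.parseFrom(wire);
+  //   assert "ping".equals(parsed.getText());
+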
+  public interface EchoResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string text = 1;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    boolean hasText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    java.lang.String getText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTextBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.EchoResponseProto}
+   *
+   * <pre>
+   **
+   * Echo reply
+   * </pre>
+   */
+  public static final class EchoResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements EchoResponseProtoOrBuilder {
+    // Use EchoResponseProto.newBuilder() to construct.
+    private EchoResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private EchoResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final EchoResponseProto defaultInstance;
+    public static EchoResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public EchoResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EchoResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              text_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.EchoResponseProto.class, org.apache.slider.api.proto.Messages.EchoResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<EchoResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<EchoResponseProto>() {
+      public EchoResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new EchoResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<EchoResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string text = 1;
+    public static final int TEXT_FIELD_NUMBER = 1;
+    private java.lang.Object text_;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public boolean hasText() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public java.lang.String getText() {
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          text_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTextBytes() {
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        text_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      text_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasText()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getTextBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getTextBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.EchoResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.EchoResponseProto other = (org.apache.slider.api.proto.Messages.EchoResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasText() == other.hasText());
+      if (hasText()) {
+        result = result && getText()
+            .equals(other.getText());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasText()) {
+        hash = (37 * hash) + TEXT_FIELD_NUMBER;
+        hash = (53 * hash) + getText().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.EchoResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.EchoResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.EchoResponseProto}
+     *
+     * <pre>
+     **
+     * Echo reply
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.EchoResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.EchoResponseProto.class, org.apache.slider.api.proto.Messages.EchoResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.EchoResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        text_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_EchoResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoResponseProto build() {
+        org.apache.slider.api.proto.Messages.EchoResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.EchoResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.EchoResponseProto result = new org.apache.slider.api.proto.Messages.EchoResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.text_ = text_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.EchoResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.EchoResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.EchoResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance()) return this;
+        if (other.hasText()) {
+          bitField0_ |= 0x00000001;
+          text_ = other.text_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasText()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.EchoResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.EchoResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string text = 1;
+      private java.lang.Object text_ = "";
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public boolean hasText() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public java.lang.String getText() {
+        java.lang.Object ref = text_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          text_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTextBytes() {
+        java.lang.Object ref = text_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          text_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setText(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder clearText() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        text_ = getDefaultInstance().getText();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setTextBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.EchoResponseProto)
+    }
+
+    static {
+      defaultInstance = new EchoResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.EchoResponseProto)
+  }
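+
+  // Illustrative usage sketch (hand-written helper, not protoc output): builds
+  // an EchoResponseProto through the generated builder, serializes it to wire
+  // format, and parses it back via the generated PARSER.
+  private static EchoResponseProto echoRoundTripExample()
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    EchoResponseProto echo = EchoResponseProto.newBuilder()
+        .setText("pong")                      // required field
+        .build();                             // throws if text is unset
+    byte[] wire = echo.toByteArray();         // wire-format bytes
+    return EchoResponseProto.parseFrom(wire); // delegates to PARSER
+  }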
+
+  public interface KillContainerRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string id = 1;
+    /**
+     * <code>required string id = 1;</code>
+     */
+    boolean hasId();
+    /**
+     * <code>required string id = 1;</code>
+     */
+    java.lang.String getId();
+    /**
+     * <code>required string id = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getIdBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.KillContainerRequestProto}
+   *
+   * <pre>
+   **
+   * Kill a container
+   * </pre>
+   */
+  public static final class KillContainerRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements KillContainerRequestProtoOrBuilder {
+    // Use KillContainerRequestProto.newBuilder() to construct.
+    private KillContainerRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private KillContainerRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final KillContainerRequestProto defaultInstance;
+    public static KillContainerRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public KillContainerRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private KillContainerRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              id_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.KillContainerRequestProto.class, org.apache.slider.api.proto.Messages.KillContainerRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<KillContainerRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<KillContainerRequestProto>() {
+      public KillContainerRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new KillContainerRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<KillContainerRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string id = 1;
+    public static final int ID_FIELD_NUMBER = 1;
+    private java.lang.Object id_;
+    /**
+     * <code>required string id = 1;</code>
+     */
+    public boolean hasId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string id = 1;</code>
+     */
+    public java.lang.String getId() {
+      java.lang.Object ref = id_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          id_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string id = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getIdBytes() {
+      java.lang.Object ref = id_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        id_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      id_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getIdBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getIdBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.KillContainerRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.KillContainerRequestProto other = (org.apache.slider.api.proto.Messages.KillContainerRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasId() == other.hasId());
+      if (hasId()) {
+        result = result && getId()
+            .equals(other.getId());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasId()) {
+        hash = (37 * hash) + ID_FIELD_NUMBER;
+        hash = (53 * hash) + getId().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.KillContainerRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.KillContainerRequestProto}
+     *
+     * <pre>
+     **
+     * Kill a container
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.KillContainerRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.KillContainerRequestProto.class, org.apache.slider.api.proto.Messages.KillContainerRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.KillContainerRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        id_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.KillContainerRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerRequestProto build() {
+        org.apache.slider.api.proto.Messages.KillContainerRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.KillContainerRequestProto result = new org.apache.slider.api.proto.Messages.KillContainerRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.id_ = id_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.KillContainerRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.KillContainerRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.KillContainerRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.KillContainerRequestProto.getDefaultInstance()) return this;
+        if (other.hasId()) {
+          bitField0_ |= 0x00000001;
+          id_ = other.id_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.KillContainerRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.KillContainerRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string id = 1;
+      private java.lang.Object id_ = "";
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public boolean hasId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public java.lang.String getId() {
+        java.lang.Object ref = id_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          id_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getIdBytes() {
+        java.lang.Object ref = id_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          id_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public Builder setId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        id_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public Builder clearId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        id_ = getDefaultInstance().getId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string id = 1;</code>
+       */
+      public Builder setIdBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        id_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.KillContainerRequestProto)
+    }
+
+    static {
+      defaultInstance = new KillContainerRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.KillContainerRequestProto)
+  }
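+
+  // Illustrative sketch (hand-written helper, not protoc output): id is a
+  // required field, so a fresh builder reports isInitialized() == false and
+  // build() would throw UninitializedMessageException until setId() is called.
+  private static KillContainerRequestProto killRequestExample() {
+    KillContainerRequestProto.Builder builder =
+        KillContainerRequestProto.newBuilder();
+    // At this point builder.isInitialized() is false: the id bit of
+    // bitField0_ (0x00000001) is still clear.
+    return builder
+        .setId("container_e01_0001_01_000002") // hypothetical container id
+        .build();                              // safe once id is set
+  }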
+
+  public interface KillContainerResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool success = 1;
+    /**
+     * <code>required bool success = 1;</code>
+     */
+    boolean hasSuccess();
+    /**
+     * <code>required bool success = 1;</code>
+     */
+    boolean getSuccess();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.KillContainerResponseProto}
+   *
+   * <pre>
+   **
+   * Kill reply
+   * </pre>
+   */
+  public static final class KillContainerResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements KillContainerResponseProtoOrBuilder {
+    // Use KillContainerResponseProto.newBuilder() to construct.
+    private KillContainerResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private KillContainerResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final KillContainerResponseProto defaultInstance;
+    public static KillContainerResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public KillContainerResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private KillContainerResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              success_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.KillContainerResponseProto.class, org.apache.slider.api.proto.Messages.KillContainerResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<KillContainerResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<KillContainerResponseProto>() {
+      public KillContainerResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new KillContainerResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<KillContainerResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool success = 1;
+    public static final int SUCCESS_FIELD_NUMBER = 1;
+    private boolean success_;
+    /**
+     * <code>required bool success = 1;</code>
+     */
+    public boolean hasSuccess() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool success = 1;</code>
+     */
+    public boolean getSuccess() {
+      return success_;
+    }
+
+    private void initFields() {
+      success_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasSuccess()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, success_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, success_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.KillContainerResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.KillContainerResponseProto other = (org.apache.slider.api.proto.Messages.KillContainerResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasSuccess() == other.hasSuccess());
+      if (hasSuccess()) {
+        result = result && (getSuccess()
+            == other.getSuccess());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSuccess()) {
+        hash = (37 * hash) + SUCCESS_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getSuccess());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.KillContainerResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.KillContainerResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.KillContainerResponseProto}
+     *
+     * <pre>
+     **
+     * Kill reply
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.KillContainerResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.KillContainerResponseProto.class, org.apache.slider.api.proto.Messages.KillContainerResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.KillContainerResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        success_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerResponseProto build() {
+        org.apache.slider.api.proto.Messages.KillContainerResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.KillContainerResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.KillContainerResponseProto result = new org.apache.slider.api.proto.Messages.KillContainerResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.success_ = success_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.KillContainerResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.KillContainerResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.KillContainerResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance()) return this;
+        if (other.hasSuccess()) {
+          setSuccess(other.getSuccess());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasSuccess()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.KillContainerResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.KillContainerResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool success = 1;
+      private boolean success_;
+      /**
+       * <code>required bool success = 1;</code>
+       */
+      public boolean hasSuccess() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool success = 1;</code>
+       */
+      public boolean getSuccess() {
+        return success_;
+      }
+      /**
+       * <code>required bool success = 1;</code>
+       */
+      public Builder setSuccess(boolean value) {
+        bitField0_ |= 0x00000001;
+        success_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool success = 1;</code>
+       */
+      public Builder clearSuccess() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        success_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.KillContainerResponseProto)
+    }
+
+    static {
+      defaultInstance = new KillContainerResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.KillContainerResponseProto)
+  }
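+
+  // Illustrative sketch (hand-written helper, not protoc output):
+  // writeDelimitedTo prefixes the payload with its varint length, so
+  // parseDelimitedFrom can read messages back one at a time from a stream.
+  private static KillContainerResponseProto responseDelimitedRoundTrip()
+      throws java.io.IOException {
+    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
+    KillContainerResponseProto.newBuilder()
+        .setSuccess(true)          // required bool, tracked by bit 0x00000001
+        .build()
+        .writeDelimitedTo(out);    // varint length prefix + payload
+    return KillContainerResponseProto.parseDelimitedFrom(
+        new java.io.ByteArrayInputStream(out.toByteArray()));
+  }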
+
+  public interface AMSuicideRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string text = 1;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    boolean hasText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    java.lang.String getText();
+    /**
+     * <code>required string text = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTextBytes();
+
+    // required int32 signal = 2;
+    /**
+     * <code>required int32 signal = 2;</code>
+     */
+    boolean hasSignal();
+    /**
+     * <code>required int32 signal = 2;</code>
+     */
+    int getSignal();
+
+    // required int32 delay = 3;
+    /**
+     * <code>required int32 delay = 3;</code>
+     */
+    boolean hasDelay();
+    /**
+     * <code>required int32 delay = 3;</code>
+     */
+    int getDelay();
+  }
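+
+  // Illustrative sketch (hand-written helper, not protoc output): all three
+  // fields of AMSuicideRequestProto below are required, each tracked by its
+  // own bit in bitField0_ (0x1 text, 0x2 signal, 0x4 delay). setSignal and
+  // setDelay are assumed from the standard generated int32 setter pattern.
+  private static AMSuicideRequestProto amSuicideExample() {
+    return AMSuicideRequestProto.newBuilder()
+        .setText("test-triggered AM exit")  // hypothetical message text
+        .setSignal(15)                      // e.g. SIGTERM
+        .setDelay(30)                       // seconds before acting
+        .build();
+  }
+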
+  /**
+   * Protobuf type {@code org.apache.slider.api.AMSuicideRequestProto}
+   *
+   * <pre>
+   **
+   * AM suicide
+   * </pre>
+   */
+  public static final class AMSuicideRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements AMSuicideRequestProtoOrBuilder {
+    // Use AMSuicideRequestProto.newBuilder() to construct.
+    private AMSuicideRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private AMSuicideRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final AMSuicideRequestProto defaultInstance;
+    public static AMSuicideRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public AMSuicideRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private AMSuicideRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              text_ = input.readBytes();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              signal_ = input.readInt32();
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              delay_ = input.readInt32();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.AMSuicideRequestProto.class, org.apache.slider.api.proto.Messages.AMSuicideRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<AMSuicideRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<AMSuicideRequestProto>() {
+      public AMSuicideRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new AMSuicideRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<AMSuicideRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string text = 1;
+    public static final int TEXT_FIELD_NUMBER = 1;
+    private java.lang.Object text_;
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public boolean hasText() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public java.lang.String getText() {
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          text_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string text = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTextBytes() {
+      java.lang.Object ref = text_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        text_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required int32 signal = 2;
+    public static final int SIGNAL_FIELD_NUMBER = 2;
+    private int signal_;
+    /**
+     * <code>required int32 signal = 2;</code>
+     */
+    public boolean hasSignal() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required int32 signal = 2;</code>
+     */
+    public int getSignal() {
+      return signal_;
+    }
+
+    // required int32 delay = 3;
+    public static final int DELAY_FIELD_NUMBER = 3;
+    private int delay_;
+    /**
+     * <code>required int32 delay = 3;</code>
+     */
+    public boolean hasDelay() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required int32 delay = 3;</code>
+     */
+    public int getDelay() {
+      return delay_;
+    }
+
+    private void initFields() {
+      text_ = "";
+      signal_ = 0;
+      delay_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasText()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSignal()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasDelay()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getTextBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeInt32(2, signal_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(3, delay_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getTextBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(2, signal_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(3, delay_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.AMSuicideRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.AMSuicideRequestProto other = (org.apache.slider.api.proto.Messages.AMSuicideRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasText() == other.hasText());
+      if (hasText()) {
+        result = result && getText()
+            .equals(other.getText());
+      }
+      result = result && (hasSignal() == other.hasSignal());
+      if (hasSignal()) {
+        result = result && (getSignal()
+            == other.getSignal());
+      }
+      result = result && (hasDelay() == other.hasDelay());
+      if (hasDelay()) {
+        result = result && (getDelay()
+            == other.getDelay());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasText()) {
+        hash = (37 * hash) + TEXT_FIELD_NUMBER;
+        hash = (53 * hash) + getText().hashCode();
+      }
+      if (hasSignal()) {
+        hash = (37 * hash) + SIGNAL_FIELD_NUMBER;
+        hash = (53 * hash) + getSignal();
+      }
+      if (hasDelay()) {
+        hash = (37 * hash) + DELAY_FIELD_NUMBER;
+        hash = (53 * hash) + getDelay();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.AMSuicideRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.AMSuicideRequestProto}
+     *
+     * <pre>
+     **
+     * AM suicide
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.AMSuicideRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.AMSuicideRequestProto.class, org.apache.slider.api.proto.Messages.AMSuicideRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.AMSuicideRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        text_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        signal_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        delay_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.AMSuicideRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideRequestProto build() {
+        org.apache.slider.api.proto.Messages.AMSuicideRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.AMSuicideRequestProto result = new org.apache.slider.api.proto.Messages.AMSuicideRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.text_ = text_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.signal_ = signal_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.delay_ = delay_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.AMSuicideRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.AMSuicideRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.AMSuicideRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.AMSuicideRequestProto.getDefaultInstance()) return this;
+        if (other.hasText()) {
+          bitField0_ |= 0x00000001;
+          text_ = other.text_;
+          onChanged();
+        }
+        if (other.hasSignal()) {
+          setSignal(other.getSignal());
+        }
+        if (other.hasDelay()) {
+          setDelay(other.getDelay());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasText()) {
+          return false;
+        }
+        if (!hasSignal()) {
+          return false;
+        }
+        if (!hasDelay()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.AMSuicideRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.AMSuicideRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string text = 1;
+      private java.lang.Object text_ = "";
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public boolean hasText() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public java.lang.String getText() {
+        java.lang.Object ref = text_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          text_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTextBytes() {
+        java.lang.Object ref = text_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          text_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setText(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder clearText() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        text_ = getDefaultInstance().getText();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string text = 1;</code>
+       */
+      public Builder setTextBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        text_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required int32 signal = 2;
+      private int signal_ ;
+      /**
+       * <code>required int32 signal = 2;</code>
+       */
+      public boolean hasSignal() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required int32 signal = 2;</code>
+       */
+      public int getSignal() {
+        return signal_;
+      }
+      /**
+       * <code>required int32 signal = 2;</code>
+       */
+      public Builder setSignal(int value) {
+        bitField0_ |= 0x00000002;
+        signal_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int32 signal = 2;</code>
+       */
+      public Builder clearSignal() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        signal_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required int32 delay = 3;
+      private int delay_ ;
+      /**
+       * <code>required int32 delay = 3;</code>
+       */
+      public boolean hasDelay() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required int32 delay = 3;</code>
+       */
+      public int getDelay() {
+        return delay_;
+      }
+      /**
+       * <code>required int32 delay = 3;</code>
+       */
+      public Builder setDelay(int value) {
+        bitField0_ |= 0x00000004;
+        delay_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int32 delay = 3;</code>
+       */
+      public Builder clearDelay() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        delay_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.AMSuicideRequestProto)
+    }
+
+    static {
+      defaultInstance = new AMSuicideRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.AMSuicideRequestProto)
+  }
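+
+  // Usage sketch (illustrative only; the values below are hypothetical):
+  // text, signal and delay are all required fields, so each must be set
+  // before build() will pass the isInitialized() check.
+  //
+  //   Messages.AMSuicideRequestProto req =
+  //       Messages.AMSuicideRequestProto.newBuilder()
+  //           .setText("halt the AM for testing")
+  //           .setSignal(15)     // e.g. SIGTERM
+  //           .setDelay(1000)    // delay before exit
+  //           .build();
+  //   byte[] wire = req.toByteArray();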
+
+  public interface AMSuicideResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.AMSuicideResponseProto}
+   *
+   * <pre>
+   **
+   * AM suicide reply. For this to be returned implies
+   * a failure of the AM to kill itself
+   * </pre>
+   */
+  public static final class AMSuicideResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements AMSuicideResponseProtoOrBuilder {
+    // Use AMSuicideResponseProto.newBuilder() to construct.
+    private AMSuicideResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private AMSuicideResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final AMSuicideResponseProto defaultInstance;
+    public static AMSuicideResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public AMSuicideResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private AMSuicideResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.AMSuicideResponseProto.class, org.apache.slider.api.proto.Messages.AMSuicideResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<AMSuicideResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<AMSuicideResponseProto>() {
+      public AMSuicideResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new AMSuicideResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<AMSuicideResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.AMSuicideResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.AMSuicideResponseProto other = (org.apache.slider.api.proto.Messages.AMSuicideResponseProto) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.AMSuicideResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.AMSuicideResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.AMSuicideResponseProto}
+     *
+     * <pre>
+     **
+     * AM suicide reply. For this to be returned implies
+     * a failure of the AM to kill itself
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.AMSuicideResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.AMSuicideResponseProto.class, org.apache.slider.api.proto.Messages.AMSuicideResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.AMSuicideResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideResponseProto build() {
+        org.apache.slider.api.proto.Messages.AMSuicideResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.AMSuicideResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.AMSuicideResponseProto result = new org.apache.slider.api.proto.Messages.AMSuicideResponseProto(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.AMSuicideResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.AMSuicideResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.AMSuicideResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.AMSuicideResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.AMSuicideResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.AMSuicideResponseProto)
+    }
+
+    static {
+      defaultInstance = new AMSuicideResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.AMSuicideResponseProto)
+  }
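+
+  // Round-trip sketch (illustrative only): the response carries no fields,
+  // so the default instance serializes to zero bytes and parses back
+  // cleanly through the static parseFrom() helpers above.
+  //
+  //   byte[] wire =
+  //       Messages.AMSuicideResponseProto.getDefaultInstance().toByteArray();
+  //   Messages.AMSuicideResponseProto resp =
+  //       Messages.AMSuicideResponseProto.parseFrom(wire);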
+
+  public interface GetInstanceDefinitionRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetInstanceDefinitionRequestProto}
+   *
+   * <pre>
+   **
+   * Ask for the instance definition details
+   * </pre>
+   */
+  public static final class GetInstanceDefinitionRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetInstanceDefinitionRequestProtoOrBuilder {
+    // Use GetInstanceDefinitionRequestProto.newBuilder() to construct.
+    private GetInstanceDefinitionRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetInstanceDefinitionRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetInstanceDefinitionRequestProto defaultInstance;
+    public static GetInstanceDefinitionRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetInstanceDefinitionRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetInstanceDefinitionRequestProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.class, org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetInstanceDefinitionRequestProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetInstanceDefinitionRequestProto>() {
+      public GetInstanceDefinitionRequestProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetInstanceDefinitionRequestProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetInstanceDefinitionRequestProto> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto other = (org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetInstanceDefinitionRequestProto}
+     *
+     * <pre>
+     **
+     * Ask for the instance definition details
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.class, org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto build() {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto result = new org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetInstanceDefinitionRequestProto)
+    }
+
+    static {
+      defaultInstance = new GetInstanceDefinitionRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetInstanceDefinitionRequestProto)
+  }
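+
+  // Pairing sketch (illustrative only): the request is intentionally empty;
+  // the matching GetInstanceDefinitionResponseProto below returns the
+  // instance definition as three JSON strings (internal, resources,
+  // application).
+  //
+  //   Messages.GetInstanceDefinitionRequestProto req =
+  //       Messages.GetInstanceDefinitionRequestProto.newBuilder().build();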
+
+  public interface GetInstanceDefinitionResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string internal = 1;
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    boolean hasInternal();
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    java.lang.String getInternal();
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getInternalBytes();
+
+    // required string resources = 2;
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    boolean hasResources();
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    java.lang.String getResources();
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getResourcesBytes();
+
+    // required string application = 3;
+    /**
+     * <code>required string application = 3;</code>
+     */
+    boolean hasApplication();
+    /**
+     * <code>required string application = 3;</code>
+     */
+    java.lang.String getApplication();
+    /**
+     * <code>required string application = 3;</code>
+     */
+    com.google.protobuf.ByteString
+        getApplicationBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.slider.api.GetInstanceDefinitionResponseProto}
+   *
+   * <pre>
+   **
+   * Get the definition back as three separate JSON strings
+   * </pre>
+   */
+  public static final class GetInstanceDefinitionResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetInstanceDefinitionResponseProtoOrBuilder {
+    // Use GetInstanceDefinitionResponseProto.newBuilder() to construct.
+    private GetInstanceDefinitionResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetInstanceDefinitionResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetInstanceDefinitionResponseProto defaultInstance;
+    public static GetInstanceDefinitionResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetInstanceDefinitionResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetInstanceDefinitionResponseProto(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            case 10: {
+              bitField0_ |= 0x00000001;
+              internal_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              resources_ = input.readBytes();
+              break;
+            }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              application_ = input.readBytes();
+              break;
+            }
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.class, org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetInstanceDefinitionResponseProto> PARSER =
+        new com.google.protobuf.AbstractParser<GetInstanceDefinitionResponseProto>() {
+      public GetInstanceDefinitionResponseProto parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetInstanceDefinitionResponseProto(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetInstanceDefinitionResponseProto> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string internal = 1;
+    public static final int INTERNAL_FIELD_NUMBER = 1;
+    private java.lang.Object internal_;
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    public boolean hasInternal() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    public java.lang.String getInternal() {
+      java.lang.Object ref = internal_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          internal_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string internal = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getInternalBytes() {
+      java.lang.Object ref = internal_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        internal_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string resources = 2;
+    public static final int RESOURCES_FIELD_NUMBER = 2;
+    private java.lang.Object resources_;
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    public boolean hasResources() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    public java.lang.String getResources() {
+      java.lang.Object ref = resources_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          resources_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string resources = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getResourcesBytes() {
+      java.lang.Object ref = resources_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        resources_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string application = 3;
+    public static final int APPLICATION_FIELD_NUMBER = 3;
+    private java.lang.Object application_;
+    /**
+     * <code>required string application = 3;</code>
+     */
+    public boolean hasApplication() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required string application = 3;</code>
+     */
+    public java.lang.String getApplication() {
+      java.lang.Object ref = application_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          application_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string application = 3;</code>
+     */
+    public com.google.protobuf.ByteString
+        getApplicationBytes() {
+      java.lang.Object ref = application_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        application_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      internal_ = "";
+      resources_ = "";
+      application_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasInternal()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasResources()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasApplication()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getInternalBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getResourcesBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBytes(3, getApplicationBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getInternalBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getResourcesBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(3, getApplicationBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto other = (org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasInternal() == other.hasInternal());
+      if (hasInternal()) {
+        result = result && getInternal()
+            .equals(other.getInternal());
+      }
+      result = result && (hasResources() == other.hasResources());
+      if (hasResources()) {
+        result = result && getResources()
+            .equals(other.getResources());
+      }
+      result = result && (hasApplication() == other.hasApplication());
+      if (hasApplication()) {
+        result = result && getApplication()
+            .equals(other.getApplication());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasInternal()) {
+        hash = (37 * hash) + INTERNAL_FIELD_NUMBER;
+        hash = (53 * hash) + getInternal().hashCode();
+      }
+      if (hasResources()) {
+        hash = (37 * hash) + RESOURCES_FIELD_NUMBER;
+        hash = (53 * hash) + getResources().hashCode();
+      }
+      if (hasApplication()) {
+        hash = (37 * hash) + APPLICATION_FIELD_NUMBER;
+        hash = (53 * hash) + getApplication().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.slider.api.GetInstanceDefinitionResponseProto}
+     *
+     * <pre>
+     **
+     * Get the definition back as three separate JSON strings
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.class, org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.Builder.class);
+      }
+
+      // Construct using org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        internal_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        resources_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        application_ = "";
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.slider.api.proto.Messages.internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto getDefaultInstanceForType() {
+        return org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto build() {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto buildPartial() {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto result = new org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.internal_ = internal_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.resources_ = resources_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.application_ = application_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto) {
+          return mergeFrom((org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto other) {
+        if (other == org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance()) return this;
+        if (other.hasInternal()) {
+          bitField0_ |= 0x00000001;
+          internal_ = other.internal_;
+          onChanged();
+        }
+        if (other.hasResources()) {
+          bitField0_ |= 0x00000002;
+          resources_ = other.resources_;
+          onChanged();
+        }
+        if (other.hasApplication()) {
+          bitField0_ |= 0x00000004;
+          application_ = other.application_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasInternal()) {
+          return false;
+        }
+        if (!hasResources()) {
+          return false;
+        }
+        if (!hasApplication()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string internal = 1;
+      private java.lang.Object internal_ = "";
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public boolean hasInternal() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public java.lang.String getInternal() {
+        java.lang.Object ref = internal_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          internal_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getInternalBytes() {
+        java.lang.Object ref = internal_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          internal_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public Builder setInternal(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        internal_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public Builder clearInternal() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        internal_ = getDefaultInstance().getInternal();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string internal = 1;</code>
+       */
+      public Builder setInternalBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        internal_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string resources = 2;
+      private java.lang.Object resources_ = "";
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public boolean hasResources() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public java.lang.String getResources() {
+        java.lang.Object ref = resources_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          resources_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getResourcesBytes() {
+        java.lang.Object ref = resources_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          resources_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public Builder setResources(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        resources_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public Builder clearResources() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        resources_ = getDefaultInstance().getResources();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string resources = 2;</code>
+       */
+      public Builder setResourcesBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        resources_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string application = 3;
+      private java.lang.Object application_ = "";
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public boolean hasApplication() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public java.lang.String getApplication() {
+        java.lang.Object ref = application_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          application_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public com.google.protobuf.ByteString
+          getApplicationBytes() {
+        java.lang.Object ref = application_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          application_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public Builder setApplication(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        application_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public Builder clearApplication() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        application_ = getDefaultInstance().getApplication();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string application = 3;</code>
+       */
+      public Builder setApplicationBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        application_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.slider.api.GetInstanceDefinitionResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetInstanceDefinitionResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.GetInstanceDefinitionResponseProto)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_RoleInstanceState_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_RoleInstanceState_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_StopClusterRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_StopClusterResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_FlexClusterRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_FlexClusterResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetNodeRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetNodeResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetClusterNodesRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetClusterNodesResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_EchoRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_EchoRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_EchoResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_EchoResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_KillContainerRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_KillContainerResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_AMSuicideRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_AMSuicideResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\033SliderClusterMessages.proto\022\025org.apach" +
+      "e.slider.api\"\203\002\n\021RoleInstanceState\022\014\n\004na" +
+      "me\030\001 \002(\t\022\014\n\004role\030\002 \001(\t\022\r\n\005state\030\004 \002(\r\022\020\n" +
+      "\010exitCode\030\005 \002(\r\022\017\n\007command\030\006 \001(\t\022\023\n\013diag" +
+      "nostics\030\007 \001(\t\022\016\n\006output\030\010 \003(\t\022\023\n\013environ" +
+      "ment\030\t \003(\t\022\016\n\006roleId\030\n \002(\r\022\020\n\010released\030\013" +
+      " \002(\010\022\022\n\ncreateTime\030\014 \002(\003\022\021\n\tstartTime\030\r " +
+      "\002(\003\022\014\n\004host\030\016 \002(\t\022\017\n\007hostURL\030\017 \002(\t\"*\n\027St" +
+      "opClusterRequestProto\022\017\n\007message\030\001 \002(\t\"\032" +
+      "\n\030StopClusterResponseProto\".\n\027FlexCluste",
+      "rRequestProto\022\023\n\013clusterSpec\030\001 \002(\t\",\n\030Fl" +
+      "exClusterResponseProto\022\020\n\010response\030\001 \002(\010" +
+      "\"\"\n GetJSONClusterStatusRequestProto\"8\n!" +
+      "GetJSONClusterStatusResponseProto\022\023\n\013clu" +
+      "sterSpec\030\001 \002(\t\"/\n\037ListNodeUUIDsByRoleReq" +
+      "uestProto\022\014\n\004role\030\001 \002(\t\"0\n ListNodeUUIDs" +
+      "ByRoleResponseProto\022\014\n\004uuid\030\001 \003(\t\"#\n\023Get" +
+      "NodeRequestProto\022\014\n\004uuid\030\001 \002(\t\"U\n\024GetNod" +
+      "eResponseProto\022=\n\013clusterNode\030\001 \002(\0132(.or" +
+      "g.apache.slider.api.RoleInstanceState\"+\n",
+      "\033GetClusterNodesRequestProto\022\014\n\004uuid\030\001 \003" +
+      "(\t\"]\n\034GetClusterNodesResponseProto\022=\n\013cl" +
+      "usterNode\030\001 \003(\0132(.org.apache.slider.api." +
+      "RoleInstanceState\" \n\020EchoRequestProto\022\014\n" +
+      "\004text\030\001 \002(\t\"!\n\021EchoResponseProto\022\014\n\004text" +
+      "\030\001 \002(\t\"\'\n\031KillContainerRequestProto\022\n\n\002i" +
+      "d\030\001 \002(\t\"-\n\032KillContainerResponseProto\022\017\n" +
+      "\007success\030\001 \002(\010\"D\n\025AMSuicideRequestProto\022" +
+      "\014\n\004text\030\001 \002(\t\022\016\n\006signal\030\002 \002(\005\022\r\n\005delay\030\003" +
+      " \002(\005\"\030\n\026AMSuicideResponseProto\"#\n!GetIns",
+      "tanceDefinitionRequestProto\"^\n\"GetInstan" +
+      "ceDefinitionResponseProto\022\020\n\010internal\030\001 " +
+      "\002(\t\022\021\n\tresources\030\002 \002(\t\022\023\n\013application\030\003 " +
+      "\002(\tB-\n\033org.apache.slider.api.protoB\010Mess" +
+      "ages\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_org_apache_slider_api_RoleInstanceState_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_org_apache_slider_api_RoleInstanceState_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_RoleInstanceState_descriptor,
+              new java.lang.String[] { "Name", "Role", "State", "ExitCode", "Command", "Diagnostics", "Output", "Environment", "RoleId", "Released", "CreateTime", "StartTime", "Host", "HostURL", });
+          internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_org_apache_slider_api_StopClusterRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_StopClusterRequestProto_descriptor,
+              new java.lang.String[] { "Message", });
+          internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_org_apache_slider_api_StopClusterResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_StopClusterResponseProto_descriptor,
+              new java.lang.String[] { });
+          internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(3);
+          internal_static_org_apache_slider_api_FlexClusterRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_FlexClusterRequestProto_descriptor,
+              new java.lang.String[] { "ClusterSpec", });
+          internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(4);
+          internal_static_org_apache_slider_api_FlexClusterResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_FlexClusterResponseProto_descriptor,
+              new java.lang.String[] { "Response", });
+          internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(5);
+          internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetJSONClusterStatusRequestProto_descriptor,
+              new java.lang.String[] { });
+          internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(6);
+          internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetJSONClusterStatusResponseProto_descriptor,
+              new java.lang.String[] { "ClusterSpec", });
+          internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(7);
+          internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_ListNodeUUIDsByRoleRequestProto_descriptor,
+              new java.lang.String[] { "Role", });
+          internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(8);
+          internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_ListNodeUUIDsByRoleResponseProto_descriptor,
+              new java.lang.String[] { "Uuid", });
+          internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(9);
+          internal_static_org_apache_slider_api_GetNodeRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetNodeRequestProto_descriptor,
+              new java.lang.String[] { "Uuid", });
+          internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(10);
+          internal_static_org_apache_slider_api_GetNodeResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetNodeResponseProto_descriptor,
+              new java.lang.String[] { "ClusterNode", });
+          internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(11);
+          internal_static_org_apache_slider_api_GetClusterNodesRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetClusterNodesRequestProto_descriptor,
+              new java.lang.String[] { "Uuid", });
+          internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(12);
+          internal_static_org_apache_slider_api_GetClusterNodesResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetClusterNodesResponseProto_descriptor,
+              new java.lang.String[] { "ClusterNode", });
+          internal_static_org_apache_slider_api_EchoRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(13);
+          internal_static_org_apache_slider_api_EchoRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_EchoRequestProto_descriptor,
+              new java.lang.String[] { "Text", });
+          internal_static_org_apache_slider_api_EchoResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(14);
+          internal_static_org_apache_slider_api_EchoResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_EchoResponseProto_descriptor,
+              new java.lang.String[] { "Text", });
+          internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(15);
+          internal_static_org_apache_slider_api_KillContainerRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_KillContainerRequestProto_descriptor,
+              new java.lang.String[] { "Id", });
+          internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(16);
+          internal_static_org_apache_slider_api_KillContainerResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_KillContainerResponseProto_descriptor,
+              new java.lang.String[] { "Success", });
+          internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(17);
+          internal_static_org_apache_slider_api_AMSuicideRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_AMSuicideRequestProto_descriptor,
+              new java.lang.String[] { "Text", "Signal", "Delay", });
+          internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(18);
+          internal_static_org_apache_slider_api_AMSuicideResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_AMSuicideResponseProto_descriptor,
+              new java.lang.String[] { });
+          internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(19);
+          internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetInstanceDefinitionRequestProto_descriptor,
+              new java.lang.String[] { });
+          internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(20);
+          internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_slider_api_GetInstanceDefinitionResponseProto_descriptor,
+              new java.lang.String[] { "Internal", "Resources", "Application", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
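
A minimal usage sketch of the generated Builder/PARSER API in Messages.java above (not part of the generated sources; the class name and the "{}" JSON payloads are illustrative placeholders):

    package org.apache.slider.api.proto;

    /** Sketch: round-trips the generated message through its Builder and PARSER. */
    public class GetInstanceDefinitionRoundTrip {
      public static void main(String[] args) throws Exception {
        Messages.GetInstanceDefinitionResponseProto resp =
            Messages.GetInstanceDefinitionResponseProto.newBuilder()
                .setInternal("{}")      // required field 1: internal JSON (placeholder)
                .setResources("{}")     // required field 2: resources JSON (placeholder)
                .setApplication("{}")   // required field 3: application JSON (placeholder)
                .build();               // throws if any required field is unset

        // Serialize and parse back; PARSER re-applies the required-field checks.
        com.google.protobuf.ByteString bytes = resp.toByteString();
        Messages.GetInstanceDefinitionResponseProto parsed =
            Messages.GetInstanceDefinitionResponseProto.parseFrom(bytes);
        System.out.println(parsed.equals(resp));  // true: value-based equals/hashCode
      }
    }
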
diff --git a/slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java b/slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
new file mode 100644
index 0000000..c00f99d
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
@@ -0,0 +1,1043 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SliderClusterProtocol.proto
+
+package org.apache.slider.api.proto;
+
+public final class SliderClusterAPI {
+  private SliderClusterAPI() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf service {@code org.apache.slider.api.SliderClusterProtocolPB}
+   *
+   * <pre>
+   **
+   * Protocol used between the Slider client and the AM
+   * </pre>
+   */
+  public static abstract class SliderClusterProtocolPB
+      implements com.google.protobuf.Service {
+    protected SliderClusterProtocolPB() {}
+
+    public interface Interface {
+      /**
+       * <code>rpc stopCluster(.org.apache.slider.api.StopClusterRequestProto) returns (.org.apache.slider.api.StopClusterResponseProto);</code>
+       */
+      public abstract void stopCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.StopClusterRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.StopClusterResponseProto> done);
+
+      /**
+       * <code>rpc flexCluster(.org.apache.slider.api.FlexClusterRequestProto) returns (.org.apache.slider.api.FlexClusterResponseProto);</code>
+       *
+       * <pre>
+       **
+       * Flex the cluster. 
+       * </pre>
+       */
+      public abstract void flexCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.FlexClusterRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.FlexClusterResponseProto> done);
+
+      /**
+       * <code>rpc getJSONClusterStatus(.org.apache.slider.api.GetJSONClusterStatusRequestProto) returns (.org.apache.slider.api.GetJSONClusterStatusResponseProto);</code>
+       *
+       * <pre>
+       **
+       * Get the current cluster status
+       * </pre>
+       */
+      public abstract void getJSONClusterStatus(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto> done);
+
+      /**
+       * <code>rpc getInstanceDefinition(.org.apache.slider.api.GetInstanceDefinitionRequestProto) returns (.org.apache.slider.api.GetInstanceDefinitionResponseProto);</code>
+       *
+       * <pre>
+       **
+       * Get the instance definition
+       * </pre>
+       */
+      public abstract void getInstanceDefinition(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto> done);
+
+      /**
+       * <code>rpc listNodeUUIDsByRole(.org.apache.slider.api.ListNodeUUIDsByRoleRequestProto) returns (.org.apache.slider.api.ListNodeUUIDsByRoleResponseProto);</code>
+       *
+       * <pre>
+       **
+       * List all running nodes in a role
+       * </pre>
+       */
+      public abstract void listNodeUUIDsByRole(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto> done);
+
+      /**
+       * <code>rpc getNode(.org.apache.slider.api.GetNodeRequestProto) returns (.org.apache.slider.api.GetNodeResponseProto);</code>
+       *
+       * <pre>
+       **
+       * Get the details on a node
+       * </pre>
+       */
+      public abstract void getNode(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetNodeRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetNodeResponseProto> done);
+
+      /**
+       * <code>rpc getClusterNodes(.org.apache.slider.api.GetClusterNodesRequestProto) returns (.org.apache.slider.api.GetClusterNodesResponseProto);</code>
+       *
+       * <pre>
+       **
+       * Get the details on a list of nodes.
+       * Unknown nodes are not returned.
+       * &lt;i&gt;Important: the order of the results is undefined&lt;/i&gt;
+       * </pre>
+       */
+      public abstract void getClusterNodes(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto> done);
+
+      /**
+       * <code>rpc echo(.org.apache.slider.api.EchoRequestProto) returns (.org.apache.slider.api.EchoResponseProto);</code>
+       *
+       * <pre>
+       **
+       * echo some text
+       * </pre>
+       */
+      public abstract void echo(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.EchoRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.EchoResponseProto> done);
+
+      /**
+       * <code>rpc killContainer(.org.apache.slider.api.KillContainerRequestProto) returns (.org.apache.slider.api.KillContainerResponseProto);</code>
+       *
+       * <pre>
+       **
+       * kill a container
+       * </pre>
+       */
+      public abstract void killContainer(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.KillContainerRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.KillContainerResponseProto> done);
+
+      /**
+       * <code>rpc amSuicide(.org.apache.slider.api.AMSuicideRequestProto) returns (.org.apache.slider.api.AMSuicideResponseProto);</code>
+       *
+       * <pre>
+       **
+       * kill the AM
+       * </pre>
+       */
+      public abstract void amSuicide(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.AMSuicideRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.AMSuicideResponseProto> done);
+
+    }
+
+    public static com.google.protobuf.Service newReflectiveService(
+        final Interface impl) {
+      return new SliderClusterProtocolPB() {
+        @java.lang.Override
+        public  void stopCluster(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.StopClusterRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.StopClusterResponseProto> done) {
+          impl.stopCluster(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void flexCluster(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.FlexClusterRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.FlexClusterResponseProto> done) {
+          impl.flexCluster(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getJSONClusterStatus(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto> done) {
+          impl.getJSONClusterStatus(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getInstanceDefinition(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto> done) {
+          impl.getInstanceDefinition(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void listNodeUUIDsByRole(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto> done) {
+          impl.listNodeUUIDsByRole(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getNode(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.GetNodeRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetNodeResponseProto> done) {
+          impl.getNode(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getClusterNodes(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto> done) {
+          impl.getClusterNodes(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void echo(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.EchoRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.EchoResponseProto> done) {
+          impl.echo(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void killContainer(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.KillContainerRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.KillContainerResponseProto> done) {
+          impl.killContainer(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void amSuicide(
+            com.google.protobuf.RpcController controller,
+            org.apache.slider.api.proto.Messages.AMSuicideRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.AMSuicideResponseProto> done) {
+          impl.amSuicide(controller, request, done);
+        }
+
+      };
+    }
+
+    public static com.google.protobuf.BlockingService
+        newReflectiveBlockingService(final BlockingInterface impl) {
+      return new com.google.protobuf.BlockingService() {
+        public final com.google.protobuf.Descriptors.ServiceDescriptor
+            getDescriptorForType() {
+          return getDescriptor();
+        }
+
+        public final com.google.protobuf.Message callBlockingMethod(
+            com.google.protobuf.Descriptors.MethodDescriptor method,
+            com.google.protobuf.RpcController controller,
+            com.google.protobuf.Message request)
+            throws com.google.protobuf.ServiceException {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.callBlockingMethod() given method descriptor for " +
+              "wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return impl.stopCluster(controller, (org.apache.slider.api.proto.Messages.StopClusterRequestProto)request);
+            case 1:
+              return impl.flexCluster(controller, (org.apache.slider.api.proto.Messages.FlexClusterRequestProto)request);
+            case 2:
+              return impl.getJSONClusterStatus(controller, (org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto)request);
+            case 3:
+              return impl.getInstanceDefinition(controller, (org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto)request);
+            case 4:
+              return impl.listNodeUUIDsByRole(controller, (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto)request);
+            case 5:
+              return impl.getNode(controller, (org.apache.slider.api.proto.Messages.GetNodeRequestProto)request);
+            case 6:
+              return impl.getClusterNodes(controller, (org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto)request);
+            case 7:
+              return impl.echo(controller, (org.apache.slider.api.proto.Messages.EchoRequestProto)request);
+            case 8:
+              return impl.killContainer(controller, (org.apache.slider.api.proto.Messages.KillContainerRequestProto)request);
+            case 9:
+              return impl.amSuicide(controller, (org.apache.slider.api.proto.Messages.AMSuicideRequestProto)request);
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getRequestPrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getRequestPrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.slider.api.proto.Messages.StopClusterRequestProto.getDefaultInstance();
+            case 1:
+              return org.apache.slider.api.proto.Messages.FlexClusterRequestProto.getDefaultInstance();
+            case 2:
+              return org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.getDefaultInstance();
+            case 3:
+              return org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.getDefaultInstance();
+            case 4:
+              return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.getDefaultInstance();
+            case 5:
+              return org.apache.slider.api.proto.Messages.GetNodeRequestProto.getDefaultInstance();
+            case 6:
+              return org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.getDefaultInstance();
+            case 7:
+              return org.apache.slider.api.proto.Messages.EchoRequestProto.getDefaultInstance();
+            case 8:
+              return org.apache.slider.api.proto.Messages.KillContainerRequestProto.getDefaultInstance();
+            case 9:
+              return org.apache.slider.api.proto.Messages.AMSuicideRequestProto.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getResponsePrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getResponsePrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance();
+            case 1:
+              return org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance();
+            case 2:
+              return org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance();
+            case 3:
+              return org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance();
+            case 4:
+              return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance();
+            case 5:
+              return org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance();
+            case 6:
+              return org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance();
+            case 7:
+              return org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance();
+            case 8:
+              return org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance();
+            case 9:
+              return org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+      };
+    }
+
+    /**
+     * <code>rpc stopCluster(.org.apache.slider.api.StopClusterRequestProto) returns (.org.apache.slider.api.StopClusterResponseProto);</code>
+     */
+    public abstract void stopCluster(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.StopClusterRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.StopClusterResponseProto> done);
+
+    /**
+     * <code>rpc flexCluster(.org.apache.slider.api.FlexClusterRequestProto) returns (.org.apache.slider.api.FlexClusterResponseProto);</code>
+     *
+     * <pre>
+     **
+     * Flex the cluster. 
+     * </pre>
+     */
+    public abstract void flexCluster(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.FlexClusterRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.FlexClusterResponseProto> done);
+
+    /**
+     * <code>rpc getJSONClusterStatus(.org.apache.slider.api.GetJSONClusterStatusRequestProto) returns (.org.apache.slider.api.GetJSONClusterStatusResponseProto);</code>
+     *
+     * <pre>
+     **
+     * Get the current cluster status
+     * </pre>
+     */
+    public abstract void getJSONClusterStatus(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto> done);
+
+    /**
+     * <code>rpc getInstanceDefinition(.org.apache.slider.api.GetInstanceDefinitionRequestProto) returns (.org.apache.slider.api.GetInstanceDefinitionResponseProto);</code>
+     *
+     * <pre>
+     **
+     * Get the instance definition
+     * </pre>
+     */
+    public abstract void getInstanceDefinition(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto> done);
+
+    /**
+     * <code>rpc listNodeUUIDsByRole(.org.apache.slider.api.ListNodeUUIDsByRoleRequestProto) returns (.org.apache.slider.api.ListNodeUUIDsByRoleResponseProto);</code>
+     *
+     * <pre>
+     **
+     * List all running nodes in a role
+     * </pre>
+     */
+    public abstract void listNodeUUIDsByRole(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto> done);
+
+    /**
+     * <code>rpc getNode(.org.apache.slider.api.GetNodeRequestProto) returns (.org.apache.slider.api.GetNodeResponseProto);</code>
+     *
+     * <pre>
+     **
+     * Get the details on a node
+     * </pre>
+     */
+    public abstract void getNode(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.GetNodeRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetNodeResponseProto> done);
+
+    /**
+     * <code>rpc getClusterNodes(.org.apache.slider.api.GetClusterNodesRequestProto) returns (.org.apache.slider.api.GetClusterNodesResponseProto);</code>
+     *
+     * <pre>
+     **
+     * Get the details on a list of nodes.
+     * Unknown nodes are not returned.
+     * &lt;i&gt;Important: the order of the results is undefined&lt;/i&gt;
+     * </pre>
+     */
+    public abstract void getClusterNodes(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto> done);
+
+    /**
+     * <code>rpc echo(.org.apache.slider.api.EchoRequestProto) returns (.org.apache.slider.api.EchoResponseProto);</code>
+     *
+     * <pre>
+     **
+     * echo some text
+     * </pre>
+     */
+    public abstract void echo(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.EchoRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.EchoResponseProto> done);
+
+    /**
+     * <code>rpc killContainer(.org.apache.slider.api.KillContainerRequestProto) returns (.org.apache.slider.api.KillContainerResponseProto);</code>
+     *
+     * <pre>
+     **
+     * kill a container
+     * </pre>
+     */
+    public abstract void killContainer(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.KillContainerRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.KillContainerResponseProto> done);
+
+    /**
+     * <code>rpc amSuicide(.org.apache.slider.api.AMSuicideRequestProto) returns (.org.apache.slider.api.AMSuicideResponseProto);</code>
+     *
+     * <pre>
+     **
+     * kill the AM
+     * </pre>
+     */
+    public abstract void amSuicide(
+        com.google.protobuf.RpcController controller,
+        org.apache.slider.api.proto.Messages.AMSuicideRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.AMSuicideResponseProto> done);
+
+    public static final
+        com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptor() {
+      return org.apache.slider.api.proto.SliderClusterAPI.getDescriptor().getServices().get(0);
+    }
+    public final com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public final void callMethod(
+        com.google.protobuf.Descriptors.MethodDescriptor method,
+        com.google.protobuf.RpcController controller,
+        com.google.protobuf.Message request,
+        com.google.protobuf.RpcCallback<
+          com.google.protobuf.Message> done) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.callMethod() given method descriptor for wrong " +
+          "service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          this.stopCluster(controller, (org.apache.slider.api.proto.Messages.StopClusterRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.StopClusterResponseProto>specializeCallback(
+              done));
+          return;
+        case 1:
+          this.flexCluster(controller, (org.apache.slider.api.proto.Messages.FlexClusterRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.FlexClusterResponseProto>specializeCallback(
+              done));
+          return;
+        case 2:
+          this.getJSONClusterStatus(controller, (org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto>specializeCallback(
+              done));
+          return;
+        case 3:
+          this.getInstanceDefinition(controller, (org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto>specializeCallback(
+              done));
+          return;
+        case 4:
+          this.listNodeUUIDsByRole(controller, (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto>specializeCallback(
+              done));
+          return;
+        case 5:
+          this.getNode(controller, (org.apache.slider.api.proto.Messages.GetNodeRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.GetNodeResponseProto>specializeCallback(
+              done));
+          return;
+        case 6:
+          this.getClusterNodes(controller, (org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto>specializeCallback(
+              done));
+          return;
+        case 7:
+          this.echo(controller, (org.apache.slider.api.proto.Messages.EchoRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.EchoResponseProto>specializeCallback(
+              done));
+          return;
+        case 8:
+          this.killContainer(controller, (org.apache.slider.api.proto.Messages.KillContainerRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.KillContainerResponseProto>specializeCallback(
+              done));
+          return;
+        case 9:
+          this.amSuicide(controller, (org.apache.slider.api.proto.Messages.AMSuicideRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.slider.api.proto.Messages.AMSuicideResponseProto>specializeCallback(
+              done));
+          return;
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getRequestPrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getRequestPrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.slider.api.proto.Messages.StopClusterRequestProto.getDefaultInstance();
+        case 1:
+          return org.apache.slider.api.proto.Messages.FlexClusterRequestProto.getDefaultInstance();
+        case 2:
+          return org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto.getDefaultInstance();
+        case 3:
+          return org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto.getDefaultInstance();
+        case 4:
+          return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto.getDefaultInstance();
+        case 5:
+          return org.apache.slider.api.proto.Messages.GetNodeRequestProto.getDefaultInstance();
+        case 6:
+          return org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto.getDefaultInstance();
+        case 7:
+          return org.apache.slider.api.proto.Messages.EchoRequestProto.getDefaultInstance();
+        case 8:
+          return org.apache.slider.api.proto.Messages.KillContainerRequestProto.getDefaultInstance();
+        case 9:
+          return org.apache.slider.api.proto.Messages.AMSuicideRequestProto.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getResponsePrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getResponsePrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance();
+        case 1:
+          return org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance();
+        case 2:
+          return org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance();
+        case 3:
+          return org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance();
+        case 4:
+          return org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance();
+        case 5:
+          return org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance();
+        case 6:
+          return org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance();
+        case 7:
+          return org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance();
+        case 8:
+          return org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance();
+        case 9:
+          return org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public static Stub newStub(
+        com.google.protobuf.RpcChannel channel) {
+      return new Stub(channel);
+    }
+
+    public static final class Stub extends org.apache.slider.api.proto.SliderClusterAPI.SliderClusterProtocolPB implements Interface {
+      private Stub(com.google.protobuf.RpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.RpcChannel channel;
+
+      public com.google.protobuf.RpcChannel getChannel() {
+        return channel;
+      }
+
+      public  void stopCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.StopClusterRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.StopClusterResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.StopClusterResponseProto.class,
+            org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance()));
+      }
+
+      public  void flexCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.FlexClusterRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.FlexClusterResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(1),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.FlexClusterResponseProto.class,
+            org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance()));
+      }
+
+      public  void getJSONClusterStatus(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(2),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.class,
+            org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance()));
+      }
+
+      public  void getInstanceDefinition(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.class,
+            org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance()));
+      }
+
+      public  void listNodeUUIDsByRole(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(4),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.class,
+            org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance()));
+      }
+
+      public  void getNode(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetNodeRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetNodeResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(5),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.GetNodeResponseProto.class,
+            org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance()));
+      }
+
+      public  void getClusterNodes(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(6),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.class,
+            org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance()));
+      }
+
+      public  void echo(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.EchoRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.EchoResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(7),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.EchoResponseProto.class,
+            org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance()));
+      }
+
+      public  void killContainer(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.KillContainerRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.KillContainerResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(8),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.KillContainerResponseProto.class,
+            org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance()));
+      }
+
+      public  void amSuicide(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.AMSuicideRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.slider.api.proto.Messages.AMSuicideResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(9),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.slider.api.proto.Messages.AMSuicideResponseProto.class,
+            org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance()));
+      }
+    }
+
+    public static BlockingInterface newBlockingStub(
+        com.google.protobuf.BlockingRpcChannel channel) {
+      return new BlockingStub(channel);
+    }
+
+    public interface BlockingInterface {
+      public org.apache.slider.api.proto.Messages.StopClusterResponseProto stopCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.StopClusterRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.FlexClusterResponseProto flexCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.FlexClusterRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto getJSONClusterStatus(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.GetNodeResponseProto getNode(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetNodeRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto getClusterNodes(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.EchoResponseProto echo(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.EchoRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.KillContainerResponseProto killContainer(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.KillContainerRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.slider.api.proto.Messages.AMSuicideResponseProto amSuicide(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.AMSuicideRequestProto request)
+          throws com.google.protobuf.ServiceException;
+    }
+
+    private static final class BlockingStub implements BlockingInterface {
+      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.BlockingRpcChannel channel;
+
+      public org.apache.slider.api.proto.Messages.StopClusterResponseProto stopCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.StopClusterRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.StopClusterResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.StopClusterResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.FlexClusterResponseProto flexCluster(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.FlexClusterRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.FlexClusterResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(1),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.FlexClusterResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto getJSONClusterStatus(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(2),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetJSONClusterStatusResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetInstanceDefinitionResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(4),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.GetNodeResponseProto getNode(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetNodeRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.GetNodeResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(5),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetNodeResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto getClusterNodes(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.GetClusterNodesRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(6),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.GetClusterNodesResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.EchoResponseProto echo(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.EchoRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.EchoResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(7),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.EchoResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.KillContainerResponseProto killContainer(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.KillContainerRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.KillContainerResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(8),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.KillContainerResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.slider.api.proto.Messages.AMSuicideResponseProto amSuicide(
+          com.google.protobuf.RpcController controller,
+          org.apache.slider.api.proto.Messages.AMSuicideRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.slider.api.proto.Messages.AMSuicideResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(9),
+          controller,
+          request,
+          org.apache.slider.api.proto.Messages.AMSuicideResponseProto.getDefaultInstance());
+      }
+
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.slider.api.SliderClusterProtocolPB)
+  }
+
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\033SliderClusterProtocol.proto\022\025org.apach" +
+      "e.slider.api\032\033SliderClusterMessages.prot" +
+      "o2\270\t\n\027SliderClusterProtocolPB\022n\n\013stopClu" +
+      "ster\022..org.apache.slider.api.StopCluster" +
+      "RequestProto\032/.org.apache.slider.api.Sto" +
+      "pClusterResponseProto\022n\n\013flexCluster\022..o" +
+      "rg.apache.slider.api.FlexClusterRequestP" +
+      "roto\032/.org.apache.slider.api.FlexCluster" +
+      "ResponseProto\022\211\001\n\024getJSONClusterStatus\0227" +
+      ".org.apache.slider.api.GetJSONClusterSta",
+      "tusRequestProto\0328.org.apache.slider.api." +
+      "GetJSONClusterStatusResponseProto\022\214\001\n\025ge" +
+      "tInstanceDefinition\0228.org.apache.slider." +
+      "api.GetInstanceDefinitionRequestProto\0329." +
+      "org.apache.slider.api.GetInstanceDefinit" +
+      "ionResponseProto\022\206\001\n\023listNodeUUIDsByRole" +
+      "\0226.org.apache.slider.api.ListNodeUUIDsBy" +
+      "RoleRequestProto\0327.org.apache.slider.api" +
+      ".ListNodeUUIDsByRoleResponseProto\022b\n\007get" +
+      "Node\022*.org.apache.slider.api.GetNodeRequ",
+      "estProto\032+.org.apache.slider.api.GetNode" +
+      "ResponseProto\022z\n\017getClusterNodes\0222.org.a" +
+      "pache.slider.api.GetClusterNodesRequestP" +
+      "roto\0323.org.apache.slider.api.GetClusterN" +
+      "odesResponseProto\022Y\n\004echo\022\'.org.apache.s" +
+      "lider.api.EchoRequestProto\032(.org.apache." +
+      "slider.api.EchoResponseProto\022t\n\rkillCont" +
+      "ainer\0220.org.apache.slider.api.KillContai" +
+      "nerRequestProto\0321.org.apache.slider.api." +
+      "KillContainerResponseProto\022h\n\tamSuicide\022",
+      ",.org.apache.slider.api.AMSuicideRequest" +
+      "Proto\032-.org.apache.slider.api.AMSuicideR" +
+      "esponseProtoB5\n\033org.apache.slider.api.pr" +
+      "otoB\020SliderClusterAPI\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.slider.api.proto.Messages.getDescriptor(),
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 95c120c..e762c1e 100644
--- a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -94,6 +94,8 @@
 import org.apache.slider.core.registry.info.RegisteredEndpoint;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
 import org.apache.slider.core.registry.retrieve.RegistryRetriever;
+import org.apache.slider.core.zk.BlockingZKWatcher;
+import org.apache.slider.core.zk.ZKIntegration;
 import org.apache.slider.core.zk.ZKPathBuilder;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.SliderProviderFactory;
@@ -106,6 +108,10 @@
 import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
 
 import static org.apache.slider.common.params.SliderActions.*;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -268,6 +274,89 @@
     return exitCode;
   }
 
+  /**
+   * Delete the zookeeper node associated with the calling user and the cluster.
+   * @param clusterName name of the cluster instance
+   * @return true if the node was deleted; false if it could not be
+   */
+  protected boolean deleteZookeeperNode(String clusterName) throws YarnException, IOException {
+    String user = getUsername();
+    String zkPath = ZKIntegration.mkClusterPath(user, clusterName);
+    Exception e = null;
+    try {
+      Configuration config = getConfig();
+      if (!SliderUtils.isHadoopClusterSecure(config)) {
+        ZKIntegration client = getZkClient(clusterName, user);
+        if (client != null) {
+          if (client.exists(zkPath)) {
+            log.info("Deleting zookeeper path {}", zkPath);
+          }
+          client.deleteRecursive(zkPath);
+          return true;
+        }
+      } else {
+        log.warn("Default zookeeper node is not available for a secure cluster");
+      }
+    } catch (InterruptedException ignored) {
+      e = ignored;
+    } catch (KeeperException ignored) {
+      e = ignored;
+    } catch (BadConfigException ignored) {
+      e = ignored;
+    }
+    if (e != null) {
+      log.warn("Unable to recursively delete zk node {}", zkPath);
+      log.debug("Reason: ", e);
+    }
+
+    return false;
+  }
+
+  /**
+   * Create the zookeeper node associated with the calling user and the cluster.
+   * @param clusterName name of the cluster instance
+   * @param nameOnly if true, return the path without creating the node
+   * @return the path of the node, or null if it could not be created
+   */
+  protected String createZookeeperNode(String clusterName, Boolean nameOnly) throws YarnException, IOException {
+    String user = getUsername();
+    String zkPath = ZKIntegration.mkClusterPath(user, clusterName);
+    if (nameOnly) {
+      return zkPath;
+    }
+    Configuration config = getConfig();
+    if (!SliderUtils.isHadoopClusterSecure(config)) {
+      ZKIntegration client = getZkClient(clusterName, user);
+      if (client != null) {
+        try {
+          client.createPath(zkPath, "", ZooDefs.Ids.OPEN_ACL_UNSAFE,
+                            CreateMode.PERSISTENT);
+          return zkPath;
+        } catch (InterruptedException e) {
+          log.warn("Unable to create zk node {}", zkPath, e);
+        } catch (KeeperException e) {
+          log.warn("Unable to create zk node {}", zkPath, e);
+        }
+      }
+    }
+
+    return null;
+  }
+
+  /**
+   * Get a zookeeper client; returns null if it cannot connect to zookeeper.
+   * @param clusterName name of the cluster instance
+   * @param user name of the calling user
+   * @return a connected client, or null if the connection failed
+   */
+  protected ZKIntegration getZkClient(String clusterName, String user) throws YarnException {
+    String registryQuorum = lookupZKQuorum();
+    ZKIntegration client = null;
+    try {
+      BlockingZKWatcher watcher = new BlockingZKWatcher();
+      client = ZKIntegration.newInstance(registryQuorum, user, clusterName, true, false, watcher);
+      client.init();
+      watcher.waitForZKConnection(2 * 1000);
+    } catch (InterruptedException e) {
+      client = null;
+      log.warn("Unable to connect to zookeeper quorum {}", registryQuorum, e);
+    } catch (IOException e) {
+      log.warn("Unable to connect to zookeeper quorum {}", registryQuorum, e);
+    }
+    return client;
+  }
 
   /**
    * Destroy a cluster. There's two race conditions here
@@ -297,6 +386,10 @@
       log.warn("Filesystem returned false from delete() operation");
     }
 
+    if (!deleteZookeeperNode(clustername)) {
+      log.warn("Unable to perform node cleanup in Zookeeper.");
+    }
+
     List<ApplicationReport> instances = findAllLiveInstances(clustername);
     // detect any race leading to cluster creation during the check/destroy process
     // and report a problem.
@@ -382,7 +475,7 @@
    */
   
   public void buildInstanceDefinition(String clustername,
-                                         AbstractClusterBuildingActionArgs buildInfo)
+                                      AbstractClusterBuildingActionArgs buildInfo)
         throws YarnException, IOException {
     // verify that a live cluster isn't there
     SliderUtils.validateClusterName(clustername);
@@ -498,11 +591,25 @@
         registryQuorum,
         quorum);
     String zookeeperRoot = buildInfo.getAppZKPath();
-    
+
     if (isSet(zookeeperRoot)) {
       zkPaths.setAppPath(zookeeperRoot);
-      
+    } else {
+      String createDefaultZkNode = appConf.getGlobalOptions().getOption(AgentKeys.CREATE_DEF_ZK_NODE, "false");
+      if (createDefaultZkNode.equals("true")) {
+        String defaultZKPath = createZookeeperNode(clustername, false);
+        if (defaultZKPath != null) {
+          log.info("ZK node created for application instance: {}.", defaultZKPath);
+          zkPaths.setAppPath(defaultZKPath);
+        }
+      } else {
+        // default path in use: record it without creating the node
+        String defaultZKPath = createZookeeperNode(clustername, true);
+        log.info("ZK node assigned to application instance: {}.", defaultZKPath);
+        zkPaths.setAppPath(defaultZKPath);
+      }
     }
+
     builder.addZKBinding(zkPaths);
 
     //then propagate any package URI
@@ -646,8 +753,8 @@
    */
   public LaunchedApplication launchApplication(String clustername,
                                                Path clusterDirectory,
-                               AggregateConf instanceDefinition,
-                               boolean debugAM)
+                                               AggregateConf instanceDefinition,
+                                               boolean debugAM)
     throws YarnException, IOException {
 
 
@@ -894,7 +1001,9 @@
     if (clusterSecure) {
       // if the cluster is secure, make sure that
       // the relevant security settings go over
-      addConfOptionToCLI(commandLine, config, KEY_SECURITY_ENABLED);
+/*
+      addConfOptionToCLI(commandLine, config, KEY_SECURITY);
+*/
       addConfOptionToCLI(commandLine,
           config,
           DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
@@ -2190,7 +2299,7 @@
    * @throws IOException
    * @throws YarnException
    */
-  public List<String> listRegistedSliderInstances() throws
+  public List<String> listRegisteredSliderInstances() throws
       IOException,
       YarnException {
     try {
diff --git a/slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java b/slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
index 0c83e0c..e7b492b 100644
--- a/slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
@@ -19,6 +19,7 @@
 package org.apache.slider.client;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
@@ -74,7 +75,7 @@
     List<ApplicationReport> allApps = getApplications(types);
     List<ApplicationReport> results = new ArrayList<>();
     for (ApplicationReport report : allApps) {
-      if (user == null || user.equals(report.getUser())) {
+      if (StringUtils.isEmpty(user) || user.equals(report.getUser())) {
         results.add(report);
       }
     }
diff --git a/slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
index a0e29c9..0ba562a 100644
--- a/slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
+++ b/slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -151,6 +151,20 @@
 
   String HADOOP_USER_NAME = "HADOOP_USER_NAME";
   String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+  String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE";
 
   boolean PROPAGATE_RESOURCE_OPTION = true;
+
+  /**
+   * Security associated keys.
+   */
+  String SECURITY_DIR = "security";
+  String CRT_FILE_NAME = "ca.crt";
+  String CSR_FILE_NAME = "ca.csr";
+  String KEY_FILE_NAME = "ca.key";
+  String KEYSTORE_FILE_NAME = "keystore.p12";
+  String CRT_PASS_FILE_NAME = "pass.txt";
+  String PASSPHRASE = "DEV";
+  String PASS_LEN = "50";
+  String KEYSTORE_LOCATION = "ssl.server.keystore.location";
 }
diff --git a/slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java b/slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
index 272ae6a..3f16f25 100644
--- a/slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
+++ b/slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
@@ -93,8 +93,8 @@
    * The authentication method used when talking to this cluster;
    * any value other than "simple" implies a secure cluster.
    */
-  String KEY_SECURITY_ENABLED =
-      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
+  String KEY_SECURITY =
+      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
 
   /**
    * queue name
@@ -153,4 +153,6 @@
 
   String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH =
       "ipc.client.fallback-to-simple-auth-allowed";
+  String HADOOP_HTTP_FILTER_INITIALIZERS =
+      "hadoop.http.filter.initializers";
 }
diff --git a/slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java b/slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
index ff3fd52..2f0b9ac 100644
--- a/slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
+++ b/slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
@@ -198,7 +198,6 @@
     } catch (ParserConfigurationException | SAXException e) {
       throw new IOException(e);
     }
-
   }
   
   /**
diff --git a/slider-core/src/main/java/org/apache/slider/common/tools/Duration.java b/slider-core/src/main/java/org/apache/slider/common/tools/Duration.java
index 119991f..25b68ae 100644
--- a/slider-core/src/main/java/org/apache/slider/common/tools/Duration.java
+++ b/slider-core/src/main/java/org/apache/slider/common/tools/Duration.java
@@ -18,44 +18,93 @@
 
 package org.apache.slider.common.tools;
 
+import java.io.Closeable;
+
 /**
- * A duration in milliseconds
+ * A duration in milliseconds. This class can be used
+ * to count time, and to be polled to see if a time limit has
+ * passed.
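+ * <p>
+ * A minimal usage sketch; {@code work()} is a hypothetical stand-in
+ * for the operation being timed:
+ * <pre>
+ *   try (Duration d = new Duration().start()) {
+ *     work();   // finish() is invoked automatically via close()
+ *   }
+ * </pre>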
  */
-public class Duration {
+public class Duration implements Closeable {
   public long start, finish;
   public final long limit;
 
+  /**
+   * Create a duration instance with a limit of 0
+   */
   public Duration() {
     this(0);
   }
 
+  /**
+   * Create a duration with a limit specified in millis
+   * @param limit duration in milliseconds
+   */
   public Duration(long limit) {
     this.limit = limit;
   }
 
+  /**
+   * Start the duration timer; records the current time.
+   * @return self
+   */
   public Duration start() {
-    start = System.currentTimeMillis();
+    start = now();
     return this;
   }
 
+  /**
+   * The close operation relays to {@link #finish()}.
+   * Implementing it allows Duration instances to be automatically
+   * finish()'d in Java 7 try-with-resources blocks when used to measure durations.
+   */
+  @Override
+  public final void close() {
+    finish();
+  }
+
   public void finish() {
-    finish = System.currentTimeMillis();
+    finish = now();
+  }
+
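+  /**
+   * Get the current system time; subclasses may override this
+   * (for example, in tests) to supply a different clock.
+   */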
+  protected long now() {
+    return System.currentTimeMillis();
   }
 
   public long getInterval() {
     return finish - start;
   }
 
+  /**
+   * Probe for the limit having been exceeded.
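+   * <p>
+   * A polling sketch (the interval is illustrative):
+   * <pre>
+   *   Duration d = new Duration(5000).start();
+   *   while (!d.getLimitExceeded()) {
+   *     Thread.sleep(100);   // InterruptedException handling elided
+   *   }
+   * </pre>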
+   * @return true if a limit was set and the current time
+   * exceeds it.
+   */
   public boolean getLimitExceeded() {
-    return limit >= 0 && ((System.currentTimeMillis() - start) > limit);
+    return limit >= 0 && ((now() - start) > limit);
   }
 
   @Override
   public String toString() {
-    return "Duration " +
-           ((finish >= start)
-            ? (" of " + getInterval() + " millis")
-            : "undefined");
+    StringBuilder builder = new StringBuilder();
+    builder.append("Duration");
+    if (finish >= start) {
+      builder.append(" finished at ").append(getInterval()).append(" millis;");
+    } else {
+      if (start > 0) {
+        builder.append(" started but not yet finished;");
+      } else {
+        builder.append(" unstarted;");
+      }
+    }
+    if (limit > 0) {
+      builder.append(" limit: ").append(limit).append(" millis");
+      if (getLimitExceeded()) {
+        builder.append(" - exceeded");
+      }
+    }
+    return builder.toString();
   }
 
 
diff --git a/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 5b246d0..17f8b70 100644
--- a/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -20,6 +20,7 @@
 
 import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
 import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -36,7 +37,6 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -54,6 +54,7 @@
 import org.apache.slider.core.exceptions.MissingArgException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ClasspathConstructor;
+import org.apache.slider.server.services.utility.PatternValidator;
 import org.apache.zookeeper.server.util.KerberosUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -103,6 +104,7 @@
       "java.security.krb5.realm";
   public static final String JAVA_SECURITY_KRB5_KDC = "java.security.krb5.kdc";
 
+
   private SliderUtils() {
   }
 
@@ -301,12 +303,18 @@
     return file;
   }
 
+  private static final PatternValidator clusternamePattern
+      = new PatternValidator("[a-z][a-z0-9_-]*");
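+  // e.g. "hbase1" and "app-2" match; uppercase letters or a leading digit do not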
+
   /**
    * Normalize a cluster name then verify that it is valid
    * @param name proposed cluster name
    * @return true iff it is valid
    */
   public static boolean isClusternameValid(String name) {
+    return name != null && clusternamePattern.matches(name);
+  }
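+
+  // earlier validation logic, retained for reference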
+  public static boolean oldIsClusternameValid(String name) {
     if (name == null || name.isEmpty()) {
       return false;
     }
@@ -465,7 +473,7 @@
     }
     return trailing? 
            b.toString()
-           : (b.substring(0, b.length() - 1));
+           : (b.substring(0, b.length() - separator.length()));
   }
 
   /**
@@ -896,7 +904,8 @@
    * @return true if the slider client/service should be in secure mode
    */
   public static boolean isHadoopClusterSecure(Configuration conf) {
-    return conf.getBoolean(SliderXmlConfKeys.KEY_SECURITY_ENABLED, false);
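+    // secure iff any authentication method other than SIMPLE is configured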
+    return SecurityUtil.getAuthenticationMethod(conf) !=
+           UserGroupInformation.AuthenticationMethod.SIMPLE;
   }
 
   /**
@@ -945,22 +954,22 @@
         conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION));
     log.debug("hadoop.security.authorization={}",
         conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION));
-    SecurityUtil.setAuthenticationMethod(
-        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+/*    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);*/
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation authUser = UserGroupInformation.getCurrentUser();
     log.debug("Authenticating as " + authUser.toString());
     log.debug("Login user is {}", UserGroupInformation.getLoginUser());
     if (!UserGroupInformation.isSecurityEnabled()) {
       throw new BadConfigException("Although secure mode is enabled," +
-                                   "the application has already set up its user as an insecure entity %s",
-                                   authUser);
+               "the application has already set up its user as an insecure entity %s",
+               authUser);
     }
     if (authUser.getAuthenticationMethod() ==
         UserGroupInformation.AuthenticationMethod.SIMPLE) {
       throw new BadConfigException("Auth User is not Kerberized %s" +
-                   " -security has already been set up with the wrong authentication method",
-                                   authUser);
+       " -security has already been set up with the wrong authentication method",
+                       authUser);
 
     }
 
@@ -1135,9 +1144,9 @@
     if (usingMiniMRCluster) {
       // for mini cluster we pass down the java CP properties
       // and nothing else
-      classpath.appendAll(classpath.javaVMClasspath());
+      classpath.appendAll(classpath.localJVMClasspath());
     } else {
-      classpath.addLibDir("./" + libdir);
+      classpath.addLibDir(libdir);
       if (sliderConfDir != null) {
         classpath.addClassDirectory(sliderConfDir);
       }
@@ -1423,8 +1432,8 @@
   }
 
   public static InputStream getApplicationResourceInputStream(FileSystem fs,
-                                                       Path appPath,
-                                                       String entry)
+                                                              Path appPath,
+                                                              String entry)
       throws IOException {
     InputStream is = null;
     FSDataInputStream appStream = fs.open(appPath);
@@ -1434,12 +1443,26 @@
     while (!done && (zipEntry = zis.getNextZipEntry()) != null) {
       if (entry.equals(zipEntry.getName())) {
         int size = (int) zipEntry.getSize();
-        byte[] content = new byte[size];
-        int offset = 0;
-        while (offset < size) {
-          offset += zis.read(content, offset, size - offset);
+        if (size != -1) {
+          log.info("Reading {} of size {}", zipEntry.getName(), zipEntry.getSize());
+          byte[] content = new byte[size];
+          int offset = 0;
+          while (offset < size) {
+            offset += zis.read(content, offset, size - offset);
+          }
+          is = new ByteArrayInputStream(content);
+        } else {
+          log.info("Size unknown. Reading {}", zipEntry.getName());
+          ByteArrayOutputStream baos = new ByteArrayOutputStream();
+          while (true) {
+            int byteRead = zis.read();
+            if (byteRead == -1) {
+              break;
+            }
+            baos.write(byteRead);
+          }
+          is = new ByteArrayInputStream(baos.toByteArray());
         }
-        is = new ByteArrayInputStream(content);
         done = true;
       }
     }
diff --git a/slider-core/src/main/java/org/apache/slider/core/launch/ClasspathConstructor.java b/slider-core/src/main/java/org/apache/slider/core/launch/ClasspathConstructor.java
index ee4c4d1..3527149 100644
--- a/slider-core/src/main/java/org/apache/slider/core/launch/ClasspathConstructor.java
+++ b/slider-core/src/main/java/org/apache/slider/core/launch/ClasspathConstructor.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.common.tools.SliderUtils;
 
-import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -38,8 +37,8 @@
  */
 public class ClasspathConstructor {
 
-  //  public static final String CLASS_PATH_SEPARATOR = ApplicationConstants.CLASS_PATH_SEPARATOR;
-  public static final String CLASS_PATH_SEPARATOR = File.pathSeparator;
+  public static final String CLASS_PATH_SEPARATOR = ApplicationConstants.CLASS_PATH_SEPARATOR;
+//  public static final String CLASS_PATH_SEPARATOR = File.pathSeparator;
   private final List<String> pathElements = new ArrayList<>();
 
   public ClasspathConstructor() {
@@ -123,12 +122,12 @@
 
 
   public void addRemoteClasspathEnvVar() {
-    append(ApplicationConstants.Environment.CLASSPATH.$());
+    append(ApplicationConstants.Environment.CLASSPATH.$$());
   }
 
 
   public void insertRemoteClasspathEnvVar() {
-    append(ApplicationConstants.Environment.CLASSPATH.$());
+    append(ApplicationConstants.Environment.CLASSPATH.$$());
   }
 
 
@@ -152,12 +151,22 @@
     return dir;
   }
 
+  /**
+   * Split a classpath. This uses the local path separator so MUST NOT
+   * be used to work with remote classpaths.
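+   * <p>
+   * For example, on a platform whose path separator is {@code ":"}:
+   * <pre>
+   *   splitClasspath("/lib/a.jar:/classes")   // yields ["/lib/a.jar", "/classes"]
+   * </pre>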
+   * @param localpath local path
+   * @return the split-up classpath entries
+   */
   public Collection<String> splitClasspath(String localpath) {
     String separator = System.getProperty("path.separator");
     return StringUtils.getStringCollection(localpath, separator);
   }
 
-  public Collection<String> javaVMClasspath() {
+  /**
+   * Get the local JVM classpath split up
+   * @return the list of entries on the JVM classpath env var
+   */
+  public Collection<String> localJVMClasspath() {
     return splitClasspath(System.getProperty("java.class.path"));
   }
 
diff --git a/slider-core/src/main/java/org/apache/slider/core/launch/JavaCommandLineBuilder.java b/slider-core/src/main/java/org/apache/slider/core/launch/JavaCommandLineBuilder.java
index e8b6eb1..7b60461 100644
--- a/slider-core/src/main/java/org/apache/slider/core/launch/JavaCommandLineBuilder.java
+++ b/slider-core/src/main/java/org/apache/slider/core/launch/JavaCommandLineBuilder.java
@@ -38,7 +38,7 @@
    * @return the path to the Java binary
    */
   protected String getJavaBinary() {
-    return ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java";
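+    // $$() is YARN's cross-platform variable expansion; the NodeManager substitutes it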
+    return ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java";
   }
 
   /**
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/ExitCodeProvider.java b/slider-core/src/main/java/org/apache/slider/core/main/ExitCodeProvider.java
index 3059c05..503b9b9 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/ExitCodeProvider.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/ExitCodeProvider.java
@@ -1,5 +1,5 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
+ *  Licensed to the Apache Software Foundation (ASF) under one
  *  or more contributor license agreements.  See the NOTICE file
  *  distributed with this work for additional information
  *  regarding copyright ownership.  The ASF licenses this file
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/IrqHandler.java b/slider-core/src/main/java/org/apache/slider/core/main/IrqHandler.java
index 9e7a596..42442d1 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/IrqHandler.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/IrqHandler.java
@@ -1,5 +1,5 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
+ *  Licensed to the Apache Software Foundation (ASF) under one
  *  or more contributor license agreements.  See the NOTICE file
  *  distributed with this work for additional information
  *  regarding copyright ownership.  The ASF licenses this file
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/LauncherExitCodes.java b/slider-core/src/main/java/org/apache/slider/core/main/LauncherExitCodes.java
index b172260..6fdebcd 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/LauncherExitCodes.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/LauncherExitCodes.java
@@ -20,7 +20,7 @@
 
 
 /*
- * YARN Codes, 
+ * Common exit codes.
  * Exit codes from 32 up are relative to a base value that
  * we put a fair way up from the base numbers, so that
  * applications can have their own set of failures
@@ -30,59 +30,58 @@
    * 0: success
    */
   int EXIT_SUCCESS                    =  0;
-  
+
   /**
    * -1: generic "false" response. The operation worked but
    * the result was not true
    */
   int EXIT_FALSE                      = -1;
-  
+
   /**
    * Exit code when a client requested service termination: {@value}
    */
   int EXIT_CLIENT_INITIATED_SHUTDOWN  =  1;
-  
+
   /**
    * Exit code when targets could not be launched: {@value}
    */
   int EXIT_TASK_LAUNCH_FAILURE        =  2;
-  
+
   /**
    * Exit code when an exception was thrown from the service: {@value}
    */
   int EXIT_EXCEPTION_THROWN           = 32;
-  
+
   /**
    * Exit code when a usage message was printed: {@value}
    */
   int EXIT_USAGE                      = 33;
-  
+
   /**
    * Exit code when something happened but we can't be specific: {@value}
    */
   int EXIT_OTHER_FAILURE              = 34;
-  
+
   /**
    * Exit code when a control-C, kill -3, signal was picked up: {@value}
    */
-                                
+
   int EXIT_INTERRUPTED                = 35;
-  
+
   /**
    * Exit code when the command line doesn't parse: {@value}, or
    * when it is otherwise invalid.
    */
   int EXIT_COMMAND_ARGUMENT_ERROR     = 36;
-  
+
   /**
    * Exit code when the configuration is invalid or incomplete: {@value}
    */
   int EXIT_BAD_CONFIGURATION          = 37;
- 
+
   /**
    * Exit code when the configurations in valid/incomplete: {@value}
    */
   int EXIT_CONNECTIVITY_PROBLEM       = 38;
- 
-  
+
 }
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/RunService.java b/slider-core/src/main/java/org/apache/slider/core/main/RunService.java
index 9a52b38..c3a1d0e 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/RunService.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/RunService.java
@@ -1,5 +1,5 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
+ *  Licensed to the Apache Software Foundation (ASF) under one
  *  or more contributor license agreements.  See the NOTICE file
  *  distributed with this work for additional information
  *  regarding copyright ownership.  The ASF licenses this file
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java b/slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
index 8277a51..27813b7 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
@@ -1,5 +1,5 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
+ *  Licensed to the Apache Software Foundation (ASF) under one
  *  or more contributor license agreements.  See the NOTICE file
  *  distributed with this work for additional information
  *  regarding copyright ownership.  The ASF licenses this file
@@ -30,22 +30,42 @@
   implements ExitCodeProvider, LauncherExitCodes {
 
   private final int exitCode;
-  
+
+  /**
+   * Create an exception with the specific exit code
+   * @param exitCode exit code
+   * @param cause cause of the exception
+   */
   public ServiceLaunchException(int exitCode, Throwable cause) {
     super(cause);
     this.exitCode = exitCode;
   }
 
+  /**
+   * Create an exception with the specific exit code and text
+   * @param exitCode exit code
+   * @param message message to use in exception
+   */
   public ServiceLaunchException(int exitCode, String message) {
     super(message);
     this.exitCode = exitCode;
   }
 
+  /**
+   * Create an exception with the specific exit code, text and cause
+   * @param exitCode exit code
+   * @param message message to use in exception
+   * @param cause cause of the exception
+   */
   public ServiceLaunchException(int exitCode, String message, Throwable cause) {
     super(message, cause);
     this.exitCode = exitCode;
   }
 
+  /**
+   * Get the exit code
+   * @return the exit code
+   */
   @Override
   public int getExitCode() {
     return exitCode;
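
With the constructors now documented above, a caller can attach an exit code, a message, or both. A hedged usage sketch (the ZK client call is hypothetical):

    // Hypothetical: fail the launch with the connectivity exit code,
    // keeping the underlying cause for the logs.
    try {
      zkClient.connect();
    } catch (IOException e) {
      throw new ServiceLaunchException(
          LauncherExitCodes.EXIT_CONNECTIVITY_PROBLEM,
          "Cannot reach the ZK quorum: " + e, e);
    }
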
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/ServiceLauncher.java b/slider-core/src/main/java/org/apache/slider/core/main/ServiceLauncher.java
index e5e72fa..c92dfda 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/ServiceLauncher.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/ServiceLauncher.java
@@ -1,25 +1,24 @@
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *  
- *       http://www.apache.org/licenses/LICENSE-2.0
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.slider.core.main;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.service.Service;
@@ -27,6 +26,8 @@
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -64,34 +65,31 @@
  */
 @SuppressWarnings("UseOfSystemOutOrSystemErr")
 public class ServiceLauncher<S extends Service>
-  implements LauncherExitCodes, IrqHandler.Interrupted {
-  private static final Log LOG = LogFactory.getLog(ServiceLauncher.class);
+  implements LauncherExitCodes, IrqHandler.Interrupted,
+    Thread.UncaughtExceptionHandler {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ServiceLauncher.class);
+
   protected static final int PRIORITY = 30;
 
   public static final String NAME = "ServiceLauncher";
-  /**
-   * name of class for entry point strings: {@value}
-   */
-  public static final String ENTRY_POINT =
-    "org.apache.hadoop.yarn.service.launcher." + NAME;
-
-
-  public static final String USAGE_MESSAGE =
-    "Usage: " + NAME + " classname [--conf <conf file>] <service arguments> | ";
 
   /**
    * Name of the "--conf" argument. 
    */
   public static final String ARG_CONF = "--conf";
+
+  public static final String USAGE_MESSAGE =
+    "Usage: " + NAME + " classname ["+ARG_CONF + "<conf file>] <service arguments> | ";
   static final int SHUTDOWN_TIME_ON_INTERRUPT = 30 * 1000;
 
   private volatile S service;
   private int serviceExitCode;
-  private final List<IrqHandler> interruptHandlers = new ArrayList<>(1);
+  private final List<IrqHandler> interruptHandlers = new ArrayList<IrqHandler>(1);
   private Configuration configuration;
   private String serviceClassName;
   private static AtomicBoolean signalAlreadyReceived = new AtomicBoolean(false);
-  
+
 
   /**
    * Create an instance of the launcher
@@ -147,29 +145,23 @@
    * @param conf configuration
    * @param processedArgs arguments after the configuration parameters
    * have been stripped out.
-   * @param addShutdownHook should a shutdown hook be added to terminate
-   * this service on shutdown. Tests should set this to false.
+   * @param addProcessHooks should process failure handlers be added to
+   * terminate this service on shutdown. Tests should set this to false.
    * @throws ClassNotFoundException classname not on the classpath
    * @throws IllegalAccessException not allowed at the class
    * @throws InstantiationException not allowed to instantiate it
    * @throws InterruptedException thread interrupted
-   * @throws IOException any IO exception
+   * @throws Throwable any other failure
    */
   public int launchService(Configuration conf,
-                           String[] processedArgs,
-                           boolean addShutdownHook)
-    throws Throwable,
-           ClassNotFoundException,
-           InstantiationException,
-           IllegalAccessException,
-           ExitUtil.ExitException {
+      String[] processedArgs,
+      boolean addProcessHooks)
+    throws Throwable {
 
     instantiateService(conf);
 
-    //Register the interrupt handlers
-    registerInterruptHandler();
-    //and the shutdown hook
-    if (addShutdownHook) {
+    // add any process shutdown hooks
+    if (addProcessHooks) {
       ServiceShutdownHook shutdownHook = new ServiceShutdownHook(service);
       ShutdownHookManager.get().addShutdownHook(shutdownHook, PRIORITY);
     }
@@ -179,7 +171,8 @@
       //if its a runService, pass in the conf and arguments before init)
       runService = (RunService) service;
       configuration = runService.bindArgs(configuration, processedArgs);
-      assert configuration != null : "null configuration returned by bindArgs()";
+      Preconditions.checkNotNull(configuration,
+          "null configuration returned by bindArgs()");
     }
 
     //some class constructors init; here this is picked up on.
@@ -191,7 +184,7 @@
     if (runService != null) {
       //assume that runnable services are meant to run from here
       exitCode = runService.runService();
-      LOG.debug("Service exited with exit code " + exitCode);
+      LOG.debug("Service exited with exit code {}", exitCode);
 
     } else {
       //run the service until it stops or an interrupt happens on a different thread.
@@ -212,26 +205,27 @@
    * @throws ClassNotFoundException no such class
    * @throws InstantiationException no empty constructor,
    * problems with dependencies
-   * @throws IllegalAccessException no access rights
+   * @throws IllegalAccessException no access rights to the class
+   * @throws NoSuchMethodException no zero-argument constructor
+   * @throws InvocationTargetException the constructor raised an exception
+   * @throws ExitUtil.ExitException the instantiated class is not
+   * a Service
    */
-  public Service instantiateService(Configuration conf) throws
-                                                        ClassNotFoundException,
-                                                        InstantiationException,
-                                                        IllegalAccessException,
-                                                        ExitUtil.ExitException,
-      NoSuchMethodException,
-      InvocationTargetException {
+  public Service instantiateService(Configuration conf)
+      throws ClassNotFoundException, InstantiationException, IllegalAccessException,
+      ExitUtil.ExitException, NoSuchMethodException, InvocationTargetException {
+    Preconditions.checkArgument(conf != null, "null conf");
     configuration = conf;
 
     //Instantiate the class -this requires the service to have a public
     // zero-argument constructor
     Class<?> serviceClass =
-      this.getClass().getClassLoader().loadClass(serviceClassName);
+        this.getClass().getClassLoader().loadClass(serviceClassName);
     Object instance = serviceClass.getConstructor().newInstance();
     if (!(instance instanceof Service)) {
       //not a service
       throw new ExitUtil.ExitException(EXIT_BAD_CONFIGURATION,
-                                       "Not a Service class: " + serviceClassName);
+          "Not a Service class: " + serviceClassName);
     }
 
     service = (S) instance;
@@ -241,20 +235,18 @@
   /**
    * Register this class as the handler for the control-C interrupt.
    * Can be overridden for testing.
-   * @throws IOException on a failure to add the handler
    */
-  protected void registerInterruptHandler() throws IOException {
+  protected void registerInterruptHandler() {
     try {
       interruptHandlers.add(new IrqHandler(IrqHandler.CONTROL_C, this));
       interruptHandlers.add(new IrqHandler(IrqHandler.SIGTERM, this));
     } catch (IOException e) {
-      error("Signal handler setup failed : " + e, e);
+      error("Signal handler setup failed : {}" + e, e);
     }
   }
 
   /**
-   * The service has been interrupted. 
-   * Trigger something resembling an elegant shutdown;
+   * The service has been interrupted -try to shut down the service.
    * Give the service time to do this before the exit operation is called 
    * @param interruptData the interrupted data.
    */
@@ -287,17 +279,73 @@
     exit(EXIT_INTERRUPTED, message);
   }
 
+  /**
+   * Uncaught exception handler.
+   * If an error is raised: shutdown
+   * The state of the system is unknown at this point -attempting
+   * a clean shutdown is dangerous. Instead: exit
+   * @param thread thread that failed
+   * @param exception exception
+   */
+  @Override
+  public void uncaughtException(Thread thread, Throwable exception) {
+    if (ShutdownHookManager.get().isShutdownInProgress()) {
+      LOG.error("Thread {} threw an error during shutdown: {}.",
+          thread.toString(),
+          exception,
+          exception);
+    } else if (exception instanceof Error) {
+      try {
+        LOG.error("Thread {} threw an error: {}. Shutting down",
+            thread.toString(),
+            exception,
+            exception);
+      } catch (Throwable err) {
+        // Don't let a logging failure prevent the process from exiting
+      }
+      if (exception instanceof OutOfMemoryError) {
+        // After catching an OOM java says it is undefined behavior, so don't
+        // even try to clean up or we can get stuck on shutdown.
+        try {
+          System.err.println("Halting due to Out Of Memory Error...");
+        } catch (Throwable err) {
+          // Again, don't let logging issues prevent the halt.
+        }
+        ExitUtil.halt(EXIT_EXCEPTION_THROWN);
+      } else {
+        // error other than OutOfMemory
+        exit(convertToExitException(exception));
+      }
+    } else {
+      // simple exception in a thread. There's a policy decision here:
+      // terminate the service vs. keep going after a thread has failed
+      LOG.error("Thread {} threw an exception: {}",
+          thread.toString(),
+          exception,
+          exception);
+    }
+  }
+
+  /**
+   * Print a warning: currently this goes to stderr
+   * @param text the warning text
+   */
   protected void warn(String text) {
     System.err.println(text);
   }
 
-
+  /**
+   * Report an error. The message is printed to stderr; the exception
+   * is logged via the current logger.
+   * @param message message for the user
+   * @param thrown the exception thrown
+   */
   protected void error(String message, Throwable thrown) {
     String text = "Exception: " + message;
-    System.err.println(text);
+    warn(text);
     LOG.error(text, thrown);
   }
-  
+
   /**
    * Exit the code.
   * This method can be overridden for testing, throwing an 
@@ -350,6 +398,7 @@
    */
   public void launchServiceAndExit(List<String> args) {
 
+    registerInterruptHandler();
     //Currently the config just the default
     Configuration conf = new Configuration();
     String[] processedArgs = extractConfigurationArgs(conf, args);
@@ -432,7 +481,7 @@
           if (failureState == Service.STATE.STOPPED) {
             //the failure occurred during shutdown, not important enough to bother
             //the user as it may just scare them
-            LOG.debug("Failure during shutdown: " + failure, failure);
+            LOG.debug("Failure during shutdown:{} ", failure, failure);
           } else {
             //throw it for the catch handlers to deal with
             throw failure;
@@ -441,33 +490,47 @@
       }
       exitException = new ExitUtil.ExitException(exitCode,
                                      "In " + serviceClassName);
-      //either the service succeeded, or an error raised during shutdown, 
-      //which we don't worry that much about
+      // either the service succeeded, or an error raised during shutdown, 
+      // which we don't worry that much about
     } catch (ExitUtil.ExitException ee) {
       exitException = ee;
     } catch (Throwable thrown) {
-      int exitCode;
-      String message = thrown.getMessage();
-      if (message == null) {
-        message = thrown.toString();
-      }
-      LOG.error(message) ;
-      if (thrown instanceof ExitCodeProvider) {
-        exitCode = ((ExitCodeProvider) thrown).getExitCode();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("While running " + getServiceName() + ": " + message, thrown);
-        }
-      } else {
-        //not any of the service launcher exceptions -assume something worse
-        error(message, thrown);
-        exitCode = EXIT_EXCEPTION_THROWN;
-        }
-      exitException = new ExitUtil.ExitException(exitCode, message);
-      exitException.initCause(thrown);
+      exitException = convertToExitException(thrown);
     }
     return exitException;
   }
 
+  /**
+   * Convert the exception to one that can be handed off to ExitUtil;
+   * if it is of the right type it is passed through as is. If not, a 
+   * new exception with the exit code {@link #EXIT_EXCEPTION_THROWN}
+   * is created, with the argument <code>thrown</code> as the inner cause
+   * @param thrown the exception thrown
+   * @return an exception to terminate the process with
+   */
+  protected ExitUtil.ExitException convertToExitException(Throwable thrown) {
+    ExitUtil.ExitException exitException;
+    int exitCode;
+    String message = thrown.getMessage();
+    if (message == null) {
+      message = thrown.toString();
+    }
+    if (thrown instanceof ExitCodeProvider) {
+      exitCode = ((ExitCodeProvider) thrown).getExitCode();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("While running {}: {}", getServiceName(), message, thrown);
+      }
+      LOG.error(message);
+    } else {
+      // not any of the service launcher exceptions -assume something worse
+      error(message, thrown);
+      exitCode = EXIT_EXCEPTION_THROWN;
+    }
+    exitException = new ExitUtil.ExitException(exitCode, message);
+    exitException.initCause(thrown);
+    return exitException;
+  }
+
 
   /**
    * Build a log message for starting up and shutting down. 
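   */

For orientation, the launcher drives services through the RunService contract used in launchService() above: bindArgs() receives the configuration and the stripped arguments before init, and runService() supplies the exit code. A minimal sketch, with the interface signatures inferred from those call sites (names illustrative):

    // Hypothetical RunService implementation; signatures assumed from
    // the bindArgs()/runService() calls in ServiceLauncher.launchService().
    public class EchoService extends AbstractService implements RunService {
      private String[] args;

      public EchoService() {
        super("EchoService");
      }

      @Override
      public Configuration bindArgs(Configuration config, String[] args) {
        this.args = args;
        return config; // must not be null: checked with Preconditions
      }

      @Override
      public int runService() throws Throwable {
        for (String arg : args) {
          System.out.println(arg);
        }
        return LauncherExitCodes.EXIT_SUCCESS;
      }
    }
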
diff --git a/slider-core/src/main/java/org/apache/slider/core/main/ServiceShutdownHook.java b/slider-core/src/main/java/org/apache/slider/core/main/ServiceShutdownHook.java
index 82e0e27..83448ad 100644
--- a/slider-core/src/main/java/org/apache/slider/core/main/ServiceShutdownHook.java
+++ b/slider-core/src/main/java/org/apache/slider/core/main/ServiceShutdownHook.java
@@ -1,5 +1,5 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
+ *  Licensed to the Apache Software Foundation (ASF) under one
  *  or more contributor license agreements.  See the NOTICE file
  *  distributed with this work for additional information
  *  regarding copyright ownership.  The ASF licenses this file
@@ -18,10 +18,10 @@
 
 package org.apache.slider.core.main;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.util.ShutdownHookManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.lang.ref.WeakReference;
 
@@ -33,13 +33,14 @@
  * been stopped and dereferenced elsewhere.
  */
 public class ServiceShutdownHook implements Runnable {
-  private static final Log LOG = LogFactory.getLog(ServiceShutdownHook.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ServiceShutdownHook.class);
 
-  private WeakReference<Service> serviceRef;
+  private final WeakReference<Service> serviceRef;
   private Runnable hook;
 
   public ServiceShutdownHook(Service service) {
-    serviceRef = new WeakReference<>(service);
+    serviceRef = new WeakReference<Service>(service);
   }
 
   public void register(int priority) {
@@ -48,20 +49,24 @@
     ShutdownHookManager.get().addShutdownHook(hook, priority);
   }
 
-  public void unregister() {
+  public synchronized void unregister() {
     if (hook != null) {
       try {
         ShutdownHookManager.get().removeShutdownHook(hook);
       } catch (IllegalStateException e) {
-        LOG.info("Failed to unregister shutdown hook",e);
+        LOG.info("Failed to unregister shutdown hook: {}", e, e);
       }
       hook = null;
     }
   }
 
-//  @Override
+  @Override
   public void run() {
-    Service service = serviceRef.get();
+    Service service;
+    synchronized (this) {
+      service = serviceRef.get();
+      serviceRef.clear();
+    }
     if (service == null) {
       return;
     }
@@ -69,7 +74,7 @@
       // Stop the  Service
       service.stop();
     } catch (Throwable t) {
-      LOG.info("Error stopping " + service.getName(), t);
+      LOG.info("Error stopping {}: {}", service.getName(), t);
     }
   }
 }
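
The weak reference is the design point here: the hook never pins a stopped service in memory, and run() simply finds null once everything else has let go. Registration is two calls (a sketch; both classes sit in org.apache.slider.core.main):

    // Register a JVM shutdown hook that stops the service on process exit.
    ServiceShutdownHook hook = new ServiceShutdownHook(service);
    hook.register(ServiceLauncher.PRIORITY);   // priority 30
    // ... in tests, detach it again so the service can be GC'd:
    hook.unregister();
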
diff --git a/slider-core/src/main/java/org/apache/slider/core/registry/info/CustomRegistryConstants.java b/slider-core/src/main/java/org/apache/slider/core/registry/info/CustomRegistryConstants.java
index 1eb87c6..38fb4a5 100644
--- a/slider-core/src/main/java/org/apache/slider/core/registry/info/CustomRegistryConstants.java
+++ b/slider-core/src/main/java/org/apache/slider/core/registry/info/CustomRegistryConstants.java
@@ -32,7 +32,7 @@
       "org.apache.slider.publisher";
   
   public static final String AGENT_REST_API =
-      "org.apache.slider.publisher";
+      "org.apache.slider.agents";
 
   public static final String AM_IPC_PROTOCOL =
       "org.apache.slider.appmaster";
diff --git a/slider-core/src/main/java/org/apache/slider/core/registry/info/RegisteredEndpoint.java b/slider-core/src/main/java/org/apache/slider/core/registry/info/RegisteredEndpoint.java
index 3dae7fc..f3477d0 100644
--- a/slider-core/src/main/java/org/apache/slider/core/registry/info/RegisteredEndpoint.java
+++ b/slider-core/src/main/java/org/apache/slider/core/registry/info/RegisteredEndpoint.java
@@ -19,7 +19,6 @@
 package org.apache.slider.core.registry.info;
 
 import org.apache.slider.core.exceptions.SliderException;
-import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
@@ -48,7 +47,12 @@
   /**
    * "hostname:port" pair: {@value}
    */
-  public static final String TYPE_ADDRESS = "address";
+  public static final String TYPE_INETADDRESS = "inetaddress";
+  
+  /**
+   * simple path string: {@value}
+   */
+  public static final String TYPE_PATH = "path";
 
   // standard protocols
 
@@ -86,19 +90,31 @@
    */
   public static final String PROTOCOL_HADOOP_PROTOBUF = "org.apache.hadoop.ipc.Protobuf";
 
-  public String value;
+  /**
+   * The address -format is driven by the type entry
+   */
+  public String address;
+
+  /**
+   * Protocol
+   */
   public String protocol = "";
+
   public String type = "";
+
+  /**
+   * Human readable type
+   */
   public String description = "";
   
   public RegisteredEndpoint() {
   }
 
-  public RegisteredEndpoint(String value,
+  public RegisteredEndpoint(String address,
                             String protocol,
                             String type,
                             String description) {
-    this.value = value;
+    this.address = address;
     this.protocol = protocol;
     this.type = type;
     this.description = description;
@@ -113,7 +129,7 @@
   public RegisteredEndpoint(URI uri,
                             String description) {
     
-    this.value = uri.toString();
+    this.address = uri.toString();
     this.protocol = uri.getScheme();
     this.type = TYPE_URL;
     this.description = description;
@@ -128,9 +144,9 @@
     String protocol,
       String description) {
     
-    this.value = address.toString();
+    this.address = address.toString();
     this.protocol = protocol;
-    this.type = TYPE_ADDRESS;
+    this.type = TYPE_INETADDRESS;
     this.description = description;
   }
 
@@ -153,10 +169,10 @@
   public URL asURL() throws SliderException {
     verifyEndpointType(TYPE_URL);
     try {
-      return new URL(value);
+      return new URL(address);
     } catch (MalformedURLException e) {
       throw new SliderException(-1, e,
-          "could not create a URL from %s : %s", value, e.toString());
+          "could not create a URL from %s : %s", address, e.toString());
     }
   }
 
@@ -165,20 +181,16 @@
     final StringBuilder sb =
         new StringBuilder();
     if (TYPE_URL.equals(type)) {
-      sb.append(value);
+      sb.append(address);
     } else {
       sb.append("protocol='").append(protocol).append('\'');
-      sb.append(" value='").append(value).append('\'');
+      sb.append(" address='").append(address).append('\'');
       sb.append(" type='").append(type).append('\'');
     }
     sb.append(" -- \"").append(description).append('"');
     return sb.toString();
   }
 
-  @JsonIgnore
-  public boolean isHttpProtocol() {
-    return PROTOCOL_HTTP.equals(protocol) || PROTOCOL_HTTPS.equals(protocol);
-  }
 
   /**
    * Verify that an endpoint is of the desired type
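   */

The value-to-address rename ripples through the helper constructors; a caller publishing a web endpoint would now look something like this (URI and description illustrative):

    // Hypothetical: publish an AM web endpoint, then read it back as a URL.
    RegisteredEndpoint web = new RegisteredEndpoint(
        new URI("http://host:8088/ws"), "AM web services");
    // web.type is TYPE_URL; web.address holds the URI text
    URL url = web.asURL();  // throws SliderException on a malformed address
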
diff --git a/slider-core/src/main/java/org/apache/slider/core/registry/info/ServiceInstanceData.java b/slider-core/src/main/java/org/apache/slider/core/registry/info/ServiceInstanceData.java
index 1d8c561..80f0b34 100644
--- a/slider-core/src/main/java/org/apache/slider/core/registry/info/ServiceInstanceData.java
+++ b/slider-core/src/main/java/org/apache/slider/core/registry/info/ServiceInstanceData.java
@@ -39,6 +39,8 @@
 
   public String serviceType;
   public String id;
+  public String description;
+  public String yarnApplicationId;
   public long registrationTimeUTC;
 
   /**
diff --git a/slider-core/src/main/java/org/apache/slider/core/zk/BlockingZKWatcher.java b/slider-core/src/main/java/org/apache/slider/core/zk/BlockingZKWatcher.java
index 62ebff3..ca49888 100644
--- a/slider-core/src/main/java/org/apache/slider/core/zk/BlockingZKWatcher.java
+++ b/slider-core/src/main/java/org/apache/slider/core/zk/BlockingZKWatcher.java
@@ -23,6 +23,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.ConnectException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 public class BlockingZKWatcher implements Watcher {
@@ -49,7 +50,8 @@
    * @param timeout timeout in millis
    */
 
-  public void waitForZKConnection(int timeout) throws InterruptedException {
+  public void waitForZKConnection(int timeout)
+      throws InterruptedException, ConnectException {
     synchronized (connectedFlag) {
       if (!connectedFlag.get()) {
         log.info("waiting for ZK event");
@@ -57,7 +59,9 @@
         connectedFlag.wait(timeout);
       }
     }
-    assert connectedFlag.get();
+    if (!connectedFlag.get()) {
+      throw new ConnectException("Unable to connect to ZK quorum");
+    }
   }
 
 }
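
Replacing the assert with a checked ConnectException means callers see the failure even when assertions are disabled. Typical use (sketch; quorum string and timeout are illustrative):

    // Hypothetical caller: fail fast if ZK does not connect in time.
    BlockingZKWatcher watcher = new BlockingZKWatcher();
    ZooKeeper zk = new ZooKeeper("zk1:2181", 15000, watcher);
    try {
      watcher.waitForZKConnection(30 * 1000);
    } catch (ConnectException e) {
      zk.close();
      throw e;
    }
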
diff --git a/slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
index 6270123..54aeb4f 100644
--- a/slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
+++ b/slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
@@ -250,13 +250,34 @@
                                      KeeperException {
     try {
       zookeeper.delete(path, -1);
+      log.debug("Deleting {}", path);
       return true;
     } catch (KeeperException.NoNodeException ignored) {
       return false;
     }
   }
 
-/**
+  /**
+   * Recursively delete a node; does not throw an exception if the node does not exist.
+   * @param path the path to delete
+   * @return true if delete was successful
+   */
+  public boolean deleteRecursive(String path) throws KeeperException, InterruptedException {
+
+    try {
+      List<String> children = zookeeper.getChildren(path, false);
+      for (String child : children) {
+        deleteRecursive(path + "/" + child);
+      }
+      delete(path);
+    } catch (KeeperException.NoNodeException ignored) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
  * Build the path to a cluster; exists once the cluster has come up.
  * Even before that, a ZK watcher could wait for it.
  * @param username user
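
deleteRecursive() walks the children depth-first and tolerates nodes vanishing underneath it via the NoNodeException catch, so cleanup of a whole subtree is one call (path illustrative):

    // Hypothetical cleanup of a cluster's ZK subtree on destroy.
    if (zk.deleteRecursive("/services/slider/users/alice/cluster1")) {
      log.info("ZK state removed");
    }
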
diff --git a/slider-core/src/main/java/org/apache/slider/core/zk/ZookeeperUtils.java b/slider-core/src/main/java/org/apache/slider/core/zk/ZookeeperUtils.java
index ea56bc2..8bf25f9 100644
--- a/slider-core/src/main/java/org/apache/slider/core/zk/ZookeeperUtils.java
+++ b/slider-core/src/main/java/org/apache/slider/core/zk/ZookeeperUtils.java
@@ -45,9 +45,15 @@
   public static List<String> splitToPairs(String hostPortQuorumList) {
     // split a quorum list into host:port pairs
     String[] strings = StringUtils.getStrings(hostPortQuorumList);
-    List<String> tuples = new ArrayList<>(strings.length);
-    for (String s : strings) {
-      tuples.add(s.trim());
+    int len = 0;
+    if (strings != null) {
+      len = strings.length;
+    }
+    List<String> tuples = new ArrayList<>(len);
+    if (strings != null) {
+      for (String s : strings) {
+        tuples.add(s.trim());
+      }
     }
     return tuples;
   }
@@ -60,9 +66,15 @@
   public static List<HostAndPort> splitToHostsAndPorts(String hostPortQuorumList) {
     // split a quorum list into host:port pairs
     String[] strings = StringUtils.getStrings(hostPortQuorumList);
-    List<HostAndPort> list = new ArrayList<>(strings.length);
-    for (String s : strings) {
-      list.add(HostAndPort.fromString(s.trim()));
+    int len = 0;
+    if (strings != null) {
+      len = strings.length;
+    }
+    List<HostAndPort> list = new ArrayList<>(len);
+    if (strings != null) {
+      for (String s : strings) {
+        list.add(HostAndPort.fromString(s.trim()));
+      }
     }
     return list;
   }
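
Both splitters now guard against StringUtils.getStrings() returning null for an empty quorum string, yielding an empty list instead of an NPE. For example (sketch):

    // The guard makes the empty-quorum case safe.
    List<HostAndPort> hosts =
        ZookeeperUtils.splitToHostsAndPorts("zk1:2181, zk2:2181");
    // hosts.size() == 2
    List<String> none = ZookeeperUtils.splitToPairs("");
    // none.isEmpty(), rather than a NullPointerException
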
diff --git a/slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java b/slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
index a06134b..e35227c 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
@@ -30,12 +30,13 @@
 import org.apache.slider.core.main.ExitCodeProvider;
 import org.apache.slider.core.registry.info.RegisteredEndpoint;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
+import org.apache.slider.server.appmaster.AMViewForProviders;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations;
 import org.apache.slider.server.services.registry.RegistryViewForProviders;
-import org.apache.slider.server.services.utility.ForkedProcessService;
-import org.apache.slider.server.services.utility.Parent;
-import org.apache.slider.server.services.utility.SequenceService;
+import org.apache.slider.server.services.workflow.ForkedProcessService;
+import org.apache.slider.server.services.workflow.ServiceParent;
+import org.apache.slider.server.services.workflow.WorkflowSequenceService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,7 +56,7 @@
  * upstream
  */
 public abstract class AbstractProviderService
-    extends SequenceService
+    extends WorkflowSequenceService
     implements
     ProviderCore,
     SliderKeys,
@@ -66,6 +67,7 @@
   protected AgentRestOperations restOps;
   protected RegistryViewForProviders registry;
   protected ServiceInstanceData registryInstanceData;
+  protected AMViewForProviders amView;
   protected URL amWebAPI;
 
   public AbstractProviderService(String name) {
@@ -81,15 +83,20 @@
     return amState;
   }
 
+  public AMViewForProviders getAppMaster() {
+    return amView;
+  }
+
   public void setAmState(StateAccessForProviders amState) {
     this.amState = amState;
   }
 
   @Override
   public void bind(StateAccessForProviders stateAccessor,
-      RegistryViewForProviders reg) {
+      RegistryViewForProviders reg, AMViewForProviders amView) {
     this.amState = stateAccessor;
     this.registry = reg;
+    this.amView = amView;
   }
 
   @Override
@@ -129,16 +136,16 @@
 
   /**
    * No-op implementation of this method.
-   * 
+   *
    * {@inheritDoc}
    */
   @Override
   public void validateApplicationConfiguration(AggregateConf instance,
                                                File confDir,
                                                boolean secure) throws
-                                                               IOException,
+      IOException,
       SliderException {
-    
+
   }
 
   /**
@@ -169,7 +176,7 @@
       }
     }
     ForkedProcessService lastProc = latestProcess();
-    if (lastProc == null) {
+    if (lastProc == null || !lastProc.isProcessTerminated()) {
       return 0;
     } else {
       return lastProc.getExitCode();
@@ -181,7 +188,7 @@
   * @return the forked service
    */
   protected ForkedProcessService latestProcess() {
-    Service current = getCurrentService();
+    Service current = getActiveService();
     Service prev = getPreviousService();
 
     Service latest = current != null ? current : prev;
@@ -189,8 +196,8 @@
       return (ForkedProcessService) latest;
     } else {
       //its a composite object, so look inside it for a process
-      if (latest instanceof Parent) {
-        return getFPSFromParentService((Parent) latest);
+      if (latest instanceof ServiceParent) {
+        return getFPSFromParentService((ServiceParent) latest);
       } else {
         //no match
         return null;
@@ -201,11 +208,11 @@
 
   /**
    * Given a parent service, find the one that is a forked process
-   * @param parent parent
+   * @param serviceParent parent
    * @return the forked process service or null if there is none
    */
-  protected ForkedProcessService getFPSFromParentService(Parent parent) {
-    List<Service> services = parent.getServices();
+  protected ForkedProcessService getFPSFromParentService(ServiceParent serviceParent) {
+    List<Service> services = serviceParent.getServices();
     for (Service s : services) {
       if (s instanceof ForkedProcessService) {
         return (ForkedProcessService) s;
@@ -300,16 +307,17 @@
     for (Map.Entry<String, RegisteredEndpoint> endpoint : endpoints.entrySet()) {
       RegisteredEndpoint val = endpoint.getValue();
       if (val.type.equals(RegisteredEndpoint.TYPE_URL)) {
-          details.put(val.description, val.value);
+          details.put(val.description, val.address);
       }
     }
   }
   @Override
-  public void applyInitialRegistryDefinitions(URL amWebAPI,
-      ServiceInstanceData registryInstanceData) throws MalformedURLException,
+  public void applyInitialRegistryDefinitions(URL unsecureWebAPI,
+                                              URL secureWebAPI,
+                                              ServiceInstanceData registryInstanceData) throws MalformedURLException,
       IOException {
 
-      this.amWebAPI = amWebAPI;
+      this.amWebAPI = unsecureWebAPI;
     this.registryInstanceData = registryInstanceData;
   }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-core/src/main/java/org/apache/slider/providers/ProviderCompleted.java
similarity index 76%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-core/src/main/java/org/apache/slider/providers/ProviderCompleted.java
index 7af463d..f6ff4fd 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/ProviderCompleted.java
@@ -16,10 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.providers;
 
-public interface EventCallback {
+/**
+ * This is the callback triggered by the {@link ProviderCompletedCallable}
+ * when it generates a notification
+ */
+public interface ProviderCompleted {
   
-  public void eventCallbackEvent();
+  public void eventCallbackEvent(Object parameter);
   
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-core/src/main/java/org/apache/slider/providers/ProviderCompletedCallable.java
similarity index 61%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-core/src/main/java/org/apache/slider/providers/ProviderCompletedCallable.java
index 7af463d..47939c9 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/ProviderCompletedCallable.java
@@ -16,10 +16,23 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.providers;
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+import java.util.concurrent.Callable;
+
+public class ProviderCompletedCallable implements Callable<Object> {
+
+  private final ProviderCompleted callback;
+  private final Object parameter;
+
+  public ProviderCompletedCallable(ProviderCompleted callback, Object parameter) {
+    this.callback = callback;
+    this.parameter = parameter;
+  }
+
+  @Override
+  public Object call() throws Exception {
+    callback.eventCallbackEvent(parameter);
+    return parameter;
+  }
 }
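
The callable adapts the ProviderCompleted callback to java.util.concurrent, so a provider can fire the completion event on any executor (executor and parameter illustrative):

    // Hypothetical: deliver the completion callback from a worker thread.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    ProviderCompleted callback = new ProviderCompleted() {
      @Override
      public void eventCallbackEvent(Object parameter) {
        System.out.println("provider finished: " + parameter);
      }
    };
    Future<Object> done =
        executor.submit(new ProviderCompletedCallable(callback, "exit 0"));
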
diff --git a/slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
index 8d2462e..56e24e9 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
@@ -31,10 +31,10 @@
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.core.main.ExitCodeProvider;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
+import org.apache.slider.server.appmaster.AMViewForProviders;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations;
 import org.apache.slider.server.services.registry.RegistryViewForProviders;
-import org.apache.slider.server.services.utility.EventCallback;
 
 import java.io.File;
 import java.io.IOException;
@@ -81,7 +81,7 @@
   boolean exec(AggregateConf instanceDefinition,
                File confDir,
                Map<String, String> env,
-               EventCallback execInProgress) throws IOException,
+               ProviderCompleted execInProgress) throws IOException,
       SliderException;
 
   /**
@@ -139,9 +139,11 @@
    * bind operation -invoked before the service is started
    * @param stateAccessor interface offering read access to the state
    * @param registry
+   * @param amView view of the application master offered to providers
    */
   void bind(StateAccessForProviders stateAccessor,
-      RegistryViewForProviders registry);
+            RegistryViewForProviders registry,
+            AMViewForProviders amView);
 
   /**
    * Returns the agent rest operations interface.
@@ -157,10 +159,12 @@
 
   /**
    * Prior to going live -register the initial service registry data
-   * @param amWebAPI
+   * @param unsecureWebAPI the unsecured web API URL
+   * @param secureWebAPI the secured (HTTPS) web API URL
    * @param registryInstanceData
    */
-  void applyInitialRegistryDefinitions(URL amWebAPI,
-      ServiceInstanceData registryInstanceData) throws MalformedURLException,
+  void applyInitialRegistryDefinitions(URL unsecureWebAPI,
+                                       URL secureWebAPI,
+                                       ServiceInstanceData registryInstanceData) throws MalformedURLException,
       IOException;
 }
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
index 1946ebd..3835df6 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.slider.providers.agent;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.slider.api.OptionKeys;
@@ -34,8 +35,8 @@
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
+import org.apache.slider.providers.agent.application.metadata.Application;
 import org.apache.slider.providers.agent.application.metadata.Metainfo;
-import org.apache.slider.providers.agent.application.metadata.Service;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -93,8 +94,10 @@
     sliderFileSystem.verifyFileExists(appDefPath);
 
     String agentConf = instanceDefinition.getAppConfOperations().
-        getGlobalOptions().getMandatoryOption(AgentKeys.AGENT_CONF);
-    sliderFileSystem.verifyFileExists(new Path(agentConf));
+        getGlobalOptions().getOption(AgentKeys.AGENT_CONF, "");
+    if (StringUtils.isNotEmpty(agentConf)) {
+      sliderFileSystem.verifyFileExists(new Path(agentConf));
+    }
 
     String appHome = instanceDefinition.getAppConfOperations().
         getGlobalOptions().get(AgentKeys.PACKAGE_PATH);
@@ -152,7 +155,7 @@
       instanceDefinition.getAppConfOperations().
           getGlobalOptions().getMandatoryOption(AgentKeys.APP_DEF);
     } catch (BadConfigException bce) {
-      throw new BadConfigException("Application definition must be provided." + bce.getMessage());
+      throw new BadConfigException("Application definition must be provided. " + bce.getMessage());
     }
     String appDef = instanceDefinition.getAppConfOperations().
         getGlobalOptions().getMandatoryOption(AgentKeys.APP_DEF);
@@ -171,16 +174,7 @@
       throw new BadConfigException("Either agent package path " +
                                    AgentKeys.PACKAGE_PATH + " or image root " +
                                    OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH
-                                   + " must be provided");
-    }
-
-    try {
-      // Validate the agent config
-      instanceDefinition.getAppConfOperations().
-          getGlobalOptions().getMandatoryOption(AgentKeys.AGENT_CONF);
-    } catch (BadConfigException bce) {
-      throw new BadConfigException("Agent config "+ AgentKeys.AGENT_CONF 
-                                   + " property must be provided.");
+                                   + " must be provided.");
     }
   }
 
@@ -202,18 +196,25 @@
   public Set<String> getApplicationTags(SliderFileSystem fileSystem,
                                         String appDef) throws SliderException {
     Set<String> tags;
+    Metainfo metainfo;
     try {
-      Metainfo metainfo = AgentUtils.getApplicationMetainfo(fileSystem, appDef);
-      Service service = metainfo.getServices().get(0);
-      tags = new HashSet<>();
-      tags.add("Name: " + service.getName());
-      tags.add("Version: " + service.getVersion());
-      tags.add("Description: " + SliderUtils.truncate(service.getComment(), 80));
+      metainfo = AgentUtils.getApplicationMetainfo(fileSystem, appDef);
     } catch (IOException e) {
-      log.error("error retrieving metainfo from {}", appDef, e);
-      throw new SliderException("error retrieving metainfo", e);
+      log.error("Error retrieving metainfo from {}", appDef, e);
+      throw new SliderException("Error retrieving metainfo", e);
     }
 
+    if (metainfo == null) {
+      log.error("Error retrieving metainfo from {}", appDef);
+      throw new SliderException("Error parsing metainfo file, possibly bad structure.");
+    }
+
+    Application application = metainfo.getApplication();
+    tags = new HashSet<>();
+    tags.add("Name: " + application.getName());
+    tags.add("Version: " + application.getVersion());
+    tags.add("Description: " + SliderUtils.truncate(application.getComment(), 80));
+
     return tags;
   }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
index 7136fd9..31d09c4 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
@@ -28,10 +28,6 @@
   /**
    * {@value}
    */
-  String CONF_FILE = "agent.conf";
-  /**
-   * {@value}
-   */
   String REGION_SERVER = "regionserver";
   /**
    * What is the command for hbase to print a version: {@value}
@@ -71,6 +67,8 @@
   String ARG_LABEL = "--label";
   String ARG_HOST = "--host";
   String ARG_PORT = "--port";
+  String ARG_SECURED_PORT = "--secured_port";
+  String ARG_DEBUG = "--debug";
   String AGENT_MAIN_SCRIPT_ROOT = "./infra/agent/slider-agent/";
   String AGENT_MAIN_SCRIPT = "agent/main.py";
 
@@ -85,9 +83,11 @@
 
   String JAVA_HOME = "java_home";
   String PACKAGE_LIST = "package_list";
-  String COMPONENT_SCRIPT = "role.script";
   String WAIT_HEARTBEAT = "wait.heartbeat";
   String PYTHON_EXE = "python";
+  String CREATE_DEF_ZK_NODE = "create.default.zookeeper.node";
+  String HEARTBEAT_MONITOR_INTERVAL = "heartbeat.monitor.interval";
+  String AGENT_INSTANCE_DEBUG_DATA = "agent.instance.debug.data";
 }
 
 
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentLaunchParameter.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentLaunchParameter.java
new file mode 100644
index 0000000..b839e58
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentLaunchParameter.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers.agent;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+class AgentLaunchParameter {
+  public static final Logger log =
+      LoggerFactory.getLogger(AgentLaunchParameter.class);
+  private static final String DEFAULT_PARAMETER = "";
+  private static final String ANY_COMPONENT = "ANY";
+  private static final String NONE_VALUE = "NONE";
+  private final Map<String, CommandTracker> launchParameterTracker;
+
+  public AgentLaunchParameter(String parameters) {
+    launchParameterTracker = parseExpectedLaunchParameters(parameters);
+  }
+
+  /**
+   * Get command for the component type
+   *
+   * @param componentName name of the component
+   *
+   * @return the next launch parameter for the component, or "" if none is configured
+   */
+  public String getNextLaunchParameter(String componentName) {
+    if (launchParameterTracker != null) {
+      if (launchParameterTracker.containsKey(componentName)
+          || launchParameterTracker.containsKey(ANY_COMPONENT)) {
+        synchronized (this) {
+          CommandTracker indexTracker = null;
+          if (launchParameterTracker.containsKey(componentName)) {
+            indexTracker = launchParameterTracker.get(componentName);
+          } else {
+            indexTracker = launchParameterTracker.get(ANY_COMPONENT);
+          }
+
+          return indexTracker.getNextCommand();
+        }
+      }
+    }
+
+    return DEFAULT_PARAMETER;
+  }
+
+  /**
+   * Parse launch parameters of the form ANY:PARAM_FOR_FIRST:PARAM_FOR_SECOND:...:PARAM_FOR_REST|HBASE_MASTER:...
+   *
+   * E.g. ANY:DO_NOT_REGISTER:DO_NOT_HEARTBEAT:NONE For any container, first one gets DO_NOT_REGISTER, second one gets
+   * DO_NOT_HEARTBEAT, then all of the rest get nothing
+   *
+   * E.g. HBASE_MASTER:FAIL_AFTER_START:NONE For HBASE_MASTER, first one gets FAIL_AFTER_START then "" for all
+   *
+   * @param launchParameters the encoded launch parameter string
+   *
+   * @return a map of component name to command tracker, or null if nothing was parsed
+   */
+  Map<String, CommandTracker> parseExpectedLaunchParameters(String launchParameters) {
+    Map<String, CommandTracker> trackers = null;
+    if (launchParameters != null && launchParameters.length() > 0) {
+      String[] componentSpecificParameters = launchParameters.split(Pattern.quote("|"));
+      for (String componentSpecificParameter : componentSpecificParameters) {
+        if (componentSpecificParameter.length() != 0) {
+          String[] parameters = componentSpecificParameter.split(Pattern.quote(":"));
+
+          if (parameters.length > 1 && parameters[0].length() > 0) {
+
+            for (int index = 1; index < parameters.length; index++) {
+              if (parameters[index].equals(NONE_VALUE)) {
+                parameters[index] = DEFAULT_PARAMETER;
+              }
+            }
+
+            if (trackers == null) {
+              trackers = new HashMap<>(10);
+            }
+            String componentName = parameters[0];
+            CommandTracker tracker = new CommandTracker(Arrays.copyOfRange(parameters, 1, parameters.length));
+            trackers.put(componentName, tracker);
+          }
+        }
+      }
+    }
+
+    return trackers;
+  }
+
+  class CommandTracker {
+    private final int maxIndex;
+    private final String[] launchCommands;
+    private int currentIndex;
+
+    CommandTracker(String[] launchCommands) {
+      this.currentIndex = 0;
+      this.maxIndex = launchCommands.length - 1;
+      this.launchCommands = launchCommands;
+    }
+
+    String getNextCommand() {
+      String retVal = launchCommands[currentIndex];
+      if (currentIndex != maxIndex) {
+        currentIndex++;
+      }
+
+      return retVal;
+    }
+  }
+}
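
Given the format documented in parseExpectedLaunchParameters(), the tracker hands out a component's parameters in order and then repeats the last one. Walking the javadoc's own example:

    // First agent of any component gets DO_NOT_REGISTER, the second
    // DO_NOT_HEARTBEAT, and all later ones "" (NONE maps to the default).
    AgentLaunchParameter alp =
        new AgentLaunchParameter("ANY:DO_NOT_REGISTER:DO_NOT_HEARTBEAT:NONE");
    alp.getNextLaunchParameter("HBASE_MASTER"); // "DO_NOT_REGISTER"
    alp.getNextLaunchParameter("HBASE_MASTER"); // "DO_NOT_HEARTBEAT"
    alp.getNextLaunchParameter("HBASE_MASTER"); // ""
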
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
index 6d3d0e1..c1719b7 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
@@ -46,14 +46,15 @@
 import org.apache.slider.core.registry.info.RegisteredEndpoint;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderCore;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
+import org.apache.slider.providers.agent.application.metadata.Application;
 import org.apache.slider.providers.agent.application.metadata.Component;
 import org.apache.slider.providers.agent.application.metadata.Export;
 import org.apache.slider.providers.agent.application.metadata.ExportGroup;
 import org.apache.slider.providers.agent.application.metadata.Metainfo;
-import org.apache.slider.providers.agent.application.metadata.Service;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentCommandType;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations;
@@ -66,7 +67,6 @@
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse;
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationStatus;
 import org.apache.slider.server.appmaster.web.rest.agent.StatusCommand;
-import org.apache.slider.server.services.utility.EventCallback;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,7 +91,9 @@
 
 import static org.apache.slider.server.appmaster.web.rest.RestPaths.SLIDER_PATH_AGENTS;
 
-/** This class implements the server-side aspects of an agent deployment */
+/** This class implements the server-side logic for application deployment
+ *  through a Slider application package
+ */
 public class AgentProviderService extends AbstractProviderService implements
     ProviderCore,
     AgentKeys,
@@ -106,13 +108,16 @@
   private static final String GLOBAL_CONFIG_TAG = "global";
   private static final String LOG_FOLDERS_TAG = "LogFolders";
   private static final int MAX_LOG_ENTRIES = 20;
+  private static final int DEFAULT_HEARTBEAT_MONITOR_INTERVAL = 60 * 1000;
   private final Object syncLock = new Object();
   private final Map<String, String> allocatedPorts = new ConcurrentHashMap<>();
+  private int heartbeatMonitorInterval = 0;
   private AgentClientProvider clientProvider;
-  private Map<String, ComponentInstanceState> componentStatuses = new HashMap<>();
+  private Map<String, ComponentInstanceState> componentStatuses = new ConcurrentHashMap<>();
   private AtomicInteger taskId = new AtomicInteger(0);
   private volatile Metainfo metainfo = null;
   private ComponentCommandOrder commandOrder = null;
+  private HeartbeatMonitor monitor;
   private Map<String, String> workFolders =
       Collections.synchronizedMap(new LinkedHashMap<String, String>(MAX_LOG_ENTRIES, 0.75f, false) {
         protected boolean removeEldestEntry(Map.Entry eldest) {
@@ -120,10 +125,15 @@
         }
       });
   private Boolean canAnyMasterPublish = null;
+  private AgentLaunchParameter agentLaunchParameter = null;
 
+  /**
+   * Create an instance of AgentProviderService
+   */
   public AgentProviderService() {
     super("AgentProviderService");
     setAgentRestOperations(this);
+    setHeartbeatMonitorInterval(DEFAULT_HEARTBEAT_MONITOR_INTERVAL);
   }
 
   @Override
@@ -170,13 +180,18 @@
     if (metainfo == null) {
       synchronized (syncLock) {
         if (metainfo == null) {
+          readAndSetHeartbeatMonitoringInterval(instanceDefinition);
+          initializeAgentDebugCommands(instanceDefinition);
+
           metainfo = getApplicationMetainfo(fileSystem, appDef);
-          if (metainfo == null || metainfo.getServices() == null || metainfo.getServices().size() == 0) {
+          if (metainfo == null || metainfo.getApplication() == null) {
             log.error("metainfo.xml is unavailable or malformed at {}.", appDef);
             throw new SliderException("metainfo.xml is required in app package.");
           }
 
-          commandOrder = new ComponentCommandOrder(metainfo.getServices().get(0).getCommandOrder());
+          commandOrder = new ComponentCommandOrder(metainfo.getApplication().getCommandOrder());
+          monitor = new HeartbeatMonitor(this, getHeartbeatMonitorInterval());
+          monitor.start();
         }
       }
     }
@@ -194,6 +209,8 @@
     launcher.setEnv("AGENT_LOG_ROOT", logDir);
     log.info("AGENT_LOG_ROOT set to {}", logDir);
     launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
+    // for 2-Way SSL
+    launcher.setEnv(SLIDER_PASSPHRASE, SliderKeys.PASSPHRASE);
 
     //local resources
 
@@ -219,11 +236,13 @@
     launcher.addLocalResource(AgentKeys.APP_DEFINITION_DIR, appDefRes);
 
     String agentConf = instanceDefinition.getAppConfOperations().
-        getGlobalOptions().getMandatoryOption(AgentKeys.AGENT_CONF);
-    LocalResource agentConfRes = fileSystem.createAmResource(
-        fileSystem.getFileSystem().resolvePath(new Path(agentConf)),
-        LocalResourceType.FILE);
-    launcher.addLocalResource(AgentKeys.AGENT_CONFIG_FILE, agentConfRes);
+        getGlobalOptions().getOption(AgentKeys.AGENT_CONF, "");
+    if (org.apache.commons.lang.StringUtils.isNotEmpty(agentConf)) {
+      LocalResource agentConfRes = fileSystem.createAmResource(fileSystem
+          .getFileSystem().resolvePath(new Path(agentConf)),
+          LocalResourceType.FILE);
+      launcher.addLocalResource(AgentKeys.AGENT_CONFIG_FILE, agentConfRes);
+    }
 
     String agentVer = instanceDefinition.getAppConfOperations().
         getGlobalOptions().getOption(AgentKeys.AGENT_VERSION, null);
@@ -244,7 +263,15 @@
     operation.add(ARG_HOST);
     operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_HOSTNAME));
     operation.add(ARG_PORT);
-    operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_WEB_PORT));
+    operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_AGENT_PORT));
+    operation.add(ARG_SECURED_PORT);
+    operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_SECURED_AGENT_PORT));
+
+    String debugCmd = agentLaunchParameter.getNextLaunchParameter(role);
+    if (debugCmd != null && debugCmd.length() != 0) {
+      operation.add(ARG_DEBUG);
+      operation.add(debugCmd);
+    }
 
     launcher.addCommand(operation.build());
 
@@ -256,15 +283,66 @@
                               getClusterInfoPropertyValue(OptionKeys.APPLICATION_NAME)));
   }
 
+  /**
+   * Reads and sets the heartbeat monitoring interval. If a bad value is provided, log it and fall back to the default.
+   * @param instanceDefinition the instance definition to read from
+   */
+  private void readAndSetHeartbeatMonitoringInterval(AggregateConf instanceDefinition) {
+    String hbMonitorInterval = instanceDefinition.getAppConfOperations().
+        getGlobalOptions().getOption(AgentKeys.HEARTBEAT_MONITOR_INTERVAL,
+                                     Integer.toString(DEFAULT_HEARTBEAT_MONITOR_INTERVAL));
+    try {
+      setHeartbeatMonitorInterval(Integer.parseInt(hbMonitorInterval));
+    } catch (NumberFormatException e) {
+      log.warn(
+          "Bad value {} for {}. Defaulting to {}",
+          hbMonitorInterval,
+          HEARTBEAT_MONITOR_INTERVAL,
+          DEFAULT_HEARTBEAT_MONITOR_INTERVAL);
+    }
+  }
+
+  /**
+   * Reads and initializes the agent debug launch parameters.
+   * @param instanceDefinition the instance definition to read from
+   */
+  private void initializeAgentDebugCommands(AggregateConf instanceDefinition) {
+    String launchParameterStr = instanceDefinition.getAppConfOperations().
+        getGlobalOptions().getOption(AgentKeys.AGENT_INSTANCE_DEBUG_DATA, "");
+    agentLaunchParameter = new AgentLaunchParameter(launchParameterStr);
+  }
+
+  @VisibleForTesting
   protected Metainfo getMetainfo() {
     return this.metainfo;
   }
 
+  @VisibleForTesting
+  protected Map<String, ComponentInstanceState> getComponentStatuses() {
+    return componentStatuses;
+  }
+
+  @VisibleForTesting
   protected Metainfo getApplicationMetainfo(SliderFileSystem fileSystem,
                                             String appDef) throws IOException {
     return AgentUtils.getApplicationMetainfo(fileSystem, appDef);
   }
 
+  @VisibleForTesting
+  protected void setHeartbeatMonitorInterval(int heartbeatMonitorInterval) {
+    this.heartbeatMonitorInterval = heartbeatMonitorInterval;
+  }
+
+  private int getHeartbeatMonitorInterval() {
+    return this.heartbeatMonitorInterval;
+  }
+
+  /**
+   * Publish a named config bag that may contain name-value pairs for app
+   * configurations such as hbase-site.
+   * @param name name of the configuration
+   * @param description human-readable description
+   * @param entries name-value pairs to publish
+   */
   protected void publishComponentConfiguration(String name, String description,
                                                Iterable<Map.Entry<String, String>> entries) {
     PublishedConfiguration pubconf = new PublishedConfiguration();
@@ -274,6 +352,10 @@
     getAmState().getPublishedSliderConfigurations().put(name, pubconf);
   }
 
+  /**
+   * Get a mapping from role name to the cluster nodes hosting that role,
+   * one entry per container.
+   * @return map of role name to its ClusterNode entries
+   */
   protected Map<String, Map<String, ClusterNode>> getRoleClusterNodeMapping() {
     amState.refreshClusterStatus();
     return (Map<String, Map<String, ClusterNode>>)
@@ -293,6 +375,25 @@
   }
 
   /**
+   * Lost heartbeat from the container: release it and ask for a replacement.
+   *
+   * @param label label of the container to release
+   *
+   * @return true if the release was requested successfully
+   */
+  protected boolean releaseContainer(String label) {
+    componentStatuses.remove(label);
+    try {
+      getAppMaster().refreshContainer(getContainerId(label), true);
+    } catch (SliderException e) {
+      log.info("Error while requesting container release for {}. Message: {}", label, e.getMessage());
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
    * Run this service
    *
    * @param instanceDefinition component description
@@ -307,7 +408,7 @@
   public boolean exec(AggregateConf instanceDefinition,
                       File confDir,
                       Map<String, String> env,
-                      EventCallback execInProgress) throws
+                      ProviderCompleted execInProgress) throws
       IOException,
       SliderException {
 
@@ -329,12 +430,18 @@
     return true;
   }
 
+  /**
+   * Handle registration calls from the agents.
+   * @param registration the registration request
+   * @return the registration response
+   */
   @Override
   public RegistrationResponse handleRegistration(Register registration) {
     RegistrationResponse response = new RegistrationResponse();
     String label = registration.getHostname();
     if (componentStatuses.containsKey(label)) {
       response.setResponseStatus(RegistrationStatus.OK);
+      componentStatuses.get(label).setLastHeartbeat(System.currentTimeMillis());
     } else {
       response.setResponseStatus(RegistrationStatus.FAILED);
       response.setLog("Label not recognized.");
@@ -342,31 +449,11 @@
     return response;
   }
 
-  private Command getCommand(String commandVal) {
-    if (commandVal.equals(Command.START.toString())) {
-      return Command.START;
-    }
-    if (commandVal.equals(Command.INSTALL.toString())) {
-      return Command.INSTALL;
-    }
-
-    return Command.NOP;
-  }
-
-  private CommandResult getCommandResult(String commandResVal) {
-    if (commandResVal.equals(CommandResult.COMPLETED.toString())) {
-      return CommandResult.COMPLETED;
-    }
-    if (commandResVal.equals(CommandResult.FAILED.toString())) {
-      return CommandResult.FAILED;
-    }
-    if (commandResVal.equals(CommandResult.IN_PROGRESS.toString())) {
-      return CommandResult.IN_PROGRESS;
-    }
-
-    throw new IllegalArgumentException("Unrecognized value " + commandResVal);
-  }
-
+  /**
+   * Handle a heartbeat from an agent and build the response.
+   * @param heartBeat the incoming heartbeat
+   * @return the heartbeat response, possibly carrying the next command
+   */
   @Override
   public HeartBeatResponse handleHeartBeat(HeartBeat heartBeat) {
     HeartBeatResponse response = new HeartBeatResponse();
@@ -391,6 +478,7 @@
 
     Boolean isMaster = isMaster(roleName);
     ComponentInstanceState componentStatus = componentStatuses.get(label);
+    componentStatus.setLastHeartbeat(System.currentTimeMillis());
     // If no Master can explicitly publish then publish if it's a master
     // Otherwise, wait till the master that can publish is ready
     if (isMaster &&
@@ -408,8 +496,8 @@
           this.allocatedPorts.put(port.getKey(), port.getValue());
         }
       }
-      CommandResult result = getCommandResult(report.getStatus());
-      Command command = getCommand(report.getRoleCommand());
+      CommandResult result = CommandResult.getCommandResult(report.getStatus());
+      Command command = Command.getCommand(report.getRoleCommand());
       componentStatus.applyCommandResult(result, command);
       log.info("Component operation. Status: {}", result);
 
@@ -461,14 +549,25 @@
     return response;
   }
 
+  /**
+   * Format the folder locations before publishing them in the registry service.
+   * @param folders map of folder name to location
+   * @param containerId id of the container the folders belong to
+   * @param hostFqdn FQDN of the host running the container
+   */
   private void processFolderPaths(Map<String, String> folders, String containerId, String hostFqdn) {
-    for(String key : folders.keySet()) {
+    for (String key : folders.keySet()) {
       workFolders.put(String.format("%s-%s-%s", hostFqdn, containerId, key), folders.get(key));
     }
 
     publishComponentConfiguration(LOG_FOLDERS_TAG, LOG_FOLDERS_TAG, (new HashMap<>(this.workFolders)).entrySet());
   }
 
+  /**
+   * Process the status returned for a component instance.
+   * @param heartBeat the heartbeat carrying the status
+   * @param componentStatus state of the component instance to update
+   */
   protected void processReturnedStatus(HeartBeat heartBeat, ComponentInstanceState componentStatus) {
     List<ComponentStatus> statuses = heartBeat.getComponentStatus();
     if (statuses != null && !statuses.isEmpty()) {
@@ -481,8 +580,8 @@
             publishComponentConfiguration(key, key, configs.entrySet());
           }
 
-          Service service = getMetainfo().getServices().get(0);
-          List<ExportGroup> exportGroups = service.getExportGroups();
+          Application application = getMetainfo().getApplication();
+          List<ExportGroup> exportGroups = application.getExportGroups();
           if (exportGroups != null && !exportGroups.isEmpty()) {
 
             String configKeyFormat = "${site.%s.%s}";
@@ -529,14 +628,21 @@
     }
   }
 
+  /**
+   * Extract the command script path for a role from the application metainfo.
+   *
+   * @param roleName name of the role
+   *
+   * @return the script path, or null if the role is not found
+   */
   protected String getScriptPathFromMetainfo(String roleName) {
     String scriptPath = null;
-    List<Service> services = getMetainfo().getServices();
-    if (services.size() != 1) {
-      log.error("Malformed app definition: Expect only one service in the metainfo.xml");
+    Application application = getMetainfo().getApplication();
+    if (application == null) {
+      log.error("Malformed app definition: Expect application as the top level element for metainfo.xml");
+      return scriptPath;
     }
-    Service service = services.get(0);
-    for (Component component : service.getComponents()) {
+    for (Component component : application.getComponents()) {
       if (component.getName().equals(roleName)) {
         scriptPath = component.getCommandScript().getScript();
         break;
@@ -545,13 +651,19 @@
     return scriptPath;
   }
 
+  /**
+   * Is the role of category MASTER?
+   *
+   * @param roleName name of the role
+   *
+   * @return true if the role's category is MASTER
+   */
   protected boolean isMaster(String roleName) {
-    List<Service> services = getMetainfo().getServices();
-    if (services.size() != 1) {
-      log.error("Malformed app definition: Expect only one service in the metainfo.xml");
+    Application application = getMetainfo().getApplication();
+    if (application == null) {
+      log.error("Malformed app definition: Expect application as the top level element for metainfo.xml");
     } else {
-      Service service = services.get(0);
-      for (Component component : service.getComponents()) {
+      for (Component component : application.getComponents()) {
         if (component.getName().equals(roleName)) {
           if (component.getCategory().equals("MASTER")) {
             return true;
@@ -564,13 +676,19 @@
     return false;
   }
 
+  /**
+   * Can the role publish configuration?
+   *
+   * @param roleName name of the role
+   *
+   * @return true if the role is marked as able to publish its configuration
+   */
   protected boolean canPublishConfig(String roleName) {
-    List<Service> services = getMetainfo().getServices();
-    if (services.size() != 1) {
-      log.error("Malformed app definition: Expect only one service in the metainfo.xml");
+    Application application = getMetainfo().getApplication();
+    if (application == null) {
+      log.error("Malformed app definition: Expect application as the top level element for metainfo.xml");
     } else {
-      Service service = services.get(0);
-      for (Component component : service.getComponents()) {
+      for (Component component : application.getComponents()) {
         if (component.getName().equals(roleName)) {
           return Boolean.TRUE.toString().equals(component.getPublishConfig());
         }
@@ -579,14 +697,17 @@
     return false;
   }
 
+  /**
+   * Can any master publish config explicitly? If not, an arbitrary master is used.
+   * @return true if at least one MASTER component is marked to publish config
+   */
   protected boolean canAnyMasterPublishConfig() {
     if (canAnyMasterPublish == null) {
-      List<Service> services = getMetainfo().getServices();
-      if (services.size() != 1) {
-        log.error("Malformed app definition: Expect only one service in the metainfo.xml");
+      Application application = getMetainfo().getApplication();
+      if (application == null) {
+        log.error("Malformed app definition: Expect application as root element in the metainfo.xml");
       } else {
-        Service service = services.get(0);
-        for (Component component : service.getComponents()) {
+        for (Component component : application.getComponents()) {
           if (Boolean.TRUE.toString().equals(component.getPublishConfig()) &&
               component.getCategory().equals("MASTER")) {
             canAnyMasterPublish = true;
@@ -609,6 +730,15 @@
     return label.substring(0, label.indexOf(LABEL_MAKER));
   }
 
+  /**
+   * Add an install command to the heartbeat response.
+   * @param roleName name of the role to install
+   * @param containerId id of the target container
+   * @param response heartbeat response to populate
+   * @param scriptPath path to the command script
+   * @throws SliderException
+   */
+  @VisibleForTesting
   protected void addInstallCommand(String roleName, String containerId, HeartBeatResponse response, String scriptPath)
       throws SliderException {
     assert getAmState().isApplicationLive();
@@ -657,7 +787,7 @@
     return cmdParams;
   }
 
-  private void setInstallCommandConfigurations(ExecutionCommand cmd) {
+  private void setInstallCommandConfigurations(ExecutionCommand cmd) throws SliderException {
     ConfTreeOperations appConf = getAmState().getAppConfSnapshot();
     Map<String, Map<String, String>> configurations = buildCommandConfigurations(appConf);
     cmd.setConfigurations(configurations);
@@ -751,7 +881,8 @@
     return this.allocatedPorts;
   }
 
-  private Map<String, Map<String, String>> buildCommandConfigurations(ConfTreeOperations appConf) {
+  private Map<String, Map<String, String>> buildCommandConfigurations(ConfTreeOperations appConf)
+      throws SliderException {
 
     Map<String, Map<String, String>> configurations = new TreeMap<>();
     Map<String, String> tokens = getStandardTokenMap(appConf);
@@ -767,12 +898,17 @@
     return configurations;
   }
 
-  private Map<String, String> getStandardTokenMap(ConfTreeOperations appConf) {
+  private Map<String, String> getStandardTokenMap(ConfTreeOperations appConf) throws SliderException {
     Map<String, String> tokens = new HashMap<>();
     String nnuri = appConf.get("site.fs.defaultFS");
     tokens.put("${NN_URI}", nnuri);
     tokens.put("${NN_HOST}", URI.create(nnuri).getHost());
     tokens.put("${ZK_HOST}", appConf.get(OptionKeys.ZOOKEEPER_HOSTS));
+    tokens.put("${DEF_ZK_PATH}", appConf.get(OptionKeys.ZOOKEEPER_PATH));
+    tokens.put("${DEFAULT_DATA_DIR}", getAmState()
+        .getInternalsSnapshot()
+        .getGlobalOptions()
+        .getMandatoryOption(OptionKeys.INTERNAL_DATA_DIR_PATH));
     return tokens;
   }
 
@@ -854,16 +990,20 @@
   }
 
   @Override
-  public void applyInitialRegistryDefinitions(URL amWebAPI,
-      ServiceInstanceData instanceData) throws IOException {
-    super.applyInitialRegistryDefinitions(amWebAPI, instanceData);
+  public void applyInitialRegistryDefinitions(URL unsecureWebAPI,
+                                              URL secureWebAPI,
+                                              ServiceInstanceData instanceData) throws IOException {
+    super.applyInitialRegistryDefinitions(unsecureWebAPI,
+                                          secureWebAPI,
+                                          instanceData
+    );
 
     try {
       instanceData.internalView.endpoints.put(
           CustomRegistryConstants.AGENT_REST_API,
           new RegisteredEndpoint(
-              new URL(amWebAPI, SLIDER_PATH_AGENTS),
-              "Agent REST API") );
+              new URL(secureWebAPI, SLIDER_PATH_AGENTS),
+              "Agent REST API"));
     } catch (URISyntaxException e) {
       throw new IOException(e);
     }
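
The heartbeat-interval handling above follows a parse-with-default pattern: read the
option as a string, attempt the conversion, and fall back to the compiled-in default
on a NumberFormatException. A minimal, self-contained sketch of that pattern
(illustrative only, not Slider code):

    // Parse a configured integer, logging and falling back on bad input.
    static int parseIntOrDefault(String value, int defaultValue, org.slf4j.Logger log) {
      try {
        return Integer.parseInt(value);
      } catch (NumberFormatException e) {
        log.warn("Bad value {}; defaulting to {}", value, defaultValue);
        return defaultValue;
      }
    }
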
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
index d8aefc6..281895a 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
@@ -18,7 +18,6 @@
 
 package org.apache.slider.providers.agent;
 
-import org.apache.slider.common.SliderKeys;
 import org.apache.slider.providers.ProviderRole;
 
 import java.util.ArrayList;
@@ -27,22 +26,11 @@
 public class AgentRoles {
 
   /**
-   * List of roles
+   * List of roles. The Agent provider has no roles by default; all roles are
+   * read from the application specification.
    */
   protected static final List<ProviderRole> ROLES =
-    new ArrayList<ProviderRole>();
-
-  public static final int KEY_NODE =
-                                 SliderKeys.ROLE_AM_PRIORITY_INDEX + 1;
-    /**
-     * Initialize role list
-     */
-/*
-    static {
-      ROLES.add(new ProviderRole(AgentKeys.ROLE_NODE, KEY_NODE));
-  }
-*/
-
+      new ArrayList<ProviderRole>();
 
   public static List<ProviderRole> getRoles() {
     return ROLES;
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/Command.java b/slider-core/src/main/java/org/apache/slider/providers/agent/Command.java
index 541dcc2..cbeb69d 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/Command.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/Command.java
@@ -22,5 +22,16 @@
 public enum Command {
   NOP,      // do nothing
   INSTALL,  // Install the component
-  START     // Start the component
+  START;    // Start the component
+
+  public static Command getCommand(String commandVal) {
+    if (commandVal.equals(Command.START.toString())) {
+      return Command.START;
+    }
+    if (commandVal.equals(Command.INSTALL.toString())) {
+      return Command.INSTALL;
+    }
+
+    return Command.NOP;
+  }
 }
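
Since Command does not override toString(), each constant prints as its name, so the
if-chain above behaves like an Enum.valueOf lookup with a NOP fallback. A hedged
equivalent sketch (assuming a non-null input and no toString override):

    public static Command getCommandSketch(String commandVal) {
      try {
        return Command.valueOf(commandVal);
      } catch (IllegalArgumentException e) {
        return Command.NOP;  // unrecognized commands are treated as no-ops
      }
    }

CommandResult.getCommandResult() below has the same shape but deliberately throws
IllegalArgumentException for unrecognized values instead of defaulting.
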
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/CommandResult.java b/slider-core/src/main/java/org/apache/slider/providers/agent/CommandResult.java
index f318096..35d9116 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/CommandResult.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/CommandResult.java
@@ -22,5 +22,19 @@
 public enum CommandResult {
   IN_PROGRESS,  // Command is in progress
   COMPLETED,    // Command has successfully completed
-  FAILED        // Command has failed
+  FAILED;       // Command has failed
+
+  public static CommandResult getCommandResult(String commandResVal) {
+    if (commandResVal.equals(CommandResult.COMPLETED.toString())) {
+      return CommandResult.COMPLETED;
+    }
+    if (commandResVal.equals(CommandResult.FAILED.toString())) {
+      return CommandResult.FAILED;
+    }
+    if (commandResVal.equals(CommandResult.IN_PROGRESS.toString())) {
+      return CommandResult.IN_PROGRESS;
+    }
+
+    throw new IllegalArgumentException("Unrecognized value " + commandResVal);
+  }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/ComponentInstanceState.java b/slider-core/src/main/java/org/apache/slider/providers/agent/ComponentInstanceState.java
index 2ad16af..60a6f82 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/ComponentInstanceState.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/ComponentInstanceState.java
@@ -37,6 +37,8 @@
   private State targetState = State.STARTED;
   private int failuresSeen = 0;
   private Boolean configReported = false;
+  private long lastHeartbeat = 0;
+  private ContainerState containerState;
 
   public ComponentInstanceState(String compName,
                                 String containerId,
@@ -44,6 +46,8 @@
     this.compName = compName;
     this.containerId = containerId;
     this.applicationId = applicationId;
+    this.containerState = ContainerState.INIT;
+    this.lastHeartbeat = System.currentTimeMillis();
   }
 
   public String getCompName() {
@@ -58,6 +62,26 @@
     this.configReported = configReported;
   }
 
+  public ContainerState getContainerState() {
+    return containerState;
+  }
+
+  public void setContainerState(ContainerState containerState) {
+    this.containerState = containerState;
+  }
+
+  public long getLastHeartbeat() {
+    return lastHeartbeat;
+  }
+
+  public void setLastHeartbeat(long lastHeartbeat) {
+    this.lastHeartbeat = lastHeartbeat;
+    if (this.containerState == ContainerState.UNHEALTHY ||
+       this.containerState == ContainerState.INIT) {
+      this.containerState = ContainerState.HEALTHY;
+    }
+  }
+
   public void commandIssued(Command command) {
     Command expected = getNextCommand();
     if (expected != command) {
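
A minimal usage sketch of the new heartbeat fields (the constructor arguments are
illustrative): a fresh instance starts out in INIT, and the first recorded heartbeat
promotes it to HEALTHY:

    ComponentInstanceState state =
        new ComponentInstanceState("HBASE_MASTER", "container_01", "app_01");
    assert state.getContainerState() == ContainerState.INIT;
    state.setLastHeartbeat(System.currentTimeMillis());
    assert state.getContainerState() == ContainerState.HEALTHY;
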
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/ContainerState.java b/slider-core/src/main/java/org/apache/slider/providers/agent/ContainerState.java
new file mode 100644
index 0000000..0394ba2
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/ContainerState.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers.agent;
+
+/** The states a container hosting a component instance can be in. */
+public enum ContainerState {
+  INIT,            // Container is not yet activated
+  HEALTHY,         // Agent is heartbeating
+  UNHEALTHY,       // Container is unhealthy - no heartbeat for some interval
+  HEARTBEAT_LOST;  // Container is lost - request a new instance
+
+  /**
+   * Indicates whether or not it is a valid state to produce a command.
+   *
+   * @return true if command can be issued for this state.
+   */
+  public boolean canIssueCommands() {
+    switch (this) {
+      case HEALTHY:
+        return true;
+      default:
+        return false;
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/HeartbeatMonitor.java b/slider-core/src/main/java/org/apache/slider/providers/agent/HeartbeatMonitor.java
new file mode 100644
index 0000000..3aeff66
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/HeartbeatMonitor.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.agent;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+/** Monitors the container state and heartbeats. */
+public class HeartbeatMonitor implements Runnable {
+  protected static final Logger log =
+      LoggerFactory.getLogger(HeartbeatMonitor.class);
+  private final int threadWakeupInterval; // monitor wake-up interval, in milliseconds
+  private final AgentProviderService provider;
+  private volatile boolean shouldRun = true;
+  private Thread monitorThread = null;
+
+  public HeartbeatMonitor(AgentProviderService provider, int threadWakeupInterval) {
+    this.provider = provider;
+    this.threadWakeupInterval = threadWakeupInterval;
+  }
+
+  public void shutdown() {
+    shouldRun = false;
+  }
+
+  public void start() {
+    log.info("Starting heartbeat monitor with interval {}", threadWakeupInterval);
+    monitorThread = new Thread(this);
+    monitorThread.start();
+  }
+
+  void join(long millis) throws InterruptedException {
+    if (isAlive()) {
+      monitorThread.join(millis);
+    }
+  }
+
+  public boolean isAlive() {
+    if (monitorThread != null) {
+      return monitorThread.isAlive();
+    }
+    return false;
+  }
+
+  @Override
+  public void run() {
+    while (shouldRun) {
+      try {
+        log.debug("Putting monitor to sleep for " + threadWakeupInterval + " " +
+                  "milliseconds");
+        Thread.sleep(threadWakeupInterval);
+        doWork();
+      } catch (InterruptedException ex) {
+        log.warn("Scheduler thread is interrupted going to stop", ex);
+        shouldRun = false;
+      } catch (Exception ex) {
+        log.warn("Exception received", ex);
+      } catch (Throwable t) {
+        log.warn("ERROR", t);
+      }
+    }
+  }
+
+  /**
+   * Every interval the current state of each container is checked. If the state is INIT or HEALTHY and no
+   * heartbeat was received in the last check interval, the container is marked UNHEALTHY. INIT means the agent
+   * was started but has not communicated at all; HEALTHY means the AM has received heartbeats. After a further
+   * interval spent UNHEALTHY the container is declared HEARTBEAT_LOST and released.
+   */
+  private void doWork() {
+    Map<String, ComponentInstanceState> componentStatuses = provider.getComponentStatuses();
+    if (componentStatuses != null) {
+      // Iterate over a snapshot of the keys: releaseContainer() may remove
+      // entries from the underlying map while this loop runs.
+      for (String containerLabel : new ArrayList<String>(componentStatuses.keySet())) {
+        ComponentInstanceState componentInstanceState = componentStatuses.get(containerLabel);
+        long timeSinceLastHeartbeat = System.currentTimeMillis() - componentInstanceState.getLastHeartbeat();
+
+        if (timeSinceLastHeartbeat > threadWakeupInterval) {
+          if (componentInstanceState.getContainerState() == ContainerState.HEALTHY ||
+              componentInstanceState.getContainerState() == ContainerState.INIT) {
+            componentInstanceState.setContainerState(ContainerState.UNHEALTHY);
+            log.warn("Component {} marked UNHEALTHY. Last heartbeat received at {} approx. {} ms. back.",
+                     containerLabel, componentInstanceState.getLastHeartbeat(),
+                     timeSinceLastHeartbeat);
+            continue;
+          }
+          if (componentInstanceState.getContainerState() == ContainerState.UNHEALTHY
+              && timeSinceLastHeartbeat > threadWakeupInterval * 2) {
+            componentInstanceState.setContainerState(ContainerState.HEARTBEAT_LOST);
+            log.warn("Component {} marked HEARTBEAT_LOST. Last heartbeat received at {} approx. {} ms. back.",
+                     containerLabel, componentInstanceState.getLastHeartbeat(),
+                     timeSinceLastHeartbeat);
+            this.provider.releaseContainer(containerLabel);
+            continue;
+          }
+        }
+      }
+    }
+  }
+}
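
A sketch of how the monitor is expected to be wired up (the interval is in
milliseconds; provider is assumed to be an initialized AgentProviderService, and
join() is package-private, so this would run from the same package):

    HeartbeatMonitor monitor = new HeartbeatMonitor(provider, 60 * 1000);
    monitor.start();          // spawns the monitoring thread
    // ... on shutdown:
    monitor.shutdown();       // ask the loop to exit after its current sleep
    monitor.join(10 * 1000);  // wait up to ten seconds for the thread to finish
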
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Service.java b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Application.java
similarity index 96%
rename from slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Service.java
rename to slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Application.java
index 0fc009f..b007313 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Service.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Application.java
@@ -20,9 +20,9 @@
 import java.util.List;
 
 /**
- *
+ * Application type defined in the metainfo
  */
-public class Service {
+public class Application {
   String name;
   String comment;
   String version;
@@ -32,7 +32,7 @@
   List<CommandOrder> commandOrders;
   ConfigurationDependencies configDependencies;
 
-  public Service() {
+  public Application() {
     exportGroups = new ArrayList<>();
     components = new ArrayList<>();
     osSpecifics = new ArrayList<>();
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Metainfo.java b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Metainfo.java
index 21e8b24..2455e8e 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Metainfo.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Metainfo.java
@@ -16,19 +16,14 @@
  */
 package org.apache.slider.providers.agent.application.metadata;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
 /**
- *
+ * Root container class for the application metainfo.
  */
 public class Metainfo {
   String schemaVersion;
-  List<Service> services;
+  Application application;
 
   public Metainfo() {
-    services = new ArrayList<Service>();
   }
 
   public String getSchemaVersion() {
@@ -39,11 +34,11 @@
     this.schemaVersion = schemaVersion;
   }
 
-  public void addService(Service service) {
-    services.add(service);
+  public Application getApplication() {
+    return application;
   }
 
-  public List<Service> getServices() {
-    return Collections.unmodifiableList(services);
+  public void setApplication(Application application) {
+    this.application = application;
   }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/MetainfoParser.java b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/MetainfoParser.java
index a97c879..c7922a7 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/MetainfoParser.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/MetainfoParser.java
@@ -34,10 +34,10 @@
     digester.addObjectCreate("metainfo", Metainfo.class);
     digester.addBeanPropertySetter("metainfo/schemaVersion");
 
-    digester.addObjectCreate("*/service", Service.class);
-    digester.addBeanPropertySetter("*/service/name");
-    digester.addBeanPropertySetter("*/service/comment");
-    digester.addBeanPropertySetter("*/service/version");
+    digester.addObjectCreate("*/application", Application.class);
+    digester.addBeanPropertySetter("*/application/name");
+    digester.addBeanPropertySetter("*/application/comment");
+    digester.addBeanPropertySetter("*/application/version");
 
     digester.addObjectCreate("*/commandOrder", CommandOrder.class);
     digester.addBeanPropertySetter("*/commandOrder/command");
@@ -79,7 +79,7 @@
     digester.addBeanPropertySetter("*/config-type", "configType");
     digester.addSetNext("*/configuration-dependencies", "setConfigDependencies");
 
-    digester.addSetNext("*/service", "addService");
+    digester.addSetRoot("*/application", "setApplication");
 
     try {
       return (Metainfo) digester.parse(metainfoStream);
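
With these rules the parser now expects <application> rather than <service> as the
element under <metainfo>. A hypothetical skeleton of a conforming metainfo.xml (all
element values are placeholders):

    <metainfo>
      <schemaVersion>2.0</schemaVersion>
      <application>
        <name>MYAPP</name>
        <comment>example application</comment>
        <version>1.0</version>
        <!-- components, exportGroups, commandOrders, ... -->
      </application>
    </metainfo>
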
diff --git a/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java b/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
index 6b40856..6aeb801 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
@@ -44,6 +44,7 @@
 import org.apache.slider.providers.PlacementPolicy;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -160,7 +161,8 @@
     Class<?>[] classes = {
       JCommander.class,
       GsonBuilder.class,
-      
+      SslSelectChannelConnector.class,
+
       CuratorFramework.class,
       CuratorZookeeperClient.class,
       ServiceInstance.class,
@@ -171,7 +173,8 @@
       {
         JCOMMANDER_JAR,
         GSON_JAR,
-        
+        "jetty-sslengine.jar",
+
         "curator-framework.jar",
         "curator-client.jar",
         "curator-x-discovery.jar",
diff --git a/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java b/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
index 09e8229..184c25a 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
@@ -39,12 +39,12 @@
 import org.apache.slider.core.registry.info.RegistryView;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderCore;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.agent.AgentKeys;
 import org.apache.slider.server.appmaster.PublishedArtifacts;
 import org.apache.slider.server.appmaster.web.rest.RestPaths;
-import org.apache.slider.server.services.utility.EventCallback;
 
 import java.io.File;
 import java.io.IOException;
@@ -94,7 +94,7 @@
   public boolean exec(AggregateConf instanceDefinition,
       File confDir,
       Map<String, String> env,
-      EventCallback execInProgress) throws IOException, SliderException {
+      ProviderCompleted execInProgress) throws IOException, SliderException {
     return false;
   }
 
@@ -110,9 +110,13 @@
   }
 
   @Override
-  public void applyInitialRegistryDefinitions(URL amWebAPI,
-      ServiceInstanceData instanceData) throws IOException {
-    super.applyInitialRegistryDefinitions(amWebAPI, instanceData);
+  public void applyInitialRegistryDefinitions(URL unsecureWebAPI,
+                                              URL secureWebAPI,
+                                              ServiceInstanceData instanceData) throws IOException {
+    super.applyInitialRegistryDefinitions(unsecureWebAPI,
+                                          secureWebAPI,
+                                          instanceData
+    );
 
     // now publish site.xml files
     YarnConfiguration defaultYarnConfig = new YarnConfiguration();
@@ -146,24 +150,24 @@
     try {
       RegistryView externalView = instanceData.externalView;
       RegisteredEndpoint webUI =
-          new RegisteredEndpoint(amWebAPI, "Application Master Web UI");
+          new RegisteredEndpoint(unsecureWebAPI, "Application Master Web UI");
 
       externalView.endpoints.put(CommonRegistryConstants.WEB_UI, webUI);
 
       externalView.endpoints.put(
           CustomRegistryConstants.MANAGEMENT_REST_API,
           new RegisteredEndpoint(
-              new URL(amWebAPI, SLIDER_PATH_MANAGEMENT),
+              new URL(unsecureWebAPI, SLIDER_PATH_MANAGEMENT),
               "Management REST API") );
 
       externalView.endpoints.put(
           CustomRegistryConstants.REGISTRY_REST_API,
           new RegisteredEndpoint(
-              new URL(amWebAPI, RestPaths.SLIDER_PATH_REGISTRY + "/" +
+              new URL(unsecureWebAPI, RestPaths.SLIDER_PATH_REGISTRY + "/" +
                                 RestPaths.REGISTRY_SERVICE),
               "Registry Web Service" ) );
 
-      URL publisherURL = new URL(amWebAPI, SLIDER_PATH_PUBLISHER);
+      URL publisherURL = new URL(unsecureWebAPI, SLIDER_PATH_PUBLISHER);
       externalView.endpoints.put(
           CustomRegistryConstants.PUBLISHER_REST_API,
           new RegisteredEndpoint(
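
The endpoint URLs above are built with standard java.net.URL relative resolution
against the AM web URI. An illustrative sketch (host, port and path are
placeholders, not the real constants):

    URL base = new URL("http://am-host:1024/");
    URL mgmt = new URL(base, "ws/v1/slider/mgmt");
    // mgmt resolves to http://am-host:1024/ws/v1/slider/mgmt
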
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/AMViewForProviders.java
similarity index 68%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-core/src/main/java/org/apache/slider/server/appmaster/AMViewForProviders.java
index 7af463d..287035f 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/AMViewForProviders.java
@@ -16,10 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.server.appmaster;
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+import org.apache.slider.core.exceptions.SliderException;
+
+/** Operations available to a provider from AppMaster */
+public interface AMViewForProviders {
+  /** Provider can ask AppMaster to release a specific container */
+  void refreshContainer(String containerId, boolean newHostIfPossible) throws SliderException;
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index d90eeb6..5a5baaa 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -18,8 +18,9 @@
 
 package org.apache.slider.server.appmaster;
 
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.conf.AggregateConf;
@@ -29,38 +30,22 @@
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.services.workflow.AbstractWorkflowExecutorService;
+import org.apache.slider.server.services.workflow.ServiceThreadFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Executors;
 
 /**
  * A service for launching containers
  */
-public class RoleLaunchService extends AbstractService {
+public class RoleLaunchService extends AbstractWorkflowExecutorService {
   protected static final Logger log =
     LoggerFactory.getLogger(RoleLaunchService.class);
-  /**
-   * How long to expect launcher threads to shut down on AM termination:
-   * {@value}
-   */
-  public static final int LAUNCHER_THREAD_SHUTDOWN_TIME = 10000;
+
   public static final String ROLE_LAUNCH_SERVICE = "RoleLaunchService";
-  /**
-   * Map of launched threads.
-   * These are retained so that at shutdown time the AM can signal
-   * all threads to stop.
-   *
-   * However, we don't want to run out of memory even if many containers
-   * get launched over time, so the AM tries to purge this
-   * of the latest launched thread when the RoleLauncher signals
-   * the AM that it has finished
-   */
-  private final Map<RoleLauncher, Thread> launchThreads =
-    new HashMap<>();
 
   /**
    * Callback to whatever has the task of actually running the container
@@ -68,7 +53,6 @@
    */
   private final ContainerStartOperation containerStarter;
 
-
   private final ProviderService provider;
   /**
    * Filesystem to use for the launch
@@ -86,13 +70,6 @@
    */
   private final Path launcherTmpDirPath;
 
-  /**
-   * Thread group for the launchers; gives them all a useful name
-   * in stack dumps
-   */
-  private final ThreadGroup launcherThreadGroup = new ThreadGroup(
-      ROLE_LAUNCH_SERVICE);
-
   private Map<String, String> envVars;
 
   /**
@@ -101,14 +78,15 @@
    * @param provider the provider
    * @param fs filesystem
    * @param generatedConfDirPath path in the FS for the generated dir
-   * @param envVars
-   * @param launcherTmpDirPath
+   * @param envVars environment variables
+   * @param launcherTmpDirPath path for temporary data created during launch
    */
   public RoleLaunchService(ContainerStartOperation startOperation,
                            ProviderService provider,
                            SliderFileSystem fs,
                            Path generatedConfDirPath,
-                           Map<String, String> envVars, Path launcherTmpDirPath) {
+                           Map<String, String> envVars,
+      Path launcherTmpDirPath) {
     super(ROLE_LAUNCH_SERVICE);
     containerStarter = startOperation;
     this.fs = fs;
@@ -119,9 +97,10 @@
   }
 
   @Override
-  protected void serviceStop() throws Exception {
-    joinAllLaunchedThreads();
-    super.serviceStop();
+  public void init(Configuration conf) {
+    super.init(conf);
+    setExecutor(Executors.newCachedThreadPool(
+        new ServiceThreadFactory(ROLE_LAUNCH_SERVICE, true)));
   }
 
   /**
@@ -134,78 +113,17 @@
                          RoleStatus role,
                          AggregateConf clusterSpec) {
     String roleName = role.getName();
-    //emergency step: verify that this role is handled by the provider
-    assert provider.isSupportedRole(roleName) : "unsupported role";
+    // prelaunch safety check
+    Preconditions.checkArgument(provider.isSupportedRole(roleName));
     RoleLaunchService.RoleLauncher launcher =
       new RoleLaunchService.RoleLauncher(container,
-                                         role.getProviderRole(),
-                                         clusterSpec,
-                                         clusterSpec.getResourceOperations()
-                                                    .getOrAddComponent(roleName),
-                                         clusterSpec.getAppConfOperations()
-                                                    .getOrAddComponent(roleName) );
-    launchThread(launcher,
-                 String.format("%s-%s", roleName,
-                               container.getId().toString())
-                );
+         role.getProviderRole(),
+         clusterSpec,
+         clusterSpec.getResourceOperations().getOrAddComponent(roleName),
+         clusterSpec.getAppConfOperations().getOrAddComponent(roleName));
+    execute(launcher);
   }
 
-
-  public void launchThread(RoleLauncher launcher, String name) {
-    Thread launchThread = new Thread(launcherThreadGroup,
-                                     launcher,
-                                     name);
-
-    // launch and start the container on a separate thread to keep
-    // the main thread unblocked
-    // as all containers may not be allocated at one go.
-    synchronized (launchThreads) {
-      launchThreads.put(launcher, launchThread);
-    }
-    launchThread.start();
-  }
-
-  /**
-   * Method called by a launcher thread when it has completed;
-   * this removes the launcher of the map of active
-   * launching threads.
-   * @param launcher launcher that completed
-   * @param ex any exception raised
-   */
-  public void launchedThreadCompleted(RoleLauncher launcher, Exception ex) {
-    log.debug("Launched thread {} completed", launcher, ex);
-    synchronized (launchThreads) {
-      launchThreads.remove(launcher);
-    }
-  }
-
-  /**
-   Join all launched threads
-   needed for when we time out
-   and we need to release containers
-   */
-  private void joinAllLaunchedThreads() {
-
-
-    //first: take a snapshot of the thread list
-    List<Thread> liveThreads;
-    synchronized (launchThreads) {
-      liveThreads = new ArrayList<Thread>(launchThreads.values());
-    }
-    int size = liveThreads.size();
-    if (size > 0) {
-      log.info("Waiting for the completion of {} threads", size);
-      for (Thread launchThread : liveThreads) {
-        try {
-          launchThread.join(LAUNCHER_THREAD_SHUTDOWN_TIME);
-        } catch (InterruptedException e) {
-          log.info("Exception thrown in thread join: " + e, e);
-        }
-      }
-    }
-  }
-
-
   /**
    * Thread that runs on the AM to launch a region server.
    */
@@ -218,6 +136,7 @@
     private final MapOperations appComponent;
     private final AggregateConf instanceDefinition;
     public final ProviderRole role;
+    private Exception raisedException;
 
     public RoleLauncher(Container container,
                         ProviderRole role,
@@ -236,6 +155,10 @@
       this.instanceDefinition = instanceDefinition;
     }
 
+    public Exception getRaisedException() {
+      return raisedException;
+    }
+
     @Override
     public String toString() {
       return "RoleLauncher{" +
@@ -246,13 +169,9 @@
 
     @Override
     public void run() {
-      Exception ex = null;
       try {
-        ContainerLauncher containerLauncher = new ContainerLauncher(getConfig(),
-                                                                    fs,
-                                                                    container);
-
-
+        ContainerLauncher containerLauncher =
+            new ContainerLauncher(getConfig(), fs, container);
         containerLauncher.setupUGI();
         containerLauncher.putEnv(envVars);
 
@@ -260,7 +179,6 @@
                   container.getId(),
                   containerRole);
 
-
         //now build up the configuration data
         Path containerTmpDirPath =
           new Path(launcherTmpDirPath, container.getId().toString());
@@ -272,8 +190,7 @@
             generatedConfDirPath,
             resourceComponent,
             appComponent,
-            containerTmpDirPath
-        );
+            containerTmpDirPath);
 
         RoleInstance instance = new RoleInstance(container);
         String[] envDescription = containerLauncher.dumpEnvToString();
@@ -292,9 +209,7 @@
       } catch (Exception e) {
         log.error("Exception thrown while trying to start {}: {}",
             containerRole, e);
-        ex = e;
-      } finally {
-        launchedThreadCompleted(this, ex);
+        raisedException = e;
       }
     }
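
The executor-based refactor above delegates thread bookkeeping to a cached thread
pool built from ServiceThreadFactory. A hedged sketch of what such a factory is
assumed to provide: named, optionally daemon threads that stay identifiable in stack
dumps, replacing the old explicit ThreadGroup tracking:

    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    class NamedThreadFactory implements ThreadFactory {
      private final String name;
      private final boolean daemon;
      private final AtomicInteger counter = new AtomicInteger(1);

      NamedThreadFactory(String name, boolean daemon) {
        this.name = name;
        this.daemon = daemon;
      }

      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r, name + "-" + counter.getAndIncrement());
        t.setDaemon(daemon);
        return t;
      }
    }
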
 
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 3f54e27..0b22910 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -88,6 +88,7 @@
 import org.apache.slider.core.registry.info.RegisteredEndpoint;
 import org.apache.slider.core.registry.info.RegistryNaming;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
+import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.SliderProviderFactory;
@@ -104,6 +105,8 @@
 import org.apache.slider.server.appmaster.state.RMOperationHandler;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.appmaster.web.AgentService;
+import org.apache.slider.server.appmaster.web.rest.agent.AgentWebApp;
 import org.apache.slider.server.appmaster.web.SliderAMWebApp;
 import org.apache.slider.server.appmaster.web.SliderAmFilterInitializer;
 import org.apache.slider.server.appmaster.web.SliderAmIpFilter;
@@ -111,10 +114,10 @@
 import org.apache.slider.server.appmaster.web.WebAppApiImpl;
 import org.apache.slider.server.appmaster.web.rest.RestPaths;
 import org.apache.slider.server.services.registry.SliderRegistryService;
+import org.apache.slider.server.services.security.CertificateManager;
 import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
-import org.apache.slider.server.services.utility.EventCallback;
-import org.apache.slider.server.services.utility.RpcService;
 import org.apache.slider.server.services.utility.WebAppService;
+import org.apache.slider.server.services.workflow.WorkflowRpcService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -123,6 +126,7 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
+import java.net.URLClassLoader;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -135,6 +139,7 @@
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.WS_AGENT_CONTEXT_ROOT;
 import static org.apache.slider.server.appmaster.web.rest.RestPaths.WS_CONTEXT_ROOT;
 
 /**
@@ -149,8 +154,9 @@
     SliderClusterProtocol,
     ServiceStateChangeListener,
     RoleKeys,
-    EventCallback,
-    ContainerStartOperation {
+    ProviderCompleted,
+    ContainerStartOperation,
+    AMViewForProviders {
   protected static final Logger log =
     LoggerFactory.getLogger(SliderAppMaster.class);
 
@@ -162,7 +168,7 @@
   public static final String SERVICE_CLASSNAME_SHORT =
       "SliderAppMaster";
   public static final String SERVICE_CLASSNAME =
-      "org.apache.slider.server.appmaster."+ SERVICE_CLASSNAME_SHORT;
+      "org.apache.slider.server.appmaster." + SERVICE_CLASSNAME_SHORT;
 
 
   /**
@@ -178,31 +184,37 @@
   private YarnRPC yarnRPC;
 
   /** Handle to communicate with the Resource Manager*/
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private AMRMClientAsync asyncRMClient;
-  
+
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private RMOperationHandler rmOperationHandler;
 
   /** Handle to communicate with the Node Manager*/
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   public NMClientAsync nmClientAsync;
-  
-  YarnConfiguration conf;
+
   /**
    * token blob
    */
   private ByteBuffer allTokens;
 
-  private RpcService rpcService;
+  private WorkflowRpcService rpcService;
 
   /**
    * Secret manager
    */
-  ClientToAMTokenSecretManager secretManager;
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+  private ClientToAMTokenSecretManager secretManager;
   
   /** Hostname of the container*/
   private String appMasterHostname = "";
   /* Port on which the app master listens for status updates from clients*/
   private int appMasterRpcPort = 0;
   /** Tracking url to which app master publishes info for clients to monitor*/
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private String appMasterTrackingUrl = "";
 
   /** Application Attempt Id ( combination of attemptId and fail count )*/
@@ -234,6 +246,10 @@
   private final ReentrantLock AMExecutionStateLock = new ReentrantLock();
   private final Condition isAMCompleted = AMExecutionStateLock.newCondition();
 
+  /**
+   * Exit code for the AM to return
+   */
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private int amExitCode =  0;
   
   /**
@@ -246,22 +262,30 @@
   /**
    * Flag to set if the process exit code was set before shutdown started
    */
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private boolean spawnedProcessExitedBeforeShutdownTriggered;
 
 
   /** Arguments passed in : raw*/
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private SliderAMArgs serviceArgs;
 
   /**
    * ID of the AM container
    */
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private ContainerId appMasterContainerID;
 
   /**
    * ProviderService of this cluster
    */
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private ProviderService providerService;
 
+  /**
+   * The registry service
+   */
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private SliderRegistryService registry;
   
   /**
@@ -276,15 +300,20 @@
   private int containerMaxMemory;
   private String amCompletionReason;
 
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private RoleLaunchService launchService;
   
   //username -null if it is not known/not to be set
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private String hadoop_user_name;
   private String service_user_name;
   
   private SliderAMWebApp webApp;
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private InetSocketAddress rpcServiceAddress;
   private ProviderService sliderAMProvider;
+  private String agentAccessUrl;
+  private CertificateManager certificateManager;
 
   /**
    * Service Constructor
@@ -326,7 +355,7 @@
                SliderUtils.getKerberosRealm());
       UserGroupInformation.setConfiguration(conf);
       UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      log.debug("Authenticating as " + ugi.toString());
+      log.debug("Authenticating as {}", ugi);
       SliderUtils.verifyPrincipalSet(conf,
           DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
       // always enforce protocol to be token-based.
@@ -355,11 +384,11 @@
   @Override // RunService
   public Configuration bindArgs(Configuration config, String... args) throws
                                                                       Exception {
-    config = super.bindArgs(config, args);
+    YarnConfiguration yarnConfiguration = new YarnConfiguration(
+        super.bindArgs(config, args));
     serviceArgs = new SliderAMArgs(args);
     serviceArgs.parse();
     //yarn-ify
-    YarnConfiguration yarnConfiguration = new YarnConfiguration(config);
     return SliderUtils.patchConfiguration(yarnConfiguration);
   }
 
@@ -449,11 +478,10 @@
 
     Configuration serviceConf = getConfig();
     // Try to get the proper filtering of static resources through the yarn proxy working
-    serviceConf.set("hadoop.http.filter.initializers",
+    serviceConf.set(HADOOP_HTTP_FILTER_INITIALIZERS,
                     SliderAmFilterInitializer.NAME);
-    serviceConf.set(SliderAmIpFilter.WS_CONTEXT_ROOT, WS_CONTEXT_ROOT);
+    serviceConf.set(SliderAmIpFilter.WS_CONTEXT_ROOT, WS_CONTEXT_ROOT + "|" + WS_AGENT_CONTEXT_ROOT);
     
-    conf = new YarnConfiguration(serviceConf);
     //get our provider
     MapOperations globalInternalOptions =
       instanceDefinition.getInternalOperations().getGlobalOptions();
@@ -470,9 +498,9 @@
     sliderAMProvider = new SliderAMProviderService();
     initAndAddService(sliderAMProvider);
     
-    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(conf);
+    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(serviceConf);
     log.info("RM is at {}", address);
-    yarnRPC = YarnRPC.create(conf);
+    yarnRPC = YarnRPC.create(serviceConf);
 
     /*
      * Extract the container ID. This is then
@@ -510,7 +538,7 @@
       }
     }
     allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-    
+
     // set up secret manager
     secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);
 
@@ -569,22 +597,32 @@
       providerRoles.addAll(SliderAMClientProvider.ROLES);
 
       // Start up the WebApp and track the URL for it
+      certificateManager = new CertificateManager();
+      certificateManager.initRootCert(
+          instanceDefinition.getAppConfOperations()
+              .getComponent(SliderKeys.COMPONENT_AM));
+
+      startAgentWebApp(appInformation, serviceConf);
+
       webApp = new SliderAMWebApp(registry);
       WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class,
-          new WebAppApiImpl(this, stateForProviders, providerService),
-          RestPaths.WS_CONTEXT)
+                   new WebAppApiImpl(this,
+                                     stateForProviders,
+                                     providerService,
+                                     certificateManager),
+                   RestPaths.WS_CONTEXT)
                       .with(serviceConf)
                       .start(webApp);
       appMasterTrackingUrl = "http://" + appMasterHostname + ":" + webApp.port();
       WebAppService<SliderAMWebApp> webAppService =
         new WebAppService<>("slider", webApp);
 
-      webAppService.init(conf);
+      webAppService.init(serviceConf);
       webAppService.start();
       addService(webAppService);
 
       appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
-      appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());      
+      appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());
 
       // Register self with ResourceManager
       // This will start heartbeating to the RM
@@ -612,7 +650,8 @@
         applicationACLs = response.getApplicationACLs();
 
         //tell the server what the ACLs are 
-        rpcService.getServer().refreshServiceAcl(conf, new SliderAMPolicyProvider());
+        rpcService.getServer().refreshServiceAcl(serviceConf,
+            new SliderAMPolicyProvider());
       }
 
       // extract container list
@@ -675,8 +714,8 @@
 
 
     //Give the provider restricted access to the state, registry
-    providerService.bind(stateForProviders, registry);
-    sliderAMProvider.bind(stateForProviders, registry);
+    providerService.bind(stateForProviders, registry, this);
+    sliderAMProvider.bind(stateForProviders, registry, null);
 
     // now do the registration
     registerServiceInstance(clustername, appid);
@@ -701,6 +740,32 @@
     return amExitCode;
   }
 
+  private void startAgentWebApp(MapOperations appInformation,
+                                Configuration serviceConf) {
+    LOG_YARN.info("AM classpath:" + ((URLClassLoader) AgentWebApp.class.getClassLoader() ).getURLs());
+    // Start up the agent web app and track the URL for it
+    AgentWebApp agentWebApp = AgentWebApp.$for(AgentWebApp.BASE_PATH,
+                     new WebAppApiImpl(this,
+                                       stateForProviders,
+                                       providerService,
+                                       certificateManager),
+                     RestPaths.AGENT_WS_CONTEXT)
+        .withComponentConfig(getInstanceDefinition().getAppConfOperations()
+                                 .getComponent(SliderKeys.COMPONENT_AM))
+        .start();
+    agentAccessUrl = "https://" + appMasterHostname + ":" + agentWebApp.getSecuredPort();
+    AgentService agentService =
+      new AgentService("slider-agent", agentWebApp);
+
+    agentService.init(serviceConf);
+    agentService.start();
+    addService(agentService);
+
+    appInformation.put(StatusKeys.INFO_AM_AGENT_URL, agentAccessUrl + "/");
+    appInformation.set(StatusKeys.INFO_AM_AGENT_PORT, agentWebApp.getPort());
+    appInformation.set(StatusKeys.INFO_AM_SECURED_AGENT_PORT,
+                       agentWebApp.getSecuredPort());
+  }
 
   /**
    * This registers the service instance and its external values
@@ -711,7 +776,8 @@
   private void registerServiceInstance(String instanceName,
       ApplicationId appid) throws Exception {
     // the registry is running, so register services
-    URL amWebAPI = new URL(appMasterTrackingUrl);
+    URL unsecureWebAPI = new URL(appMasterTrackingUrl);
+    URL secureWebAPI = new URL(agentAccessUrl);
     String serviceName = SliderKeys.APP_TYPE;
     int id = appid.getId();
     String appServiceType = RegistryNaming.createRegistryServiceType(
@@ -740,16 +806,22 @@
 
     // internal services
    
-    sliderAMProvider.applyInitialRegistryDefinitions(amWebAPI, instanceData);
+    sliderAMProvider.applyInitialRegistryDefinitions(unsecureWebAPI,
+                                                     secureWebAPI,
+                                                     instanceData
+    );
 
     // provider service dynamic definitions.
-    providerService.applyInitialRegistryDefinitions(amWebAPI, instanceData);
+    providerService.applyInitialRegistryDefinitions(unsecureWebAPI,
+                                                    secureWebAPI,
+                                                    instanceData
+    );
 
 
     // push the registration info to ZK
 
     registry.registerSelf(
-        instanceData, amWebAPI
+        instanceData, unsecureWebAPI
     );
   }
 
@@ -911,7 +983,7 @@
                                                     .newReflectiveBlockingService(
                                                       protobufRelay);
 
-    rpcService = new RpcService(RpcBinder.createProtobufServer(
+    rpcService = new WorkflowRpcService("SliderRPC", RpcBinder.createProtobufServer(
       new InetSocketAddress("0.0.0.0", 0),
       getConfig(),
       secretManager,
@@ -978,7 +1050,8 @@
 
       // non complete containers should not be here
       assert (status.getState() == ContainerState.COMPLETE);
-      AppState.NodeCompletionResult result = appState.onCompletedNode(conf, status);
+      AppState.NodeCompletionResult result = appState.onCompletedNode(
+          getConfig(), status);
       if (result.containerFailed) {
         RoleInstance ri = result.roleInstance;
         log.error("Role instance {} failed ", ri);
@@ -1321,7 +1394,7 @@
       // didn't start, so don't register
       providerService.start();
       // and send the started event ourselves
-      eventCallbackEvent();
+      eventCallbackEvent(null);
     }
   }
 
@@ -1330,8 +1403,8 @@
   /* EventCallback  from the child or ourselves directly */
   /* =================================================================== */
 
-  @Override // EventCallback
-  public void eventCallbackEvent() {
+  @Override // ProviderCompleted
+  public void eventCallbackEvent(Object parameter) {
     // signalled that the child process is up.
     appState.noteAMLive();
     // now ask for the cluster nodes
@@ -1345,6 +1418,30 @@
     }
   }
 
+
+  /* =================================================================== */
+  /* ProviderAMOperations */
+  /* =================================================================== */
+
+  /**
+   * Refreshes the container by releasing it and having it reallocated
+   *
+   * @param containerId       id of the container to release
+   * @param newHostIfPossible allocate the replacement container on a new host
+   *
+   * @throws SliderException if the release request fails
+   */
+  public void refreshContainer(String containerId, boolean newHostIfPossible)
+      throws SliderException {
+    log.info(
+        "Refreshing container {} per provider request.",
+        containerId);
+    rmOperationHandler.execute(appState.releaseContainer(containerId));
+
+    // ask for more containers if needed
+    reviewRequestAndReleaseNodes();
+  }
+
   /* =================================================================== */
   /* ServiceStateChangeListener */
   /* =================================================================== */
@@ -1355,7 +1452,7 @@
    */
   @Override //ServiceStateChangeListener
   public void stateChanged(Service service) {
-    if (service == providerService) {
+    if (service == providerService && service.isInState(STATE.STOPPED)) {
       //its the current master process in play
       int exitCode = providerService.getExitCode();
       int mappedProcessExitCode =
@@ -1382,6 +1479,8 @@
           exitCode,
           mappedProcessExitCode);
       }
+    } else {
+      super.stateChanged(service);
     }
   }
 
@@ -1478,7 +1577,7 @@
   public AggregateConf getInstanceDefinition() {
     return appState.getInstanceDefinition();
   }
-  
+
   /**
    * This is the status, the live model
    */
@@ -1513,5 +1612,4 @@
     //now have the service launcher do its work
     ServiceLauncher.serviceMain(extendedArgs);
   }
-
 }
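
[review note] The new ProviderAMOperations section gives providers a supported
way to recycle a container: the AM releases it through the RM operation
handler, then reviewRequestAndReleaseNodes() requests a replacement. A minimal
provider-side sketch, assuming the AM reference passed through the new
three-argument bind() exposes refreshContainer() as declared above; the field
and method names below are otherwise hypothetical:

    // provider-side sketch; `amOperations` is the third argument received in
    // bind(stateForProviders, registry, amOperations)
    void recycleContainer(String containerId) {
      try {
        // release the container and let the AM allocate a replacement,
        // preferring a new host
        amOperations.refreshContainer(containerId, true);
      } catch (SliderException e) {
        log.error("Failed to refresh container " + containerId, e);
      }
    }
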
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index b5e67f5..cc238ff 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -1171,7 +1171,7 @@
    * @param status the node that has just completed
    * @return NodeCompletionResult
    */
-  public synchronized NodeCompletionResult onCompletedNode(YarnConfiguration amConf,
+  public synchronized NodeCompletionResult onCompletedNode(Configuration amConf,
       ContainerStatus status) {
     ContainerId containerId = status.getContainerId();
     NodeCompletionResult result = new NodeCompletionResult();
@@ -1476,6 +1476,26 @@
     return operations;
   }
 
+  /**
+   * Release a container identified by its container id
+   * @param containerId id of the container to release
+   * @return the release operations to submit; empty if no active container matches
+   * @throws SliderInternalStateException if the release fails
+   */
+  public List<AbstractRMOperation> releaseContainer(String containerId)
+      throws SliderInternalStateException {
+    List<AbstractRMOperation> operations = new ArrayList<>();
+    List<RoleInstance> activeRoleInstances = cloneActiveContainerList();
+    for (RoleInstance role : activeRoleInstances) {
+      if (role.container.getId().toString().equals(containerId)) {
+        containerReleaseSubmitted(role.container);
+        operations.add(new ContainerReleaseOperation(role.getId()));
+      }
+    }
+
+    return operations;
+  }
+
 
   /**
    * Find a container running on a specific host -looking
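
[review note] releaseContainer() turns a container id into the RM release
operations; refreshContainer() in SliderAppMaster above shows the intended
call path. A minimal caller-side sketch, assuming an RMOperationHandler field
named rmOperationHandler as in SliderAppMaster:

    // an unknown container id yields an empty operation list, making this a no-op
    List<AbstractRMOperation> ops = appState.releaseContainer(containerId);
    rmOperationHandler.execute(ops);
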
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerPriority.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerPriority.java
index ccd9a64..369a932 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerPriority.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerPriority.java
@@ -37,9 +37,13 @@
  */
 public final class ContainerPriority {
 
+  // bit set in the priority when no placement location was specified
+  static final int NOLOCATION = 1 << 30;
+  
   public static int buildPriority(int role,
                                   boolean locationSpecified) {
-    return (role)  ;
+    int location = locationSpecified ? 0 : NOLOCATION;
+    return role | location;
   }
 
 
@@ -53,7 +57,7 @@
   
   
   public static int extractRole(int priority) {
-    return priority ;
+    return (priority & NOLOCATION) != 0 ? priority ^ NOLOCATION : priority;
   }
 
   /**
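
[review note] The priority word now packs a "no location" flag into bit 30
alongside the role id, so placed and relaxed requests for the same role get
distinct priorities while still decoding to the same role. A self-contained
round-trip sketch of the encoding (mirrors buildPriority/extractRole above):

    public class ContainerPriorityDemo {
      static final int NOLOCATION = 1 << 30;

      static int buildPriority(int role, boolean locationSpecified) {
        return role | (locationSpecified ? 0 : NOLOCATION);
      }

      static int extractRole(int priority) {
        return (priority & NOLOCATION) != 0 ? priority ^ NOLOCATION : priority;
      }

      public static void main(String[] args) {
        int placed = buildPriority(3, true);     // == 3
        int anywhere = buildPriority(3, false);  // == 3 | (1 << 30)
        System.out.println(extractRole(placed));    // 3
        System.out.println(extractRole(anywhere));  // 3
        System.out.println(placed == anywhere);     // false
      }
    }
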
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/OutstandingRequest.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/OutstandingRequest.java
index 7d3e427..0d8b56c 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/OutstandingRequest.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/OutstandingRequest.java
@@ -103,23 +103,20 @@
       RoleStatus role, long time) {
     String[] hosts;
     boolean relaxLocality;
-    boolean locationSpecified;
     requestedTime = time;
     if (node != null) {
       hosts = new String[1];
       hosts[0] = node.hostname;
-      relaxLocality = true;
-      locationSpecified = true;
+      relaxLocality = false;
       // tell the node it is in play
       node.getOrCreate(roleId);
       log.info("Submitting request for container on {}", hosts[0]);
     } else {
       hosts = null;
       relaxLocality = true;
-      locationSpecified = false;
     }
     Priority pri = ContainerPriority.createPriority(roleId,
-                                                    locationSpecified);
+                                                    !relaxLocality);
     AMRMClient.ContainerRequest request =
       new AMRMClient.ContainerRequest(resource,
                                       hosts,
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
index 68e7693..0cd2b39 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
@@ -29,6 +29,7 @@
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.server.avro.RoleHistoryHeader;
 import org.apache.slider.server.avro.RoleHistoryWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -405,6 +406,7 @@
       for (int i = 0; i < roleSize; i++) {
         NodeEntry nodeEntry = ni.get(i);
         if (nodeEntry != null && nodeEntry.isAvailable()) {
+          log.debug("Adding {} for role {}", ni, i);
           getOrCreateNodesForRoleId(i).add(ni);
         }
       }
@@ -468,12 +470,17 @@
     NodeInstance nodeInstance = null;
     
     List<NodeInstance> targets = getNodesForRoleId(roleKey);
+    int count = targets == null ? 0 : targets.size();
+    log.info("There are {} nodes to consider for {}", count, role.getName());
     while (targets != null && !targets.isEmpty() && nodeInstance == null) {
       NodeInstance head = targets.remove(0);
       if (head.getActiveRoleInstances(roleKey) == 0) {
         nodeInstance = head;
       }
     }
+    if (nodeInstance == null) {
+      log.debug("No node selected for {}", role.getName());
+    }
     return nodeInstance;
   }
 
@@ -591,6 +598,7 @@
         hosts = outstandingRequests.cancelOutstandingRequests(role);
       if (!hosts.isEmpty()) {
         //add the list
+        log.debug("Adding {} hosts for role {}", hosts.size(), role);
         getOrCreateNodesForRoleId(role).addAll(hosts);
         sortAvailableNodeList(role);
       }
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/AgentService.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/AgentService.java
new file mode 100644
index 0000000..08338e8
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/AgentService.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.slider.server.appmaster.web.rest.agent.AgentWebApp;
+
+/**
+ * Hadoop service wrapper around the agent web application: stopping the
+ * service stops the web app and drops the reference to it.
+ */
+public class AgentService extends AbstractService {
+  private volatile AgentWebApp webApp;
+
+  public AgentService(String name) {
+    super(name);
+  }
+
+  public AgentService(String name, AgentWebApp app) {
+    super(name);
+    webApp = app;
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+
+  }
+
+  /**
+   * The stop operation stops the web app and clears the reference to it
+   * @throws Exception
+   */
+  @Override
+  protected void serviceStop() throws Exception {
+    if (webApp != null) {
+      webApp.stop();
+      webApp = null;
+    }
+  }
+}
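
[review note] AgentService adapts the already-started agent web app to the
Hadoop service lifecycle so it is shut down with the rest of the AM's
services. A minimal lifecycle sketch, assuming a started AgentWebApp `webApp`
and a Hadoop Configuration `conf` (mirrors startAgentWebApp in SliderAppMaster):

    AgentService agentService = new AgentService("slider-agent", webApp);
    agentService.init(conf);  // NOTINITED -> INITED
    agentService.start();     // INITED -> STARTED; the web app is already running
    // ... later, typically via the AM's composite service stop:
    agentService.stop();      // STARTED -> STOPPED; serviceStop() stops the web app
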
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
index fc9929a..4f290af 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
@@ -19,7 +19,6 @@
 import com.google.common.base.Preconditions;
 import com.sun.jersey.api.container.filter.GZIPContentEncodingFilter;
 import com.sun.jersey.api.core.ResourceConfig;
-import com.sun.jersey.core.util.FeaturesAndProperties;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.apache.curator.x.discovery.ServiceDiscovery;
@@ -51,7 +50,7 @@
   public final SliderRegistryService registry;
 
   public SliderAMWebApp(SliderRegistryService registry) {
-    Preconditions.checkNotNull(registry);
+    Preconditions.checkArgument(registry != null, "registry null");
     this.registry = registry;
   }
 
@@ -100,7 +99,7 @@
     Map<String, String> params = new HashMap<>();
     params.put(ResourceConfig.FEATURE_IMPLICIT_VIEWABLES, "true");
     params.put(ServletContainer.FEATURE_FILTER_FORWARD_ON_404, "true");
-    params.put(FeaturesAndProperties.FEATURE_XMLROOTELEMENT_PROCESSING, "true");
+    params.put(ResourceConfig.FEATURE_XMLROOTELEMENT_PROCESSING, "true");
     params.put(ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS, GZIPContentEncodingFilter.class.getName());
     params.put(ResourceConfig.PROPERTY_CONTAINER_RESPONSE_FILTERS, GZIPContentEncodingFilter.class.getName());
     //params.put("com.sun.jersey.spi.container.ContainerRequestFilters", "com.sun.jersey.api.container.filter.LoggingFilter");
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAmIpFilter.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAmIpFilter.java
index dec89d1..ad5e219 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAmIpFilter.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAmIpFilter.java
@@ -36,7 +36,9 @@
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 public class SliderAmIpFilter implements Filter {
@@ -49,17 +51,18 @@
   private static final long updateInterval = 5 * 60 * 1000;
   public static final String WS_CONTEXT_ROOT = "slider.rest.context.root";
 
+  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private String proxyHost;
   private Set<String> proxyAddresses = null;
   private long lastUpdate;
   private String proxyUriBase;
-  private String wsContextRoot;
+  private List<String> wsContextRoots;
   
   @Override
   public void init(FilterConfig conf) throws ServletException {
     proxyHost = conf.getInitParameter(PROXY_HOST);
     proxyUriBase = conf.getInitParameter(PROXY_URI_BASE);
-    wsContextRoot = conf.getInitParameter(WS_CONTEXT_ROOT);
+    wsContextRoots = Arrays.asList(conf.getInitParameter(WS_CONTEXT_ROOT).split("\\|"));
   }
   
   protected Set<String> getProxyAddresses() throws ServletException {
@@ -101,7 +104,7 @@
       log.debug("Remote address for request is: " + httpReq.getRemoteAddr());
     }
     String requestURI = httpReq.getRequestURI();
-    if(!requestURI.startsWith(wsContextRoot) &&
+    if (!isWsRequest(requestURI) &&
        !getProxyAddresses().contains(httpReq.getRemoteAddr())) {
       String redirectUrl = httpResp.encodeRedirectURL(proxyUriBase +
                                                       requestURI);
@@ -121,7 +124,7 @@
     }
     try {
       if (user == null) {
-        log.warn("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
+        log.debug("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
                  + " cookie, so user will not be set");
         chain.doFilter(req, resp);
       } else {
@@ -135,4 +138,14 @@
       throw e;
     }
   }
+
+  private boolean isWsRequest(String requestURI) {
+    for (String wsContext : wsContextRoots) {
+      if (requestURI.startsWith(wsContext)) {
+        return true;
+      }
+    }
+    return false;
+  }
 }
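
[review note] The filter's WS_CONTEXT_ROOT init parameter can now carry
several context roots joined by '|' (split with the regex-escaped "\\|"), and
a request bypasses the proxy-address check when its URI starts with any of
them. A self-contained sketch of that matching, with a hypothetical parameter
value:

    import java.util.Arrays;
    import java.util.List;

    public class WsContextRootsDemo {
      public static void main(String[] args) {
        String initParam = "/ws|/slideragent";  // hypothetical example value
        List<String> roots = Arrays.asList(initParam.split("\\|"));

        String uri = "/slideragent/ws/v1/slider/agents";
        boolean isWsRequest = false;
        for (String root : roots) {
          if (uri.startsWith(root)) {  // same prefix test as isWsRequest()
            isWsRequest = true;
            break;
          }
        }
        System.out.println(isWsRequest);  // true
      }
    }
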
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
index 4fac962..aa20baa 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
@@ -22,6 +22,7 @@
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations;
+import org.apache.slider.server.services.security.CertificateManager;
 
 import java.util.Map;
 
@@ -39,7 +40,13 @@
    * The {@link ProviderService} for the current cluster
    */
   public ProviderService getProviderService();
-  
+
+
+  /**
+   * The {@link CertificateManager} for the current cluster
+   */
+  public CertificateManager getCertificateManager();
+
   /**
    * The {@link SliderClusterProtocol} for the current cluster
    */
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
index 9a5a628..4eebd45 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
@@ -23,6 +23,7 @@
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations;
+import org.apache.slider.server.services.security.CertificateManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,9 +46,12 @@
   protected final SliderClusterProtocol clusterProto;
   protected final StateAccessForProviders appState;
   protected final ProviderService provider;
+  protected final CertificateManager certificateManager;
   
   public WebAppApiImpl(SliderClusterProtocol clusterProto,
-                       StateAccessForProviders appState, ProviderService provider) {
+                       StateAccessForProviders appState,
+                       ProviderService provider,
+                       CertificateManager certificateManager) {
     checkNotNull(clusterProto);
     checkNotNull(appState);
     checkNotNull(provider);
@@ -55,6 +59,7 @@
     this.clusterProto = clusterProto;
     this.appState = appState;
     this.provider = provider;
+    this.certificateManager = certificateManager;
   }
 
   /* (non-Javadoc)
@@ -73,9 +78,14 @@
     return provider;
   }
 
+  @Override
+  public CertificateManager getCertificateManager() {
+    return certificateManager;
+  }
+
   /* (non-Javadoc)
    * @see org.apache.slider.server.appmaster.web.WebAppApi#getClusterProtocol()
    */
   @Override
   public SliderClusterProtocol getClusterProtocol() {
     return clusterProto;
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
index 91c83f2..4f068f3 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
@@ -42,12 +42,7 @@
     return new ManagementResource(slider);
   }
 
-  @Path(RestPaths.SLIDER_SUBPATH_AGENTS)
-  public AgentResource getAgentResource () {
-    return new AgentResource(slider);
-  }
-
-  @Path(RestPaths.SLIDER_SUBPATH_PUBLISHER) 
+  @Path(RestPaths.SLIDER_SUBPATH_PUBLISHER)
   public PublisherResource getPublisherResource() {
     return new PublisherResource(slider);
   }
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
index fed8afe..0571ca1 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
@@ -24,15 +24,18 @@
 public class RestPaths {
 
   public static final String WS_CONTEXT = "ws";
+  public static final String AGENT_WS_CONTEXT = "ws";
   public static final String WS_CONTEXT_ROOT = "/" + WS_CONTEXT;
+  public static final String WS_AGENT_CONTEXT_ROOT = "/" + AGENT_WS_CONTEXT;
   public static final String SLIDER_CONTEXT_ROOT = WS_CONTEXT_ROOT +"/v1/slider";
+  public static final String SLIDER_AGENT_CONTEXT_ROOT = WS_AGENT_CONTEXT_ROOT +"/v1/slider";
   public static final String SLIDER_SUBPATH_MANAGEMENT = "/mgmt";
   public static final String SLIDER_SUBPATH_AGENTS = "/agents";
   public static final String SLIDER_SUBPATH_PUBLISHER = "/publisher";
 
   public static final String SLIDER_PATH_MANAGEMENT = SLIDER_CONTEXT_ROOT
                                       + SLIDER_SUBPATH_MANAGEMENT;
-  public static final String SLIDER_PATH_AGENTS = SLIDER_CONTEXT_ROOT
+  public static final String SLIDER_PATH_AGENTS = SLIDER_AGENT_CONTEXT_ROOT
                                       + SLIDER_SUBPATH_AGENTS;
   
   public static final String SLIDER_PATH_PUBLISHER = SLIDER_CONTEXT_ROOT
@@ -56,4 +59,6 @@
       = "[a-z0-9][a-z0-9_.\\+-]*";
 
   public static final String SLIDER_CONFIGSET = "slider";
+
+  public static final String SLIDER_CLASSPATH = "classpath";
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentResource.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentResource.java
index 96b7b47..9d1e840 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentResource.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentResource.java
@@ -17,9 +17,12 @@
 package org.apache.slider.server.appmaster.web.rest.agent;
 
 import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.services.security.SignCertResponse;
+import org.apache.slider.server.services.security.SignMessage;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
+import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.GET;
@@ -50,14 +53,14 @@
   }
 
   @GET
-  @Path("/agents/register")
+  @Path("/agent/register")
   public Response endpointAgentRegister() {
     Response response = Response.status(200).entity("/agent/register").build();
     return response;
   }
 
   @GET
-  @Path("/agents")
+  @Path("/agent")
   public Response endpointAgent() {
     Response response = Response.status(200).entity("/agent").build();
     return response;
@@ -94,4 +97,22 @@
     AgentRestOperations ops = slider.getAgentRestOperations();
     return ops.handleHeartBeat(message);
   }
+
+  @GET
+  @Path("/cert/ca")
+  @Produces({MediaType.TEXT_PLAIN})
+  public String downloadSrvrCrt() {
+    return slider.getCertificateManager().getServerCert();
+  }
+
+  @Path("/certs/{hostName}")
+  @POST
+  @Consumes(MediaType.APPLICATION_JSON)
+  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
+  public SignCertResponse signAgentCrt(@PathParam("hostName") String hostname,
+                                       SignMessage message, @Context HttpServletRequest req) {
+    return slider.getCertificateManager().signAgentCrt(hostname,
+                                                       message.getCsr(),
+                                                       message.getPassphrase());
+  }
 }
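
[review note] With the agent resources served from the separate agent web
app, the effective endpoints hang off the secured port published in
INFO_AM_AGENT_URL / INFO_AM_SECURED_AGENT_PORT. A sketch of how the agent
registration URL is composed (host and port values are hypothetical):

    public class AgentUrlDemo {
      public static void main(String[] args) {
        String host = "am-host.example.com";  // appMasterHostname
        int securedPort = 46443;              // agentWebApp.getSecuredPort()
        // SLIDER_AGENT_CONTEXT_ROOT + SLIDER_SUBPATH_AGENTS + the @Path above
        String register = "https://" + host + ":" + securedPort
            + "/ws/v1/slider/agents/agent/register";
        System.out.println(register);
      }
    }
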
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebApp.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebApp.java
new file mode 100644
index 0000000..54d2b1f
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebApp.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.agent;
+
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.core.ResourceConfig;
+import com.sun.jersey.spi.container.WebApplication;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+import com.sun.jersey.spi.container.servlet.WebConfig;
+import com.sun.jersey.spi.inject.SingletonTypeInjectableProvider;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+import org.apache.slider.server.services.security.SecurityUtils;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.ServletHolder;
+import org.mortbay.thread.QueuedThreadPool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.ext.Provider;
+import java.io.File;
+import java.util.Set;
+
+/**
+ * Embedded Jetty web application hosting the agent REST endpoints behind
+ * one-way and (optionally) two-way SSL connectors.
+ */
+public class AgentWebApp {
+  protected static final Logger LOG = LoggerFactory.getLogger(AgentWebApp.class);
+  private int port;
+  private int securedPort;
+  private static Server agentServer;
+  public static final String BASE_PATH = "slideragent";
+
+  public static class Builder {
+    final String name;
+    final String wsName;
+    final WebAppApi application;
+    MapOperations configsMap;
+
+    public Builder(String name, String wsName, WebAppApi application) {
+      this.name = name;
+      this.wsName = wsName;
+      this.application = application;
+    }
+
+    public Builder withComponentConfig(MapOperations appMasterConfig) {
+      this.configsMap = appMasterConfig;
+      return this;
+    }
+
+    public AgentWebApp start() {
+      if (configsMap == null) {
+        throw new IllegalStateException("No SSL Configuration Available");
+      }
+
+      agentServer = new Server();
+      agentServer.setThreadPool(
+          new QueuedThreadPool(
+              configsMap.getOptionInt("agent.threadpool.size.max", 25)));
+      agentServer.setStopAtShutdown(true);
+
+      SslSelectChannelConnector ssl1WayConnector = createSSLConnector(false);
+      SslSelectChannelConnector ssl2WayConnector =
+          createSSLConnector(Boolean.valueOf(
+              configsMap.getOption("ssl.server.client.auth","false")));
+      agentServer.setConnectors(new Connector[]{ssl1WayConnector,
+          ssl2WayConnector});
+
+      ServletHolder agent = new ServletHolder(new AgentServletContainer());
+      Context agentRoot = new Context(agentServer, "/", Context.SESSIONS);
+
+      agent.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+                             "com.sun.jersey.api.core.PackagesResourceConfig");
+      agent.setInitParameter("com.sun.jersey.config.property.packages",
+                             "org.apache.slider.server.appmaster.web.rest.agent");
+      agent.setInitParameter("com.sun.jersey.api.json.POJOMappingFeature",
+                             "true");
+//      agent.setInitParameter("com.sun.jersey.spi.container.ContainerRequestFilters", "com.sun.jersey.api.container.filter.LoggingFilter");
+//      agent.setInitParameter("com.sun.jersey.spi.container.ContainerResponseFilters", "com.sun.jersey.api.container.filter.LoggingFilter");
+//      agent.setInitParameter("com.sun.jersey.config.feature.Trace", "true");
+      agentRoot.addServlet(agent, "/*");
+
+      try {
+        agentServer.start();
+      } catch (Exception e) {
+        LOG.error("Unable to start agent server", e);
+      }
+
+      AgentWebApp webApp = new AgentWebApp();
+      webApp.setPort(getConnectorPort(agentServer, 0));
+      webApp.setSecuredPort(getConnectorPort(agentServer, 1));
+
+      return webApp;
+
+    }
+
+    private SslSelectChannelConnector createSSLConnector(boolean needClientAuth) {
+      SslSelectChannelConnector sslConnector = new
+          SslSelectChannelConnector();
+
+      String keystore = SecurityUtils.getSecurityDir() +
+                        File.separator + "keystore.p12";
+      String srvrCrtPass = SecurityUtils.getKeystorePass();
+      sslConnector.setKeystore(keystore);
+      sslConnector.setTruststore(keystore);
+      sslConnector.setPassword(srvrCrtPass);
+      sslConnector.setKeyPassword(srvrCrtPass);
+      sslConnector.setTrustPassword(srvrCrtPass);
+      sslConnector.setKeystoreType("PKCS12");
+      sslConnector.setTruststoreType("PKCS12");
+      sslConnector.setNeedClientAuth(needClientAuth);
+
+      sslConnector.setAcceptors(2);
+      return sslConnector;
+    }
+
+    @Provider
+    public class WebAppApiProvider extends
+        SingletonTypeInjectableProvider<javax.ws.rs.core.Context, WebAppApi> {
+
+      public WebAppApiProvider () {
+        super(WebAppApi.class, application);
+      }
+    }
+
+    public class AgentServletContainer extends ServletContainer {
+      public AgentServletContainer() {
+        super();
+      }
+
+      @Override
+      protected void configure(WebConfig wc,
+                               ResourceConfig rc,
+                               WebApplication wa) {
+        super.configure(wc, rc, wa);
+        Set<Object> singletons = rc.getSingletons();
+        singletons.add(new WebAppApiProvider());
+      }
+    }
+
+    private int getConnectorPort(Server webServer, int index) {
+      Preconditions.checkArgument(index >= 0);
+      if (index >= webServer.getConnectors().length) {
+        throw new IllegalStateException("Illegal connector index requested");
+      }
+
+      Connector c = webServer.getConnectors()[index];
+      if (c.getLocalPort() == -1) {
+        // the connector is not yet bound to a port
+        throw new IllegalStateException("The connector is not bound to a port");
+      }
+
+      return c.getLocalPort();
+    }
+  }
+
+  public static Builder $for(String name, WebAppApi app, String wsPrefix) {
+    return new Builder(name, wsPrefix, app);
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  public void setSecuredPort(int securedPort) {
+    this.securedPort = securedPort;
+  }
+
+  public int getSecuredPort() {
+    return securedPort;
+  }
+
+  public void stop() {
+    //need to stop server and reset injector
+    try {
+      agentServer.stop();
+    } catch (Exception e) {
+      LOG.warn("Unable to stop agent server", e);
+    }
+  }
+
+}
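
[review note] The builder binds two SSL connectors (one-way, and two-way when
ssl.server.client.auth is set) to a single Jetty server and reads the actual
ports back after start. A stripped-down sketch of that pattern using plain
connectors on the same Jetty 6 (org.mortbay) API, with the SSL and keystore
wiring omitted:

    import org.mortbay.jetty.Connector;
    import org.mortbay.jetty.Server;
    import org.mortbay.jetty.nio.SelectChannelConnector;

    public class TwoConnectorDemo {
      public static void main(String[] args) throws Exception {
        Server server = new Server();
        SelectChannelConnector first = new SelectChannelConnector();
        SelectChannelConnector second = new SelectChannelConnector();
        first.setPort(0);   // 0 = let the OS choose a free port
        second.setPort(0);
        server.setConnectors(new Connector[]{first, second});
        server.start();
        // after start() each connector reports the port it actually bound,
        // which is what getConnectorPort() extracts above
        System.out.println(server.getConnectors()[0].getLocalPort());
        System.out.println(server.getConnectors()[1].getLocalPort());
        server.stop();
      }
    }
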
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebServices.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebServices.java
new file mode 100644
index 0000000..684ce6f
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentWebServices.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.agent;
+
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+
+import javax.ws.rs.Path;
+import javax.ws.rs.core.Context;
+
+/** The available agent REST services exposed by a slider AM. */
+@Path(RestPaths.SLIDER_AGENT_CONTEXT_ROOT)
+public class AgentWebServices {
+  /** AM/WebApp info object */
+  @Context
+  private WebAppApi slider;
+
+  public AgentWebServices() {
+  }
+
+  @Path(RestPaths.SLIDER_SUBPATH_AGENTS)
+  public AgentResource getAgentResource () {
+    return new AgentResource(slider);
+  }
+
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
index e6b2664..a439d9b 100644
--- a/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
+++ b/slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
@@ -38,8 +38,14 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.UriInfo;
 import java.io.IOException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Map;
+import java.util.Set;
 
 import static  org.apache.slider.server.appmaster.web.rest.RestPaths.*;
 
@@ -100,6 +106,14 @@
   }
 
   @GET
+  @Path("/classpath")
+  @Produces({MediaType.APPLICATION_JSON})
+  public Set<URL> getAMClassPath() {
+    URL[] urls = ((URLClassLoader) getClass().getClassLoader()).getURLs();
+    return new LinkedHashSet<>(Arrays.asList(urls));
+  }
+
+  @GET
   @Path("/"+ SET_NAME)
   @Produces({MediaType.APPLICATION_JSON})
   public PublishedConfigSet getPublishedConfiguration(
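
[review note] The new /classpath resource exposes the AM's effective classpath
as an ordered set of URLs. The same inspection can be run standalone; note the
cast assumes the classloader is a URLClassLoader, which holds on the Java 7
runtimes this code targets:

    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class ClasspathDemo {
      public static void main(String[] args) {
        ClassLoader loader = ClasspathDemo.class.getClassLoader();
        if (loader instanceof URLClassLoader) {
          URL[] urls = ((URLClassLoader) loader).getURLs();
          // LinkedHashSet keeps classpath order while dropping duplicates
          Set<URL> classpath = new LinkedHashSet<>(Arrays.asList(urls));
          for (URL entry : classpath) {
            System.out.println(entry);
          }
        }
      }
    }
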
diff --git a/slider-core/src/main/java/org/apache/slider/server/exec/ApplicationEventHandler.java b/slider-core/src/main/java/org/apache/slider/server/exec/ApplicationEventHandler.java
deleted file mode 100644
index 1fc0bf1..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/exec/ApplicationEventHandler.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.slider.server.exec;
-
-/**
- * Callback when a long-lived application exits
- */
-public interface ApplicationEventHandler {
-
-  void onApplicationStarted(RunLongLivedApp application);
-
-  void onApplicationExited(RunLongLivedApp application, int exitCode);
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/exec/RunLongLivedApp.java b/slider-core/src/main/java/org/apache/slider/server/exec/RunLongLivedApp.java
deleted file mode 100644
index 8316516..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/exec/RunLongLivedApp.java
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.slider.server.exec;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.exceptions.SliderInternalStateException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Execute an application.
- *
- * Hadoop's Shell class isn't used because it assumes it is executing
- * a short lived application: 
- */
-public class RunLongLivedApp implements Runnable {
-  public static final int STREAM_READER_SLEEP_TIME = 200;
-  public static final int RECENT_LINE_LOG_LIMIT = 64;
-  /**
-   * Class log
-   */
-  static final Logger LOG = LoggerFactory.getLogger(RunLongLivedApp.class);
-  /**
-   * Log supplied in the constructor for the spawned process
-   */
-  final Logger processLog;
-  private final ProcessBuilder builder;
-  private Process process;
-  private Exception exception;
-  private Integer exitCode = null;
-  volatile boolean done;
-  private Thread execThread;
-  private Thread logThread;
-  private ProcessStreamReader processStreamReader;
-  //list of recent lines, recorded for extraction into reports
-  private final List<String> recentLines = new LinkedList<>();
-  private final int recentLineLimit = RECENT_LINE_LOG_LIMIT;
-
-  private ApplicationEventHandler applicationEventHandler;
-
-  public RunLongLivedApp(Logger processLog, String... commands) {
-    this.processLog = processLog;
-    builder = new ProcessBuilder(commands);
-    initBuilder();
-  }
-
-  public RunLongLivedApp(Logger processLog, List<String> commands) {
-    this.processLog = processLog;
-    builder = new ProcessBuilder(commands);
-    initBuilder();
-  }
-
-  private void initBuilder() {
-    builder.redirectErrorStream(false);
-  }
-
-  public ProcessBuilder getBuilder() {
-    return builder;
-  }
-
-  /**
-   * Set an optional application exit callback
-   * @param applicationEventHandler callback to notify on application exit
-   */
-  public void setApplicationEventHandler(ApplicationEventHandler applicationEventHandler) {
-    this.applicationEventHandler = applicationEventHandler;
-  }
-
-  /**
-   * Add an entry to the environment
-   * @param key key -must not be null
-   * @param val value 
-   */
-  public void putEnv(String key, String val) {
-    if (val == null) {
-      throw new RuntimeException("Null value for key " + key);
-    }
-    builder.environment().put(key, val);
-  }
-
-  /**
-   * Bulk set the environment from a map. This does
-   * not replace the existing environment, just extend it/overwrite single
-   * entries.
-   * @param map map to add
-   */
-  public void putEnvMap(Map<String, String> map) {
-    for (Map.Entry<String, String> entry : map.entrySet()) {
-      String val = entry.getValue();
-      String key = entry.getKey();
-      putEnv(key, val);
-    }
-  }
-
-  /**
-   * Get the process environment
-   * @param key
-   * @return
-   */
-  public String getEnv(String key) {
-    return builder.environment().get(key);
-  }
-
-  /**
-   * Get the process reference
-   * @return the process -null if the process is  not started
-   */
-  public Process getProcess() {
-    return process;
-  }
-
-  /**
-   * Get any exception raised by the process
-   * @return an exception or null
-   */
-  public Exception getException() {
-    return exception;
-  }
-
-  public List<String> getCommands() {
-    return builder.command();
-  }
-
-  public String getCommand() {
-    return getCommands().get(0);
-  }
-
-  /**
-   * probe to see if the process is running
-   * @return true iff the process has been started and is not yet finished
-   */
-  public boolean isRunning() {
-    return process != null && !done;
-  }
-
-  /**
-   * Get the exit code: null until the process has finished
-   * @return the exit code or null
-   */
-  public Integer getExitCode() {
-    return exitCode;
-  }
-
-  /**
-   * Stop the process if it is running.
-   * This will trigger an application completion event with the given exit code
-   */
-  public void stop() {
-    if (!isRunning()) {
-      return;
-    }
-    process.destroy();
-  }
-
-  /**
-   * Get a text description of the builder suitable for log output
-   * @return a multiline string 
-   */
-  protected String describeBuilder() {
-    StringBuilder buffer = new StringBuilder();
-    for (String arg : builder.command()) {
-      buffer.append('"').append(arg).append("\" ");
-    }
-    return buffer.toString();
-  }
-
-  private void dumpEnv(StringBuilder buffer) {
-    buffer.append("\nEnvironment\n-----------");
-    Map<String, String> env = builder.environment();
-    Set<String> keys = env.keySet();
-    List<String> sortedKeys = new ArrayList<String>(keys);
-    Collections.sort(sortedKeys);
-    for (String key : sortedKeys) {
-      buffer.append(key).append("=").append(env.get(key)).append('\n');
-    }
-  }
-
-  /**
-   * Exec the process
-   * @return the process
-   * @throws IOException
-   */
-  private Process spawnChildProcess() throws IOException, SliderException {
-    if (process != null) {
-      throw new SliderInternalStateException("Process already started");
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Spawning process:\n " + describeBuilder());
-    }
-    process = builder.start();
-    return process;
-  }
-
-  /**
-   * Entry point for waiting for the program to finish
-   */
-  @Override // Runnable
-  public void run() {
-    LOG.debug("Application callback thread running");
-    //notify the callback that the process has started
-    if (applicationEventHandler != null) {
-      applicationEventHandler.onApplicationStarted(this);
-    }
-    try {
-      exitCode = process.waitFor();
-    } catch (InterruptedException e) {
-      LOG.debug("Process wait interrupted -exiting thread");
-    } finally {
-      //here the process has finished
-      LOG.info("process has finished");
-      //tell the logger it has to finish too
-      done = true;
-
-      //now call the callback if it is set
-      if (applicationEventHandler != null) {
-        applicationEventHandler.onApplicationExited(this, exitCode);
-      }
-      try {
-        logThread.join();
-      } catch (InterruptedException ignored) {
-        //ignored
-      }
-    }
-  }
-
-  /**
-   * Create a thread to wait for this command to complete.
-   * THE THREAD IS NOT STARTED.
-   * @return the thread
-   * @throws IOException Execution problems
-   */
-  private Thread spawnIntoThread() throws IOException, SliderException {
-    spawnChildProcess();
-    return new Thread(this, getCommand());
-  }
-
-  /**
-   * Spawn the application
-   * @throws IOException IO problems
-   * @throws SliderException internal state of this class is wrong
-   */
-  public void spawnApplication() throws IOException, SliderException {
-    execThread = spawnIntoThread();
-    execThread.start();
-    processStreamReader =
-      new ProcessStreamReader(processLog, STREAM_READER_SLEEP_TIME);
-    logThread = new Thread(processStreamReader, "IO");
-    logThread.start();
-  }
-
-  /**
-   * Get the lines of recent output
-   * @return the last few lines of output; an empty list if there are none
-   * or the process is not actually running
-   */
-  public synchronized List<String> getRecentOutput() {
-    return new ArrayList<>(recentLines);
-  }
-
-
-  /**
-   * add the recent line to the list of recent lines; deleting
-   * an earlier on if the limit is reached.
-   *
-   * Implementation note: yes, a circular array would be more
-   * efficient, especially with some power of two as the modulo,
-   * but is it worth the complexity and risk of errors for
-   * something that is only called once per line of IO?
-   * @param line line to record
-   * @param isErrorStream is the line from the error stream
-   */
-  private synchronized void recordRecentLine(String line,
-                                             boolean isErrorStream) {
-    if (line == null) {
-      return;
-    }
-    String entry = (isErrorStream ? "[ERR] " : "[OUT] ") + line;
-    recentLines.add(entry);
-    if (recentLines.size() > recentLineLimit) {
-      recentLines.remove(0);
-    }
-  }
-
-  /**
-   * Class to read data from the two process streams, and, when run in a thread
-   * to keep running until the <code>done</code> flag is set. 
-   * Lines are fetched from stdout and stderr and logged at info and error
-   * respectively.
-   */
-
-  private class ProcessStreamReader implements Runnable {
-    private final Logger streamLog;
-    private final int sleepTime;
-
-    private ProcessStreamReader(Logger streamLog, int sleepTime) {
-      this.streamLog = streamLog;
-      this.sleepTime = sleepTime;
-    }
-
-    private int readCharNonBlocking(BufferedReader reader) throws IOException {
-      if (reader.ready()) {
-        return reader.read();
-      } else {
-        return -1;
-      }
-    }
-
-    /**
-     * Read in a line, or, if the limit has been reached, the buffer
-     * so far
-     * @param reader source of data
-     * @param line line to build
-     * @param limit limit of line length
-     * @return true if the line can be printed
-     * @throws IOException IO trouble
-     */
-    private boolean readAnyLine(BufferedReader reader,
-                                StringBuilder line,
-                                int limit)
-      throws IOException {
-      int next;
-      while ((-1 != (next = readCharNonBlocking(reader)))) {
-        if (next != '\n') {
-          line.append((char) next);
-          limit--;
-          if (line.length() > limit) {
-            //enough has been read in to print it any
-            return true;
-          }
-        } else {
-          //line end return flag to say so
-          return true;
-        }
-      }
-      //here the end of the stream is hit, or the limit
-      return false;
-    }
-
-
-    @Override //Runnable
-    @SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
-    public void run() {
-      BufferedReader errReader = null;
-      BufferedReader outReader = null;
-      StringBuilder outLine = new StringBuilder(256);
-      StringBuilder errorLine = new StringBuilder(256);
-      try {
-        errReader = new BufferedReader(new InputStreamReader(process
-                                                               .getErrorStream()));
-        outReader = new BufferedReader(new InputStreamReader(process
-                                                               .getInputStream()));
-        while (!done) {
-          boolean processed = false;
-          if (readAnyLine(errReader, errorLine, 256)) {
-            String line = errorLine.toString();
-            recordRecentLine(line, true);
-            streamLog.warn(line);
-            errorLine.setLength(0);
-            processed = true;
-          }
-          if (readAnyLine(outReader, outLine, 256)) {
-            String line = outLine.toString();
-            recordRecentLine(line, false);
-            streamLog.info(line);
-            outLine.setLength(0);
-            processed |= true;
-          }
-          if (!processed) {
-            //nothing processed: wait a bit for data.
-            try {
-              Thread.sleep(sleepTime);
-            } catch (InterruptedException e) {
-              //ignore this, rely on the done flag
-              LOG.debug("Ignoring ", e);
-            }
-          }
-        }
-        //get here, done time
-
-        //print the current error line then stream through the rest
-        streamLog.error(errorLine.toString());
-        String line = errReader.readLine();
-        while (line != null) {
-          streamLog.error(line);
-          if (Thread.interrupted()) {
-            break;
-          }
-          line = errReader.readLine();
-          recordRecentLine(line, true);
-        }
-        //now do the info line
-        streamLog.info(outLine.toString());
-        line = outReader.readLine();
-        while (line != null) {
-          streamLog.info(line);
-          if (Thread.interrupted()) {
-            break;
-          }
-          line = outReader.readLine();
-          recordRecentLine(line, false);
-        }
-
-      } catch (Exception ignored) {
-        LOG.warn("encountered ", ignored);
-        //process connection has been torn down
-      } finally {
-        IOUtils.closeStream(errReader);
-        IOUtils.closeStream(outReader);
-      }
-    }
-  }
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorService.java b/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorService.java
index 657fa57..645bc8f 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorService.java
@@ -42,7 +42,7 @@
                         CuratorFramework curator,
                         String basePath) {
     super(name);
-    this.curator = Preconditions.checkNotNull(curator, "null client");
+    this.curator = Preconditions.checkNotNull(curator, "null curator");
     this.basePath = basePath;
   }
 
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorUriSpec.java b/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorUriSpec.java
index b2a877a..adda359 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorUriSpec.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/curator/CuratorUriSpec.java
@@ -28,7 +28,7 @@
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class CuratorUriSpec extends UriSpec{
 
-  private final List<Part>        parts = Lists.newArrayList();
+  private final List<Part> parts = Lists.newArrayList();
 
   public CuratorUriSpec() {
     super();
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java b/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
new file mode 100644
index 0000000..3771208
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.services.security;
+
+import com.google.inject.Singleton;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.core.conf.MapOperations;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.text.MessageFormat;
+
+@Singleton
+public class CertificateManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CertificateManager.class);
+
+  private static final String GEN_SRVR_KEY = "openssl genrsa -des3 " +
+      "-passout pass:{0} -out {1}/{2} 4096 ";
+  private static final String GEN_SRVR_REQ = "openssl req -passin pass:{0} " +
+      "-new -key {1}/{2} -out {1}/{5} -batch";
+  private static final String SIGN_SRVR_CRT = "openssl ca -create_serial " +
+    "-out {1}/{3} -days 365 -keyfile {1}/{2} -key {0} -selfsign " +
+    "-extensions jdk7_ca -config {1}/ca.config -batch " +
+    "-infiles {1}/{5}";
+  private static final String EXPRT_KSTR = "openssl pkcs12 -export" +
+      " -in {1}/{3} -inkey {1}/{2} -certfile {1}/{3} -out {1}/{4} " +
+      "-password pass:{0} -passin pass:{0} \n";
+  private static final String REVOKE_AGENT_CRT = "openssl ca " +
+      "-config {0}/ca.config -keyfile {0}/{4} -revoke {0}/{2} -batch " +
+      "-passin pass:{3} -cert {0}/{5}";
+  private static final String SIGN_AGENT_CRT = "openssl ca -config " +
+      "{0}/ca.config -in {0}/{1} -out {0}/{2} -batch -passin pass:{3} " +
+      "-keyfile {0}/{4} -cert {0}/{5}"; /**
+       * Verify that root certificate exists, generate it otherwise.
+       */
+  public void initRootCert(MapOperations compOperations) {
+    SecurityUtils.initializeSecurityParameters(compOperations);
+
+    LOG.info("Initialization of root certificate");
+    boolean certExists = isCertExists();
+    LOG.info("Certificate exists:" + certExists);
+
+    if (!certExists) {
+      generateServerCertificate();
+    }
+
+  }
+
+  /**
+   * Checks root certificate state.
+   * @return "true" if certificate exists
+   */
+  private boolean isCertExists() {
+
+    String srvrKstrDir = SecurityUtils.getSecurityDir();
+    String srvrCrtName = SliderKeys.CRT_FILE_NAME;
+    File certFile = new File(srvrKstrDir + File.separator + srvrCrtName);
+    LOG.debug("srvrKstrDir = " + srvrKstrDir);
+    LOG.debug("srvrCrtName = " + srvrCrtName);
+    LOG.debug("certFile = " + certFile.getAbsolutePath());
+
+    return certFile.exists();
+  }
+
+  /**
+   * Runs os command
+   *
+   * @return command execution exit code
+   */
+  private int runCommand(String command) {
+    String line = null;
+    Process process = null;
+    BufferedReader br = null;
+    try {
+      process = Runtime.getRuntime().exec(command);
+      br = new BufferedReader(new InputStreamReader(
+          process.getInputStream(), Charset.forName("UTF8")));
+
+      while ((line = br.readLine()) != null) {
+        LOG.info(line);
+      }
+
+      try {
+        process.waitFor();
+        SecurityUtils.logOpenSslExitCode(command, process.exitValue());
+        return process.exitValue(); //command is executed
+      } catch (InterruptedException e) {
+        LOG.error("Interrupted while waiting for the openssl command", e);
+      }
+    } catch (IOException e) {
+      LOG.error("Failed to run command: " + command, e);
+    } finally {
+      if (br != null) {
+        try {
+          br.close();
+        } catch (IOException ioe) {
+          LOG.warn("Failed to close the command output reader", ioe);
+        }
+      }
+    }
+
+    return -1; // some exception occurred
+
+  }
+
+  private void generateServerCertificate() {
+    LOG.info("Generation of server certificate");
+
+    String srvrKstrDir = SecurityUtils.getSecurityDir();
+    String srvrCrtName = SliderKeys.CRT_FILE_NAME;
+    String srvrCsrName = SliderKeys.CSR_FILE_NAME;
+    String srvrKeyName = SliderKeys.KEY_FILE_NAME;
+    String kstrName = SliderKeys.KEYSTORE_FILE_NAME;
+    String srvrCrtPass = SecurityUtils.getKeystorePass();
+
+    Object[] scriptArgs = {srvrCrtPass, srvrKstrDir, srvrKeyName,
+        srvrCrtName, kstrName, srvrCsrName};
+
+    String command = MessageFormat.format(GEN_SRVR_KEY, scriptArgs);
+    runCommand(command);
+
+    command = MessageFormat.format(GEN_SRVR_REQ, scriptArgs);
+    runCommand(command);
+
+    command = MessageFormat.format(SIGN_SRVR_CRT, scriptArgs);
+    runCommand(command);
+
+    command = MessageFormat.format(EXPRT_KSTR, scriptArgs);
+    runCommand(command);
+
+  }
+
+  /**
+   * Returns server certificate content
+   * @return string with server certificate content
+   */
+  public String getServerCert() {
+    File certFile = new File(SecurityUtils.getSecurityDir() +
+        File.separator + SliderKeys.CRT_FILE_NAME);
+    String srvrCrtContent = null;
+    try {
+      srvrCrtContent = FileUtils.readFileToString(certFile);
+    } catch (IOException e) {
+      LOG.error("Error reading server certificate", e);
+    }
+    return srvrCrtContent;
+  }
+
+  /**
+   * Signs an agent certificate and adds it to the server keystore.
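+   * <p>
+   * Illustrative call (names here are hypothetical):
+   * <pre>
+   *   SignCertResponse resp = certificateManager.signAgentCrt(
+   *       "agent-host-1", csrPemText, passphrase);
+   *   if (SignCertResponse.OK_STATUS.equals(resp.getResult())) {
+   *     String signedPem = resp.getSignedCa();
+   *   }
+   * </pre>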
+   * @return string with agent signed certificate content
+   */
+  public synchronized SignCertResponse signAgentCrt(String agentHostname,
+                                                    String agentCrtReqContent,
+                                                    String passphraseAgent) {
+    SignCertResponse response = new SignCertResponse();
+    LOG.info("Signing of agent certificate");
+    LOG.info("Verifying passphrase");
+
+    String passphraseSrvr = SliderKeys.PASSPHRASE;
+
+    if (!passphraseSrvr.equals(passphraseAgent.trim())) {
+      LOG.warn("Incorrect passphrase from the agent");
+      response.setResult(SignCertResponse.ERROR_STATUS);
+      response.setMessage("Incorrect passphrase from the agent");
+      return response;
+    }
+
+    String srvrKstrDir = SecurityUtils.getSecurityDir();
+    String srvrCrtPass = SecurityUtils.getKeystorePass();
+    String srvrCrtName = SliderKeys.CRT_FILE_NAME;
+    String srvrKeyName = SliderKeys.KEY_FILE_NAME;
+    String agentCrtReqName = agentHostname + ".csr";
+    String agentCrtName = agentHostname + ".crt";
+
+    Object[] scriptArgs = {srvrKstrDir, agentCrtReqName, agentCrtName,
+        srvrCrtPass, srvrKeyName, srvrCrtName};
+
+    //Revoke previous agent certificate if exists
+    File agentCrtFile = new File(srvrKstrDir + File.separator + agentCrtName);
+
+    if (agentCrtFile.exists()) {
+      LOG.info("Revoking of " + agentHostname + " certificate.");
+      String command = MessageFormat.format(REVOKE_AGENT_CRT, scriptArgs);
+      int commandExitCode = runCommand(command);
+      if (commandExitCode != 0) {
+        response.setResult(SignCertResponse.ERROR_STATUS);
+        response.setMessage(
+            SecurityUtils.getOpenSslCommandResult(command, commandExitCode));
+        return response;
+      }
+    }
+
+    File agentCrtReqFile = new File(srvrKstrDir + File.separator +
+        agentCrtReqName);
+    try {
+      FileUtils.writeStringToFile(agentCrtReqFile, agentCrtReqContent);
+    } catch (IOException e) {
+      LOG.error("Unable to write the agent CSR to " + agentCrtReqFile, e);
+      response.setResult(SignCertResponse.ERROR_STATUS);
+      response.setMessage("Unable to persist the agent CSR");
+      return response;
+    }
+
+    String command = MessageFormat.format(SIGN_AGENT_CRT, scriptArgs);
+
+    LOG.debug(SecurityUtils.hideOpenSslPassword(command));
+
+    int commandExitCode = runCommand(command); // ssl command execution
+    if (commandExitCode != 0) {
+      response.setResult(SignCertResponse.ERROR_STATUS);
+      response.setMessage(
+          SecurityUtils.getOpenSslCommandResult(command, commandExitCode));
+      //LOG.warn(ShellCommandUtil.getOpenSslCommandResult(command, commandExitCode));
+      return response;
+    }
+
+    String agentCrtContent = "";
+    try {
+      agentCrtContent = FileUtils.readFileToString(agentCrtFile);
+    } catch (IOException e) {
+      LOG.error("Error reading signed agent certificate", e);
+      response.setResult(SignCertResponse.ERROR_STATUS);
+      response.setMessage("Error reading signed agent certificate");
+      return response;
+    }
+    response.setResult(SignCertResponse.OK_STATUS);
+    response.setSignedCa(agentCrtContent);
+    //LOG.info(ShellCommandUtil.getOpenSslCommandResult(command, commandExitCode));
+    return response;
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/security/SecurityUtils.java b/slider-core/src/main/java/org/apache/slider/server/services/security/SecurityUtils.java
new file mode 100644
index 0000000..5238d90
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/security/SecurityUtils.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.services.security;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.core.conf.MapOperations;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.Set;
+
+/**
+ * Security utilities: manage the security directory, the openssl CA
+ * configuration within it, and the generated keystore password.
+ */
+public class SecurityUtils {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SecurityUtils.class);
+
+  private static final String CA_CONFIG_CONTENTS = "[ ca ]\n"
+                                            + "default_ca             = CA_CLIENT\n"
+                                            + "[ CA_CLIENT ]\n"
+                                            + "dir\t\t       = ${SEC_DIR}/db\n"
+                                            + "certs                  = $dir/certs\n"
+                                            + "new_certs_dir          = $dir/newcerts\n"
+                                            + "\n"
+                                            + "database               = $dir/index.txt\n"
+                                            + "serial                 = $dir/serial\n"
+                                            + "default_days           = 365    \n"
+                                            + "\n"
+                                            + "default_crl_days       = 7  \n"
+                                            + "default_md             = md5 \n"
+                                            + "\n"
+                                            + "policy                 = policy_anything \n"
+                                            + "\n"
+                                            + "[ policy_anything ]\n"
+                                            + "countryName            = optional\n"
+                                            + "stateOrProvinceName    = optional \n"
+                                            + "localityName           = optional\n"
+                                            + "organizationName       = optional\n"
+                                            + "organizationalUnitName = optional\n"
+                                            + "commonName             = optional   \n"
+                                            + "emailAddress           = optional       \n"
+                                            + "\n"
+                                            + "[ jdk7_ca ]\n"
+                                            + "subjectKeyIdentifier = hash\n"
+                                            + "authorityKeyIdentifier = keyid:always,issuer:always\n"
+                                            + "basicConstraints = CA:true";
+
+  private static final String PASS_TOKEN = "pass:";
+  private static String keystorePass;
+  private static String securityDir;
+
+  public static void logOpenSslExitCode(String command, int exitCode) {
+    if (exitCode == 0) {
+      LOG.info(getOpenSslCommandResult(command, exitCode));
+    } else {
+      LOG.warn(getOpenSslCommandResult(command, exitCode));
+    }
+
+  }
+
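+  /**
+   * Mask the password in an openssl command line so it can be logged
+   * safely: the text between the first "pass:" token and the following
+   * space is replaced with "****" wherever it occurs in the command.
+   * @param command command line containing a pass: argument
+   * @return the command with the password masked
+   */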
+  public static String hideOpenSslPassword(String command) {
+    int start = command.indexOf(PASS_TOKEN) + PASS_TOKEN.length();
+    CharSequence cs = command.subSequence(start, command.indexOf(" ", start));
+    return command.replace(cs, "****");
+  }
+
+  public static String getOpenSslCommandResult(String command, int exitCode) {
+    return "Command " + hideOpenSslPassword(command)
+        + " finished with exit code: " + exitCode
+        + " - " + getOpenSslExitCodeDescription(exitCode);
+  }
+
+  private static String getOpenSslExitCodeDescription(int exitCode) {
+    switch (exitCode) {
+      case 0: {
+        return "the operation was completed successfully.";
+      }
+      case 1: {
+        return "an error occurred parsing the command options.";
+      }
+      case 2: {
+        return "one of the input files could not be read.";
+      }
+      case 3: {
+        return "an error occurred creating the PKCS#7 file or when reading the MIME message.";
+      }
+      case 4: {
+        return "an error occurred decrypting or verifying the message.";
+      }
+      case 5: {
+        return "the message was verified correctly but an error occurred writing out the signers certificates.";
+      }
+      default:
+        return "unsupported code";
+    }
+  }
+
+  public static void writeCaConfigFile(String path) throws IOException {
+    String contents = CA_CONFIG_CONTENTS.replace("${SEC_DIR}", path);
+    FileUtils.writeStringToFile(new File(path, "ca.config"), contents);
+  }
+
+  public static String getKeystorePass() {
+    return keystorePass;
+  }
+
+  public static String getSecurityDir() {
+    return securityDir;
+  }
+
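+  /**
+   * Initialize the security parameters: ensure that the security
+   * directory (with its db/ and db/newcerts/ subdirectories, index file
+   * and CA configuration) exists with owner-only permissions, then load
+   * or generate the keystore password.
+   * @param configMap component configuration options
+   */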
+  public static void initializeSecurityParameters(MapOperations configMap) {
+    String keyStoreLocation = configMap.getOption(
+        SliderKeys.KEYSTORE_LOCATION, getDefaultKeystoreLocation());
+    File secDirFile = new File(keyStoreLocation).getParentFile();
+    if (!secDirFile.exists()) {
+      // create entire required directory structure
+      File dbDir = new File(secDirFile, "db");
+      File newCertsDir = new File(dbDir, "newcerts");
+      newCertsDir.mkdirs();
+      try {
+        Set<PosixFilePermission> perms =
+            PosixFilePermissions.fromString("rwx------");
+        Files.setPosixFilePermissions(Paths.get(secDirFile.toURI()), perms);
+        Files.setPosixFilePermissions(Paths.get(dbDir.toURI()), perms);
+        Files.setPosixFilePermissions(Paths.get(newCertsDir.toURI()), perms);
+        File indexFile = new File(dbDir, "index.txt");
+        indexFile.createNewFile();
+
+        SecurityUtils.writeCaConfigFile(secDirFile.getAbsolutePath());
+
+      } catch (IOException e) {
+        LOG.error("Unable to create SSL configuration directories/files", e);
+      }
+    }
+    keystorePass = getKeystorePassword(secDirFile);
+    securityDir = secDirFile.getAbsolutePath();
+  }
+
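+  /**
+   * Get the keystore password: read it from the password file if one
+   * exists, otherwise generate a random alphanumeric password and
+   * persist it.
+   * @param secDirFile the security directory
+   * @return the password, or null if an existing file could not be read
+   */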
+  private static String getKeystorePassword(File secDirFile) {
+    File passFile = new File(secDirFile, SliderKeys.CRT_PASS_FILE_NAME);
+    String password = null;
+
+    if (!passFile.exists()) {
+      LOG.info("Generation of file with password");
+      try {
+        password = RandomStringUtils.randomAlphanumeric(
+            Integer.valueOf(SliderKeys.PASS_LEN));
+        FileUtils.writeStringToFile(passFile, password);
+        passFile.setWritable(true);
+        passFile.setReadable(true);
+      } catch (IOException e) {
+        throw new RuntimeException(
+            "Error creating certificate password file", e);
+      }
+    } else {
+      LOG.info("Reading password from existing file");
+      try {
+        password = FileUtils.readFileToString(passFile);
+        password = password.replaceAll("\\p{Cntrl}", "");
+      } catch (IOException e) {
+        LOG.error("Error reading password file", e);
+      }
+    }
+
+    return password;
+  }
+
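+  /**
+   * Build the default keystore location under a freshly created
+   * temporary directory.
+   * @return the keystore path, or null if the temporary directory
+   * could not be created
+   */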
+  private static String getDefaultKeystoreLocation() {
+    Path workDir = null;
+    try {
+      workDir = Files.createTempDirectory("sec");
+    } catch (IOException e) {
+      LOG.warn("Unable to create security directory");
+      return null;
+    }
+
+    return new StringBuilder().append(workDir.toAbsolutePath())
+        .append(File.separator)
+        .append(SliderKeys.SECURITY_DIR)
+        .append(File.separator)
+        .append(SliderKeys.KEYSTORE_FILE_NAME).toString();
+  }
+
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/security/SignCertResponse.java b/slider-core/src/main/java/org/apache/slider/server/services/security/SignCertResponse.java
new file mode 100644
index 0000000..8437d88
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/security/SignCertResponse.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.security;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+/**
+ *
+ * Sign certificate response data model.
+ *
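+ * With JAXB this marshals to XML along the lines of (illustrative,
+ * the field values are placeholders):
+ * <pre>
+ *   &lt;signCertResponse&gt;
+ *     &lt;result&gt;OK&lt;/result&gt;
+ *     &lt;signedCa&gt;...PEM text...&lt;/signedCa&gt;
+ *   &lt;/signCertResponse&gt;
+ * </pre>
+ *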
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+@XmlType(name = "", propOrder = {})
+public class SignCertResponse {
+
+  public static final String ERROR_STATUS = "ERROR";
+  public static final String OK_STATUS = "OK";
+
+  @XmlElement
+  private String result;
+  @XmlElement
+  private String signedCa;
+  @XmlElement
+  private String message;
+
+  public String getResult() {
+    return result;
+  }
+  public void setResult(String result) {
+    this.result = result;
+  }
+  public String getSignedCa() {
+    return signedCa;
+  }
+  public void setSignedCa(String signedCa) {
+    this.signedCa = signedCa;
+  }
+
+  public String getMessage() {
+    return message;
+  }
+  public void setMessage(String message) {
+    this.message = message;
+  }
+}
+
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/security/SignMessage.java b/slider-core/src/main/java/org/apache/slider/server/services/security/SignMessage.java
new file mode 100644
index 0000000..4bccb87
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/security/SignMessage.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.security;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+/**
+ *
+ * Sign certificate request data model.
+ *
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+@XmlType(name = "", propOrder = {})
+public class SignMessage {
+
+  @XmlElement
+  private String csr;
+  @XmlElement
+  private String passphrase;
+
+  public String getCsr() {
+    return csr;
+  }
+  public void setCsr(String csr) {
+    this.csr = csr;
+  }
+  public String getPassphrase() {
+    return passphrase;
+  }
+  public void setPassphrase(String passphrase) {
+    this.passphrase = passphrase;
+  }
+}
+
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/AbstractSliderLaunchedService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/AbstractSliderLaunchedService.java
index 5d37c32..6c0edb8 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/AbstractSliderLaunchedService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/utility/AbstractSliderLaunchedService.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.zk.ZookeeperUtils;
 import org.apache.slider.server.services.curator.CuratorHelper;
@@ -36,7 +37,7 @@
  * Base service for the standard slider client/server services
  */
 public abstract class AbstractSliderLaunchedService extends
-                                                    CompoundLaunchedService {
+    LaunchedWorkflowCompositeService {
   private static final Logger log =
     LoggerFactory.getLogger(AbstractSliderLaunchedService.class);
 
@@ -65,7 +66,8 @@
    * @throws BadConfigException if it is not there or invalid
    */
   public String lookupZKQuorum() throws BadConfigException {
-    String registryQuorum = getConfig().get(SliderXmlConfKeys.REGISTRY_ZK_QUORUM);
+    String registryQuorum = getConfig().get(
+        SliderXmlConfKeys.REGISTRY_ZK_QUORUM);
     if (SliderUtils.isUnset(registryQuorum)) {
       throw new BadConfigException(
           "No Zookeeper quorum provided in the"
@@ -94,5 +96,14 @@
     return registryBinderService;
   }
 
+  protected void requireArgumentSet(String argname, String argfield)
+      throws BadCommandArgumentsException {
+    if (isUnset(argfield)) {
+      throw new BadCommandArgumentsException("Required argument "
+                                             + argname
+                                             + " missing");
+    }
+  }
+
 
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/ClosingService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/ClosingService.java
deleted file mode 100644
index 8864bf9..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/ClosingService.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.service.AbstractService;
-
-import java.io.Closeable;
-
-/**
- * Service that closes the closeable supplied during shutdown, if not null.
- */
-public class ClosingService<C extends Closeable> extends AbstractService {
-
-  private volatile C closeable;
-
-
-  public ClosingService(String name,
-                        C closeable) {
-    super(name);
-    this.closeable = closeable;
-  }
-
-
-  public Closeable getCloseable() {
-    return closeable;
-  }
-
-  public void setCloseable(C closeable) {
-    this.closeable = closeable;
-  }
-
-  /**
-   * Stop routine will close the closeable -if not null - and set the
-   * reference to null afterwards
-   * @throws Exception
-   */
-  @Override
-  protected void serviceStop() throws Exception {
-    IOUtils.closeStream(closeable);
-    closeable = null;
-  }
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundService.java
deleted file mode 100644
index 4e97842..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundService.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility;
-
-import org.apache.hadoop.service.CompositeService;
-import org.apache.hadoop.service.Service;
-import org.apache.hadoop.service.ServiceStateChangeListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * An extended composite service which not only makes the 
- * addService method public, it auto-registers
- * itself as a listener for state change events.
- * 
- * When all child services has stopped, this service stops itself.
- */
-public class CompoundService extends CompositeService implements Parent,
-                                                                 ServiceStateChangeListener {
-
-  private static final Logger log =
-    LoggerFactory.getLogger(CompoundService.class);
-
-  public CompoundService(String name) {
-    super(name);
-  }
-
-
-  public CompoundService() {
-    super("CompoundService");
-  }
-
-  /**
-   * Varargs constructor
-   * @param children children
-   */
-  public CompoundService(Service ... children) {
-    this();
-    for (Service child : children) {
-      addService(child);
-    }
-  }
-
-  /**
-   * Add a service, and register it
-   * @param service the {@link Service} to be added.
-   * Important: do not add a service to a parent during your own serviceInit/start,
-   * in Hadoop 2.2; you will trigger a ConcurrentModificationException.
-   */
-  @Override
-  public void addService(Service service) {
-    service.registerServiceListener(this);
-    super.addService(service);
-  }
-
-  /**
-   * When this service is started, any service stopping with a failure
-   * exception is converted immediately into a failure of this service, 
-   * storing the failure and stopping ourselves.
-   * @param child the service that has changed.
-   */
-  @Override
-  public void stateChanged(Service child) {
-    //if that child stopped while we are running:
-    if (isInState(STATE.STARTED) && child.isInState(STATE.STOPPED)) {
-      // a child service has stopped
-      //did the child fail? if so: propagate
-      Throwable failureCause = child.getFailureCause();
-      if (failureCause != null) {
-        log.info("Child service " + child + " failed", failureCause);
-        //failure. Convert to an exception
-        Exception e = SliderServiceUtils.convertToException(failureCause);
-        //flip ourselves into the failed state
-        noteFailure(e);
-        stop();
-      } else {
-        log.info("Child service completed {}", child);
-        if (areAllChildrenStopped()) {
-          log.info("All children are halted: stopping");
-          stop();
-        }
-      }
-    }
-  }
-
-  private boolean areAllChildrenStopped() {
-    List<Service> children = getServices();
-    boolean stopped = true;
-    for (Service child : children) {
-      if (!child.isInState(STATE.STOPPED)) {
-        stopped = false;
-        break;
-      }
-    }
-    return stopped;
-  }
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventNotifyingService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/EventNotifyingService.java
deleted file mode 100644
index e8db69e..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventNotifyingService.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility;
-
-import org.apache.hadoop.service.AbstractService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A service that calls the supplied callback when it is started -after the 
- * given delay, then stops itself.
- * Because it calls in on a different thread, it can be used for callbacks
- * that don't 
- */
-public class EventNotifyingService extends AbstractService implements Runnable {
-  protected static final Logger log =
-    LoggerFactory.getLogger(EventNotifyingService.class);
-  private final EventCallback callback;
-  private final int delay;
-
-  public EventNotifyingService(EventCallback callback, int delay) {
-    super("EventNotifyingService");
-    assert callback != null;
-    this.callback = callback;
-    this.delay = delay;
-  }
-
-  @Override
-  protected void serviceStart() throws Exception {
-    log.debug("Notifying {} after a delay of {} millis", callback, delay);
-    new Thread(this, "event").start();
-  }
-
-  @Override
-  public void run() {
-    if (delay > 0) {
-      try {
-        Thread.sleep(delay);
-      } catch (InterruptedException ignored) {
-
-      }
-    }
-    log.debug("Notifying {}", callback);
-    callback.eventCallbackEvent();
-    stop();
-  }
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/ForkedProcessService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/ForkedProcessService.java
deleted file mode 100644
index e6610bb..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/ForkedProcessService.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.service.ServiceStateException;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.main.ExitCodeProvider;
-import org.apache.slider.core.main.ServiceLaunchException;
-import org.apache.slider.server.exec.ApplicationEventHandler;
-import org.apache.slider.server.exec.RunLongLivedApp;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Service wrapper for an external program that is launched and can/will terminate.
- * This service is notified when the subprocess terminates, and stops itself 
- * and converts a non-zero exit code into a failure exception
- */
-public class ForkedProcessService extends AbstractService implements
-                                                          ApplicationEventHandler,
-                                                          ExitCodeProvider,
-                                                          Runnable {
-
-  /**
-   * Log for the forked master process
-   */
-  protected static final Logger log =
-    LoggerFactory.getLogger(ForkedProcessService.class);
-
-  private final String name;
-  private final AtomicBoolean processTerminated = new AtomicBoolean(false);
-  ;
-  private boolean processStarted = false;
-  private RunLongLivedApp process;
-  private Map<String, String> environment;
-  private List<String> commands;
-  private String commandLine;
-  private int executionTimeout = -1;
-  private int timeoutCode = 1;
-
-  /**
-   * Exit code set when the spawned process exits
-   */
-  private AtomicInteger exitCode = new AtomicInteger(0);
-  private Thread timeoutThread;
-
-  public ForkedProcessService(String name) {
-    super(name);
-    this.name = name;
-  }
-
-  @Override //AbstractService
-  protected void serviceInit(Configuration conf) throws Exception {
-    super.serviceInit(conf);
-  }
-
-  @Override //AbstractService
-  protected void serviceStart() throws Exception {
-    if (process == null) {
-      throw new ServiceStateException("Subprocess not yet configured");
-    }
-    //now spawn the process -expect updates via callbacks
-    process.spawnApplication();
-  }
-
-  @Override //AbstractService
-  protected void serviceStop() throws Exception {
-    completed(0);
-    if (process != null) {
-      process.stop();
-    }
-  }
-
-  /**
-   * Set the timeout by which time a process must have finished -or -1 for forever
-   * @param timeout timeout in milliseconds
-   */
-  public void setTimeout(int timeout, int code) {
-    this.executionTimeout = timeout;
-    this.timeoutCode = code;
-  }
-
-  /**
-   * Build the process to execute when the service is started
-   * @param commands list of commands is inserted on the front
-   * @param env environment variables above those generated by
-   * @throws IOException IO problems
-   * @throws SliderException anything internal
-   */
-  public void build(Map<String, String> environment,
-                    List<String> commands) throws
-                                           IOException,
-      SliderException {
-    assert process == null;
-    this.commands = commands;
-    this.commandLine = SliderUtils.join(commands, " ", false);
-    this.environment = environment;
-    process = new RunLongLivedApp(log, commands);
-    process.setApplicationEventHandler(this);
-    //set the env variable mapping
-    process.putEnvMap(environment);
-  }
-
-  @Override // ApplicationEventHandler
-  public synchronized void onApplicationStarted(RunLongLivedApp application) {
-    log.info("Process has started");
-    processStarted = true;
-    if (executionTimeout > 0) {
-      timeoutThread = new Thread(this);
-      timeoutThread.start();
-    }
-  }
-
-  @Override // ApplicationEventHandler
-  public void onApplicationExited(RunLongLivedApp application,
-                                  int exitC) {
-    synchronized (this) {
-      completed(exitC);
-      //note whether or not the service had already stopped
-      log.info("Process has exited with exit code {}", exitC);
-      if (exitC != 0) {
-        reportFailure(exitC, name + " failed with code " +
-                             exitC);
-      }
-    }
-    //now stop itself
-    if (!isInState(STATE.STOPPED)) {
-      stop();
-    }
-  }
-
-  private void reportFailure(int exitC, String text) {
-    this.exitCode.set(exitC);
-    //error
-    ServiceLaunchException execEx =
-      new ServiceLaunchException(exitC,
-                                 text);
-    log.debug("Noting failure", execEx);
-    noteFailure(execEx);
-  }
-
-  /**
-   * handle timeout response by escalating it to a failure
-   */
-  @Override
-  public void run() {
-    try {
-      synchronized (processTerminated) {
-        if (!processTerminated.get()) {
-          processTerminated.wait(executionTimeout);
-        }
-      }
-
-    } catch (InterruptedException e) {
-      //assume signalled; exit
-    }
-    //check the status; if the marker isn't true, bail
-    if (!processTerminated.getAndSet(true)) {
-      log.info("process timeout: reporting error code {}", timeoutCode);
-
-      //timeout
-      if (isInState(STATE.STARTED)) {
-        //trigger a failure
-        process.stop();
-      }
-      reportFailure(timeoutCode, name + ": timeout after " + executionTimeout
-                   + " millis: exit code =" + timeoutCode);
-    }
-  }
-
-  protected void completed(int exitCode) {
-    this.exitCode.set(exitCode);
-    processTerminated.set(true);
-    synchronized (processTerminated) {
-      processTerminated.notify();
-    }
-  }
-
-  public boolean isProcessTerminated() {
-    return processTerminated.get();
-  }
-
-  public synchronized boolean isProcessStarted() {
-    return processStarted;
-  }
-
-
-  @Override // ExitCodeProvider
-  public int getExitCode() {
-    return exitCode.get();
-  }
-
-  public String getCommandLine() {
-    return commandLine;
-  }
-
-  /**
-   * Get the recent output from the process, or [] if not defined
-   * @return a possibly empty list
-   */
-  public List<String> getRecentOutput() {
-    return process != null
-           ? process.getRecentOutput()
-           : new LinkedList<String>();
-  }
-
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundLaunchedService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/LaunchedWorkflowCompositeService.java
similarity index 71%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundLaunchedService.java
rename to slider-core/src/main/java/org/apache/slider/server/services/utility/LaunchedWorkflowCompositeService.java
index 692da38..0d47c3b 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/CompoundLaunchedService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/utility/LaunchedWorkflowCompositeService.java
@@ -22,28 +22,24 @@
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.Service;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.main.LauncherExitCodes;
 import org.apache.slider.core.main.RunService;
+import org.apache.slider.server.services.workflow.WorkflowCompositeService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class CompoundLaunchedService extends CompoundService
+public class LaunchedWorkflowCompositeService extends WorkflowCompositeService
     implements RunService {
   private static final Logger log = LoggerFactory.getLogger(
-      CompoundLaunchedService.class);
+      LaunchedWorkflowCompositeService.class);
   private String[] argv;
   
-  public CompoundLaunchedService(String name) {
+  public LaunchedWorkflowCompositeService(String name) {
     super(name);
   }
 
-  public CompoundLaunchedService() {
-    super("CompoundLaunchedService");
-  }
-
-  public CompoundLaunchedService(Service... children) {
-    super(children);
+  public LaunchedWorkflowCompositeService(String name, Service... children) {
+    super(name, children);
   }
 
   /**
@@ -94,8 +90,8 @@
   }
 
   @Override
-  public void addService(Service service) {
-    Preconditions.checkNotNull(service, "null service");
+  public synchronized void addService(Service service) {
+    Preconditions.checkArgument(service != null, "null service argument");
     super.addService(service);
   }
 
@@ -114,21 +110,4 @@
     return false;
   }
 
-  protected void requireArgumentSet(String argname, String argfield)
-      throws BadCommandArgumentsException {
-    if (isUnset(argfield)) {
-      throw new BadCommandArgumentsException("Required argument "
-                                             + argname
-                                             + " missing");
-    }
-  }
-
-  protected void requireArgumentSet(String argname, Object argfield) throws
-                                               BadCommandArgumentsException {
-    if (argfield == null) {
-      throw new BadCommandArgumentsException("Required argument "
-                                             + argname
-                                             + " missing");
-    }
-  }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/PatternValidator.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/PatternValidator.java
index 3542549..6ab9de6 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/PatternValidator.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/utility/PatternValidator.java
@@ -22,6 +22,9 @@
 
 import java.util.regex.Pattern;
 
+/**
+ * Utility class to validate strings against a predefined pattern.
+ */
 public class PatternValidator {
 
   public static final String E_INVALID_NAME =
@@ -41,9 +44,18 @@
    * @throws IllegalArgumentException if not a valid name
    */
   public void validate(String name) {
-    if (!valid.matcher(name).matches()) {
+    if (!matches(name)) {
       throw new IllegalArgumentException(
           String.format(E_INVALID_NAME, name, pattern));
     }
   }
+
+  /**
+   * Query to see if the pattern matches
+   * @param name name to validate
+   * @return true if the string matches the pattern
+   */
+  public boolean matches(String name) {
+    return valid.matcher(name).matches();
+  }
 }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/SecurityCheckerService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/SecurityCheckerService.java
deleted file mode 100644
index 0a58499..0000000
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/SecurityCheckerService.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.slider.common.tools.SliderUtils;
-
-/**
- * A security checker service, which validates that the service
- * is running with security in its init() operation.
- */
-public class SecurityCheckerService extends AbstractService {
-
-  public SecurityCheckerService() {
-    super("Security Checker");
-  }
-
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    super.serviceInit(conf);
-    SliderUtils.initProcessSecurity(conf);
-  }
-}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/WebAppService.java b/slider-core/src/main/java/org/apache/slider/server/services/utility/WebAppService.java
index de183dd..23c7c9b 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/WebAppService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/utility/WebAppService.java
@@ -30,7 +30,7 @@
  */
 public class WebAppService<T extends WebApp> extends AbstractService {
 
-  private T webApp;
+  private volatile T webApp;
 
   public WebAppService(String name) {
     super(name);
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/AbstractWorkflowExecutorService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/AbstractWorkflowExecutorService.java
new file mode 100644
index 0000000..c26e3c4
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/AbstractWorkflowExecutorService.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.AbstractService;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+/**
+ * A service that hosts an executor -when the service is stopped,
+ * {@link ExecutorService#shutdownNow()} is invoked.
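+ * <p>
+ * Sketch of intended use (a hypothetical subclass; the executor and
+ * task are illustrative):
+ * <pre>
+ *   class PingService extends AbstractWorkflowExecutorService {
+ *     PingService() {
+ *       super("PingService", Executors.newSingleThreadExecutor());
+ *     }
+ *     void ping(Runnable probe) {
+ *       execute(probe); // runs on the hosted executor
+ *     }
+ *   }
+ * </pre>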
+ */
+public abstract class AbstractWorkflowExecutorService extends AbstractService {
+
+  private ExecutorService executor;
+
+  /**
+   * Construct an instance with the given name -but
+   * no executor
+   * @param name service name
+   */
+  public AbstractWorkflowExecutorService(String name) {
+    this(name, null);
+  }
+
+  /**
+   * Construct an instance with the given name and executor
+   * @param name service name
+   * @param executor executor
+   */
+  protected AbstractWorkflowExecutorService(String name,
+      ExecutorService executor) {
+    super(name);
+    this.executor = executor;
+  }
+
+  /**
+   * Get the executor
+   * @return the executor
+   */
+  public synchronized ExecutorService getExecutor() {
+    return executor;
+  }
+
+  /**
+   * Set the executor. This is protected as it
+   * is intended to be restricted to subclasses
+   * @param executor executor
+   */
+  protected synchronized void setExecutor(ExecutorService executor) {
+    this.executor = executor;
+  }
+
+  /**
+   * Execute the runnable with the executor (which 
+   * must have been created already)
+   * @param runnable runnable to execute
+   */
+  public void execute(Runnable runnable) {
+    getExecutor().execute(runnable);
+  }
+
+  /**
+   * Submit a callable
+   * @param callable callable
+   * @param <V> type of the final get
+   * @return a future to wait on
+   */
+  public <V> Future<V> submit(Callable<V> callable) {
+    return getExecutor().submit(callable);
+  }
+
+  /**
+   * Stop the service: halt the executor. 
+   * @throws Exception exception.
+   */
+  @Override
+  protected void serviceStop() throws Exception {
+    super.serviceStop();
+    stopExecutor();
+  }
+
+  /**
+   * Stop the executor if it is not null.
+   * This uses {@link ExecutorService#shutdownNow()}
+   * and so does not block until they have completed.
+   */
+  protected synchronized void stopExecutor() {
+    if (executor != null) {
+      executor.shutdownNow();
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/ClosingService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ClosingService.java
new file mode 100644
index 0000000..7a475cc
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ClosingService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.AbstractService;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * Service that closes the closeable supplied during shutdown, if not null.
+ * 
+ * As the Service interface itself extends Closeable, this service
+ * can be used to shut down other services if desired.
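+ * <p>
+ * A minimal sketch (the file name is illustrative):
+ * <pre>
+ *   ClosingService&lt;InputStream&gt; cs =
+ *       new ClosingService&lt;InputStream&gt;(
+ *           new FileInputStream("/tmp/data"));
+ *   cs.init(new Configuration());
+ *   cs.start();
+ *   // ... use cs.getCloseable() ...
+ *   cs.stop(); // closes the stream; any IOException is recorded
+ * </pre>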
+ */
+public class ClosingService<C extends Closeable> extends AbstractService {
+
+  private C closeable;
+
+
+  /**
+   * Construct an instance of the service
+   * @param name service name
+   * @param closeable closeable to close (may be null)
+   */
+  public ClosingService(String name,
+      C closeable) {
+    super(name);
+    this.closeable = closeable;
+  }
+
+  /**
+   * Construct an instance of the service, using the default name
+   * @param closeable closeable to close (may be null)
+   */
+  public ClosingService(C closeable) {
+    this("ClosingService", closeable);
+  }
+
+
+  /**
+   * Get the closeable
+   * @return the closeable
+   */
+  public synchronized C getCloseable() {
+    return closeable;
+  }
+
+  /**
+   * Set or update the closeable.
+   * @param closeable the new closeable; may be null
+   */
+  public synchronized void setCloseable(C closeable) {
+    this.closeable = closeable;
+  }
+
+  /**
+   * Stop routine will close the closeable -if not null - and set the
+   * reference to null afterwards
+   * This operation does not raise any exception on the close, though it
+   * does record any failure
+   */
+  @Override
+  protected void serviceStop() {
+    C target = getCloseable();
+    if (target != null) {
+      try {
+        target.close();
+      } catch (IOException ioe) {
+        noteFailure(ioe);
+      }
+      setCloseable(null);
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/ForkedProcessService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ForkedProcessService.java
new file mode 100644
index 0000000..ccce6cb
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ForkedProcessService.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.slider.core.main.ServiceLaunchException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Service wrapper for an external program that is launched and can/will terminate.
+ * This service is notified when the subprocess terminates, and stops itself 
+ * and converts a non-zero exit code into a failure exception.
+ * 
+ * <p>
+ * Key Features:
+ * <ol>
+ *   <li>The property {@link #executionTimeout} can be set to set a limit
+ *   on the duration of a process</li>
+ *   <li>Output is streamed to the output logger provided.</li>
+ *   <li>The most recent lines of output are saved to a linked list.</li>
+ *   <li>A synchronous callback, {@link LongLivedProcessLifecycleEvent}, is raised on the start
+ *   and finish of a process.</li>
+ * </ol>
+ *
+ * Usage:
+ * <p></p>
+ * The service can be built in the constructor, {@link #ForkedProcessService(String, Map, List)},
+ * or have its simple constructor used to instantiate the service, then the 
+ * {@link #build(Map, List)} command used to define the environment variables
+ * and list of commands to execute. One of these two options MUST be exercised
+ * before calling the service's {@link #start()} method.
+ * <p></p>
+ * The forked process is executed in the service's {@link #serviceStart()} method;
+ * if still running when the service is stopped, {@link #serviceStop()} will
+ * attempt to stop it.
+ * <p></p>
+ * 
+ * The service delegates process execution to {@link LongLivedProcess},
+ * receiving callbacks via the {@link LongLivedProcessLifecycleEvent}.
+ * When the service receives a callback notifying that the process has completed,
+ * it calls its {@link #stop()} method. If the error code was non-zero, 
+ * the service is logged as having failed.
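+ * <p></p>
+ * A minimal usage sketch (the command and service name here are
+ * illustrative, not part of the class contract):
+ * <pre>
+ *   ForkedProcessService ls = new ForkedProcessService("ls",
+ *       new HashMap&lt;String, String&gt;(),
+ *       Arrays.asList("ls", "-l"));
+ *   ls.init(new Configuration());
+ *   ls.start();
+ *   // the service stops itself when the process exits; a non-zero
+ *   // exit code becomes the service's failure cause
+ * </pre>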
+ */
+public class ForkedProcessService extends AbstractWorkflowExecutorService implements
+    LongLivedProcessLifecycleEvent, Runnable {
+
+  /**
+   * Log for the forked master process
+   */
+  private static final Logger LOG =
+    LoggerFactory.getLogger(ForkedProcessService.class);
+
+  private final AtomicBoolean processTerminated = new AtomicBoolean(false);
+  private boolean processStarted = false;
+  private LongLivedProcess process;
+  private int executionTimeout = -1;
+  private int timeoutCode = 1;
+
+  /**
+   * Exit code set when the spawned process exits
+   */
+  private AtomicInteger exitCode = new AtomicInteger(0);
+
+  /**
+   * Create an instance of the service
+   * @param name a name
+   */
+  public ForkedProcessService(String name) {
+    super(name);
+  }
+
+  /**
+   * Create an instance of the service and set up the process.
+   * @param name a name
+   * @param env environment variables to set, in addition to those
+   * inherited from the current process
+   * @param commandList list of commands to execute; the first entry
+   * is the command itself
+   * @throws IOException IO problems
+   */
+  public ForkedProcessService(String name, Map<String, String> env,
+      List<String> commandList) throws IOException {
+    super(name);
+    build(env, commandList);
+  }
+
+  @Override //AbstractService
+  protected void serviceStart() throws Exception {
+    if (process == null) {
+      throw new ServiceStateException("Process not yet configured");
+    }
+    //now spawn the process -expect updates via callbacks
+    process.start();
+  }
+
+  @Override //AbstractService
+  protected void serviceStop() throws Exception {
+    completed(0);
+    stopForkedProcess();
+  }
+
+  private void stopForkedProcess() {
+    if (process != null) {
+      process.stop();
+    }
+  }
+
+  /**
+   * Set the timeout by which time a process must have finished -or -1 for forever
+   * @param timeout timeout in milliseconds
+   */
+  public void setTimeout(int timeout, int code) {
+    this.executionTimeout = timeout;
+    this.timeoutCode = code;
+  }
+
+  /**
+   * Build the process to execute when the service is started
+   * @param env environment variables to set, in addition to those
+   * inherited from the current process
+   * @param commandList list of commands to execute; the first entry
+   * is the command itself
+   * @throws IOException IO problems
+   */
+  public void build(Map<String, String> env,
+                    List<String> commandList)
+      throws IOException {
+    assert process == null;
+    process = new LongLivedProcess(getName(), LOG, commandList);
+    process.setLifecycleCallback(this);
+    //set the env variable mapping
+    process.putEnvMap(env);
+  }
+
+  @Override // notification from executed process
+  public synchronized void onProcessStarted(LongLivedProcess process) {
+    LOG.debug("Process has started");
+    processStarted = true;
+    if (executionTimeout > 0) {
+      setExecutor(ServiceThreadFactory.singleThreadExecutor(getName(), true));
+      execute(this);
+    }
+  }
+
+  @Override  // notification from executed process
+  public void onProcessExited(LongLivedProcess process,
+      int uncorrected,
+      int code) {
+    try {
+      synchronized (this) {
+        completed(code);
+        //note whether or not the service had already stopped
+        LOG.debug("Process has exited with exit code {}", code);
+        if (code != 0) {
+          reportFailure(code, getName() + " failed with code " + code);
+        }
+      }
+    } finally {
+      stop();
+    }
+  }
+
+  private void reportFailure(int code, String text) {
+    //error
+    ServiceLaunchException execEx = new ServiceLaunchException(code, text);
+    LOG.debug("Noting failure", execEx);
+    noteFailure(execEx);
+  }
+
+  /**
+   * handle timeout response by escalating it to a failure
+   */
+  @Override
+  public void run() {
+    try {
+      synchronized (processTerminated) {
+        if (!processTerminated.get()) {
+          processTerminated.wait(executionTimeout);
+        }
+      }
+
+    } catch (InterruptedException e) {
+      //assume signalled; exit
+    }
+    //check the status; if the marker isn't true, bail
+    if (!processTerminated.getAndSet(true)) {
+      LOG.info("process timeout: reporting error code {}", timeoutCode);
+
+      //timeout
+      if (isInState(STATE.STARTED)) {
+        //trigger a failure
+        stopForkedProcess();
+      }
+      reportFailure(timeoutCode, getName() + ": timeout after " + executionTimeout
+                   + " millis: exit code =" + timeoutCode);
+    }
+  }
+
+  /**
+   * Note the process as having completed.
+   * The exit code is stored, the process marked as terminated
+   * -and anything synchronized on <code>processTerminated</code>
+   * is notified
+   * @param code exit code
+   */
+  protected void completed(int code) {
+    //store the exit code
+    exitCode.set(code);
+    processTerminated.set(true);
+    synchronized (processTerminated) {
+      processTerminated.notify();
+    }
+  }
+
+  public boolean isProcessTerminated() {
+    return processTerminated.get();
+  }
+
+  public synchronized boolean isProcessStarted() {
+    return processStarted;
+  }
+
+  /**
+   * Is a process running: between started and terminated
+   * @return true if the process is up.
+   */
+  public synchronized boolean isProcessRunning() {
+    return processStarted && !isProcessTerminated();
+  }
+
+
+  public int getExitCode() {
+    return process.getExitCode();
+  }
+  
+  public int getExitCodeSignCorrected() {
+    Integer exitCode = process.getExitCodeSignCorrected();
+    if (exitCode == null) return -1;
+    return exitCode;
+  }
+
+  /**
+   * Get the recent output from the process, or [] if not defined
+   * @return a possibly empty list
+   */
+  public List<String> getRecentOutput() {
+    return process != null
+           ? process.getRecentOutput()
+           : new LinkedList<String>();
+  }
+
+  /**
+   * Get the recent output from the process, or [] if not defined
+   *
+   * @param finalOutput flag to indicate "wait for the final output of the process"
+   * @param duration the duration, in ms, 
+   * to wait for recent output to become non-empty
+   * @return a possibly empty list
+   */
+  public List<String> getRecentOutput(boolean finalOutput, int duration) {
+    if (process == null) {
+      return new LinkedList<String>();
+    }
+    return process.getRecentOutput(finalOutput, duration);
+  }
+  
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
new file mode 100644
index 0000000..ecc26b9
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
@@ -0,0 +1,559 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Execute a long-lived process.
+ *
+ * <p>
+ * Hadoop's {@link org.apache.hadoop.util.Shell} class assumes it is executing
+ * a short lived application; this class allows for the process to run for the
+ * life of the Java process that forked it.
+ * It is designed to be embedded inside a YARN service, though this is not
+ * the sole way that it can be used
+ * <p>
+ * Key Features:
+ * <ol>
+ *   <li>Output is streamed to the output logger provided.</li>
+ *   <li>The most recent lines of output are saved to a linked list.</li>
+ *   <li>A synchronous callback, {@link LongLivedProcessLifecycleEvent}, is raised on the start
+ *   and finish of a process.</li>
+ * </ol>
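+ *
+ * <p>
+ * A minimal usage sketch -the command and logger here are illustrative:
+ * <pre>
+ *   LongLivedProcess process = new LongLivedProcess("sleeper",
+ *       LoggerFactory.getLogger("sleeper"),
+ *       Arrays.asList("sleep", "60"));
+ *   process.start();
+ * </pre>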
+ * 
+ */
+public class LongLivedProcess implements Runnable {
+  /**
+   * Limit on number of lines to retain in the "recent" line list: {@value}
+   */
+  public static final int RECENT_LINE_LOG_LIMIT = 64;
+
+  /**
+   * Interval in milliseconds between polls for new text
+   */
+  private static final int STREAM_READER_SLEEP_TIME = 200;
+  
+  /**
+   * limit on the length of a line before it triggers an automatic newline
+   */
+  private static final int LINE_LENGTH = 256;
+  private final ProcessBuilder processBuilder;
+  private Process process;
+  private Integer exitCode = null;
+  private final String name;
+  private final ExecutorService processExecutor;
+  private final ExecutorService logExecutor;
+  
+  private ProcessStreamReader processStreamReader;
+  //list of recent lines, recorded for extraction into reports
+  private final List<String> recentLines = new LinkedList<String>();
+  private int recentLineLimit = RECENT_LINE_LOG_LIMIT;
+  private LongLivedProcessLifecycleEvent lifecycleCallback;
+  private final AtomicBoolean finalOutputProcessed = new AtomicBoolean(false);
+
+  
+  /**
+   * Log supplied in the constructor for the spawned process -accessible
+   * to inner classes
+   */
+  private final Logger processLog;
+  
+  /**
+   * Class log -accessible to inner classes
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(LongLivedProcess.class);
+
+  /**
+   *  flag to indicate that the process is done
+   */
+  private final AtomicBoolean finished = new AtomicBoolean(false);
+
+  public LongLivedProcess(String name,
+      Logger processLog,
+      List<String> commands) {
+    Preconditions.checkArgument(processLog != null, "processLog");
+    Preconditions.checkArgument(commands != null, "commands");
+
+    this.name = name;
+    this.processLog = processLog;
+    ServiceThreadFactory factory = new ServiceThreadFactory(name, true);
+    processExecutor = Executors.newSingleThreadExecutor(factory);
+    logExecutor = Executors.newSingleThreadExecutor(factory);
+    processBuilder = new ProcessBuilder(commands);
+    processBuilder.redirectErrorStream(false);
+  }
+
+  /**
+   * Set the limit on recent lines to retain
+   * @param recentLineLimit size of rolling list of recent lines.
+   */
+  public void setRecentLineLimit(int recentLineLimit) {
+    this.recentLineLimit = recentLineLimit;
+  }
+
+  /**
+   * Set an optional application exit callback
+   * @param lifecycleCallback callback to notify on application exit
+   */
+  public void setLifecycleCallback(LongLivedProcessLifecycleEvent lifecycleCallback) {
+    this.lifecycleCallback = lifecycleCallback;
+  }
+
+  /**
+   * Add an entry to the environment
+   * @param envVar envVar -must not be null
+   * @param val value 
+   */
+  public void setEnv(String envVar, String val) {
+    Preconditions.checkArgument(envVar != null, "envVar");
+    Preconditions.checkArgument(val != null, "val");
+    processBuilder.environment().put(envVar, val);
+  }
+
+  /**
+   * Bulk set the environment from a map. This does
+   * not replace the existing environment, just extend it/overwrite single
+   * entries.
+   * @param map map to add
+   */
+  public void putEnvMap(Map<String, String> map) {
+    for (Map.Entry<String, String> entry : map.entrySet()) {
+      String val = entry.getValue();
+      String key = entry.getKey();
+      setEnv(key, val);
+    }
+  }
+
+  /**
+   * Get the process environment
+   * @param variable environment variable
+   * @return the value or null if there is no match
+   */
+  public String getEnv(String variable) {
+    return processBuilder.environment().get(variable);
+  }
+
+  /**
+   * Get the process reference
+   * @return the process -null if the process is not started
+   */
+  public Process getProcess() {
+    return process;
+  }
+
+  /**
+   * Get the process builder -this can be manipulated
+   * up to the start() operation. As there is no synchronization
+   * around it, it must only be used in the same thread setting up the command.
+   * @return the process builder
+   */
+  public ProcessBuilder getProcessBuilder() {
+    return processBuilder;
+  }
+
+  /**
+   * Get the command list
+   * @return the commands
+   */
+  public List<String> getCommands() {
+    return processBuilder.command();
+  }
+
+  public String getCommand() {
+    return getCommands().get(0);
+  }
+
+  /**
+   * probe to see if the process is running
+   * @return true iff the process has been started and is not yet finished
+   */
+  public boolean isRunning() {
+    return process != null && !finished.get();
+  }
+
+  /**
+   * Get the exit code: null until the process has finished
+   * @return the exit code or null
+   */
+  public Integer getExitCode() {
+    return exitCode;
+  }
+  
+  /**
+   * Get the exit code sign corrected: null until the process has finished
+   * @return the exit code or null
+   */
+  public Integer getExitCodeSignCorrected() {
+    Integer result;
+    if (exitCode != null) {
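+      //sign-extend the low byte: exit codes are a single byte on Unix, so
+      //values above 127 (such as those of signal-terminated processes)
+      //are really negative byte values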
+      result = (exitCode << 24) >> 24;
+    } else {
+      result = null;
+    }
+    return result;
+  }
+
+  /**
+   * Stop the process if it is running.
+   * This will trigger an application completion event with the process's exit code
+   */
+  public void stop() {
+    if (!isRunning()) {
+      return;
+    }
+    process.destroy();
+  }
+
+  /**
+   * Get a text description of the builder suitable for log output
+   * @return the command line, with each argument quoted
+   */
+  protected String describeBuilder() {
+    StringBuilder buffer = new StringBuilder();
+    for (String arg : processBuilder.command()) {
+      buffer.append('"').append(arg).append("\" ");
+    }
+    return buffer.toString();
+  }
+
+  /**
+   * Dump the environment to a string builder
+   * @param buffer the buffer to append to
+   */
+  public void dumpEnv(StringBuilder buffer) {
+    buffer.append("\nEnvironment\n-----------");
+    Map<String, String> env = processBuilder.environment();
+    Set<String> keys = env.keySet();
+    List<String> sortedKeys = new ArrayList<String>(keys);
+    Collections.sort(sortedKeys);
+    for (String key : sortedKeys) {
+      buffer.append(key).append("=").append(env.get(key)).append('\n');
+    }
+  }
+
+  /**
+   * Exec the process
+   * @return the process
+   * @throws IOException if the process cannot be started
+   */
+  private Process spawnChildProcess() throws IOException {
+    if (process != null) {
+      throw new IOException("Process already started");
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Spawning process:\n " + describeBuilder());
+    }
+    process = processBuilder.start();
+    return process;
+  }
+
+  /**
+   * Entry point for waiting for the program to finish
+   */
+  @Override // Runnable
+  public void run() {
+    LOG.debug("Lifecycle callback thread running");
+    //notify the callback that the process has started
+    if (lifecycleCallback != null) {
+      lifecycleCallback.onProcessStarted(this);
+    }
+    try {
+      exitCode = process.waitFor();
+    } catch (InterruptedException e) {
+      LOG.debug("Process wait interrupted -exiting thread", e);
+    } finally {
+      //here the process has finished
+      LOG.debug("process {} has finished", name);
+      //tell the logger it has to finish too
+      finished.set(true);
+
+      // shut down the threads
+      logExecutor.shutdown();
+      try {
+        logExecutor.awaitTermination(60, TimeUnit.SECONDS);
+      } catch (InterruptedException ignored) {
+        //ignored
+      }
+
+      //now call the callback if it is set
+      if (lifecycleCallback != null) {
+        lifecycleCallback.onProcessExited(this, exitCode,
+            getExitCodeSignCorrected());
+      }
+    }
+  }
+
+  /**
+   * Spawn the application
+   * @throws IOException IO problems
+   */
+  public void start() throws IOException {
+
+    spawnChildProcess();
+    processExecutor.submit(this);
+    processStreamReader =
+      new ProcessStreamReader(processLog, STREAM_READER_SLEEP_TIME);
+    logExecutor.submit(processStreamReader);
+  }
+
+  /**
+   * Get the lines of recent output
+   * @return the last few lines of output; an empty list if there are none
+   * or the process is not actually running
+   */
+  public synchronized List<String> getRecentOutput() {
+    return new ArrayList<String>(recentLines);
+  }
+
+  /**
+   * @return whether lines of recent output are empty
+   */
+  public synchronized boolean isRecentOutputEmpty() {
+    return recentLines.isEmpty();
+  }
+
+  /**
+   * Query to see if the final output has been processed
+   * @return true if the final output has been processed
+   */
+  public boolean isFinalOutputProcessed() {
+    return finalOutputProcessed.get();
+  }
+
+  /**
+   * Get the recent output from the process, or [] if not defined
+   *
+   * @param finalOutput flag to indicate "wait for the final output of the process"
+   * @param duration the duration, in ms, 
+   * to wait for recent output to become non-empty
+   * @return a possibly empty list
+   */
+  public List<String> getRecentOutput(boolean finalOutput, int duration) {
+    long start = System.currentTimeMillis();
+    while (System.currentTimeMillis() - start <= duration) {
+      boolean finishedOutput;
+      if (finalOutput) {
+        // final flag means block until all data is done
+        finishedOutput = isFinalOutputProcessed();
+      } else {
+        // there is some output
+        finishedOutput = !isRecentOutputEmpty();
+      }
+      if (finishedOutput) {
+        break;
+      }
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+        break;
+      }
+    }
+    return getRecentOutput();
+  }
+
+  /**
+   * Add the recent line to the list of recent lines, deleting
+   * an earlier one if the limit is reached.
+   *
+   * Implementation note: yes, a circular array would be more
+   * efficient, especially with some power of two as the modulo,
+   * but is it worth the complexity and risk of errors for
+   * something that is only called once per line of IO?
+   * @param line line to record
+   * @param isErrorStream is the line from the error stream
+   * @param logger logger to log to
+   */
+  private synchronized void recordRecentLine(String line,
+      boolean isErrorStream, Logger logger) {
+    if (line == null) {
+      return;
+    }
+    String entry = (isErrorStream ? "[ERR] " : "[OUT] ") + line;
+    recentLines.add(entry);
+    if (recentLines.size() > recentLineLimit) {
+      recentLines.remove(0);
+    }
+    if (isErrorStream) {
+      logger.warn(line);
+    } else {
+      logger.info(line);
+    }
+  }
+
+  /**
+   * Class to read data from the two process streams, and, when run in a thread
+   * to keep running until the <code>done</code> flag is set. 
+   * Lines are fetched from stdout and stderr and logged at info and error
+   * respectively.
+   */
+
+  private class ProcessStreamReader implements Runnable {
+    private final Logger streamLog;
+    private final int sleepTime;
+
+    private ProcessStreamReader(Logger streamLog, int sleepTime) {
+      this.streamLog = streamLog;
+      this.sleepTime = sleepTime;
+    }
+
+    /**
+     * Return a character if there is one, -1 if nothing is ready yet
+     * @param reader reader
+     * @return the value from the reader, or -1 if it is not ready
+     * @throws IOException IO problems
+     */
+    private int readCharNonBlocking(BufferedReader reader) throws IOException {
+      if (reader.ready()) {
+        return reader.read();
+      } else {
+        return -1;
+      }
+    }
+
+    /**
+     * Read in a line, or, if the limit has been reached, the buffer
+     * so far
+     * @param reader source of data
+     * @param line line to build
+     * @param limit limit of line length
+     * @return true if the line can be printed
+     * @throws IOException IO trouble
+     */
+    @SuppressWarnings("NestedAssignment")
+    private boolean readAnyLine(BufferedReader reader,
+                                StringBuilder line,
+                                int limit)
+      throws IOException {
+      int next;
+      while ((-1 != (next = readCharNonBlocking(reader)))) {
+        if (next != '\n') {
+          line.append((char) next);
+          if (line.length() >= limit) {
+            //enough has been read in; print what there is
+            return true;
+          }
+        } else {
+          //line end: return flag to say so
+          return true;
+        }
+      }
+      //no more characters are ready and no line end was reached
+      return false;
+    }
+
+
+    @Override //Runnable
+    @SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
+    public void run() {
+      BufferedReader errReader = null;
+      BufferedReader outReader = null;
+      StringBuilder outLine = new StringBuilder(LINE_LENGTH);
+      StringBuilder errorLine = new StringBuilder(LINE_LENGTH);
+      try {
+        errReader = new BufferedReader(
+            new InputStreamReader(process.getErrorStream()));
+        outReader = new BufferedReader(
+            new InputStreamReader(process.getInputStream()));
+        while (!finished.get()) {
+          boolean processed = false;
+          if (readAnyLine(errReader, errorLine, LINE_LENGTH)) {
+            recordRecentLine(errorLine.toString(), true, streamLog);
+            errorLine.setLength(0);
+            processed = true;
+          }
+          if (readAnyLine(outReader, outLine, LINE_LENGTH)) {
+            recordRecentLine(outLine.toString(), false, streamLog);
+            outLine.setLength(0);
+            processed = true;
+          }
+          if (!processed && !finished.get()) {
+            //nothing processed: wait a bit for data.
+            try {
+              Thread.sleep(sleepTime);
+            } catch (InterruptedException e) {
+              //ignore this, rely on the done flag
+              LOG.debug("Ignoring ", e);
+            }
+          }
+        }
+        // finished: cleanup
+
+        //print the current error line then stream through the rest
+        recordFinalOutput(errReader, errorLine, true, streamLog);
+        //now do the info line
+        recordFinalOutput(outReader, outLine, false, streamLog);
+
+      } catch (Exception e) {
+        //the process connection has been torn down
+        LOG.warn("Stream reader failed", e);
+      } finally {
+        // close streams
+        IOUtils.closeStream(errReader);
+        IOUtils.closeStream(outReader);
+        //mark output as done
+        finalOutputProcessed.set(true);
+      }
+    }
+
+    /**
+     * Record the final output of a process stream
+     * @param reader reader of output
+     * @param lineBuilder string builder into which line is built
+     * @param isErrorStream flag to indicate whether or not this
+     * is the error stream
+     * @param logger logger to log to
+     * @throws IOException if reading the stream fails
+     */
+    protected void recordFinalOutput(BufferedReader reader,
+        StringBuilder lineBuilder, boolean isErrorStream, Logger logger) throws
+        IOException {
+      String line = lineBuilder.toString();
+      recordRecentLine(line, isErrorStream, logger);
+      line = reader.readLine();
+      while (line != null) {
+        recordRecentLine(line, isErrorStream, logger);
+        line = reader.readLine();
+        if (Thread.interrupted()) {
+          break;
+        }
+      }
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcessLifecycleEvent.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcessLifecycleEvent.java
new file mode 100644
index 0000000..a13b508
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcessLifecycleEvent.java
@@ -0,0 +1,41 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+/**
+ * Callback when a long-lived application exits
+ */
+public interface LongLivedProcessLifecycleEvent {
+
+  /**
+   * Callback when a process is started
+   * @param process the process invoking the callback
+   */
+  void onProcessStarted(LongLivedProcess process);
+
+  /**
+   * Callback when a process has finished
+   * @param process the process invoking the callback
+   * @param exitCode exit code from the process
+   * @param signCorrectedCode the exit code after sign correction
+   */
+  void onProcessExited(LongLivedProcess process,
+      int exitCode,
+      int signCorrectedCode);
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/Parent.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceParent.java
similarity index 76%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/Parent.java
rename to slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceParent.java
index ea1769c..a123584 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/Parent.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceParent.java
@@ -16,18 +16,23 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.server.services.workflow;
 
 import org.apache.hadoop.service.Service;
 
 import java.util.List;
 
 /**
- * Interface that services with public methods to manipulate child services
- * should implement
+ * Interface for accessing services that contain one or more child
+ * services. 
  */
-public interface Parent extends Service {
+public interface ServiceParent extends Service {
 
+  /**
+   * Add a child service. It must be in a consistent state with the
+   * service to which it is being added.
+   * @param service the service to add.
+   */
   void addService(Service service);
 
   /**
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingCallable.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingCallable.java
new file mode 100644
index 0000000..5ebf77c
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingCallable.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.service.Service;
+
+import java.util.concurrent.Callable;
+
+/**
+ * A callable which stops its owning service once the inner callable has
+ * completed; it also catches any exception raised and can serve it back.
+ * 
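+ * <p>
+ * A minimal sketch of wrapping a callable -the owner service, executor
+ * and inner callable here are illustrative:
+ * <pre>
+ *   Callable&lt;String&gt; wrapped =
+ *       new ServiceTerminatingCallable&lt;String&gt;(owner, inner);
+ *   Future&lt;String&gt; future = executor.submit(wrapped);
+ * </pre>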
+ */
+public class ServiceTerminatingCallable<V> implements Callable<V> {
+
+  private final Service owner;
+  private Exception exception;
+  /**
+   * This is the callback
+   */
+  private final Callable<V> callable;
+
+
+  /**
+   * Create an instance. If the owner is null, the owning service
+   * is not terminated.
+   * @param owner owning service -can be null
+   * @param callable callback.
+   */
+  public ServiceTerminatingCallable(Service owner,
+      Callable<V> callable) {
+    Preconditions.checkArgument(callable != null, "null callable");
+    this.owner = owner;
+    this.callable = callable;
+  }
+
+
+  /**
+   * Get the owning service
+   * @return the service to receive notification when
+   * the runnable completes.
+   */
+  public Service getOwner() {
+    return owner;
+  }
+
+  /**
+   * Any exception raised by the inner callable's <code>call()</code>.
+   * @return an exception or null.
+   */
+  public Exception getException() {
+    return exception;
+  }
+
+  /**
+   * Delegates the call to the callable supplied in the constructor,
+   * then calls the stop() operation on its owner. Any exception
+   * is caught, noted and rethrown
+   * @return the outcome of the delegated call operation
+   * @throws Exception if one was raised.
+   */
+  @Override
+  public V call() throws Exception {
+    try {
+      return callable.call();
+    } catch (Exception e) {
+      exception = e;
+      throw e;
+    } finally {
+      if (owner != null) {
+        owner.stop();
+      }
+    }
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingRunnable.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingRunnable.java
new file mode 100644
index 0000000..ec0c61d
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceTerminatingRunnable.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.service.Service;
+
+/**
+ * A runnable which stops its owning service after running; it also catches
+ * any exception raised and can serve it back.
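+ * <p>
+ * A minimal sketch -the owner service and action are illustrative:
+ * <pre>
+ *   new Thread(new ServiceTerminatingRunnable(owner, action)).start();
+ * </pre>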
+ */
+public class ServiceTerminatingRunnable implements Runnable {
+
+  private final Service owner;
+  private final Runnable action;
+  private Exception exception;
+
+  /**
+   * Create an instance
+   * @param owner owning service
+   * @param action action to execute before terminating the service
+   */
+  public ServiceTerminatingRunnable(Service owner, Runnable action) {
+    Preconditions.checkArgument(owner != null, "null owner");
+    Preconditions.checkArgument(action != null, "null action");
+    this.owner = owner;
+    this.action = action;
+  }
+
+  /**
+   * Get the owning service
+   * @return the service to receive notification when
+   * the runnable completes.
+   */
+  public Service getOwner() {
+    return owner;
+  }
+
+  /**
+   * Any exception raised by the inner action's <code>run()</code>.
+   * @return an exception or null.
+   */
+  public Exception getException() {
+    return exception;
+  }
+
+  @Override
+  public void run() {
+    try {
+      action.run();
+    } catch (Exception e) {
+      exception = e;
+    }
+    owner.stop();
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceThreadFactory.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceThreadFactory.java
new file mode 100644
index 0000000..7d7110e
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/ServiceThreadFactory.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A thread factory that creates threads (possibly daemon threads)
+ * using the name and naming policy supplied.
+ * The thread counter starts at 1, increments atomically, 
+ * and is supplied as the second argument in the format string.
+ * 
+ * A static method, {@link #singleThreadExecutor(String, boolean)},
+ * exists to simplify the construction of an executor with a single well-named
+ * thread.
+ * 
+ * Example
+ * <pre>
+ *  ExecutorService exec = ServiceThreadFactory.singleThreadExecutor("live", true);
+ * </pre>
+ */
+public class ServiceThreadFactory implements ThreadFactory {
+
+  private static AtomicInteger counter = new AtomicInteger(1);
+  /**
+   * Default format for thread names: {@value}
+   */
+  public static final String DEFAULT_NAMING_FORMAT = "%s-%03d";
+  private final String name;
+  private final boolean daemons;
+  private final String namingFormat;
+
+  /**
+   * Create an instance
+   * @param name base thread name
+   * @param daemons flag to indicate the threads should be marked as daemons
+   * @param namingFormat format string to generate thread names from
+   */
+  public ServiceThreadFactory(String name,
+      boolean daemons,
+      String namingFormat) {
+    Preconditions.checkArgument(name != null, "null name");
+    Preconditions.checkArgument(namingFormat != null, "null naming format");
+    this.name = name;
+    this.daemons = daemons;
+    this.namingFormat = namingFormat;
+  }
+
+  /**
+   *
+   * Create an instance with the default naming format
+   * @param name base thread name
+   * @param daemons flag to indicate the threads should be marked as daemons
+   */
+  public ServiceThreadFactory(String name,
+      boolean daemons) {
+    this(name, daemons, DEFAULT_NAMING_FORMAT);
+  }
+
+  @Override
+  public Thread newThread(Runnable r) {
+    Preconditions.checkArgument(r != null, "null runnable");
+    String threadName =
+        String.format(namingFormat, name, counter.getAndIncrement());
+    Thread thread = new Thread(r, threadName);
+    //mark the thread as a daemon if requested
+    thread.setDaemon(daemons);
+    return thread;
+  }
+
+  /**
+   * Create a single thread executor using this naming policy
+   * @param name base thread name
+   * @param daemons flag to indicate the threads should be marked as daemons
+   * @return an executor
+   */
+  public static ExecutorService singleThreadExecutor(String name,
+      boolean daemons) {
+    return Executors.newSingleThreadExecutor(
+        new ServiceThreadFactory(name, daemons));
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCallbackService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCallbackService.java
new file mode 100644
index 0000000..6c50798
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCallbackService.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A service that calls the supplied callback when it is started -after the 
+ * given delay. It can be configured to stop itself after the callback has
+ * completed, marking any exception raised as the exception of this service.
+ * The notifications come in on a callback thread -a thread that is only
+ * started in this service's <code>start()</code> operation.
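+ * <p>
+ * A minimal sketch -the callback and configuration here are illustrative:
+ * <pre>
+ *   WorkflowCallbackService&lt;Void&gt; service =
+ *       new WorkflowCallbackService&lt;Void&gt;("cleanup", callback, 1000, true);
+ *   service.init(conf);
+ *   service.start();
+ * </pre>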
+ */
+public class WorkflowCallbackService<V> extends
+    AbstractWorkflowExecutorService {
+  protected static final Logger LOG =
+    LoggerFactory.getLogger(WorkflowCallbackService.class);
+  private final Callable<V> callback;
+  private final int delay;
+  private final ServiceTerminatingCallable<V> command;
+  private ScheduledFuture<V> scheduledFuture;
+
+
+  /**
+   * Create an instance of the service
+   * @param name service name
+   * @param callback callback to invoke
+   * @param delay delay in milliseconds -or 0 for no delay
+   * @param terminate terminate this service after the callback?
+   */
+  public WorkflowCallbackService(String name,
+      Callable<V> callback,
+      int delay,
+      boolean terminate) {
+    super(name);
+    Preconditions.checkNotNull(callback, "Null callback argument");
+    this.callback = callback;
+    this.delay = delay;
+    command = new ServiceTerminatingCallable<V>(
+        terminate ? this : null,
+        callback);
+  }
+
+  public ScheduledFuture<V> getScheduledFuture() {
+    return scheduledFuture;
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    LOG.debug("Notifying {} after a delay of {} millis", callback, delay);
+    ScheduledExecutorService executorService =
+        Executors.newSingleThreadScheduledExecutor(
+            new ServiceThreadFactory(getName(), true));
+    setExecutor(executorService);
+    scheduledFuture =
+        executorService.schedule(command, delay, TimeUnit.MILLISECONDS);
+  }
+
+  /**
+   * Stop the service.
+   * If there is any exception noted from any executed notification,
+   * note the exception in this class
+   * @throws Exception exception.
+   */
+  @Override
+  protected void serviceStop() throws Exception {
+    super.serviceStop();
+    // propagate any failure
+    if (getCallbackException() != null) {
+      throw getCallbackException();
+    }
+  }
+
+  /**
+   * Get the exception raised by the callback. Will always be null if the
+   * callback has not been executed; will only be non-null after a failure.
+   * @return the exception raised by the callback, or null
+   */
+  public Exception getCallbackException() {
+    return command.getException();
+  }
+
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java
new file mode 100644
index 0000000..a7d9545
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.service.ServiceStateChangeListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+/**
+ * An extended composite service which stops itself if any child service
+ * fails, or when all its children have successfully stopped without failure.
+ *
+ * Lifecycle
+ * <ol>
+ *   <li>If any child exits with a failure: this service stops, propagating
+ *   the exception.</li>
+ *   <li>When all child services have stopped, this service stops itself</li>
+ * </ol>
+ *
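+ * <p>
+ * A sketch of typical use -the child services here are illustrative:
+ * <pre>
+ *   WorkflowCompositeService parent =
+ *       new WorkflowCompositeService("parent", child1, child2);
+ *   parent.init(conf);
+ *   parent.start();
+ * </pre>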
+ */
+public class WorkflowCompositeService extends CompositeService
+    implements ServiceParent, ServiceStateChangeListener {
+
+  private static final Logger LOG =
+    LoggerFactory.getLogger(WorkflowCompositeService.class);
+
+  /**
+   * Construct an instance
+   * @param name name of this service instance
+   */
+  public WorkflowCompositeService(String name) {
+    super(name);
+  }
+
+
+  /**
+   * Construct an instance with the default name.
+   */
+  public WorkflowCompositeService() {
+    this("WorkflowCompositeService");
+  }
+
+  /**
+   * Varargs constructor
+   * @param name name of this service instance
+   * @param children children
+   */
+  public WorkflowCompositeService(String name, Service... children) {
+    this(name);
+    for (Service child : children) {
+      addService(child);
+    }
+  }
+
+  /**
+   * Construct with a list of children
+   * @param name name of this service instance
+   * @param children children to add
+   */
+  public WorkflowCompositeService(String name, List<Service> children) {
+    this(name);
+    for (Service child : children) {
+      addService(child);
+    }
+  }
+
+  /**
+   * Add a service, and register it
+   * @param service the {@link Service} to be added.
+   * Important: do not add a service to a parent during your own serviceInit/start,
+   * in Hadoop 2.2; you will trigger a ConcurrentModificationException.
+   */
+  @Override
+  public synchronized void addService(Service service) {
+    Preconditions.checkArgument(service != null, "null service argument");
+    service.registerServiceListener(this);
+    super.addService(service);
+  }
+
+  /**
+   * When this service is started, any service stopping with a failure
+   * exception is converted immediately into a failure of this service, 
+   * storing the failure and stopping ourselves.
+   * @param child the service that has changed.
+   */
+  @Override
+  public void stateChanged(Service child) {
+    //if that child stopped while we are running:
+    if (isInState(STATE.STARTED) && child.isInState(STATE.STOPPED)) {
+      // a child service has stopped
+      //did the child fail? if so: propagate
+      Throwable failureCause = child.getFailureCause();
+      if (failureCause != null) {
+        LOG.info("Child service " + child + " failed", failureCause);
+        //failure. Convert to an exception
+        Exception e = (failureCause instanceof Exception) ?
+            (Exception) failureCause : new Exception(failureCause);
+        //flip ourselves into the failed state
+        noteFailure(e);
+        stop();
+      } else {
+        LOG.info("Child service completed {}", child);
+        if (areAllChildrenStopped()) {
+          LOG.info("All children are halted: stopping");
+          stop();
+        }
+      }
+    }
+  }
+
+  /**
+   * Probe to query if all children are stopped -simply
+   * by taking a snapshot of the child service list and enumerating
+   * their state. 
+   * The state of the children may change during this operation -that will
+   * not get picked up.
+   * @return true if all the children are stopped.
+   */
+  private boolean areAllChildrenStopped() {
+    List<Service> children = getServices();
+    boolean stopped = true;
+    for (Service child : children) {
+      if (!child.isInState(STATE.STOPPED)) {
+        stopped = false;
+        break;
+      }
+    }
+    return stopped;
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/RpcService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java
similarity index 73%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/RpcService.java
rename to slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java
index 72412d4..b71530f 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/RpcService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java
@@ -16,8 +16,9 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.server.services.workflow;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.service.AbstractService;
@@ -26,26 +27,36 @@
 
 /**
  * A YARN service that maps the start/stop lifecycle of an RPC server
- * to the YARN service lifecycle
+ * to the YARN service lifecycle. 
  */
-public class RpcService extends AbstractService {
+public class WorkflowRpcService extends AbstractService {
 
   /** RPC server*/
   private final Server server;
 
   /**
    * Construct an instance
-   * @param server server to manger
+   * @param name service name
+   * @param server the RPC server to manage
    */
-  public RpcService(Server server) {
-    super("RpcService");
+  public WorkflowRpcService(String name, Server server) {
+    super(name);
+    Preconditions.checkArgument(server != null, "Null server");
     this.server = server;
   }
 
+  /**
+   * Get the server
+   * @return the server
+   */
   public Server getServer() {
     return server;
   }
 
+  /**
+   * Get the socket address of this server
+   * @return the address this server is listening on
+   */
   public InetSocketAddress getConnectAddress() {
     return NetUtils.getConnectAddress(server);
   }
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/SequenceService.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java
similarity index 61%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/SequenceService.java
rename to slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java
index 5136645..ca07f99 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/SequenceService.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java
@@ -16,8 +16,9 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.server.services.workflow;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceStateChangeListener;
@@ -31,53 +32,103 @@
 
 /**
  * This resembles the YARN CompositeService, except that it
- * starts one service after another: it's init & start operations
- * only work with one service
+ * starts one service after another
+ * 
+ * Workflow
+ * <ol>
+ *   <li>When the <code>WorkflowSequenceService</code> instance is
+ *   initialized, it only initializes itself.</li>
+ *   
+ *   <li>When the <code>WorkflowSequenceService</code> instance is
+ *   started, it initializes then starts the first of its children.
+ *   If there are no children, it immediately stops.</li>
+ *   
+ *   <li>When the active child stops without failing and the parent has not
+ *   stopped, the next service is initialized and started. If there is no
+ *   remaining child, the parent service stops.</li>
+ *   
+ *   <li>If the active child did fail, the parent service notes the exception
+ *   and stops -effectively propagating up the failure.
+ *   </li>
+ * </ol>
+ * 
+ * New service instances MAY be added to a running instance -but no guarantees
+ * can be made as to whether or not they will be run.
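+ * 
+ * <p>
+ * A sketch of sequencing two services -the children here are illustrative:
+ * <pre>
+ *   WorkflowSequenceService sequence =
+ *       new WorkflowSequenceService("sequence", first, second);
+ *   sequence.init(conf);
+ *   sequence.start();
+ * </pre>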
  */
 
-public class SequenceService extends AbstractService implements Parent,
-                                                     ServiceStateChangeListener {
+public class WorkflowSequenceService extends AbstractService implements
+    ServiceParent, ServiceStateChangeListener {
 
-  private static final Logger log =
-    LoggerFactory.getLogger(SequenceService.class);
+  private static final Logger LOG =
+    LoggerFactory.getLogger(WorkflowSequenceService.class);
 
   /**
    * list of services
    */
-  private final List<Service> serviceList = new ArrayList<>();
+  private final List<Service> serviceList = new ArrayList<Service>();
 
   /**
-   * The current service.
+   * The currently active service.
    * Volatile -may change & so should be read into a 
    * local variable before working with
    */
-  private volatile Service currentService;
-  /*
+  private volatile Service activeService;
+
+  /**
   the previous service -the last one that finished. 
-  Null if one did not finish yet
+  null if one did not finish yet
    */
   private volatile Service previousService;
 
   /**
+   * Construct an instance
+   * @param name service name
+   */
+  public WorkflowSequenceService(String name) {
+    super(name);
+  }
+
+  /**
+   * Construct an instance with the default name
+   */
+  public WorkflowSequenceService() {
+    this("WorkflowSequenceService");
+  }
+
+  /**
    * Create a service sequence with the given list of services
    * @param name service name
-   * @param offspring initial sequence
+   * @param children initial sequence
    */
-   public SequenceService(String name, Service... offspring) {
+  public WorkflowSequenceService(String name, Service... children) {
     super(name);
-     for (Service service : offspring) {
-       addService(service);
-     }
+    for (Service service : children) {
+      addService(service);
+    }
+  }
+
+  /**
+   * Create a service sequence with the given list of services
+   * @param name service name
+   * @param children initial sequence
+   */
+  public WorkflowSequenceService(String name, List<Service> children) {
+    super(name);
+    for (Service service : children) {
+      addService(service);
+    }
   }
 
   /**
    * Get the current service -which may be null
    * @return service running
    */
-  public Service getCurrentService() {
-    return currentService;
+  public Service getActiveService() {
+    return activeService;
   }
 
+  /**
+   * Get the previously active service
+   * @return the service last run, or null if there is none.
+   */
   public Service getPreviousService() {
     return previousService;
   }
@@ -88,22 +139,24 @@
    */
   @Override
   protected void serviceStart() throws Exception {
-    startNextService();
+    if (!startNextService()) {
+      //nothing to start -so stop
+      stop();
+    }
   }
 
   @Override
   protected void serviceStop() throws Exception {
     //stop current service.
     //this triggers a callback that is caught and ignored
-    Service current = currentService;
+    Service current = activeService;
     previousService = current;
-    currentService = null;
+    activeService = null;
     if (current != null) {
       current.stop();
     }
   }
 
-
   /**
    * Start the next service in the list.
    * Return false if there are no more services to run, or this
@@ -116,7 +169,7 @@
   public synchronized boolean startNextService() {
     if (isInState(STATE.STOPPED)) {
       //downgrade to a failed
-      log.debug("Not starting next service -{} is stopped", this);
+      LOG.debug("Not starting next service -{} is stopped", this);
       return false;
     }
     if (!isInState(STATE.STARTED)) {
@@ -128,10 +181,10 @@
       //nothing left to run
       return false;
     }
-    if (currentService != null && currentService.getFailureCause() != null) {
+    if (activeService != null && activeService.getFailureCause() != null) {
       //did the last service fail? Is this caused by some premature callback?
-      log.debug("Not starting next service due to a failure of {}",
-                currentService);
+      LOG.debug("Not starting next service due to a failure of {}",
+          activeService);
       return false;
     }
     //bear in mind that init & start can fail, which
@@ -140,7 +193,7 @@
     //the start-next-service logic is skipped.
     //now, what does that mean w.r.t exit states?
 
-    currentService = null;
+    activeService = null;
     Service head = serviceList.remove(0);
 
     try {
@@ -153,7 +206,7 @@
     }
     //at this point the service must have explicitly started & not failed,
     //else an exception would have been raised
-    currentService = head;
+    activeService = head;
     return true;
   }
 
@@ -165,7 +218,9 @@
    */
   @Override
   public void stateChanged(Service service) {
-    if (service == currentService && service.isInState(STATE.STOPPED)) {
+    // only react to the state change when it is the current service
+    // and it has entered the STOPPED state
+    if (service == activeService && service.isInState(STATE.STOPPED)) {
       onServiceCompleted(service);
     }
   }
@@ -175,8 +230,8 @@
    * @param service service that has completed
    */
   protected synchronized void onServiceCompleted(Service service) {
-    log.info("Running service stopped: {}", service);
-    previousService = currentService;
+    LOG.info("Running service stopped: {}", service);
+    previousService = activeService;
     
 
     //start the next service if we are not stopped ourselves
@@ -185,7 +240,8 @@
       //did the service fail? if so: propagate
       Throwable failureCause = service.getFailureCause();
       if (failureCause != null) {
-        Exception e = SliderServiceUtils.convertToException(failureCause);
+        Exception e = (failureCause instanceof Exception) ?
+                      (Exception) failureCause : new Exception(failureCause);
         noteFailure(e);
         stop();
       }
@@ -207,18 +263,19 @@
     } else {
       //not started, so just note that the current service
       //has gone away
-      currentService = null;
+      activeService = null;
     }
   }
 
   /**
    * Add the passed {@link Service} to the list of services managed by this
-   * {@link SequenceService}
+   * {@link WorkflowSequenceService}
    * @param service the {@link Service} to be added
    */
-  @Override //Parent
+  @Override
   public synchronized void addService(Service service) {
-    log.debug("Adding service {} ", service.getName());
+    Preconditions.checkArgument(service != null, "null service argument");
+    LOG.debug("Adding service {} ", service.getName());
     synchronized (serviceList) {
       serviceList.add(service);
     }
@@ -236,7 +293,7 @@
 
   @Override // Object
   public synchronized String toString() {
-    return super.toString() + "; current service " + currentService
+    return super.toString() + "; current service " + activeService
            + "; queued service count=" + serviceList.size();
   }
 
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java b/slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java
new file mode 100644
index 0000000..fab1b9f
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+/**
+
+<p>
+ This package contains classes which can be aggregated to build up
+ complex workflows of services: sequences of operations, callbacks
+ and composite services with a shared lifespan.
+ </p>
+
+<h2>
+ Core concepts:
+</h2>
+
+
+<p>
+The Workflow Services are a set of Hadoop YARN services, all implementing
+the {@link org.apache.hadoop.service.Service} API.
+They are designed to be aggregated, to be composed to produce larger
+composite services which then perform ordered operations, notify other services
+when work has completed, and to propagate failure up the service hierarchy.
+</p>
+<p>
+Service instances may have a limited lifespan, and may self-terminate when
+they consider it appropriate.</p>
+<p>
+Workflow Services that have children implement the
+{@link org.apache.slider.server.services.workflow.ServiceParent}
+class, which provides (thread-safe) access to the children -allowing new children
+to be added, and existing children to be enumerated. They implement policies
+on how to react to the termination of children -so can sequence operations
+which terminate themselves when complete.
+</p>
+
+<p>
+Workflow Services may be subclassed to extend their behavior, or to adapt them
+to specific applications. Just as the standard
+{@link org.apache.hadoop.service.CompositeService}
+is often subclassed to aggregate child services, the
+{@link org.apache.slider.server.services.workflow.WorkflowCompositeService}
+can be used instead, adding the feature that failing services trigger automatic
+parent shutdown. If that is the desired operational mode of a class,
+swapping the composite service implementation may be sufficient to adopt it.
+</p>
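+
+<p>
+As a minimal sketch, assuming a hypothetical <code>MyAppServices</code>
+class with hypothetical child services, adopting this mode can be as simple
+as changing the superclass:
+</p>
+<pre>
+  public class MyAppServices extends WorkflowCompositeService {
+    public MyAppServices() {
+      super("MyAppServices");
+      // children are inited and started together, as with CompositeService;
+      // the difference: if any child stops, this parent now stops too
+      addService(new SomeChildService());
+      addService(new AnotherChildService());
+    }
+  }
+</pre>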
+
+
+<h2> How do the workflow services differ from the standard YARN services? </h2>
+
+ <p>
+ There is exactly one standard YARN service for managing children, the
+ {@link org.apache.hadoop.service.CompositeService}.
+ </p><p>
+ The {@link org.apache.slider.server.services.workflow.WorkflowCompositeService}
+ shares the same model of "child services, all inited and started together".
+ Where it differs is that if any child service stops, either due to a failure
+ or to an action which invokes that service's
+ {@link org.apache.hadoop.service.Service#stop()} method, the parent
+ service is stopped as well.
+ </p>
+ <p>
+
+In contrast, the original <code>CompositeService</code> class starts its children
+in its {@link org.apache.hadoop.service.Service#start()} method, but does not
+listen or react to any child service halting. As a result, changes in child
+state are not automatically detected or propagated, other than failures in
+the actual init() and start() methods.
+</p>
+
+<p>
+If a child service runs until completed, that is, it will not be stopped until
+instructed to do so, and if it is only the parent service that attempts to
+stop the child, then this difference is unimportant.
+</p>
+<p>
+However, for a service that depends upon all its child services running,
+and whose child services are written so as to stop when they fail, using
+the <code>WorkflowCompositeService</code> as a base class will enable the
+parent service to be automatically notified of a child stopping.
+</p>
+<p>
+The {@link org.apache.slider.server.services.workflow.WorkflowSequenceService}
+resembles the composite service in API, but its workflow is different. It
+initializes and starts its children one-by-one, only starting the second after
+the first one succeeds, the third after the second, etc. If any service in
+the sequence fails, the parent <code>WorkflowSequenceService</code> stops, 
+reporting the same exception. 
+</p>
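+
+<p>
+A minimal sketch of sequencing, assuming the single-argument (name)
+constructor and two hypothetical child services:
+</p>
+<pre>
+  WorkflowSequenceService sequence = new WorkflowSequenceService("setup");
+  sequence.addService(new CreateDirectoriesService());
+  sequence.addService(new UploadConfigService());
+  sequence.init(new Configuration());
+  sequence.start();
+  // the second service starts only after the first has stopped successfully;
+  // a failure anywhere stops the sequence, reporting that exception
+</pre>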
+
+<p>
+The {@link org.apache.slider.server.services.workflow.ForkedProcessService}
+executes a process when started, and binds to the life of that process. When the
+process terminates, so does the service, and vice versa. This service enables
+external processes to be executed as part of a sequence of operations, or,
+using the {@link org.apache.slider.server.services.workflow.WorkflowCompositeService},
+to run in parallel with other services, terminating the process when the other
+services stop, and vice versa.
+</p>
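+
+<p>
+A sketch of running an external command within a sequence. The constructor
+shown, taking a name, an environment map and a command list, is an
+assumption; check the class itself for the current signatures:
+</p>
+<pre>
+  Map env = new HashMap();       // raw types keep the sketch short
+  List cmd = Arrays.asList("tar", "-xf", "application.tar");
+  WorkflowSequenceService sequence = new WorkflowSequenceService("deploy");
+  sequence.addService(new ForkedProcessService("untar", env, cmd));
+  // when the process exits, the service stops, letting the sequence move on
+</pre>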
+
+<p>
+The {@link org.apache.slider.server.services.workflow.WorkflowCallbackService}
+executes a {@link java.util.concurrent.Callable} callback a specified delay
+after the service is started, then potentially terminates itself.
+This is useful for callbacks when a workflow reaches a specific point,
+or simply for executing arbitrary code in the workflow.
+</p>
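+
+<p>
+A sketch of a delayed callback. The parameter ordering shown, name,
+callback, delay in milliseconds and a self-terminate flag, is an assumption
+made for illustration:
+</p>
+<pre>
+  Callable reportReady = new Callable() {
+    public Object call() {
+      System.out.println("workflow reached the 'ready' point");
+      return null;
+    }
+  };
+  Service callback =
+      new WorkflowCallbackService("ready", reportReady, 1000, true);
+</pre>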
+
+
+<h2>
+Other Workflow Services
+</h2>
+
+There are some minor services that have proven useful within aggregate workflows,
+or simply in applications which are built from composite YARN services.
+
+ <ul>
+ <li>{@link org.apache.slider.server.services.workflow.WorkflowRpcService}:
+ Maintains a reference to an RPC {@link org.apache.hadoop.ipc.Server} instance.
+ When the service is started, so is the RPC server. Similarly, when the service
+ is stopped, so is the RPC server instance. 
+ </li>
+
+ <li>{@link org.apache.slider.server.services.workflow.ClosingService}: Closes
+ an instance of {@link java.io.Closeable} when the service is stopped. This
+ is purely a housekeeping class; a sketch of its use follows this list.
+ </li>
+
+ </ul>
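+
+ <p>
+ As a sketch of the <code>ClosingService</code> pattern, assuming a
+ two-argument (name, closeable) constructor and with <code>parent</code>
+ standing in for any service parent, a stream can be tied to a service's
+ lifespan:
+ </p>
+ <pre>
+  FileOutputStream out = new FileOutputStream("operations.log");
+  // when 'parent' stops, the ClosingService stops and closes the stream
+  parent.addService(new ClosingService("log-closer", out));
+ </pre>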
+
+<h2>
+ Lower-level classes
+</h2>
+ <ul>
+ <li>{@link org.apache.slider.server.services.workflow.ServiceTerminatingRunnable}:
+ A {@link java.lang.Runnable} which runs the runnable supplied in its constructor
+ then signals its owning service to stop once that runnable is completed. 
+ Any exception raised in the run is stored.
+ </li>
+ <li>{@link org.apache.slider.server.services.workflow.AbstractWorkflowExecutorService}:
+ A base class for services that wish to have an {@link java.util.concurrent.ExecutorService}
+ with a lifespan mapped to that of the service. When the service is stopped, the
+ {@link java.util.concurrent.ExecutorService#shutdownNow()} method is called to
+ attempt to shut down all running tasks.
+ </li>
+ <li>{@link org.apache.slider.server.services.workflow.ServiceThreadFactory}:
+ This is a simple {@link java.util.concurrent.ThreadFactory} which generates
+ meaningful thread names. It can be passed to the constructors of
+ {@link java.util.concurrent.ExecutorService} instances, so that thread names
+ in log output can be tied back to the related services (see the sketch
+ after this list).</li>
+ </ul>
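+
+ <p>
+ A sketch combining these lower-level classes, with <code>owner</code> and
+ <code>action</code> as placeholders for a real service and runnable, and an
+ assumed (name, daemon flag) constructor on the thread factory:
+ </p>
+ <pre>
+  ExecutorService pool = Executors.newSingleThreadExecutor(
+      new ServiceThreadFactory("worker", true));
+  pool.submit(new ServiceTerminatingRunnable(owner, action));
+  // when 'action' completes, the runnable signals 'owner' to stop
+ </pre>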
+
+
+
+ */
diff --git a/slider-core/src/main/proto/SliderClusterMessages.proto b/slider-core/src/main/proto/SliderClusterMessages.proto
index 00da2b8..6b30846 100644
--- a/slider-core/src/main/proto/SliderClusterMessages.proto
+++ b/slider-core/src/main/proto/SliderClusterMessages.proto
@@ -20,6 +20,10 @@
  * These .proto interfaces are private and stable.
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
+ *
+ * The generated Java files are checked into the Slider code base, so
+ * if this file is changed, the Java files must be regenerated with
+ * mvn clean package -Pcompile-protobuf -DskipTests
  */
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
diff --git a/slider-core/src/main/proto/SliderClusterProtocol.proto b/slider-core/src/main/proto/SliderClusterProtocol.proto
index a2a1001..2f5289d 100644
--- a/slider-core/src/main/proto/SliderClusterProtocol.proto
+++ b/slider-core/src/main/proto/SliderClusterProtocol.proto
@@ -20,6 +20,10 @@
  * These .proto interfaces are private and stable.
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
+ *
+ * The generated Java files are checked into the Slider code base, so
+ * if this file is changed, the Java files must be regenerated with
+ * mvn clean package -Pcompile-protobuf -DskipTests
  */
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
diff --git a/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json b/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
index f28159a..89095b1 100644
--- a/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
+++ b/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
@@ -12,8 +12,9 @@
 
   "components": {
     "slider-appmaster" : {
-      "jvm.heapsize": "256M"
+      "jvm.heapsize": "256M",
+      "ssl.server.keystore.location": "/tmp/work/security/keystore.p12"
     }
 
   }
-}
\ No newline at end of file
+}
diff --git a/slider-core/src/main/resources/webapps/slideragent/.keep b/slider-core/src/main/resources/webapps/slideragent/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/slider-core/src/main/resources/webapps/slideragent/.keep
diff --git a/slider-core/src/main/resources/webapps/static/yarn.dt.plugins.js b/slider-core/src/main/resources/webapps/static/yarn.dt.plugins.js
index d0bde29..6b8d16c 100644
--- a/slider-core/src/main/resources/webapps/static/yarn.dt.plugins.js
+++ b/slider-core/src/main/resources/webapps/static/yarn.dt.plugins.js
@@ -22,7 +22,7 @@
 // don't filter on hidden html elements for an sType of title-numeric
 $.fn.dataTableExt.ofnSearch['title-numeric'] = function ( sData ) {
    return sData.replace(/\n/g," ").replace( /<.*?>/g, "" );
-}
+};
 
 // 'title-numeric' sort type
 jQuery.fn.dataTableExt.oSort['title-numeric-asc']  = function(a,b) {
@@ -71,7 +71,7 @@
     return this;
   } );
   return this;
-}
+};
 
 function renderHadoopDate(data, type, full) {
   if (type === 'display' || type === 'filter') {
diff --git a/slider-core/src/test/app_packages/test_command_log/appConfig.json b/slider-core/src/test/app_packages/test_command_log/appConfig.json
index 0bace19..e7f9700 100644
--- a/slider-core/src/test/app_packages/test_command_log/appConfig.json
+++ b/slider-core/src/test/app_packages/test_command_log/appConfig.json
@@ -3,16 +3,16 @@
     "metadata": {
     },
     "global": {
-        "agent.conf": "/slider/agent/conf/agent.ini",
-        "application.def": "/slider/cmd_log_app_pkg.zip",
+        "agent.conf": "agent.ini",
+        "application.def": "apache-slider-command-logger.zip",
         "config_types": "cl-site",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "package_list": "files/command_log_10.tar",
+        "package_list": "files/command-logger.tar",
         "site.global.app_user": "yarn",
         "site.global.application_id": "CommandLogger",
         "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
         "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-0.96.1-hadoop2",
+        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/command-logger",
         "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
         "site.cl-site.logfile.location": "${AGENT_LOG_ROOT}/app/log/operations.log",
         "site.cl-site.datetime.format": "%A, %d. %B %Y %I:%M%p"
diff --git a/slider-core/src/test/app_packages/test_command_log/appConfig_fast_no_reg.json b/slider-core/src/test/app_packages/test_command_log/appConfig_fast_no_reg.json
new file mode 100644
index 0000000..57c935c
--- /dev/null
+++ b/slider-core/src/test/app_packages/test_command_log/appConfig_fast_no_reg.json
@@ -0,0 +1,29 @@
+{
+    "schema": "http://example.org/specification/v2.0.0",
+    "metadata": {
+    },
+    "global": {
+        "heartbeat.monitor.interval": "20000",
+        "agent.instance.debug.data": "ANY:DO_NOT_REGISTER:NONE",
+        "agent.conf": "agent.ini",
+        "application.def": "apache-slider-command-logger.zip",
+        "config_types": "cl-site",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "package_list": "files/command-logger.tar",
+        "site.global.app_user": "yarn",
+        "site.global.application_id": "CommandLogger",
+        "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+        "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
+        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/command-logger",
+        "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
+        "site.cl-site.logfile.location": "${AGENT_LOG_ROOT}/app/log/operations.log",
+        "site.cl-site.datetime.format": "%A, %d. %B %Y %I:%M%p"
+    },
+    "components": {
+        "COMMAND_LOGGER": {
+        },
+        "slider-appmaster": {
+            "jvm.heapsize": "256M"
+        }
+    }
+}
diff --git a/slider-core/src/test/app_packages/test_command_log/appConfig_no_hb.json b/slider-core/src/test/app_packages/test_command_log/appConfig_no_hb.json
new file mode 100644
index 0000000..e028140
--- /dev/null
+++ b/slider-core/src/test/app_packages/test_command_log/appConfig_no_hb.json
@@ -0,0 +1,29 @@
+{
+    "schema": "http://example.org/specification/v2.0.0",
+    "metadata": {
+    },
+    "global": {
+        "heartbeat.monitor.interval": "20000",
+        "agent.instance.debug.data": "ANY:DO_NOT_HEARTBEAT:DO_NOT_HEARTBEAT:NONE",
+        "agent.conf": "agent.ini",
+        "application.def": "apache-slider-command-logger.zip",
+        "config_types": "cl-site",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "package_list": "files/command-logger.tar",
+        "site.global.app_user": "yarn",
+        "site.global.application_id": "CommandLogger",
+        "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+        "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
+        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/command-logger",
+        "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
+        "site.cl-site.logfile.location": "${AGENT_LOG_ROOT}/app/log/operations.log",
+        "site.cl-site.datetime.format": "%A, %d. %B %Y %I:%M%p"
+    },
+    "components": {
+        "COMMAND_LOGGER": {
+        },
+        "slider-appmaster": {
+            "jvm.heapsize": "256M"
+        }
+    }
+}
diff --git a/slider-core/src/test/app_packages/test_command_log/cmd_log_app_pkg.zip b/slider-core/src/test/app_packages/test_command_log/cmd_log_app_pkg.zip
deleted file mode 100644
index e44907a..0000000
--- a/slider-core/src/test/app_packages/test_command_log/cmd_log_app_pkg.zip
+++ /dev/null
Binary files differ
diff --git a/slider-core/src/test/app_packages/test_command_log/metainfo.xml b/slider-core/src/test/app_packages/test_command_log/metainfo.xml
deleted file mode 100644
index f5fde30..0000000
--- a/slider-core/src/test/app_packages/test_command_log/metainfo.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>TEST_COMMAND_LOG</name>
-      <comment>
-        When started it creates a new log file and stores all commands in the
-        log file. When stopped it renames the file.
-      </comment>
-      <version>0.1.0</version>
-      <components>
-        <component>
-          <name>COMMAND_LOGGER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/cl.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/command_log.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>
diff --git a/slider-core/src/test/app_packages/test_command_log/package/files/command_log_10.tar b/slider-core/src/test/app_packages/test_command_log/package/files/command_log_10.tar
deleted file mode 100644
index b8231d1..0000000
--- a/slider-core/src/test/app_packages/test_command_log/package/files/command_log_10.tar
+++ /dev/null
Binary files differ
diff --git a/slider-core/src/test/app_packages/test_command_log/resources_no_role.json b/slider-core/src/test/app_packages/test_command_log/resources_no_role.json
new file mode 100644
index 0000000..7913fe2
--- /dev/null
+++ b/slider-core/src/test/app_packages/test_command_log/resources_no_role.json
@@ -0,0 +1,15 @@
+{
+    "schema": "http://example.org/specification/v2.0.0",
+    "metadata": {
+    },
+    "global": {
+    },
+    "components": {
+        "COMMAND_LOGGER": {
+            "yarn.role.priority": "1",
+            "yarn.component.instances": "0"
+        },
+        "slider-appmaster": {
+        }
+    }
+}
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/AgentMiniClusterTestBase.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/AgentMiniClusterTestBase.groovy
index 445d0a4..8a4e5d8 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/AgentMiniClusterTestBase.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/AgentMiniClusterTestBase.groovy
@@ -20,15 +20,19 @@
 
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream
+import org.apache.commons.compress.utils.IOUtils
+import org.apache.commons.io.FileUtils
 import org.apache.slider.client.SliderClient
 import org.apache.slider.common.SliderXMLConfKeysForTesting
 import org.apache.slider.common.params.Arguments
 import org.apache.slider.core.main.ServiceLauncher
 import org.apache.slider.providers.agent.AgentKeys
 import org.apache.slider.test.YarnZKMiniClusterTestBase
+import org.junit.AfterClass
 import org.junit.BeforeClass
-
-import javax.swing.ListModel
+import org.junit.rules.TemporaryFolder
 
 /**
  * test base for agent clusters
@@ -36,28 +40,52 @@
 @CompileStatic
 @Slf4j
 public abstract class AgentMiniClusterTestBase
-    extends YarnZKMiniClusterTestBase {
+extends YarnZKMiniClusterTestBase {
   protected static File agentConf
   protected static File agentDef
   protected static File imagePath
-  protected static Map<String, String> agentDefOptions 
+  protected static Map<String, String> agentDefOptions
+  private static TemporaryFolder tempFolder = new TemporaryFolder();
 
   @BeforeClass
   public static void createSubConfFiles() {
+
     File destDir = new File("target/agent_minicluster_testbase")
     destDir.mkdirs()
     agentConf = new File(destDir, "agentconf.zip")
     agentConf.createNewFile()
     agentDef = new File(destDir, "agentdef")
     agentDef.createNewFile()
-    File slider_dir = new File(new File(".").absoluteFile, "src/test/python");
-    imagePath = new File(slider_dir, "appdef_1.zip")
+
+    // dynamically create the app package for the test
+    tempFolder.create()
+    def pkgPath = tempFolder.newFolder("testpkg")
+    File imagePath = new File(pkgPath, "appdef_1.zip").canonicalFile
+    File metainfo = new File(new File(".").absoluteFile, "src/test/python/metainfo.xml");
+    ZipArchiveOutputStream zipFile = new ZipArchiveOutputStream(new FileOutputStream(imagePath));
+    try {
+      zipFile.putArchiveEntry(new ZipArchiveEntry(metainfo.name));
+      IOUtils.copy(new FileInputStream(metainfo), zipFile);
+      zipFile.closeArchiveEntry();
+    }
+    finally {
+      zipFile.close();
+    }
+
     agentDefOptions = [
-        (AgentKeys.APP_DEF)   : imagePath.toURI().toString(),
+        (AgentKeys.APP_DEF): imagePath.toURI().toString(),
         (AgentKeys.AGENT_CONF): agentConf.toURI().toString()
     ]
   }
 
+  @AfterClass
+  public static void cleanSubConfFiles() {
+    if (tempFolder.getRoot().exists()) {
+      FileUtils.deleteDirectory(tempFolder.getRoot());
+    }
+  }
+
+
   @Override
   public String getTestConfigurationPath() {
     return "src/main/resources/" + AgentKeys.CONF_RESOURCE;
@@ -76,7 +104,6 @@
   void teardown() {
     super.teardown();
     if (teardownKillall) {
-
     }
   }
 
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionExists.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionExists.groovy
index fefed8e..cb05fd0 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionExists.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionExists.groovy
@@ -62,7 +62,7 @@
           Arguments.ARG_MANAGER, RMAddr
           ],
       )
-      Assert.fail("expected an exception, got a status code "+ launcher.serviceExitCode)
+      fail("expected an exception, got a status code "+ launcher.serviceExitCode)
     } catch (UnknownApplicationInstanceException e) {
       
     }
@@ -71,11 +71,11 @@
   @Test
   public void testExistsLiveCluster() throws Throwable {
     //launch the cluster
-    String clustername = "testExistsLiveCluster"
-    ServiceLauncher launcher = createMasterlessAM(clustername, 0, true, false)
+    String clustername = createClusterName()
+    ServiceLauncher<SliderClient> launcher = createMasterlessAM(clustername, 0, true, false)
     SliderClient sliderClient = launcher.service
     addToTeardown(launcher)
-    ApplicationReport report = waitForClusterLive((SliderClient) launcher.service)
+    ApplicationReport report = waitForClusterLive(sliderClient)
 
     // exists holds when cluster is running
     launcher = launchClientAgainstMiniMR(
@@ -88,7 +88,7 @@
           Arguments.ARG_MANAGER, RMAddr
           ],
       )
-    SliderTestUtils.assertSucceeded(launcher)
+    assertSucceeded(launcher)
 
     //and when cluster is running
     launcher = launchClientAgainstMiniMR(
@@ -103,7 +103,7 @@
           ],
       )
 
-    SliderTestUtils.assertSucceeded(launcher)
+    assertSucceeded(launcher)
     
     // assert that the cluster exists
 
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionList.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionList.groovy
index 1849807..66fdc15 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionList.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/actions/TestActionList.groovy
@@ -40,7 +40,7 @@
   @Before
   public void setup() {
     super.setup()
-    createMiniCluster("testActionList", configuration, 1, false)
+    createMiniCluster("test_action_list", configuration, 1, false)
   }
 
   /**
@@ -87,7 +87,7 @@
 
   public void testListLiveCluster() throws Throwable {
     //launch the cluster
-    String clustername = "test_list_live_cluster"
+    String clustername = createClusterName()
     ServiceLauncher<SliderClient> launcher = createMasterlessAM(clustername, 0, true, false)
     addToTeardown(launcher)
     //do the low level operations to get a better view of what is going on 
@@ -135,7 +135,7 @@
           //varargs list of command line params
           [
               SliderActions.ACTION_LIST,
-              "testStatusMissingCluster"
+              createClusterName()
           ]
       )
       fail("expected an exception, got a status code " + launcher.serviceExitCode)
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneAgentAM.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneAgentAM.groovy
index d700fea..bce24e6 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneAgentAM.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneAgentAM.groovy
@@ -101,7 +101,7 @@
     dumpRegistryServiceTypes(names)
     describe "service registry instance IDs"
 
-    def instanceIds = client.listRegistedSliderInstances()
+    def instanceIds = client.listRegisteredSliderInstances()
 
     log.info("number of instanceIds: ${instanceIds.size()}")
     instanceIds.each { String it -> log.info(it) }
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneRegistryAM.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneRegistryAM.groovy
index 7639375..cb55624 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneRegistryAM.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneRegistryAM.groovy
@@ -117,7 +117,7 @@
     def serviceTypes = registryService.serviceTypes;
     dumpRegistryServiceTypes(serviceTypes)
 
-    List<String> instanceIds = client.listRegistedSliderInstances()
+    List<String> instanceIds = client.listRegisteredSliderInstances()
 
 
     dumpRegistryInstanceIDs(instanceIds)
diff --git a/slider-core/src/test/groovy/org/apache/slider/common/tools/TestZKIntegration.groovy b/slider-core/src/test/groovy/org/apache/slider/common/tools/TestZKIntegration.groovy
index 3930864..fe3bef7 100644
--- a/slider-core/src/test/groovy/org/apache/slider/common/tools/TestZKIntegration.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/common/tools/TestZKIntegration.groovy
@@ -20,6 +20,7 @@
 
 import groovy.util.logging.Slf4j
 import org.apache.hadoop.conf.Configuration
+import org.apache.slider.client.SliderClient
 import org.apache.slider.core.zk.ZKIntegration
 import org.apache.slider.test.KeysForTests
 import org.apache.slider.test.YarnZKMiniClusterTestBase
@@ -88,10 +89,62 @@
            (c1.endsWith(clusters[1]) && c2.endsWith(clusters[0]))
   }
 
+  @Test
+  public void testCreateAndDeleteDefaultZKPath() throws Throwable {
+    MockSliderClient client = new MockSliderClient()
+
+    String path = client.createZookeeperNode("cl1", true)
+    ZKIntegration zki = client.getLastZKIntegration()
+
+    String zkPath = ZKIntegration.mkClusterPath(USERNAME, "cl1")
+    assert zkPath == "/services/slider/users/" + USERNAME + "/cl1", "zkPath must be as expected"
+    assert path == zkPath
+    assert zki == null, "ZKIntegration should be null."
+    zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false, 5000);
+    assert false == zki.exists(zkPath), "zkPath should not exist"
+
+    path = client.createZookeeperNode("cl1", false)
+    zki = client.getLastZKIntegration()
+    assert zkPath == "/services/slider/users/" + USERNAME + "/cl1", "zkPath must be as expected"
+    assert path == zkPath
+    assert true == zki.exists(zkPath), "zkPath must exist"
+    zki.createPath(zkPath, "/cn", ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)
+    assert true == zki.exists(zkPath + "/cn"), "zkPath with child node must exist"
+    client.deleteZookeeperNode("cl1")
+    assert false == zki.exists(zkPath), "zkPath must not exist"
+
+  }
+
   public String createEphemeralChild(ZKIntegration zki, String userPath) {
     return zki.createPath(userPath, "/cluster-",
                           ZooDefs.Ids.OPEN_ACL_UNSAFE,
                           CreateMode.EPHEMERAL_SEQUENTIAL)
   }
 
+  class MockSliderClient extends SliderClient {
+    private ZKIntegration zki;
+
+    @Override
+    public String getUsername() {
+      return USERNAME
+    }
+
+    @Override
+    protected ZKIntegration getZkClient(String clusterName, String user) {
+      zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false, 5000)
+      return zki;
+    }
+
+    @Override
+    public synchronized Configuration getConfig() {
+      Configuration conf = new Configuration();
+      return conf;
+    }
+
+    public ZKIntegration getLastZKIntegration() {
+      return zki
+    }
+
+  }
+
 }
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestBase.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestBase.groovy
index b6d0c6c..9b4c377 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestBase.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestBase.groovy
@@ -20,11 +20,17 @@
 
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream
+import org.apache.commons.compress.utils.IOUtils
 import org.apache.hadoop.yarn.conf.YarnConfiguration
 import org.apache.slider.client.SliderClient
 import org.apache.slider.common.params.SliderActions
 import org.apache.slider.core.main.ServiceLauncher
 import org.apache.slider.test.YarnZKMiniClusterTestBase
+import org.junit.Before
+import org.junit.Rule
+import org.junit.rules.TemporaryFolder
 
 import static org.apache.slider.common.SliderXMLConfKeysForTesting.*
 import static org.apache.slider.providers.agent.AgentKeys.CONF_RESOURCE
@@ -36,17 +42,31 @@
 @Slf4j
 public abstract class AgentTestBase extends YarnZKMiniClusterTestBase {
 
-  public static
-  final int AGENT_CLUSTER_STARTUP_TIME = 1000 * DEFAULT_AGENT_LAUNCH_TIME_SECONDS
+  public static final int AGENT_CLUSTER_STARTUP_TIME = 1000 * DEFAULT_AGENT_LAUNCH_TIME_SECONDS
 
-  /**
-   * The time to sleep before trying to talk to the HBase Master and
-   * expect meaningful results.
-   */
-  public static
-  final int AGENT_CLUSTER_STARTUP_TO_LIVE_TIME = AGENT_CLUSTER_STARTUP_TIME
-  public static final int AGENT_GO_LIVE_TIME = 60000
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
 
+  public String app_def_pkg_path;
+
+  @Before
+  public void setupAppPkg() {
+    if (app_def_pkg_path == null) {
+      def pkgPath = folder.newFolder("testpkg")
+      File zipFileName = new File(pkgPath, "appdef_1.zip").canonicalFile
+      File metainfo = new File(new File(".").absoluteFile, "src/test/python/metainfo.xml");
+      ZipArchiveOutputStream zipFile = new ZipArchiveOutputStream(new FileOutputStream(zipFileName));
+      try {
+        zipFile.putArchiveEntry(new ZipArchiveEntry(metainfo.name));
+        IOUtils.copy(new FileInputStream(metainfo), zipFile);
+        zipFile.closeArchiveEntry();
+      }
+      finally {
+        zipFile.close();
+      }
+      app_def_pkg_path = zipFileName.absolutePath
+    }
+  }
 
   @Override
   public String getTestConfigurationPath() {
@@ -102,7 +122,7 @@
       boolean deleteExistingData,
       boolean create,
       boolean blockUntilRunning) {
-    
+
 
     YarnConfiguration conf = testConfiguration
 
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestUtils.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestUtils.groovy
index 5888557..989919f 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestUtils.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/AgentTestUtils.groovy
@@ -23,8 +23,6 @@
 import com.sun.jersey.api.client.config.DefaultClientConfig
 import com.sun.jersey.api.json.JSONConfiguration
 import org.apache.slider.server.appmaster.web.rest.agent.Register
-import org.codehaus.jettison.json.JSONException
-import org.codehaus.jettison.json.JSONObject
 
 class AgentTestUtils {
 
@@ -36,20 +34,11 @@
     return Client.create(clientConfig);
   }
 
-
-  public static Register createDummyJSONRegister() throws JSONException {
+  public static Register createDummyJSONRegister() {
     Register register = new Register();
     register.setResponseId(-1);
     register.setTimestamp(System.currentTimeMillis());
     register.setHostname("dummyHost");
     return register;
   }
-
-  public static JSONObject createDummyHeartBeat() throws JSONException {
-    JSONObject json = new JSONObject();
-    json.put("responseId", -1);
-    json.put("timestamp", System.currentTimeMillis());
-    json.put("hostname", "dummyHost");
-    return json;
-  }
 }
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
index de20e10..7d68458 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
@@ -24,11 +24,17 @@
 import groovy.util.logging.Slf4j
 import org.apache.slider.api.StatusKeys
 import org.apache.slider.client.SliderClient
+import org.apache.slider.common.SliderKeys
+import org.apache.slider.core.conf.MapOperations
 import org.apache.slider.core.main.ServiceLauncher
 import org.apache.slider.server.appmaster.web.SliderAMWebApp
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationStatus
+import org.apache.slider.server.services.security.CertificateManager
+import org.apache.slider.server.services.security.SecurityUtils
 import org.junit.Test
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
 
 import javax.ws.rs.core.MediaType
 
@@ -42,8 +48,42 @@
 @Slf4j
 class TestAgentAMManagementWS extends AgentTestBase {
 
-  public static final String MANAGEMENT_URI = SliderAMWebApp.BASE_PATH +"/ws/v1/slider/mgmt/";
   public static final String AGENT_URI = "ws/v1/slider/agents/";
+    final static Logger logger = LoggerFactory.getLogger(TestAgentAMManagementWS.class)
+    static {
+        //for localhost testing only
+        javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier(
+                new javax.net.ssl.HostnameVerifier(){
+                    public boolean verify(String hostname,
+                                          javax.net.ssl.SSLSession sslSession) {
+                        logger.info("verifying hostname ${hostname}")
+                        InetAddress[] addresses =
+                            InetAddress.getAllByName(hostname);
+                        if (hostname.equals("localhost")) {
+                            return true;
+                        }
+                        for (InetAddress address : addresses) {
+                            if (address.getHostName().equals(hostname) ||
+                                address.isAnyLocalAddress() ||
+                                address.isLoopbackAddress()) {
+                                return true;
+                            }
+                        }
+                        return false;
+                    }
+                });
+
+        MapOperations compOperations = new MapOperations();
+        compOperations.put(SliderKeys.KEYSTORE_LOCATION, "/tmp/work/security/keystore.p12");
+        SecurityUtils.initializeSecurityParameters(compOperations);
+        CertificateManager certificateManager = new CertificateManager();
+        certificateManager.initRootCert(compOperations);
+        String keystoreFile = SecurityUtils.getSecurityDir() + File.separator + SliderKeys.KEYSTORE_FILE_NAME;
+        String password = SecurityUtils.getKeystorePass();
+        System.setProperty("javax.net.ssl.trustStore", keystoreFile);
+        System.setProperty("javax.net.ssl.trustStorePassword", password);
+        System.setProperty("javax.net.ssl.trustStoreType", "PKCS12");
+    }
 
   @Test
   public void testAgentAMManagementWS() throws Throwable {
@@ -58,8 +98,7 @@
         false)
     Map<String, Integer> roles = [:]
     File slider_core = new File(new File(".").absoluteFile, "src/test/python");
-    String app_def = "appdef_1.zip"
-    File app_def_path = new File(slider_core, app_def)
+    File app_def_path = new File(app_def_pkg_path)
     String agt_ver = "version"
     File agt_ver_path = new File(slider_core, agt_ver)
     String agt_conf = "agent.ini"
@@ -85,7 +124,7 @@
 
     
     def status = dumpClusterStatus(sliderClient, "agent AM")
-    def liveURL = status.getInfo(StatusKeys.INFO_AM_WEB_URL) 
+    def liveURL = status.getInfo(StatusKeys.INFO_AM_AGENT_URL)
     if (liveURL) {
       agent_url = liveURL + AGENT_URI
     }
@@ -95,7 +134,7 @@
     log.info("conf   is ${liveURL}conf")
 
 
-    def sleeptime = 60
+    def sleeptime = 10
     log.info "sleeping for $sleeptime seconds"
     Thread.sleep(sleeptime * 1000)
     
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentEcho.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentEcho.groovy
index 1072ebe..2f03b09 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentEcho.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentEcho.groovy
@@ -36,10 +36,9 @@
 @Slf4j
 class TestAgentEcho extends AgentTestBase {
 
-
   @Override
   void checkTestAssumptions(YarnConfiguration conf) {
-    
+
   }
 
   @Test
@@ -57,8 +56,7 @@
     File slider_core = new File(new File(".").absoluteFile, "src/test/python");
     String echo_py = "echo.py"
     File echo_py_path = new File(slider_core, echo_py)
-    String app_def = "appdef_1.zip"
-    File app_def_path = new File(slider_core, app_def)
+    File app_def_path = new File(app_def_pkg_path)
     String agt_ver = "version"
     File agt_ver_path = new File(slider_core, agt_ver)
     String agt_conf = "agent.ini"
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestBuildBasicAgent.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestBuildBasicAgent.groovy
index a597707..421920f 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestBuildBasicAgent.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestBuildBasicAgent.groovy
@@ -38,38 +38,34 @@
 @Slf4j
 class TestBuildBasicAgent extends AgentTestBase {
   static String TEST_FILES = "./src/test/resources/org/apache/slider/providers/agent/tests/"
+  static File slider_core = new File(new File(".").absoluteFile, "src/test/python");
+  static String bad_app_def = "appdef_1.tar"
+  static File bad_app_def_path = new File(slider_core, bad_app_def)
+  static String agt_conf = "agent.ini"
+  static File agt_conf_path = new File(slider_core, agt_conf)
 
   @Override
   void checkTestAssumptions(YarnConfiguration conf) {
 
   }
 
-  private static class TestResources {
-    static File slider_core = new File(new File(".").absoluteFile, "src/test/python");
-    static String app_def = "appdef_1.zip"
-    static String bad_app_def = "appdef_1.tar"
-    static File app_def_path = new File(slider_core, app_def)
-    static File bad_app_def_path = new File(slider_core, bad_app_def)
-    static String agt_conf = "agent.ini"
-    static File agt_conf_path = new File(slider_core, agt_conf)
-
-    static public File getAppDef() {
-      return app_def_path;
-    }
-
-    static public File getBadAppDef() {
-      return bad_app_def_path;
-    }
-
-    static public File getAgentConf() {
-      return agt_conf_path;
-    }
-
-    static public File getAgentImg() {
-      return app_def_path;
-    }
+  private File getAppDef() {
+    return new File(app_def_pkg_path);
   }
 
+  private File getBadAppDef() {
+    return bad_app_def_path;
+  }
+
+  private File getAgentConf() {
+    return agt_conf_path;
+  }
+
+  private File getAgentImg() {
+    return new File(app_def_pkg_path);
+  }
+
+
   @Test
   public void testBuildMultipleRoles() throws Throwable {
 
@@ -87,8 +83,8 @@
         [
             ARG_OPTION, CONTROLLER_URL, "http://localhost",
             ARG_PACKAGE, ".",
-            ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-            ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+            ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+            ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
             ARG_OPTION, SCRIPT_PATH, "agent/scripts/agent.py",
             ARG_COMP_OPT, ROLE_NODE, SCRIPT_PATH, "agent/scripts/agent.py",
             ARG_RES_COMP_OPT, ROLE_NODE, ResourceKeys.COMPONENT_PRIORITY, "1",
@@ -107,8 +103,8 @@
         [
             ARG_OPTION, CONTROLLER_URL, "http://localhost",
             ARG_OPTION, PACKAGE_PATH, ".",
-            ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-            ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+            ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+            ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
             ARG_COMP_OPT, master, SCRIPT_PATH, "agent/scripts/agent.py",
             ARG_COMP_OPT, rs, SCRIPT_PATH, "agent/scripts/agent.py",
             ARG_RES_COMP_OPT, master, ResourceKeys.COMPONENT_PRIORITY, "2",
@@ -189,8 +185,8 @@
             (rs): 5
         ],
         [
-            ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-            ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+            ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+            ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
             ARG_PACKAGE, ".",
             ARG_COMP_OPT, SliderKeys.COMPONENT_AM, RoleKeys.JVM_OPTS, jvmopts,
             ARG_COMP_OPT, master, RoleKeys.JVM_OPTS, jvmopts,
@@ -219,8 +215,8 @@
             "role": 1,
         ],
         [
-            ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-            ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+            ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+            ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
             ARG_PACKAGE, ".",
             ARG_RES_COMP_OPT, "role", ResourceKeys.COMPONENT_PRIORITY, "3",
         ],
@@ -239,8 +235,40 @@
   }
 
   @Test
+  public void testAgentArgs() throws Throwable {
+    def clustername = "test_good_agent_args"
+    createMiniCluster(
+        clustername,
+        configuration,
+        1,
+        1,
+        1,
+        true,
+        false)
+
+    try {
+      def badArgs1 = "test_good_agent_args-1"
+      buildAgentCluster(clustername,
+          [:],
+          [
+              ARG_OPTION, CONTROLLER_URL, "http://localhost",
+              ARG_PACKAGE, ".",
+              ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+              ARG_RESOURCES, TEST_FILES + "good/resources.json",
+              ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
+          ],
+          true, false,
+          false)
+    } catch (BadConfigException exception) {
+      log.error(
+          "Build operation should not have failed with exception : \n$exception")
+      fail("Build operation should not fail")
+    }
+  }
+  
+  @Test
   public void testBadAgentArgs() throws Throwable {
-    def clustername = "test_bad_template_args"
+    def clustername = "test_bad_agent_args"
     createMiniCluster(
         clustername,
         configuration,
@@ -252,12 +280,12 @@
 
     try {
       def badArgs1 = "test_bad_agent_args-1"
-      buildAgentCluster(clustername,
+      buildAgentCluster(badArgs1,
           [:],
           [
               ARG_OPTION, CONTROLLER_URL, "http://localhost",
-              ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-              ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+              ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+              ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
               ARG_RESOURCES, TEST_FILES + "good/resources.json",
               ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
           ],
@@ -265,69 +293,58 @@
           false)
       failWithBuildSucceeding(badArgs1, "missing package home or image path")
     } catch (BadConfigException expected) {
-     
+      log.info("Expected failure.", expected)
+      assert expected.message.contains("Either agent package path agent.package.root or image root internal.application.image.path must be provided")
     }
 
     try {
       def badArgs1 = "test_bad_agent_args-2"
-      buildAgentCluster(clustername,
+      buildAgentCluster(badArgs1,
           [:],
           [
               ARG_OPTION, CONTROLLER_URL, "http://localhost",
-              ARG_IMAGE, "file://" + TestResources.getAgentImg().absolutePath + ".badfile",
-              ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-              ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+              ARG_IMAGE, "file://" + getAgentImg().absolutePath,
+              ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+              ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
               ARG_RESOURCES, TEST_FILES + "good/resources.json",
               ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
           ],
           true, false,
           false)
-      failWithBuildSucceeding(badArgs1, "bad image path")
+      failWithBuildSucceeding(badArgs1, "both app image path and home dir was provided")
     } catch (BadConfigException expected) {
+      log.info("Expected failure.", expected)
+      assert expected.message.contains("Both application image path and home dir have been provided")
     }
 
     try {
       def badArgs1 = "test_bad_agent_args-3"
-      buildAgentCluster(clustername,
+      buildAgentCluster(badArgs1,
           [:],
           [
               ARG_OPTION, CONTROLLER_URL, "http://localhost",
-              ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
-              ARG_RESOURCES, TEST_FILES + "good/resources.json",
-              ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
-          ],
-          true, false,
-          false)
-      failWithBuildSucceeding(badArgs1, "bad app def file")
-    } catch (BadConfigException expected) {
-    }
-
-    try {
-      def badArgs1 = "test_bad_agent_args-5"
-      buildAgentCluster(clustername,
-          [:],
-          [
-              ARG_OPTION, CONTROLLER_URL, "http://localhost",
+              ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
               ARG_PACKAGE, ".",
-              ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
               ARG_RESOURCES, TEST_FILES + "good/resources.json",
               ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
           ],
           true, false,
           false)
-      failWithBuildSucceeding(badArgs1, "bad agent conf file")
+      failWithBuildSucceeding(badArgs1, "missing app def file")
     } catch (BadConfigException expected) {
+      log.info("Expected failure.", expected)
+      assert expected.message.contains("Application definition must be provided. Missing option application.def")
     }
 
     try {
       def badArgs1 = "test_bad_agent_args-6"
-      buildAgentCluster(clustername,
+      buildAgentCluster(badArgs1,
           [:],
           [
               ARG_OPTION, CONTROLLER_URL, "http://localhost",
-              ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+              ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
               ARG_PACKAGE, ".",
-              ARG_OPTION, APP_DEF, "file://" + TestResources.getBadAppDef().absolutePath,
+              ARG_OPTION, APP_DEF, "file://" + getBadAppDef().absolutePath,
               ARG_RESOURCES, TEST_FILES + "good/resources.json",
               ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
           ],
@@ -357,8 +374,8 @@
         [:],
         [
             ARG_OPTION, CONTROLLER_URL, "http://localhost",
-            ARG_OPTION, APP_DEF, "file://" + TestResources.getAppDef().absolutePath,
-            ARG_OPTION, AGENT_CONF, "file://" + TestResources.getAgentConf().absolutePath,
+            ARG_OPTION, APP_DEF, "file://" + getAppDef().absolutePath,
+            ARG_OPTION, AGENT_CONF, "file://" + getAgentConf().absolutePath,
             ARG_PACKAGE, ".",
             ARG_RESOURCES, TEST_FILES + "good/resources.json",
             ARG_TEMPLATE, TEST_FILES + "good/appconf.json"
diff --git a/slider-core/src/test/groovy/org/apache/slider/registry/curator/TestRegistryRestResources.groovy b/slider-core/src/test/groovy/org/apache/slider/registry/curator/TestRegistryRestResources.groovy
index 303cc73..4cc0f08 100644
--- a/slider-core/src/test/groovy/org/apache/slider/registry/curator/TestRegistryRestResources.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/registry/curator/TestRegistryRestResources.groovy
@@ -75,8 +75,7 @@
         false)
     Map<String, Integer> roles = [:]
     File slider_core = new File(new File(".").absoluteFile, "src/test/python");
-    String app_def = "appdef_1.zip"
-    File app_def_path = new File(slider_core, app_def)
+    File app_def_path = new File(app_def_pkg_path)
     String agt_ver = "version"
     File agt_ver_path = new File(slider_core, agt_ver)
     String agt_conf = "agent.ini"
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/appstate/TestMockRMOperations.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/appstate/TestMockRMOperations.groovy
index 7f92f9c..168ac9f 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/appstate/TestMockRMOperations.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/appstate/TestMockRMOperations.groovy
@@ -41,7 +41,7 @@
 
   @Test
   public void testPriorityOnly() throws Throwable {
-    assert 5 == buildPriority(5, false)
+    assert 5 == extractRole(buildPriority(5, false))
   }
 
   @Test
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/mock/MockProviderService.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/mock/MockProviderService.groovy
index 2199043..7b73451 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/mock/MockProviderService.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/model/mock/MockProviderService.groovy
@@ -21,7 +21,6 @@
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.service.LifecycleEvent
-import org.apache.hadoop.service.Service.STATE
 import org.apache.hadoop.service.ServiceStateChangeListener
 import org.apache.hadoop.yarn.api.records.Container
 import org.apache.slider.api.ClusterDescription
@@ -34,6 +33,7 @@
 import org.apache.slider.core.registry.info.ServiceInstanceData
 import org.apache.slider.providers.ProviderRole
 import org.apache.slider.providers.ProviderService
+import org.apache.slider.server.appmaster.AMViewForProviders
 import org.apache.slider.server.appmaster.state.StateAccessForProviders
 import org.apache.slider.server.appmaster.web.rest.agent.AgentRestOperations
 import org.apache.slider.server.appmaster.web.rest.agent.HeartBeat
@@ -42,7 +42,8 @@
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationStatus
 import org.apache.slider.server.services.registry.RegistryViewForProviders
-import org.apache.slider.server.services.utility.EventCallback
+import org.apache.slider.providers.ProviderCompleted
+import org.apache.hadoop.service.Service.STATE
 
 class MockProviderService implements ProviderService {
 
@@ -94,12 +95,11 @@
     return null;
   }
 
-  @Override
   public STATE getServiceState() {
-    return null;
+    return null
   }
 
-  @Override
+    @Override
   public long getStartTime() {
     return 0;
   }
@@ -145,7 +145,7 @@
       AggregateConf instanceDefinition,
       File confDir,
       Map<String, String> env,
-      EventCallback execInProgress) throws IOException, SliderException {
+      ProviderCompleted execInProgress) throws IOException, SliderException {
     return false;
   }
 
@@ -187,14 +187,15 @@
   }
 
   @Override
-  public Map<String, URL> buildMonitorDetails(ClusterDescription clusterSpec) {
+  public Map<String, String> buildMonitorDetails(ClusterDescription clusterSpec) {
     return null;
   }
 
   @Override
   void bind(
       StateAccessForProviders stateAccessor,
-      RegistryViewForProviders registry) {
+      RegistryViewForProviders registry,
+      AMViewForProviders amView) {
 
   }
 
@@ -224,8 +225,7 @@
 
   @Override
   void applyInitialRegistryDefinitions(
-      URL amWebAPI,
-      ServiceInstanceData registryInstanceData)
+          URL unsecureWebAPI, URL secureWebAPI, ServiceInstanceData registryInstanceData)
   throws MalformedURLException, IOException {
 
   }
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/publisher/TestPublisherRestResources.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/publisher/TestPublisherRestResources.groovy
index bc7c79a..cd7c9d8 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/publisher/TestPublisherRestResources.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/publisher/TestPublisherRestResources.groovy
@@ -58,8 +58,7 @@
         false)
     Map<String, Integer> roles = [:]
     File slider_core = new File(new File(".").absoluteFile, "src/test/python");
-    String app_def = "appdef_1.zip"
-    File app_def_path = new File(slider_core, app_def)
+    File app_def_path = new File(app_def_pkg_path)
     String agt_ver = "version"
     File agt_ver_path = new File(slider_core, agt_ver)
     String agt_conf = "agent.ini"
@@ -130,6 +129,18 @@
     response = webResource.type(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     assert 404 == response.status
 
+    String classpathUri = publisher_url +"/"+ RestPaths.SLIDER_CLASSPATH
+    webResource = client.resource(classpathUri)
+    Set uris = webResource.type(MediaType.APPLICATION_JSON)
+            .get(Set.class)
+    assert uris.size() > 0
+    log.info("Classpath URIs: {}", uris)
+    // check for some expected classpath elements
+    assert uris.any {it =~ /curator-x-discovery/}
+    assert uris.any {it =~ /hadoop-yarn-api/}
+    assert uris.any {it =~ /hadoop-hdfs/}
+    // and a negative test...
+    assert !uris.any {it =~ /foo-bar/}
   }
 
 }
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.groovy
index 4304452..1c98e94 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.groovy
@@ -52,7 +52,7 @@
         appState)
     ProviderService providerService = new MockProviderService();
 
-    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService);
+    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService, null);
 
     Injector injector = Guice.createInjector(new AbstractModule() {
           @Override
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.groovy
index 69cdd2b..f871b07 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.groovy
@@ -61,7 +61,7 @@
         "undefined",
         appState)
 
-    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService);
+    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService, null);
 
     Injector injector = Guice.createInjector(new AbstractModule() {
           @Override
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestIndexBlock.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestIndexBlock.groovy
index 176299d..6b46591 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestIndexBlock.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/view/TestIndexBlock.groovy
@@ -51,7 +51,7 @@
         "undefined",
         appState)
 
-    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService);
+    WebAppApiImpl inst = new WebAppApiImpl(clusterProto, providerAppState, providerService, null);
 
     Injector injector = Guice.createInjector(new AbstractModule() {
           @Override
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/MockService.groovy b/slider-core/src/test/groovy/org/apache/slider/server/services/utility/MockService.groovy
deleted file mode 100644
index 2906323..0000000
--- a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/MockService.groovy
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility
-
-import org.apache.hadoop.service.AbstractService
-import org.apache.hadoop.service.ServiceStateException
-import org.apache.slider.core.main.ExitCodeProvider
-
-/**
- * Little mock service to simulate delays
- */
-class MockService extends AbstractService implements ExitCodeProvider {
-
-  boolean fail = false;
-  int exitCode;
-  int lifespan = -1;
-
-  MockService() {
-    super("mock")
-  }
-
-  MockService(String name, boolean fail, int lifespan) {
-    super(name)
-    this.fail = fail
-    this.lifespan = lifespan;
-  }
-
-  @Override
-  protected void serviceStart() throws Exception {
-    //act on the lifespan here
-    if (lifespan > 0) {
-      Thread.start {
-        Thread.sleep(lifespan)
-        finish()
-      }
-    } else {
-      if (lifespan == 0) {
-        finish();
-      } else {
-        //continue until told not to
-      }
-    }
-  }
-
-  void finish() {
-    if (fail) {
-      ServiceStateException e = new ServiceStateException("$name failed")
-      noteFailure(e);
-      stop();
-      throw e
-    } else {
-      stop();
-    }
-  }
-
-}
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestCompoundService.groovy b/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestCompoundService.groovy
deleted file mode 100644
index 9e09c91..0000000
--- a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestCompoundService.groovy
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility
-
-import groovy.util.logging.Slf4j
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.service.Service
-import org.apache.slider.core.main.ServiceLauncherBaseTest
-import org.junit.Test
-
-@Slf4j
-class TestCompoundService extends ServiceLauncherBaseTest {
-
-
-  @Test
-  public void testSingleCompound() throws Throwable {
-    CompoundService parent = startService([new MockService()])
-    parent.stop();
-  }
-  
-  
-  @Test
-  public void testSingleCompoundTerminating() throws Throwable {
-    CompoundService parent = startService([new MockService("1",false,100)])
-    assert waitForParentToStop(parent);
-  }
-
-  public boolean waitForParentToStop(CompoundService parent) {
-    boolean stop = parent.waitForServiceToStop(1000)
-    if (!stop) {
-      log.error("Service failed to stop $parent")
-      logState(parent)
-    }
-    return stop
-  }
-
-
-  @Test
-  public void testSingleCompoundFailing() throws Throwable {
-    CompoundService parent = startService([new MockService("1",true,100)])
-    assert parent.waitForServiceToStop(1000);
-    assert parent.getFailureCause() != null;
-  }
-  
-  @Test
-  public void testCompound() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", false, 100)
-    CompoundService parent = startService([one, two])
-    assert waitForParentToStop(parent);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-  }
-
-  @Test
-  public void testCompoundOneLongLived() throws Throwable {
-    MockService one = new MockService("one", false, 500)
-    MockService two = new MockService("two", false, 100)
-    CompoundService parent = startService([one, two])
-    assert waitForParentToStop(parent);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-  }
-
-  
-  @Test
-  public void testNotificationInCompound() throws Throwable {
-    boolean notified = false;
-    EventCallback ecb = new EventCallback() {
-      @Override
-      void eventCallbackEvent() {
-        log.info("EventCallback")
-        notified = true;
-      }
-    }
-    MockService one = new MockService("one", false, 100)
-    EventNotifyingService ens = new EventNotifyingService(ecb, 100);
-    MockService two = new MockService("two", false, 100)
-    CompoundService parent = startService([one, ens, two])
-    assert waitForParentToStop(parent);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert ens.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-    assert notified
-  }
-
-  @Test
-  public void testFailingCompound() throws Throwable {
-    MockService one = new MockService("one", true, 100)
-    MockService two = new MockService("two", false, 100)
-    CompoundService parent = startService([one, two])
-    assert waitForParentToStop(parent);
-
-
-
-    assert one.isInState(Service.STATE.STOPPED)
-    assert one.failureCause != null
-    assert two.isInState(Service.STATE.STOPPED)
-  }
-  
-
-
-
-  @Test
-  public void testCompoundInCompound() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", false, 100)
-    CompoundService parent = buildService([one, two])
-    CompoundService outer = startService([parent])
-        assert outer.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-  }
-
-  public CompoundService startService(List<Service> services) {
-    CompoundService parent = buildService(services)
-    //expect service to start and stay started
-    parent.start();
-    return parent
-  }
-
-  public CompoundService buildService(List<Service> services) {
-    CompoundService parent = new CompoundService("test")
-    services.each { parent.addService(it) }
-    parent.init(new Configuration())
-    return parent
-  }
-
-
-  void logState(Parent p) {
-    logService(p)
-    for (Service s : p.services) {
-      logService(s)
-    }
-  }
-
-  public void logService(Service s) {
-    log.info(s.toString())
-    if (s.getFailureCause()) {
-      log.info("Failed in state ${s.getFailureState()} with $s.failureCause")
-    }
-  }
-}
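
The deleted TestCompoundService cases pinned down one contract: a CompoundService parent starts its children in parallel and only reaches STOPPED once every child has stopped, surfacing a failing child's cause to callers blocked in waitForServiceToStop(). A minimal sketch of that contract on Hadoop's stock service API (not Slider's actual CompoundService, whose implementation is not shown in this diff):

```java
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;

// Sketch: a parent that stops itself once all of its children have stopped.
public class AllChildrenStoppedService extends CompositeService
    implements ServiceStateChangeListener {

  public AllChildrenStoppedService(String name, Service... children) {
    super(name);
    for (Service child : children) {
      addService(child);                    // protected in CompositeService
      child.registerServiceListener(this);  // fires on every state change
    }
  }

  @Override
  public void stateChanged(Service changed) {
    for (Service s : getServices()) {
      if (!s.isInState(Service.STATE.STOPPED)) {
        return;   // at least one child is still running
      }
    }
    stop();       // last child stopped: stop the parent too
  }
}
```
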
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestMockService.groovy b/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestMockService.groovy
deleted file mode 100644
index 537f6b1..0000000
--- a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestMockService.groovy
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.service.Service
-import org.apache.hadoop.service.ServiceStateException
-import org.junit.Test
-
-class TestMockService {
-
-  @Test
-  public void testSimpleLifecycle() throws Throwable {
-    MockService s = new MockService("1",false,-1);
-    s.init(new Configuration())
-    s.start();
-    assert s.isInState(Service.STATE.STARTED)
-  }
-  
-  @Test
-  public void testSimpleLifecycleWait() throws Throwable {
-    MockService s = new MockService("1",false,-1);
-    s.init(new Configuration())
-    s.start();
-    assert s.isInState(Service.STATE.STARTED)
-    s.stop();
-    s.waitForServiceToStop(0);
-  }
-  
-  @Test
-  public void testStoppingService() throws Throwable {
-    MockService s = new MockService("1",false,100);
-    s.init(new Configuration())
-    s.start();
-    Thread.sleep(1000);
-    assert s.isInState(Service.STATE.STOPPED)
-  }
-  
-  @Test
-  public void testStoppingWaitService() throws Throwable {
-    MockService s = new MockService("1",false,100);
-    s.init(new Configuration())
-    s.start();
-    s.waitForServiceToStop(0);
-    assert s.isInState(Service.STATE.STOPPED)
-  }
-    
-  
-  
-  @Test
-  public void testFailingService() throws Throwable {
-    MockService s = new MockService("1",true,100);
-    s.init(new Configuration())
-    s.start();
-    s.waitForServiceToStop(0);
-
-    assert s.isInState(Service.STATE.STOPPED)
-    assert s.failureCause != null
-  }
-      
-  @Test
-  public void testFailingInStart() throws Throwable {
-    MockService s = new MockService("1",true,0);
-    s.init(new Configuration())
-    try {
-      s.start();
-      //failure, raise a fault with some text
-      assert null == s
-    } catch (ServiceStateException e) {
-      //expected
-    }
-    assert s.isInState(Service.STATE.STOPPED)
-    assert s.failureCause != null
-    s.waitForServiceToStop(0);
-  }
-  
-  
-}
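
What these deleted cases exercised is the generic Hadoop service lifecycle rather than anything Slider-specific: NOTINITED to INITED to STARTED to STOPPED, with a failure inside serviceStart() rethrown from start() and the service left STOPPED with a recorded failure cause. A compact illustration, assuming only hadoop-common's service classes:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateException;

public class LifecycleDemo {
  public static void main(String[] args) {
    // A service whose start fails, like the removed MockService("1", true, 0).
    Service s = new AbstractService("demo") {
      @Override
      protected void serviceStart() throws Exception {
        throw new Exception("simulated start failure");
      }
    };
    s.init(new Configuration());     // NOTINITED -> INITED
    try {
      s.start();                     // checked failure is wrapped and rethrown
    } catch (ServiceStateException expected) {
      // start() has already driven the service to STOPPED before throwing
    }
    s.waitForServiceToStop(0);       // 0 == block until stopped
    assert s.isInState(Service.STATE.STOPPED);
    assert s.getFailureCause() != null;
  }
}
```
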
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestSequenceService.groovy b/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestSequenceService.groovy
deleted file mode 100644
index 968d57a..0000000
--- a/slider-core/src/test/groovy/org/apache/slider/server/services/utility/TestSequenceService.groovy
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.services.utility
-
-import groovy.util.logging.Slf4j
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.service.Service
-import org.apache.slider.core.main.ServiceLauncherBaseTest
-import org.junit.Test
-
-@Slf4j
-class TestSequenceService extends ServiceLauncherBaseTest {
-
-
-  @Test
-  public void testSingleSequence() throws Throwable {
-    SequenceService ss = startService([new MockService()])
-    ss.stop();
-  }
-
-  @Test
-  public void testSequence() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", false, 100)
-    SequenceService ss = startService([one, two])
-    assert ss.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-    assert ss.previousService == two
-  }
-
-  @Test
-  public void testNotificationInSequence() throws Throwable {
-    boolean notified = false;
-    EventCallback ecb = new EventCallback() {
-      @Override
-      void eventCallbackEvent() {
-        log.info("EventCallback")
-        notified = true;
-      }
-    }
-    MockService one = new MockService("one", false, 100)
-    EventNotifyingService ens = new EventNotifyingService(ecb, 100);
-    MockService two = new MockService("two", false, 100)
-    SequenceService ss = startService([one, ens, two])
-    assert ss.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert ens.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-    assert notified
-  }
-
-  @Test
-  public void testFailingSequence() throws Throwable {
-    MockService one = new MockService("one", true, 100)
-    MockService two = new MockService("two", false, 100)
-    SequenceService ss = startService([one, two])
-    assert ss.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.NOTINITED)
-    assert ss.previousService == one
-
-  }
-  
-
-
-  @Test
-  public void testFailInStartNext() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", true, 0)
-    MockService three = new MockService("3", false, 0)
-    SequenceService ss = startService([one, two, three])
-    assert ss.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-    Throwable failureCause = two.failureCause
-    assert failureCause != null;
-    Throwable masterFailureCause = ss.failureCause
-    assert masterFailureCause != null;
-    assert masterFailureCause == failureCause
-
-    assert three.isInState(Service.STATE.NOTINITED)
-  }
-
-  @Test
-  public void testSequenceInSequence() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", false, 100)
-    SequenceService ss = buildService([one, two])
-    SequenceService outer = startService([ss])
-    
-    assert outer.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-  }
-
-
-  @Test
-  public void testVarargsCtor() throws Throwable {
-    MockService one = new MockService("one", false, 100)
-    MockService two = new MockService("two", false, 100)
-    SequenceService ss = new SequenceService("test", one, two);
-    ss.init(new Configuration())
-    ss.start();
-    assert ss.waitForServiceToStop(1000);
-    assert one.isInState(Service.STATE.STOPPED)
-    assert two.isInState(Service.STATE.STOPPED)
-
-
-  }
-  public SequenceService startService(List<Service> services) {
-    SequenceService ss = buildService(services)
-    //expect service to start and stay started
-    ss.start();
-    return ss
-  }
-
-  public SequenceService buildService(List<Service> services) {
-    SequenceService ss = new SequenceService("test")
-    services.each { ss.addService(it) }
-    ss.init(new Configuration())
-    return ss
-  }
-
-
-}
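
TestSequenceService covered the complementary contract to the compound tests above: children run one at a time, a failing stage aborts the chain, later stages stay NOTINITED, and the failed stage's cause becomes the parent's failure cause. A blocking loop standing in for the callback-driven SequenceService (a sketch of the semantics, not Slider's implementation):

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;

public final class SequenceDemo {
  /** Run each service to completion before initing the next; abort on failure. */
  public static void runInSequence(List<Service> services) throws Exception {
    for (Service s : services) {
      s.init(new Configuration());
      s.start();
      s.waitForServiceToStop(0);        // block until this stage stops
      if (s.getFailureCause() != null) {
        // remaining services are never inited, so they stay NOTINITED,
        // and this stage's failure becomes the sequence's failure cause
        throw new Exception("sequence aborted at " + s.getName(),
            s.getFailureCause());
      }
    }
  }
}
```
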
diff --git a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
index 2045f11..a250e55 100644
--- a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
@@ -481,8 +481,8 @@
     ServiceLauncher<SliderClient> serviceLauncher =
         new ServiceLauncher<SliderClient>(SliderClient.name);
     serviceLauncher.launchService(conf,
-                                  toArray(args),
-                                  false);
+        toArray(args),
+        false);
     return serviceLauncher
   }
 
@@ -493,8 +493,8 @@
     ServiceLauncher serviceLauncher =
         new ServiceLauncher(serviceClass.name);
     serviceLauncher.launchService(conf,
-                                  toArray(args),
-                                  false);
+        toArray(args),
+        false);
     return serviceLauncher;
   }
 
diff --git a/slider-core/src/test/groovy/org/apache/slider/test/YarnMiniClusterTestBase.groovy b/slider-core/src/test/groovy/org/apache/slider/test/YarnMiniClusterTestBase.groovy
index d1cc5ca..2c1b270 100644
--- a/slider-core/src/test/groovy/org/apache/slider/test/YarnMiniClusterTestBase.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/test/YarnMiniClusterTestBase.groovy
@@ -28,12 +28,11 @@
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.hdfs.MiniDFSCluster
 import org.apache.hadoop.service.ServiceOperations
+import org.apache.hadoop.util.Shell
 import org.apache.hadoop.yarn.api.records.ApplicationReport
 import org.apache.hadoop.yarn.api.records.YarnApplicationState
 import org.apache.hadoop.yarn.conf.YarnConfiguration
 import org.apache.hadoop.yarn.server.MiniYARNCluster
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler
 import org.apache.slider.api.ClusterNode
 import org.apache.slider.client.SliderClient
 import org.apache.slider.common.SliderExitCodes
@@ -50,12 +49,17 @@
 import org.apache.slider.core.main.ServiceLauncherBaseTest
 import org.apache.slider.server.appmaster.SliderAppMaster
 import org.junit.After
+import org.junit.Assert
+import org.junit.Before
+import org.junit.BeforeClass
 import org.junit.Rule
+import org.junit.rules.TestName
 import org.junit.rules.Timeout
 
-import static org.apache.slider.common.SliderXMLConfKeysForTesting.*
 import static org.apache.slider.test.KeysForTests.*
 
+import static org.apache.slider.common.SliderKeys.*;
+import static org.apache.slider.common.SliderXMLConfKeysForTesting.*;
 /**
  * Base class for mini cluster tests -creates a field for the
  * mini yarn cluster
@@ -79,6 +83,9 @@
    */
   public static final String YRAM = "256"
 
+  public static final String FIFO_SCHEDULER = "org.apache.hadoop.yarn.server" +
+    ".resourcemanager.scheduler.fifo.FifoScheduler";
+
 
   public static final YarnConfiguration SLIDER_CONFIG = SliderUtils.createConfiguration();
   static {
@@ -108,18 +115,13 @@
   protected boolean switchToImageDeploy = false
   protected boolean imageIsRemote = false
   protected URI remoteImageURI
+  private int clusterCount = 1;
 
   protected List<SliderClient> clustersToTeardown = [];
 
   /**
    * This is set in a system property
    */
-/*
-  @Rule
-  public Timeout testTimeout = new Timeout(1000* 
-      Integer.getInteger(KEY_TEST_TIMEOUT, DEFAULT_TEST_TIMEOUT))
-
-*/
 
   @Rule
   public Timeout testTimeout = new Timeout(
@@ -127,6 +129,35 @@
           KEY_TEST_TIMEOUT,
           DEFAULT_TEST_TIMEOUT_SECONDS * 1000)
   )
+  @BeforeClass
+  public static void checkWindowsSupport() {
+    if (Shell.WINDOWS) {
+      assertNotNull("winutils.exe not found", Shell.WINUTILS)
+    }
+  } 
+
+
+  @Rule
+  public TestName methodName = new TestName();
+
+  @Before
+  public void nameThread() {
+    Thread.currentThread().setName("JUnit");
+  }
+
+  /**
+   * Create the cluster name from the method name and an auto-incrementing
+   * counter.
+   * @return a cluster name
+   */
+  protected String createClusterName() {
+    def base = methodName.getMethodName().toLowerCase(Locale.ENGLISH)
+    if (clusterCount++ > 1) {
+      base += "-$clusterCount"
+    }
+    return base
+  }
+
 
   @Override
   void setup() {
@@ -152,7 +183,7 @@
         hbaseLaunchWaitTime)
 
     accumuloTestsEnabled =
-        testConf.getBoolean(KEY_TEST_ACCUMULO_ENABLED, hbaseTestsEnabled)
+        testConf.getBoolean(KEY_TEST_ACCUMULO_ENABLED, accumuloTestsEnabled)
     accumuloLaunchWaitTime = getTimeOptionMillis(testConf,
         KEY_ACCUMULO_LAUNCH_TIME,
         accumuloLaunchWaitTime)
@@ -214,8 +245,7 @@
                                    int numLogDirs,
                                    boolean startHDFS) {
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER,
-        FifoScheduler.class, ResourceScheduler.class);
+    conf.set(YarnConfiguration.RM_SCHEDULER, FIFO_SCHEDULER);
     SliderUtils.patchConfiguration(conf)
     miniCluster = new MiniYARNCluster(name, noOfNodeManagers, numLocalDirs, numLogDirs)
     miniCluster.init(conf)
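
Two changes in this file are worth unpacking. The scheduler is now configured by class-name string instead of a compile-time Class reference, which is what lets the two resourcemanager scheduler imports above be deleted; and each test method gets a unique, auto-numbered cluster name derived from JUnit's TestName rule. The scheduler change in isolation (assuming, as the tests do, that the class is still resolvable on the test classpath at runtime):

```java
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SchedulerConfigDemo {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Before: a compile-time dependency on the RM scheduler classes --
    //   conf.setClass(YarnConfiguration.RM_SCHEDULER,
    //       FifoScheduler.class, ResourceScheduler.class);
    // After: the class is named as a string and resolved reflectively by the
    // ResourceManager, so the scheduler imports can be dropped entirely.
    conf.set(YarnConfiguration.RM_SCHEDULER,
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
  }
}
```
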
diff --git a/slider-core/src/test/groovy/org/apache/slider/test/YarnZKMiniClusterTestBase.groovy b/slider-core/src/test/groovy/org/apache/slider/test/YarnZKMiniClusterTestBase.groovy
index 691cd2f..77d47f4 100644
--- a/slider-core/src/test/groovy/org/apache/slider/test/YarnZKMiniClusterTestBase.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/test/YarnZKMiniClusterTestBase.groovy
@@ -29,7 +29,9 @@
 
 import java.util.concurrent.atomic.AtomicBoolean
 
-import static org.apache.slider.test.KeysForTests.USERNAME
+import static org.apache.slider.common.SliderKeys.*;
+import static org.apache.slider.common.SliderXMLConfKeysForTesting.*;
+import static org.apache.slider.test.KeysForTests.*;
 
 /**
  * Base class for mini cluster tests that use Zookeeper
diff --git a/slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java b/slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
index 7cac439..be850da 100644
--- a/slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
+++ b/slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
@@ -19,23 +19,30 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.apache.slider.tools.TestUtility;
 import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.InputStream;
-import java.net.URI;
 
-/**
- *
- */
+/** Test slider util methods. */
 public class TestSliderUtils {
   protected static final Logger log =
       LoggerFactory.getLogger(TestSliderUtils.class);
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
 
   @Test
-  public void testGetMetaInfoStreamFromZip () throws Exception {
+  public void testGetMetaInfoStreamFromZip() throws Exception {
+    String zipFileName = TestUtility.createAppPackage(
+        folder,
+        "testpkg",
+        "test.zip",
+        "target/test-classes/org/apache/slider/common/tools/test");
     Configuration configuration = new Configuration();
     FileSystem fs = FileSystem.getLocal(configuration);
     log.info("fs working dir is {}", fs.getWorkingDirectory().toString());
@@ -43,14 +50,14 @@
 
     InputStream stream = SliderUtils.getApplicationResourceInputStream(
         sliderFileSystem.getFileSystem(),
-        new Path("target/test-classes/org/apache/slider/common/tools/test.zip"),
+        new Path(zipFileName),
         "metainfo.xml");
     Assert.assertTrue(stream != null);
     Assert.assertTrue(stream.available() > 0);
   }
 
   @Test
-  public void testTruncate () {
+  public void testTruncate() {
     Assert.assertEquals(SliderUtils.truncate(null, 5), null);
     Assert.assertEquals(SliderUtils.truncate("323", -1), "323");
     Assert.assertEquals(SliderUtils.truncate("3232", 5), "3232");
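
Instead of a binary test.zip checked into the source tree, the app package is now assembled at test time from loose resource files into a JUnit TemporaryFolder. TestUtility.createAppPackage itself is not part of this diff; below is a plausible sketch of what such a helper does (class and method names hypothetical):

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.junit.rules.TemporaryFolder;

// Hypothetical stand-in for TestUtility.createAppPackage: zip the files
// under srcDir into the temp folder and hand back the zip's absolute path.
public final class ZipPackager {

  public static String createAppPackage(TemporaryFolder folder, String subdir,
      String zipName, String srcDir) throws Exception {
    File[] entries = new File(srcDir).listFiles();
    if (entries == null) {
      throw new IllegalArgumentException("not a directory: " + srcDir);
    }
    File zipFile = new File(folder.newFolder(subdir), zipName);
    try (ZipOutputStream zos =
             new ZipOutputStream(new FileOutputStream(zipFile))) {
      for (File f : entries) {
        if (!f.isFile()) {
          continue;   // flat packaging is enough for these tests
        }
        zos.putNextEntry(new ZipEntry(f.getName()));
        try (FileInputStream in = new FileInputStream(f)) {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) > 0) {
            zos.write(buf, 0, n);
          }
        }
        zos.closeEntry();
      }
    }
    return zipFile.getAbsolutePath();
  }
}
```
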
diff --git a/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentClientProvider.java b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentClientProvider.java
index 69f5a1c..4cb35aa 100644
--- a/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentClientProvider.java
+++ b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentClientProvider.java
@@ -18,27 +18,41 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.tools.TestUtility;
+import org.junit.Assert;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import java.io.InputStream;
 import java.util.Set;
 
 /**
  *
  */
 public class TestAgentClientProvider {
+  protected static final Logger log =
+      LoggerFactory.getLogger(TestAgentClientProvider.class);
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
   @Test
-  public void testGetApplicationTags () throws Exception {
+  public void testGetApplicationTags() throws Exception {
     Configuration configuration = new Configuration();
     FileSystem fs = FileSystem.getLocal(configuration);
     SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
 
     AgentClientProvider provider = new AgentClientProvider(null);
-    Set<String> tags = provider.getApplicationTags(sliderFileSystem,
-      "target/test-classes/org/apache/slider/common/tools/test.zip");
+    String zipFileName = TestUtility.createAppPackage(
+        folder,
+        "testpkg",
+        "test.zip",
+        "target/test-classes/org/apache/slider/common/tools/test");
+    Set<String> tags = provider.getApplicationTags(sliderFileSystem, zipFileName);
     assert tags != null;
     assert !tags.isEmpty();
     assert tags.contains("Name: STORM");
@@ -46,4 +60,18 @@
     assert tags.contains("Version: 0.9.1.2.1");
 
   }
+
+  @Test
+  public void testValidateInstanceDefinition() throws Exception {
+    AgentClientProvider provider = new AgentClientProvider(null);
+    AggregateConf instanceDefinition = new AggregateConf();
+
+    try {
+      provider.validateInstanceDefinition(instanceDefinition);
+      Assert.fail("Should fail with BadConfigException");
+    } catch (BadConfigException e) {
+      log.info(e.toString());
+      Assert.assertTrue(e.getMessage().contains("Application definition must be provided"));
+    }
+  }
 }
diff --git a/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentLaunchParameter.java b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentLaunchParameter.java
new file mode 100644
index 0000000..ec62b54
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentLaunchParameter.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.agent;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ *
+ */
+public class TestAgentLaunchParameter {
+  protected static final Logger log =
+      LoggerFactory.getLogger(TestAgentLaunchParameter.class);
+
+  @Test
+  public void testTestAgentLaunchParameter() throws Exception {
+    AgentLaunchParameter alp = new AgentLaunchParameter("");
+    Assert.assertEquals("", alp.getNextLaunchParameter("abc"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("HBASE_MASTER"));
+
+    alp = new AgentLaunchParameter("a:1:2:3|b:5:6:NONE");
+    Assert.assertEquals("1", alp.getNextLaunchParameter("a"));
+    Assert.assertEquals("2", alp.getNextLaunchParameter("a"));
+    Assert.assertEquals("3", alp.getNextLaunchParameter("a"));
+    Assert.assertEquals("3", alp.getNextLaunchParameter("a"));
+
+    Assert.assertEquals("5", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("6", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("c"));
+
+    alp = new AgentLaunchParameter("|a:1:3|b::5:NONE:");
+    Assert.assertEquals("1", alp.getNextLaunchParameter("a"));
+    Assert.assertEquals("3", alp.getNextLaunchParameter("a"));
+    Assert.assertEquals("3", alp.getNextLaunchParameter("a"));
+
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("5", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+
+    alp = new AgentLaunchParameter("|:");
+    Assert.assertEquals("", alp.getNextLaunchParameter("b"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("a"));
+
+    alp = new AgentLaunchParameter("HBASE_MASTER:a,b:DO_NOT_REGISTER:");
+    Assert.assertEquals("a,b", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("DO_NOT_REGISTER", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("DO_NOT_REGISTER", alp.getNextLaunchParameter("HBASE_MASTER"));
+
+    alp = new AgentLaunchParameter("HBASE_MASTER:a,b:DO_NOT_REGISTER::c:::");
+    Assert.assertEquals("a,b", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("DO_NOT_REGISTER", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("c", alp.getNextLaunchParameter("HBASE_MASTER"));
+    Assert.assertEquals("c", alp.getNextLaunchParameter("HBASE_MASTER"));
+  }
+}
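
The assertions pin down an otherwise undocumented launch-parameter syntax: pipe-separated per-component entries, colon-separated value queues, the last value repeating once its queue is drained, and the literal NONE standing for an empty parameter. A hypothetical re-implementation inferred purely from the assertions above (not the actual AgentLaunchParameter source):

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Sketch inferred from TestAgentLaunchParameter; internals are guesses.
public class LaunchParameterQueue {
  private final Map<String, String[]> values = new HashMap<>();
  private final Map<String, Integer> cursor = new HashMap<>();

  public LaunchParameterQueue(String spec) {
    for (String token : spec.split("\\|")) {
      String[] fields = token.split(":");   // trailing empty fields are dropped
      if (fields.length > 1) {
        values.put(fields[0], Arrays.copyOfRange(fields, 1, fields.length));
      }
    }
  }

  /** Next parameter for a component; the final queue value repeats forever. */
  public String next(String component) {
    String[] queue = values.get(component);
    if (queue == null) {
      return "";                            // unknown component
    }
    int c = cursor.getOrDefault(component, 0);
    cursor.put(component, Math.min(c + 1, queue.length - 1));
    return "NONE".equals(queue[c]) ? "" : queue[c];
  }
}
```
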
diff --git a/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentProviderService.java b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentProviderService.java
index b0ac967..c10b60a 100644
--- a/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentProviderService.java
+++ b/slider-core/src/test/java/org/apache/slider/providers/agent/TestAgentProviderService.java
@@ -43,13 +43,13 @@
 import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.providers.agent.application.metadata.Application;
 import org.apache.slider.providers.agent.application.metadata.CommandOrder;
 import org.apache.slider.providers.agent.application.metadata.Component;
 import org.apache.slider.providers.agent.application.metadata.Export;
 import org.apache.slider.providers.agent.application.metadata.ExportGroup;
 import org.apache.slider.providers.agent.application.metadata.Metainfo;
 import org.apache.slider.providers.agent.application.metadata.MetainfoParser;
-import org.apache.slider.providers.agent.application.metadata.Service;
 import org.apache.slider.server.appmaster.model.mock.MockContainerId;
 import org.apache.slider.server.appmaster.model.mock.MockFileSystem;
 import org.apache.slider.server.appmaster.model.mock.MockNodeId;
@@ -97,8 +97,7 @@
       LoggerFactory.getLogger(TestAgentProviderService.class);
   private static final String metainfo_1_str = "<metainfo>\n"
                                                + "  <schemaVersion>2.0</schemaVersion>\n"
-                                               + "  <services>\n"
-                                               + "    <service>\n"
+                                               + "  <application>\n"
                                                + "      <name>HBASE</name>\n"
                                                + "      <comment>\n"
                                                + "        Apache HBase\n"
@@ -164,15 +163,11 @@
                                                + "          </packages>\n"
                                                + "        </osSpecific>\n"
                                                + "      </osSpecifics>\n"
-                                               + "    </service>\n"
-                                               + "  </services>\n"
+                                               + "  </application>\n"
                                                + "</metainfo>";
-
-
   private static final String metainfo_2_str = "<metainfo>\n"
                                                + "  <schemaVersion>2.0</schemaVersion>\n"
-                                               + "  <services>\n"
-                                               + "    <service>\n"
+                                               + "  <application>\n"
                                                + "      <name>HBASE</name>\n"
                                                + "      <comment>\n"
                                                + "        Apache HBase\n"
@@ -203,9 +198,9 @@
                                                + "          </commandScript>\n"
                                                + "        </component>\n"
                                                + "      </components>\n"
-                                               + "    </service>\n"
-                                               + "  </services>\n"
+                                               + "  </application>\n"
                                                + "</metainfo>";
+
   @Test
   public void testRegistration() throws IOException {
 
@@ -244,7 +239,7 @@
     doReturn(access).when(mockAps).getAmState();
     doReturn("scripts/hbase_master.py").when(mockAps).getScriptPathFromMetainfo(anyString());
     Metainfo metainfo = new Metainfo();
-    metainfo.addService(new Service());
+    metainfo.setApplication(new Application());
     doReturn(metainfo).when(mockAps).getApplicationMetainfo(any(SliderFileSystem.class), anyString());
 
     try {
@@ -260,9 +255,9 @@
     expect(access.isApplicationLive()).andReturn(true).anyTimes();
     ClusterDescription desc = new ClusterDescription();
     desc.setInfo(StatusKeys.INFO_AM_HOSTNAME, "host1");
-    desc.setInfo(StatusKeys.INFO_AM_WEB_PORT, "8088");
+    desc.setInfo(StatusKeys.INFO_AM_AGENT_PORT, "8088");
+    desc.setInfo(StatusKeys.INFO_AM_SECURED_AGENT_PORT, "8089");
     desc.setInfo(OptionKeys.APPLICATION_NAME, "HBASE");
-    desc.getOrAddRole("HBASE_MASTER").put(AgentKeys.COMPONENT_SCRIPT, "scripts/hbase_master.py");
     expect(access.getClusterStatus()).andReturn(desc).anyTimes();
 
     AggregateConf aggConf = new AggregateConf();
@@ -352,7 +347,7 @@
   public void testProcessConfig() throws Exception {
     InputStream metainfo_1 = new ByteArrayInputStream(metainfo_1_str.getBytes());
     Metainfo metainfo = new MetainfoParser().parse(metainfo_1);
-    assert metainfo.getServices().size() == 1;
+    Assert.assertNotNull(metainfo.getApplication());
     AgentProviderService aps = new AgentProviderService();
     HeartBeat hb = new HeartBeat();
     ComponentStatus status = new ComponentStatus();
@@ -405,12 +400,12 @@
   public void testMetainfoParsing() throws Exception {
     InputStream metainfo_1 = new ByteArrayInputStream(metainfo_1_str.getBytes());
     Metainfo metainfo = new MetainfoParser().parse(metainfo_1);
-    Assert.assertEquals(metainfo.getServices().size(), 1);
-    Service service = metainfo.getServices().get(0);
-    log.info("Service: " + service.toString());
-    Assert.assertEquals(service.getName(), "HBASE");
-    Assert.assertEquals(service.getComponents().size(), 2);
-    List<Component> components = service.getComponents();
+    Assert.assertNotNull(metainfo.getApplication());
+    Application application = metainfo.getApplication();
+    log.info("Service: " + application.toString());
+    Assert.assertEquals(application.getName(), "HBASE");
+    Assert.assertEquals(application.getComponents().size(), 2);
+    List<Component> components = application.getComponents();
     int found = 0;
     for (Component component : components) {
       if (component.getName().equals("HBASE_MASTER")) {
@@ -430,8 +425,8 @@
     }
     Assert.assertEquals(found, 2);
 
-    assert service.getExportGroups().size() == 1;
-    List<ExportGroup> egs = service.getExportGroups();
+    assert application.getExportGroups().size() == 1;
+    List<ExportGroup> egs = application.getExportGroups();
     ExportGroup eg = egs.get(0);
     assert eg.getName().equals("QuickLinks");
     assert eg.getExports().size() == 2;
@@ -441,20 +436,20 @@
       if (export.getName().equals("JMX_Endpoint")) {
         found++;
         Assert.assertEquals(export.getValue(),
-            "http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/jmx");
+                            "http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/jmx");
       }
       if (export.getName().equals("Master_Status")) {
         found++;
         Assert.assertEquals(export.getValue(),
-            "http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/master-status");
+                            "http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/master-status");
       }
     }
     Assert.assertEquals(found, 2);
 
-    List<CommandOrder> cmdOrders = service.getCommandOrder();
+    List<CommandOrder> cmdOrders = application.getCommandOrder();
     Assert.assertEquals(cmdOrders.size(), 2);
     found = 0;
-    for (CommandOrder co : service.getCommandOrder()) {
+    for (CommandOrder co : application.getCommandOrder()) {
       if (co.getCommand().equals("HBASE_REGIONSERVER-START")) {
         Assert.assertTrue(co.getRequires().equals("HBASE_MASTER-STARTED"));
         found++;
@@ -584,7 +579,8 @@
     expect(access.isApplicationLive()).andReturn(true).anyTimes();
     ClusterDescription desc = new ClusterDescription();
     desc.setInfo(StatusKeys.INFO_AM_HOSTNAME, "host1");
-    desc.setInfo(StatusKeys.INFO_AM_WEB_PORT, "8088");
+    desc.setInfo(StatusKeys.INFO_AM_AGENT_PORT, "8088");
+    desc.setInfo(StatusKeys.INFO_AM_SECURED_AGENT_PORT, "8089");
     desc.setInfo(OptionKeys.APPLICATION_NAME, "HBASE");
     expect(access.getClusterStatus()).andReturn(desc).anyTimes();
 
@@ -660,14 +656,16 @@
       cr.setRole("HBASE_REGIONSERVER");
       cr.setRoleCommand("INSTALL");
       cr.setStatus("COMPLETED");
-      cr.setFolders(new HashMap<String, String>() {{put("a", "b");}});
+      cr.setFolders(new HashMap<String, String>() {{
+        put("a", "b");
+      }});
       hb.setReports(Arrays.asList(cr));
       hbr = mockAps.handleHeartBeat(hb);
       Assert.assertEquals(3, hbr.getResponseId());
       Mockito.verify(mockAps, Mockito.times(0)).addStartCommand(anyString(),
-                                                                  anyString(),
-                                                                  any(HeartBeatResponse.class),
-                                                                  anyString());
+                                                                anyString(),
+                                                                any(HeartBeatResponse.class),
+                                                                anyString());
       // RS still does not start
       hb = new HeartBeat();
       hb.setResponseId(3);
@@ -723,8 +721,8 @@
       hb.setReports(Arrays.asList(cr));
       mockAps.handleHeartBeat(hb);
       Mockito.verify(mockAps, Mockito.times(1)).addGetConfigCommand(anyString(),
-                                                                anyString(),
-                                                                any(HeartBeatResponse.class));
+                                                                    anyString(),
+                                                                    any(HeartBeatResponse.class));
 
       // RS starts now
       hb = new HeartBeat();
@@ -746,7 +744,6 @@
         anyCollection());
   }
 
-
   @Test
   public void testAddStartCommand() throws Exception {
     AgentProviderService aps = new AgentProviderService();
@@ -761,6 +758,7 @@
     treeOps.getGlobalOptions().put(AgentKeys.JAVA_HOME, "java_home");
     treeOps.set(OptionKeys.APPLICATION_NAME, "HBASE");
     treeOps.set("site.fs.defaultFS", "hdfs://HOST1:8020/");
+    treeOps.set("internal.data.dir.path", "hdfs://HOST1:8020/database");
     treeOps.set(OptionKeys.ZOOKEEPER_HOSTS, "HOST1");
     treeOps.set("config_types", "hbase-site");
     treeOps.getGlobalOptions().put("site.hbase-site.a.port", "${HBASE_MASTER.ALLOCATED_PORT}");
diff --git a/slider-core/src/test/java/org/apache/slider/providers/agent/TestHeartbeatMonitor.java b/slider-core/src/test/java/org/apache/slider/providers/agent/TestHeartbeatMonitor.java
new file mode 100644
index 0000000..c2cfafd
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/providers/agent/TestHeartbeatMonitor.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.agent;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
+/**
+ *
+ */
+public class TestHeartbeatMonitor {
+  protected static final Logger log =
+      LoggerFactory.getLogger(TestHeartbeatMonitor.class);
+
+  @Test
+  public void testRegularHeartbeat() throws Exception {
+    AgentProviderService provider = createNiceMock(AgentProviderService.class);
+    HeartbeatMonitor hbm = new HeartbeatMonitor(provider, 1 * 1000);
+    Assert.assertFalse(hbm.isAlive());
+    expect(provider.getComponentStatuses()).andReturn(null).anyTimes();
+    replay(provider);
+    hbm.start();
+    Assert.assertTrue(hbm.isAlive());
+    hbm.shutdown();
+    Thread.sleep(1 * 1000);
+    Assert.assertFalse(hbm.isAlive());
+  }
+
+  @Test
+  public void testHeartbeatMonitorWithHealthy() throws Exception {
+    AgentProviderService provider = createNiceMock(AgentProviderService.class);
+    HeartbeatMonitor hbm = new HeartbeatMonitor(provider, 500);
+    Assert.assertFalse(hbm.isAlive());
+    Map<String, ComponentInstanceState> statuses = new HashMap<>();
+    ComponentInstanceState state = new ComponentInstanceState("HBASE_MASTER", "Cid", "Aid");
+    state.setState(State.STARTED);
+    state.setLastHeartbeat(System.currentTimeMillis());
+    statuses.put("label_1", state);
+    expect(provider.getComponentStatuses()).andReturn(statuses).anyTimes();
+    replay(provider);
+    hbm.start();
+    Assert.assertTrue(hbm.isAlive());
+    Thread.sleep(1 * 1000);
+    hbm.shutdown();
+    Thread.sleep(1 * 1000);
+    Assert.assertFalse(hbm.isAlive());
+  }
+
+  @Test
+  public void testHeartbeatMonitorWithUnhealthyAndThenLost() throws Exception {
+    AgentProviderService provider = createNiceMock(AgentProviderService.class);
+    HeartbeatMonitor hbm = new HeartbeatMonitor(provider, 2 * 1000);
+    Assert.assertFalse(hbm.isAlive());
+    Map<String, ComponentInstanceState> statuses = new HashMap<>();
+    ComponentInstanceState masterState = new ComponentInstanceState("HBASE_MASTER", "Cid1", "Aid1");
+    masterState.setState(State.STARTED);
+    masterState.setLastHeartbeat(System.currentTimeMillis());
+    statuses.put("Aid1_Cid1_HBASE_MASTER", masterState);
+
+    ComponentInstanceState slaveState = new ComponentInstanceState("HBASE_REGIONSERVER", "Cid2", "Aid1");
+    slaveState.setState(State.STARTED);
+    slaveState.setLastHeartbeat(System.currentTimeMillis());
+    statuses.put("Aid1_Cid2_HBASE_REGIONSERVER", slaveState);
+
+    expect(provider.getComponentStatuses()).andReturn(statuses).anyTimes();
+    expect(provider.releaseContainer("Aid1_Cid2_HBASE_REGIONSERVER")).andReturn(true).once();
+    replay(provider);
+    hbm.start();
+
+    Thread.sleep(1 * 1000);
+    // just dial back by at least 2 sec but no more than 4
+    slaveState.setLastHeartbeat(System.currentTimeMillis() - (2 * 1000 + 100));
+    masterState.setLastHeartbeat(System.currentTimeMillis());
+
+    Thread.sleep(1 * 1000 + 500);
+    masterState.setLastHeartbeat(System.currentTimeMillis());
+
+    log.info("Slave container state {}", slaveState.getContainerState());
+    Assert.assertEquals(ContainerState.HEALTHY, masterState.getContainerState());
+    Assert.assertEquals(ContainerState.UNHEALTHY, slaveState.getContainerState());
+
+    Thread.sleep(1 * 1000);
+    // some lost heartbeats are ignored (e.g. ~ 1 sec)
+    masterState.setLastHeartbeat(System.currentTimeMillis() - 1 * 1000);
+
+    Thread.sleep(1 * 1000 + 500);
+
+    log.info("Slave container state {}", slaveState.getContainerState());
+    Assert.assertEquals(ContainerState.HEALTHY, masterState.getContainerState());
+    Assert.assertEquals(ContainerState.HEARTBEAT_LOST, slaveState.getContainerState());
+    hbm.shutdown();
+  }
+
+  @Test
+  public void testHeartbeatTransitions() {
+    ComponentInstanceState slaveState = new ComponentInstanceState("HBASE_REGIONSERVER", "Cid2", "Aid1");
+    slaveState.setState(State.STARTED);
+
+    Assert.assertEquals(ContainerState.INIT, slaveState.getContainerState());
+    slaveState.setLastHeartbeat(System.currentTimeMillis());
+    Assert.assertEquals(ContainerState.HEALTHY, slaveState.getContainerState());
+
+    slaveState.setContainerState(ContainerState.UNHEALTHY);
+    Assert.assertEquals(ContainerState.UNHEALTHY, slaveState.getContainerState());
+    slaveState.setLastHeartbeat(System.currentTimeMillis());
+    Assert.assertEquals(ContainerState.HEALTHY, slaveState.getContainerState());
+
+    slaveState.setContainerState(ContainerState.HEARTBEAT_LOST);
+    Assert.assertEquals(ContainerState.HEARTBEAT_LOST, slaveState.getContainerState());
+    slaveState.setLastHeartbeat(System.currentTimeMillis());
+    Assert.assertEquals(ContainerState.HEARTBEAT_LOST, slaveState.getContainerState());
+  }
+}
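
Read together, these tests define the container heartbeat state machine: a fresh heartbeat moves INIT or UNHEALTHY to HEALTHY, a stale one degrades HEALTHY to UNHEALTHY and then to HEARTBEAT_LOST (at which point the monitor releases the container), and HEARTBEAT_LOST is terminal, since a late heartbeat cannot resurrect a released container. A sketch of the per-container bookkeeping the test implies, with internals inferred rather than copied:

```java
// Names mirror the test above; the implementation details are assumptions.
enum ContainerState { INIT, HEALTHY, UNHEALTHY, HEARTBEAT_LOST }

class HeartbeatRecord {
  private ContainerState containerState = ContainerState.INIT;
  private long lastHeartbeat;

  // The monitor thread presumably wakes once per interval and degrades any
  // record whose heartbeat is stale: HEALTHY -> UNHEALTHY -> HEARTBEAT_LOST,
  // releasing the container on the final transition.
  synchronized void setContainerState(ContainerState state) {
    containerState = state;
  }

  synchronized void setLastHeartbeat(long timestamp) {
    lastHeartbeat = timestamp;
    // HEARTBEAT_LOST is terminal: the container has already been released,
    // so a late heartbeat cannot revive it. Any other state heals.
    if (containerState != ContainerState.HEARTBEAT_LOST) {
      containerState = ContainerState.HEALTHY;
    }
  }

  synchronized ContainerState getContainerState() { return containerState; }
  synchronized long getLastHeartbeat() { return lastHeartbeat; }
}
```
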
diff --git a/slider-core/src/test/java/org/apache/slider/providers/agent/application/metadata/MetainfoParserTest.java b/slider-core/src/test/java/org/apache/slider/providers/agent/application/metadata/MetainfoParserTest.java
index ac1bd81..61c53df 100644
--- a/slider-core/src/test/java/org/apache/slider/providers/agent/application/metadata/MetainfoParserTest.java
+++ b/slider-core/src/test/java/org/apache/slider/providers/agent/application/metadata/MetainfoParserTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.slider.providers.agent.application.metadata;
 
+import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,16 +40,16 @@
         METAINFO_XML);
     MetainfoParser parser = new MetainfoParser();
     Metainfo metainfo = parser.parse(resStream);
-    assert metainfo != null;
-    assert metainfo.services.size() == 1;
-    Service service = metainfo.getServices().get(0);
-    assert "STORM".equals(service.getName());
-    assert 5 == service.getComponents().size();
-    OSPackage pkg = service.getOSSpecifics().get(0).getPackages().get(0);
+    Assert.assertNotNull(metainfo);
+    Assert.assertNotNull(metainfo.getApplication());
+    Application application = metainfo.getApplication();
+    assert "STORM".equals(application.getName());
+    assert 5 == application.getComponents().size();
+    OSPackage pkg = application.getOSSpecifics().get(0).getPackages().get(0);
     assert "tarball".equals(pkg.getType());
     assert "files/apache-storm-0.9.1.2.1.1.0-237.tar.gz".equals(pkg.getName());
     boolean found = false;
-    for (Component comp : service.getComponents()) {
+    for (Component comp : application.getComponents()) {
       if (comp != null && comp.getName().equals("NIMBUS")) {
         found = true;
       }
diff --git a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/TestSliderAmFilter.java b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/TestSliderAmFilter.java
index bdc11df..00e193d 100644
--- a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/TestSliderAmFilter.java
+++ b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/TestSliderAmFilter.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
 import org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter;
 import org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper;
-import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -31,8 +30,11 @@
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
+import javax.servlet.ServletResponseWrapper;
 import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Enumeration;
@@ -126,7 +128,9 @@
     SliderAmIpFilter testFilter = new SliderAmIpFilter();
     testFilter.init(config);
 
-    HttpServletResponseForTest response = new HttpServletResponseForTest();
+    HttpServletResponse mockResponse = Mockito.mock(HttpServletResponse.class);
+    HttpServletResponseForTest response =
+        new HttpServletResponseForTest(mockResponse);
     // Test request should implements HttpServletRequest
 
     ServletRequest failRequest = Mockito.mock(ServletRequest.class);
@@ -170,9 +174,13 @@
 
   }
 
-  private class HttpServletResponseForTest extends HttpServletResponseImpl {
+  private class HttpServletResponseForTest extends HttpServletResponseWrapper {
     String redirectLocation = "";
 
+    public HttpServletResponseForTest(HttpServletResponse response) {
+      super(response);
+    }
+
     public String getRedirect() {
       return redirectLocation;
     }
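
Swapping Grizzly's HttpServletResponseImpl for the standard HttpServletResponseWrapper removes this test's only Grizzly dependency: the wrapper just needs a delegate (here a Mockito mock, so unoverridden methods are harmless no-ops) plus an override of the one method under observation. A minimal sketch of the capture pattern, assuming the class overrides sendRedirect as its redirectLocation field implies:

```java
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import org.mockito.Mockito;

class RedirectCapturingResponse extends HttpServletResponseWrapper {
  private String redirectLocation = "";

  RedirectCapturingResponse(HttpServletResponse delegate) {
    super(delegate);
  }

  @Override
  public void sendRedirect(String location) {
    // Record the redirect instead of delegating, so the test can
    // assert on the URL the filter proxied to.
    redirectLocation = location;
  }

  String getRedirect() {
    return redirectLocation;
  }
}

// Usage: wrap a mock so every method not overridden here is a no-op.
// HttpServletResponse mock = Mockito.mock(HttpServletResponse.class);
// RedirectCapturingResponse resp = new RedirectCapturingResponse(mock);
```
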
diff --git a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/agent/TestAMAgentWebServices.java b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/agent/TestAMAgentWebServices.java
index 75ee7db..17fbe2b 100644
--- a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/agent/TestAMAgentWebServices.java
+++ b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/agent/TestAMAgentWebServices.java
@@ -18,26 +18,20 @@
 
 package org.apache.slider.server.appmaster.web.rest.agent;
 
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.servlet.GuiceServletContextListener;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.config.ClientConfig;
 import com.sun.jersey.api.client.config.DefaultClientConfig;
 import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.server.appmaster.model.mock.MockFactory;
 import org.apache.slider.server.appmaster.model.mock.MockProviderService;
 import org.apache.slider.server.appmaster.model.mock.MockRecordFactory;
@@ -46,137 +40,130 @@
 import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.apache.slider.server.appmaster.web.WebAppApiImpl;
-import org.apache.slider.server.appmaster.web.rest.AMWebServices;
-import org.apache.slider.server.appmaster.web.rest.SliderJacksonJaxbJsonProvider;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+import org.apache.slider.server.services.security.CertificateManager;
+import org.apache.slider.server.services.security.SecurityUtils;
+import org.apache.slider.test.SliderTestBase;
+import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.ws.rs.Path;
 import javax.ws.rs.core.MediaType;
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
 
 import static org.junit.Assert.assertEquals;
 
-public class TestAMAgentWebServices extends JerseyTest {
+public class TestAMAgentWebServices {
+
+  static {
+    //for localhost testing only
+    javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier(
+        new javax.net.ssl.HostnameVerifier(){
+
+          public boolean verify(String hostname,
+                                javax.net.ssl.SSLSession sslSession) {
+            return "localhost".equals(hostname);
+          }
+        });
+
+    SecurityUtils.initializeSecurityParameters(new MapOperations());
+    MapOperations compOperations = new MapOperations();
+    CertificateManager certificateManager = new CertificateManager();
+    certificateManager.initRootCert(compOperations);
+    String keystoreFile = SecurityUtils.getSecurityDir() + File.separator + SliderKeys.KEYSTORE_FILE_NAME;
+    String password = SecurityUtils.getKeystorePass();
+    System.setProperty("javax.net.ssl.trustStore", keystoreFile);
+    System.setProperty("javax.net.ssl.trustStorePassword", password);
+    System.setProperty("javax.net.ssl.trustStoreType", "PKCS12");
+  }
+
   protected static final Logger log =
     LoggerFactory.getLogger(TestAMAgentWebServices.class);
   
   public static final int RM_MAX_RAM = 4096;
   public static final int RM_MAX_CORES = 64;
   public static final String AGENT_URL =
-    "http://localhost:9998/slideram/ws/v1/slider/agents/";
+    "https://localhost:${PORT}/ws/v1/slider/agents/";
   
   static MockFactory factory = new MockFactory();
   private static Configuration conf = new Configuration();
   private static WebAppApi slider;
 
-  private static Injector injector = createInjector();
   private static FileSystem fs;
-
-  public static class GuiceServletConfig extends GuiceServletContextListener {
-
-    public GuiceServletConfig() {
-      super();
-    }
-
-    @Override
-    protected Injector getInjector() {
-      return injector;
-    }
-  }
-
-//  @Path("/ws/v1/slider/agent")
-  @Path("/ws/v1/slider")
-  public static class MockAMWebServices extends AMWebServices {
-
-    @Inject
-    public MockAMWebServices(WebAppApi slider) {
-      super(slider);
-    }
-
-  }
+  private AgentWebApp webApp;
+  private String base_url;
 
   @Before
-  @Override
   public void setUp() throws Exception {
-    super.setUp();
-    injector = createInjector();
     YarnConfiguration conf = SliderUtils.createConfiguration();
     fs = FileSystem.get(new URI("file:///"), conf);
+    AppState appState = null;
+    try {
+      fs = FileSystem.get(new URI("file:///"), conf);
+      File
+          historyWorkDir =
+          new File("target/history", "TestAMAgentWebServices");
+      org.apache.hadoop.fs.Path
+          historyPath =
+          new org.apache.hadoop.fs.Path(historyWorkDir.toURI());
+      fs.delete(historyPath, true);
+      appState = new AppState(new MockRecordFactory());
+      appState.setContainerLimits(RM_MAX_RAM, RM_MAX_CORES);
+      appState.buildInstance(
+          factory.newInstanceDefinition(0, 0, 0),
+          new Configuration(false),
+          factory.ROLES,
+          fs,
+          historyPath,
+          null, null);
+    } catch (Exception e) {
+      log.error("Failed to set up app {}", e);
+    }
+    ProviderAppState providerAppState = new ProviderAppState("undefined",
+                                                             appState);
+
+    slider = new WebAppApiImpl(new MockSliderClusterProtocol(), providerAppState,
+                               new MockProviderService(), null);
+
+    MapOperations compOperations = new MapOperations();
+
+    webApp = AgentWebApp.$for(AgentWebApp.BASE_PATH, slider,
+                              RestPaths.WS_AGENT_CONTEXT_ROOT)
+        .withComponentConfig(compOperations)
+        .start();
+    base_url = AGENT_URL.replace("${PORT}",
+                                 Integer.toString(webApp.getSecuredPort()));
+
   }
 
-  private static Injector createInjector() {
-    return Guice.createInjector(new ServletModule() {
-      @Override
-      protected void configureServlets() {
-
-        AppState appState = null;
-        try {
-          fs = FileSystem.get(new URI("file:///"), conf);
-          File
-              historyWorkDir =
-              new File("target/history", "TestAMAgentWebServices");
-          org.apache.hadoop.fs.Path
-              historyPath =
-              new org.apache.hadoop.fs.Path(historyWorkDir.toURI());
-          fs.delete(historyPath, true);
-          appState = new AppState(new MockRecordFactory());
-          appState.setContainerLimits(RM_MAX_RAM, RM_MAX_CORES);
-          appState.buildInstance(
-              factory.newInstanceDefinition(0, 0, 0),
-              new Configuration(false),
-              factory.ROLES,
-              fs,
-              historyPath,
-              null, null);
-        } catch (Exception e) {
-          log.error("Failed to set up app {}", e);
-        }
-        ProviderAppState providerAppState = new ProviderAppState("undefined",
-            appState);
-
-        slider = new WebAppApiImpl(new MockSliderClusterProtocol(), providerAppState,
-                                   new MockProviderService());
-
-        bind(SliderJacksonJaxbJsonProvider.class);
-        bind(GenericExceptionHandler.class);
-        bind(MockAMWebServices.class);
-        bind(WebAppApi.class).toInstance(slider);
-        bind(Configuration.class).toInstance(conf);
-
-        Map<String, String> params = new HashMap<String, String>();
-        addLoggingFilter(params);
-        serve("/*").with(GuiceContainer.class, params);
-      }
-    });
-  }
-
-  private static void addLoggingFilter(Map<String, String> params) {
-    params.put("com.sun.jersey.spi.container.ContainerRequestFilters", "com.sun.jersey.api.container.filter.LoggingFilter");
-    params.put("com.sun.jersey.spi.container.ContainerResponseFilters", "com.sun.jersey.api.container.filter.LoggingFilter");
+  @After
+  public void tearDown () throws Exception {
+    webApp.stop();
+    webApp = null;
   }
 
   public TestAMAgentWebServices() {
-    super(new WebAppDescriptor.Builder(
-      "org.apache.hadoop.yarn.appmaster.web")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .initParam("com.sun.jersey.api.json.POJOMappingFeature", "true")
-            .contextPath("slideram").servletPath("/").build());
   }
 
   @Test
-  public void testRegistration() throws JSONException, Exception {
+  public void testRegistration() throws Exception {
     RegistrationResponse response;
     Client client = createTestClient();
-    WebResource webResource = client.resource(AGENT_URL + "test/register");
+    WebResource webResource = client.resource(base_url + "test/register");
     response = webResource.type(MediaType.APPLICATION_JSON)
         .post(RegistrationResponse.class, createDummyJSONRegister());
     Assert.assertEquals(RegistrationStatus.OK, response.getResponseStatus());
@@ -189,31 +176,31 @@
   }
 
   @Test
-  public void testHeartbeat() throws JSONException, Exception {
+  public void testHeartbeat() throws Exception {
     HeartBeatResponse response;
     Client client = createTestClient();
-    WebResource webResource = client.resource(AGENT_URL + "test/heartbeat");
+    WebResource webResource = client.resource(base_url + "test/heartbeat");
     response = webResource.type(MediaType.APPLICATION_JSON)
         .post(HeartBeatResponse.class, createDummyHeartBeat());
     assertEquals(response.getResponseId(), 0L);
   }
 
   @Test
-  public void testHeadURL() throws JSONException, Exception {
+  public void testHeadURL() throws Exception {
     Client client = createTestClient();
-    WebResource webResource = client.resource(AGENT_URL);
+    WebResource webResource = client.resource(base_url);
     ClientResponse response = webResource.type(MediaType.APPLICATION_JSON)
                                          .head();
     assertEquals(200, response.getStatus());
   }
 
-  @Test
-  public void testSleepForAWhile() throws Throwable {
-    log.info("Agent is running at {}", AGENT_URL);
-    Thread.sleep(60 * 1000);
-  }
+//  @Test
+//  public void testSleepForAWhile() throws Throwable {
+//    log.info("Agent is running at {}", base_url);
+//    Thread.sleep(60 * 1000);
+//  }
   
-  private Register createDummyJSONRegister() throws JSONException {
+  private Register createDummyJSONRegister() {
     Register register = new Register();
     register.setResponseId(-1);
     register.setTimestamp(System.currentTimeMillis());
@@ -221,12 +208,32 @@
     return register;
   }
 
-  private JSONObject createDummyHeartBeat() throws JSONException {
-    JSONObject json = new JSONObject();
-    json.put("responseId", -1);
-    json.put("timestamp", System.currentTimeMillis());
-    json.put("hostname", "dummyHost");
+  private HeartBeat createDummyHeartBeat() {
+    HeartBeat json = new HeartBeat();
+    json.setResponseId(-1);
+    json.setTimestamp(System.currentTimeMillis());
+    json.setHostname("dummyHost");
     return json;
   }
 
+  @AfterClass
+  public static void tearDownClass() throws Exception{
+    Path directory = Paths.get(SecurityUtils.getSecurityDir());
+    Files.walkFileTree(directory, new SimpleFileVisitor<Path>() {
+      @Override
+      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
+          throws IOException {
+        Files.delete(file);
+        return FileVisitResult.CONTINUE;
+      }
+
+      @Override
+      public FileVisitResult postVisitDirectory(Path dir, IOException exc)
+          throws IOException {
+        Files.delete(dir);
+        return FileVisitResult.CONTINUE;
+      }
+
+    });
+  }
 }
diff --git a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/management/TestAMManagementWebServices.java b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/management/TestAMManagementWebServices.java
index c958081..134f0bc 100644
--- a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/management/TestAMManagementWebServices.java
+++ b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/management/TestAMManagementWebServices.java
@@ -25,6 +25,7 @@
 import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
@@ -48,8 +49,8 @@
 import org.apache.slider.server.appmaster.web.WebAppApiImpl;
 import org.apache.slider.server.appmaster.web.rest.AMWebServices;
 import org.apache.slider.server.appmaster.web.rest.SliderJacksonJaxbJsonProvider;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
+import org.apache.slider.server.appmaster.web.rest.management.resources.AggregateConfResource;
+import org.apache.slider.server.appmaster.web.rest.management.resources.ConfTreeResource;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -61,6 +62,7 @@
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Map;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -180,7 +182,7 @@
             appState);
 
         slider = new WebAppApiImpl(new MockSliderClusterProtocol(), providerAppState,
-                                   new MockProviderService());
+                                   new MockProviderService(), null);
 
         bind(SliderJacksonJaxbJsonProvider.class);
         bind(MockSliderAMWebServices.class);
@@ -198,28 +200,30 @@
         "org.apache.hadoop.yarn.appmaster.web")
               .contextListenerClass(GuiceServletConfig.class)
               .filterClass(com.google.inject.servlet.GuiceFilter.class)
-              .contextPath("slideram").servletPath("/").build());
+              .contextPath("slideram").servletPath("/")
+              .clientConfig(
+                  new DefaultClientConfig(SliderJacksonJaxbJsonProvider.class))
+              .build());
   }
 
   @Test
-  public void testAppResource() throws JSONException, Exception {
+  public void testAppResource() throws Exception {
     WebResource r = resource();
     ClientResponse response = r.path("ws").path("v1").path("slider").path("mgmt").path("app")
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(200, response.getStatus());
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
-    JSONObject json = response.getEntity(JSONObject.class);
-    assertEquals("incorrect number of elements", 4, json.length());
+    AggregateConfResource json = response.getEntity(AggregateConfResource.class);
     assertEquals("wrong href",
                  "http://localhost:9998/slideram/ws/v1/slider/mgmt/app",
-                 json.getString("href"));
-    assertNotNull("no resources", json.getJSONObject("resources"));
-    assertNotNull("no internal", json.getJSONObject("internal"));
-    assertNotNull("no appConf", json.getJSONObject("appConf"));
+                 json.getHref());
+    assertNotNull("no resources", json.getResources());
+    assertNotNull("no internal", json.getInternal());
+    assertNotNull("no appConf", json.getAppConf());
   }
 
   @Test
-  public void testAppInternal() throws JSONException, Exception {
+  public void testAppInternal() throws Exception {
     WebResource r = resource();
     ClientResponse
         response =
@@ -228,18 +232,17 @@
             .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(200, response.getStatus());
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
-    JSONObject json = response.getEntity(JSONObject.class);
-    assertEquals("incorrect number of elements", 4, json.length());
+    ConfTreeResource json = response.getEntity(ConfTreeResource.class);
     assertEquals("wrong href",
                  "http://localhost:9998/slideram/ws/v1/slider/mgmt/app/configurations/internal",
-                 json.getString("href"));
+                 json.getHref());
     assertEquals("wrong description",
-                 "Internal configuration DO NOT EDIT",
-                 json.getJSONObject("metadata").getString("description"));
+        "Internal configuration DO NOT EDIT",
+        json.getMetadata().get("description"));
   }
 
   @Test
-  public void testAppResources() throws JSONException, Exception {
+  public void testAppResources() throws Exception {
     WebResource r = resource();
     ClientResponse
         response =
@@ -249,19 +252,18 @@
 
     assertEquals(200, response.getStatus());
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
-    JSONObject json = response.getEntity(JSONObject.class);
-    assertEquals("incorrect number of elements", 4, json.length());
+    ConfTreeResource json = response.getEntity(ConfTreeResource.class);
     assertEquals("wrong href",
                  "http://localhost:9998/slideram/ws/v1/slider/mgmt/app/configurations/resources",
-                 json.getString("href"));
-    json = json.getJSONObject("components");
-    assertNotNull("no components", json);
-    assertEquals("incorrect number of components", 2, json.length());
-    assertNotNull("wrong component", json.getJSONObject("worker"));
+                 json.getHref());
+    Map<String,Map<String, String>> components = json.getComponents();
+    assertNotNull("no components", components);
+    assertEquals("incorrect number of components", 2, components.size());
+    assertNotNull("wrong component", components.get("worker"));
   }
 
   @Test
-  public void testAppAppConf() throws JSONException, Exception {
+  public void testAppAppConf() throws Exception {
     WebResource r = resource();
     ClientResponse
         response =
@@ -270,14 +272,13 @@
             .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(200, response.getStatus());
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
-    JSONObject json = response.getEntity(JSONObject.class);
-    assertEquals("incorrect number of elements", 4, json.length());
+    ConfTreeResource json = response.getEntity(ConfTreeResource.class);
     assertEquals("wrong href",
                  "http://localhost:9998/slideram/ws/v1/slider/mgmt/app/configurations/appConf",
-                 json.getString("href"));
-    json = json.getJSONObject("components");
-    assertNotNull("no components", json);
-    assertEquals("incorrect number of components", 2, json.length());
-    assertNotNull("wrong component", json.getJSONObject("worker"));
+                 json.getHref());
+    Map<String,Map<String, String>> components = json.getComponents();
+    assertNotNull("no components", components);
+    assertEquals("incorrect number of components", 2, components.size());
+    assertNotNull("wrong component", components.get("worker"));
   }
 }
diff --git a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
index 97199f4..2427009 100644
--- a/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
+++ b/slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
@@ -17,6 +17,7 @@
 package org.apache.slider.server.appmaster.web.rest.publisher;
 
 import org.apache.slider.providers.agent.AgentProviderService;
+import org.apache.slider.server.appmaster.AMViewForProviders;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.services.registry.RegistryViewForProviders;
 import org.slf4j.Logger;
@@ -39,8 +40,8 @@
 
   @Override
   public void bind(StateAccessForProviders stateAccessor,
-                   RegistryViewForProviders reg) {
-    super.bind(stateAccessor, reg);
+                   RegistryViewForProviders reg, AMViewForProviders amView) {
+    super.bind(stateAccessor, reg, amView);
     Map<String,String> dummyProps = new HashMap<>();
     dummyProps.put("prop1", "val1");
     dummyProps.put("prop2", "val2");
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/security/TestCertificateManager.java b/slider-core/src/test/java/org/apache/slider/server/services/security/TestCertificateManager.java
new file mode 100644
index 0000000..6d2d051
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/security/TestCertificateManager.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.services.security;
+
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.core.conf.MapOperations;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+
+/**
+ *
+ */
+public class TestCertificateManager {
+  @Rule
+  public TemporaryFolder workDir = new TemporaryFolder();
+  private File secDir;
+
+  @Before
+  public void setup() throws Exception {
+    CertificateManager certMan = new CertificateManager();
+    MapOperations compOperations = new MapOperations();
+    secDir = new File(workDir.getRoot(), SliderKeys.SECURITY_DIR);
+    File keystoreFile = new File(secDir, SliderKeys.KEYSTORE_FILE_NAME);
+    compOperations.put(SliderKeys.KEYSTORE_LOCATION,
+                       keystoreFile.getAbsolutePath());
+    certMan.initRootCert(compOperations);
+  }
+
+  @Test
+  public void testServerCertificateGenerated() throws Exception {
+    File serverCrt = new File(secDir, SliderKeys.CRT_FILE_NAME);
+    Assert.assertTrue(serverCrt.exists());
+  }
+
+  @Test
+  public void testKeystoreGenerated() throws Exception {
+    File keystore = new File(secDir, SliderKeys.KEYSTORE_FILE_NAME);
+    Assert.assertTrue(keystore.exists());
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/EndOfServiceWaiter.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/EndOfServiceWaiter.java
new file mode 100644
index 0000000..5e6df3b
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/EndOfServiceWaiter.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.service.ServiceStateChangeListener;
+import org.junit.Assert;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Wait for a service to stop
+ */
+public class EndOfServiceWaiter implements ServiceStateChangeListener {
+
+  private final AtomicBoolean finished = new AtomicBoolean(false);
+
+  public EndOfServiceWaiter(Service svc) {
+    svc.registerServiceListener(this);
+  }
+
+  public synchronized void waitForServiceToStop(long timeout) throws
+      InterruptedException {
+    if (!finished.get()) {
+      wait(timeout);
+    }
+    Assert.assertTrue("Service did not finish in time period",
+        finished.get());
+  }
+
+  @Override
+  public synchronized void stateChanged(Service service) {
+    if (service.isInState(Service.STATE.STOPPED)) {
+      finished.set(true);
+      notify();
+    }
+  }
+
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/MockService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/MockService.java
new file mode 100644
index 0000000..588f621
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/MockService.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.ServiceStateException;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+public class MockService extends AbstractService {
+  private final boolean fail;
+  private final int lifespan;
+  private final ExecutorService executorService =
+      Executors.newSingleThreadExecutor();
+
+  MockService() {
+    this("mock", false, -1);
+  }
+
+  MockService(String name, boolean fail, int lifespan) {
+    super(name);
+    this.fail = fail;
+    this.lifespan = lifespan;
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    //act on the lifespan here
+    if (lifespan > 0) {
+      executorService.submit(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            Thread.sleep(lifespan);
+          } catch (InterruptedException ignored) {
+
+          }
+          finish();
+        }
+      });
+    } else {
+      if (lifespan == 0) {
+        finish();
+      } else {
+        //continue until told not to
+      }
+    }
+  }
+
+  void finish() {
+    if (fail) {
+      ServiceStateException e =
+          new ServiceStateException(getName() + " failed");
+
+      noteFailure(e);
+      stop();
+      throw e;
+    } else {
+      stop();
+    }
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/ParentWorkflowTestBase.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/ParentWorkflowTestBase.java
new file mode 100644
index 0000000..a11a1cf
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/ParentWorkflowTestBase.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.service.Service;
+
+/**
+ * Extends {@link WorkflowServiceTestBase} with parent-specific operations
+ * and logic to build up and run the parent service
+ */
+public abstract class ParentWorkflowTestBase extends WorkflowServiceTestBase {
+
+  /**
+   * Wait a second for the service parent to stop
+   * @param parent the service to wait for
+   */
+  protected void waitForParentToStop(ServiceParent parent) {
+    waitForParentToStop(parent, 1000);
+  }
+
+  /**
+   * Wait for the service parent to stop
+   * @param parent the service to wait for
+   * @param timeout time in milliseconds
+   */
+  protected void waitForParentToStop(ServiceParent parent, int timeout) {
+    boolean stop = parent.waitForServiceToStop(timeout);
+    if (!stop) {
+      logState(parent);
+      fail("Service failed to stop : after " + timeout + " millis " + parent);
+    }
+  }
+
+  /**
+   * Subclasses are require to implement this and return an instance of a
+   * ServiceParent
+   * @param services a possibly empty list of services
+   * @return an inited -but -not-started- service parent instance
+   */
+  protected abstract ServiceParent buildService(Service... services);
+
+  /**
+   * Use {@link #buildService(Service...)} to create service and then start it
+   * @param services
+   * @return
+   */
+  protected ServiceParent startService(Service... services) {
+    ServiceParent parent = buildService(services);
+    //expect service to start and stay started
+    parent.start();
+    return parent;
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/ProcessCommandFactory.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/ProcessCommandFactory.java
new file mode 100644
index 0000000..e77eeb3
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/ProcessCommandFactory.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A source of commands, with the goal being to allow for adding different
+ * implementations for different platforms
+ */
+public class ProcessCommandFactory {
+
+  protected ProcessCommandFactory() {
+  }
+
+  /**
+   * The command to list a directory
+   * @param dir directory
+   * @return commands
+   */
+  public List<String> ls(File dir) {
+    List<String> commands = new ArrayList<>(5);
+    commands.add("ls");
+    commands.add("-1");
+    commands.add(dir.getAbsolutePath());
+    return commands;
+  }
+
+  /**
+   * Echo some text to stdout
+   * @param text text
+   * @return commands
+   */
+  public List<String> echo(String text) {
+    List<String> commands = new ArrayList<>(5);
+    commands.add("echo");
+    commands.add(text);
+    return commands;
+  }
+
+  /**
+   * print env variables
+   * @return commands
+   */
+  public List<String> env() {
+    List<String> commands = new ArrayList<String>(1);
+    commands.add("env");
+    return commands;
+  }
+
+  /**
+   * execute a command that returns with an error code that will
+   * be converted into a number
+   * @return commands
+   */
+  public List<String> exitFalse() {
+    List<String> commands = new ArrayList<>(2);
+    commands.add("false");
+    return commands;
+  }
+
+  /**
+   * Create a process command factory for this OS
+   * @return
+   */
+  public static ProcessCommandFactory createProcessCommandFactory() {
+    return new ProcessCommandFactory();
+  }
+}
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/SliderServiceUtils.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/SimpleRunnable.java
similarity index 60%
rename from slider-core/src/main/java/org/apache/slider/server/services/utility/SliderServiceUtils.java
rename to slider-core/src/test/java/org/apache/slider/server/services/workflow/SimpleRunnable.java
index 4fc1525..1f330f4 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/SliderServiceUtils.java
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/SimpleRunnable.java
@@ -16,14 +16,31 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.server.services.workflow;
 
-public class SliderServiceUtils {
+/**
+ * Test runnable that can be made to exit, or throw an exception
+ * during its run
+ */
+class SimpleRunnable implements Runnable {
+  boolean throwException = false;
 
-  public static Exception convertToException(Throwable failureCause) {
-    return (failureCause instanceof Exception) ?
-                      (Exception)failureCause
-                      : new Exception(failureCause);
+
+  SimpleRunnable() {
   }
 
+  SimpleRunnable(boolean throwException) {
+    this.throwException = throwException;
+  }
+
+  @Override
+  public synchronized void run() {
+    try {
+      if (throwException) {
+        throw new RuntimeException("SimpleRunnable");
+      }
+    } finally {
+      this.notify();
+    }
+  }
 }
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestLongLivedProcess.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestLongLivedProcess.java
new file mode 100644
index 0000000..668bcca
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestLongLivedProcess.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.List;
+
+/**
+ * Test the long lived process by executing a command that works and a command
+ * that fails
+ */
+public class TestLongLivedProcess extends WorkflowServiceTestBase implements
+    LongLivedProcessLifecycleEvent {
+  private static final Logger
+      log = LoggerFactory.getLogger(TestLongLivedProcess.class);
+
+  private static final Logger
+      processLog =
+      LoggerFactory.getLogger("org.apache.hadoop.services.workflow.Process");
+
+
+  private LongLivedProcess process;
+  private File testDir = new File("target");
+  private ProcessCommandFactory commandFactory;
+  private volatile boolean started, stopped;
+
+  @Before
+  public void setupProcesses() {
+    commandFactory = ProcessCommandFactory.createProcessCommandFactory();
+  }
+
+  @After
+  public void stopProcesses() {
+    if (process != null) {
+      process.stop();
+    }
+  }
+
+  @Test
+  public void testLs() throws Throwable {
+
+    initProcess(commandFactory.ls(testDir));
+    process.start();
+    //in-thread wait
+    process.run();
+
+    //here stopped
+    assertTrue("process start callback not received", started);
+    assertTrue("process stop callback not received", stopped);
+    assertFalse(process.isRunning());
+    assertEquals(0, process.getExitCode().intValue());
+
+    assertStringInOutput("test-classes", getFinalOutput());
+  }
+
+  @Test
+  public void testExitCodes() throws Throwable {
+
+    initProcess(commandFactory.exitFalse());
+    process.start();
+    //in-thread wait
+    process.run();
+
+    //here stopped
+
+    assertFalse(process.isRunning());
+    int exitCode = process.getExitCode();
+    assertTrue(exitCode != 0);
+    int corrected = process.getExitCodeSignCorrected();
+
+    assertEquals(1, corrected);
+  }
+
+  @Test
+  public void testEcho() throws Throwable {
+
+    String echoText = "hello, world";
+    initProcess(commandFactory.echo(echoText));
+    process.start();
+    //in-thread wait
+    process.run();
+
+    //here stopped
+    assertTrue("process stop callback not received", stopped);
+    assertEquals(0, process.getExitCode().intValue());
+    assertStringInOutput(echoText, getFinalOutput());
+  }
+
+  @Test
+  public void testSetenv() throws Throwable {
+
+    String var = "TEST_RUN";
+    String val = "TEST-RUN-ENV-VALUE";
+    initProcess(commandFactory.env());
+    process.setEnv(var, val);
+    process.start();
+    //in-thread wait
+    process.run();
+
+    //here stopped
+    assertTrue("process stop callback not received", stopped);
+    assertEquals(0, process.getExitCode().intValue());
+    assertStringInOutput(val, getFinalOutput());
+  }
+
+  /**
+   * Get the final output. 
+   * @return the last output
+   */
+  private List<String> getFinalOutput() {
+    return process.getRecentOutput(true, 4000);
+  }
+
+
+  private LongLivedProcess initProcess(List<String> commands) {
+    process = new LongLivedProcess(name.getMethodName(), log, commands);
+    process.setLifecycleCallback(this);
+    return process;
+  }
+
+  /**
+   * Handler for callback events on the process
+   */
+
+  @Override
+  public void onProcessStarted(LongLivedProcess process) {
+    started = true;
+  }
+
+  /**
+   * Handler for callback events on the process
+   */
+  @Override
+  public void onProcessExited(LongLivedProcess process,
+      int exitCode,
+      int signCorrectedCode) {
+    stopped = true;
+  }
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowClosingService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowClosingService.java
new file mode 100644
index 0000000..638547f
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowClosingService.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+public class TestWorkflowClosingService extends WorkflowServiceTestBase {
+
+  @Test
+  public void testSimpleClose() throws Throwable {
+    ClosingService<OpenClose> svc = instance(false);
+    OpenClose openClose = svc.getCloseable();
+    assertFalse(openClose.closed);
+    svc.stop();
+    assertTrue(openClose.closed);
+  }
+
+  @Test
+  public void testNullClose() throws Throwable {
+    ClosingService<OpenClose> svc = new ClosingService<OpenClose>(null);
+    svc.init(new Configuration());
+    svc.start();
+    assertNull(svc.getCloseable());
+    svc.stop();
+  }
+
+  @Test
+  public void testFailingClose() throws Throwable {
+    ClosingService<OpenClose> svc = instance(false);
+    OpenClose openClose = svc.getCloseable();
+    openClose.raiseExceptionOnClose = true;
+    svc.stop();
+    assertTrue(openClose.closed);
+    Throwable cause = svc.getFailureCause();
+    assertNotNull(cause);
+
+    //retry should be a no-op
+    svc.close();
+  }
+
+  @Test
+  public void testDoubleClose() throws Throwable {
+    ClosingService<OpenClose> svc = instance(false);
+    OpenClose openClose = svc.getCloseable();
+    openClose.raiseExceptionOnClose = true;
+    svc.stop();
+    assertTrue(openClose.closed);
+    Throwable cause = svc.getFailureCause();
+    assertNotNull(cause);
+    openClose.closed = false;
+    svc.stop();
+    assertEquals(cause, svc.getFailureCause());
+  }
+
+  /**
+   * This does not recurse forever, as the service has already entered the
+   * STOPPED state before the inner close tries to stop it -that operation
+   * is a no-op
+   * @throws Throwable
+   */
+  @Test
+  public void testCloseSelf() throws Throwable {
+    ClosingService<ClosingService> svc =
+        new ClosingService<ClosingService>(null);
+    svc.setCloseable(svc);
+    svc.stop();
+  }
+
+
+  private ClosingService<OpenClose> instance(boolean raiseExceptionOnClose) {
+    ClosingService<OpenClose> svc = new ClosingService<OpenClose>(new OpenClose(
+        raiseExceptionOnClose));
+    svc.init(new Configuration());
+    svc.start();
+    return svc;
+  }
+
+  private static class OpenClose implements Closeable {
+    public boolean closed = false;
+    public boolean raiseExceptionOnClose;
+
+    private OpenClose(boolean raiseExceptionOnClose) {
+      this.raiseExceptionOnClose = raiseExceptionOnClose;
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (!closed) {
+        closed = true;
+        if (raiseExceptionOnClose) {
+          throw new IOException("OpenClose");
+        }
+      }
+    }
+  }
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowCompositeService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowCompositeService.java
new file mode 100644
index 0000000..5780149
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowCompositeService.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestWorkflowCompositeService extends ParentWorkflowTestBase {
+  private static final Logger
+      log = LoggerFactory.getLogger(TestWorkflowCompositeService.class);
+
+  @Test
+  public void testSingleChild() throws Throwable {
+    Service parent = startService(new MockService());
+    parent.stop();
+  }
+
+  @Test
+  public void testSingleChildTerminating() throws Throwable {
+    ServiceParent parent =
+        startService(new MockService("1", false, 100));
+    waitForParentToStop(parent);
+  }
+
+  @Test
+  public void testSingleChildFailing() throws Throwable {
+    ServiceParent parent =
+        startService(new MockService("1", true, 100));
+    waitForParentToStop(parent);
+    assert parent.getFailureCause() != null;
+  }
+
+  @Test
+  public void testTwoChildren() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = startService(one, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+  }
+
+  @Test
+  public void testCallableChild() throws Throwable {
+
+    MockService one = new MockService("one", false, 100);
+    CallableHandler handler = new CallableHandler("hello");
+    WorkflowCallbackService<String> ens =
+        new WorkflowCallbackService<String>("handler", handler, 100, true);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = startService(one, ens, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(ens);
+    assertStopped(two);
+    assertTrue(handler.notified);
+    String s = ens.getScheduledFuture().get();
+    assertEquals("hello", s);
+  }
+
+  @Test
+  public void testNestedComposite() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = buildService(one, two);
+    ServiceParent outer = startService(parent);
+    assertTrue(outer.waitForServiceToStop(1000));
+    assertStopped(one);
+    assertStopped(two);
+  }
+
+  @Test
+  public void testFailingComposite() throws Throwable {
+    MockService one = new MockService("one", true, 10);
+    MockService two = new MockService("two", false, 1000);
+    ServiceParent parent = startService(one, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+    assertNotNull(one.getFailureCause());
+    assertNotNull(parent.getFailureCause());
+    assertEquals(one.getFailureCause(), parent.getFailureCause());
+  }
+
+  @Override
+  public ServiceParent buildService(Service... services) {
+    ServiceParent parent =
+        new WorkflowCompositeService("test", services);
+    parent.init(new Configuration());
+    return parent;
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowExecutorService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowExecutorService.java
new file mode 100644
index 0000000..9514f47
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowExecutorService.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.junit.Test;
+
+
+public class TestWorkflowExecutorService extends WorkflowServiceTestBase {
+
+
+  @Test
+  public void testAsyncRun() throws Throwable {
+
+    ExecutorSvc svc = run(new ExecutorSvc());
+    ServiceTerminatingRunnable runnable = new ServiceTerminatingRunnable(svc,
+        new SimpleRunnable());
+
+    // synchronous in-thread execution
+    svc.execute(runnable);
+    Thread.sleep(1000);
+    assertStopped(svc);
+  }
+
+  @Test
+  public void testFailureRun() throws Throwable {
+
+    ExecutorSvc svc = run(new ExecutorSvc());
+    ServiceTerminatingRunnable runnable = new ServiceTerminatingRunnable(svc,
+        new SimpleRunnable(true));
+
+    // synchronous in-thread execution
+    svc.execute(runnable);
+    Thread.sleep(1000);
+    assertStopped(svc);
+    assertNotNull(runnable.getException());
+  }
+
+  private static class ExecutorSvc extends AbstractWorkflowExecutorService {
+    private ExecutorSvc() {
+      super("ExecutorService",
+          ServiceThreadFactory.singleThreadExecutor("test", true));
+    }
+
+  }
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowForkedProcessService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowForkedProcessService.java
new file mode 100644
index 0000000..6d08156
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowForkedProcessService.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceOperations;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test the long lived process by executing a command that works and a command
+ * that fails
+ */
+public class TestWorkflowForkedProcessService extends WorkflowServiceTestBase {
+  private static final Logger
+      log = LoggerFactory.getLogger(TestWorkflowForkedProcessService.class);
+
+  private static final Logger
+      processLog =
+      LoggerFactory.getLogger("org.apache.hadoop.services.workflow.Process");
+  public static final int RECENT_OUTPUT_SLEEP_DURATION = 4000;
+
+
+  private ForkedProcessService process;
+  private File testDir = new File("target");
+  private ProcessCommandFactory commandFactory;
+  private Map<String, String> env = new HashMap<String, String>();
+
+  @Before
+  public void setupProcesses() {
+    commandFactory = ProcessCommandFactory.createProcessCommandFactory();
+  }
+
+  @After
+  public void stopProcesses() {
+    ServiceOperations.stop(process);
+  }
+
+  @Test
+  public void testLs() throws Throwable {
+
+    initProcess(commandFactory.ls(testDir));
+    exec();
+    assertFalse(process.isProcessRunning());
+    assertEquals(0, process.getExitCode());
+
+    assertStringInOutput("test-classes", getFinalOutput());
+    // assert that the service did not fail
+    assertNull(process.getFailureCause());
+  }
+
+  @Test
+  public void testExitCodes() throws Throwable {
+
+    initProcess(commandFactory.exitFalse());
+    exec();
+    assertFalse(process.isProcessRunning());
+    int exitCode = process.getExitCode();
+    assertTrue(exitCode != 0);
+    int corrected = process.getExitCodeSignCorrected();
+    assertEquals(1, corrected);
+    // assert that the exit code was uprated to a service failure
+    assertNotNull(process.getFailureCause());
+
+  }
+
+  @Test
+  public void testEcho() throws Throwable {
+
+    String echoText = "hello, world";
+    initProcess(commandFactory.echo(echoText));
+    exec();
+
+    assertEquals(0, process.getExitCode());
+    assertStringInOutput(echoText, getFinalOutput());
+
+  }
+
+  @Test
+  public void testSetenv() throws Throwable {
+
+    String var = "TEST_RUN";
+    String val = "TEST-RUN-ENV-VALUE";
+    env.put(var, val);
+    initProcess(commandFactory.env());
+    exec();
+
+    assertEquals(0, process.getExitCode());
+    assertStringInOutput(val, getFinalOutput());
+  }
+
+  /**
+   * Get the final output. includes a quick sleep for the tail output
+   * @return the last output
+   */
+  private List<String> getFinalOutput() {
+    return process.getRecentOutput(true, RECENT_OUTPUT_SLEEP_DURATION);
+  }
+
+  private ForkedProcessService initProcess(List<String> commands) throws
+      IOException {
+    process = new ForkedProcessService(name.getMethodName(), env,
+        commands);
+    process.init(new Configuration());
+
+    return process;
+  }
+
+  public void exec() throws InterruptedException {
+    assertNotNull(process);
+    EndOfServiceWaiter waiter = new EndOfServiceWaiter(process);
+    process.start();
+    waiter.waitForServiceToStop(5000);
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowRpcService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowRpcService.java
new file mode 100644
index 0000000..c7910ff
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowRpcService.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+public class TestWorkflowRpcService extends WorkflowServiceTestBase {
+
+  @Test
+  public void testCreateMockRPCService() throws Throwable {
+    MockRPC rpc = new MockRPC();
+    rpc.start();
+    assertTrue(rpc.started);
+    rpc.getListenerAddress();
+    rpc.stop();
+    assertTrue(rpc.stopped);
+  }
+
+  @Test
+  public void testLifecycle() throws Throwable {
+    MockRPC rpc = new MockRPC();
+    WorkflowRpcService svc = new WorkflowRpcService("test", rpc);
+    run(svc);
+    assertTrue(rpc.started);
+    svc.getConnectAddress();
+    svc.stop();
+    assertTrue(rpc.stopped);
+  }
+  
+  @Test
+  public void testStartFailure() throws Throwable {
+    MockRPC rpc = new MockRPC();
+    rpc.failOnStart = true;
+    WorkflowRpcService svc = new WorkflowRpcService("test", rpc);
+    svc.init(new Configuration());
+    try {
+      svc.start();
+      fail("expected an exception");
+    } catch (RuntimeException e) {
+      assertEquals("failOnStart", e.getMessage());
+    }
+    svc.stop();
+    assertTrue(rpc.stopped);
+  }
+  
+  private static class MockRPC extends Server {
+
+    public boolean stopped;
+    public boolean started;
+    public boolean failOnStart;
+
+    private MockRPC() throws IOException {
+      super("localhost", 0, null, 1, new Configuration());
+    }
+
+    @Override
+    public synchronized void start() {
+      if (failOnStart) {
+        throw new RuntimeException("failOnStart");
+      }
+      started = true;
+      super.start();
+    }
+
+    @Override
+    public synchronized void stop() {
+      stopped = true;
+      super.stop();
+    }
+
+    @Override
+    public synchronized InetSocketAddress getListenerAddress() {
+      return super.getListenerAddress();
+    }
+
+    @Override
+    public Writable call(RPC.RpcKind rpcKind,
+        String protocol,
+        Writable param,
+        long receiveTime) throws Exception {
+      return null;
+    }
+  }
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowSequenceService.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowSequenceService.java
new file mode 100644
index 0000000..581e3ed
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowSequenceService.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestWorkflowSequenceService extends ParentWorkflowTestBase {
+  private static final Logger
+      log = LoggerFactory.getLogger(TestWorkflowSequenceService.class);
+
+  @Test
+  public void testSingleSequence() throws Throwable {
+    ServiceParent parent = startService(new MockService());
+    parent.stop();
+  }
+
+  @Test
+  public void testEmptySequence() throws Throwable {
+    ServiceParent parent = startService();
+    waitForParentToStop(parent);
+  }
+
+  @Test
+  public void testSequence() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = startService(one, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+    assert ((WorkflowSequenceService) parent).getPreviousService().equals(two);
+  }
+
+  @Test
+  public void testCallableChild() throws Throwable {
+
+    MockService one = new MockService("one", false, 100);
+    CallableHandler handler = new CallableHandler("hello");
+    WorkflowCallbackService<String> ens =
+        new WorkflowCallbackService<String>("handler", handler, 100, true);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = startService(one, ens, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(ens);
+    assertStopped(two);
+    assertTrue(handler.notified);
+    String s = ens.getScheduledFuture().get();
+    assertEquals("hello", s);
+  }
+
+
+  @Test
+  public void testFailingSequence() throws Throwable {
+    MockService one = new MockService("one", true, 100);
+    MockService two = new MockService("two", false, 100);
+    WorkflowSequenceService parent =
+        (WorkflowSequenceService) startService(one, two);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertInState(two, Service.STATE.NOTINITED);
+    assertEquals(one, parent.getPreviousService());
+  }
+
+
+  @Test
+  public void testFailInStartNext() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", true, 0);
+    MockService three = new MockService("3", false, 0);
+    ServiceParent parent = startService(one, two, three);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+    Throwable failureCause = two.getFailureCause();
+    assertNotNull(failureCause);
+    Throwable parentFailureCause = parent.getFailureCause();
+    assertNotNull(parentFailureCause);
+    assertEquals(parentFailureCause, failureCause);
+    assertInState(three, Service.STATE.NOTINITED);
+  }
+
+  @Test
+  public void testSequenceInSequence() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = buildService(one, two);
+    ServiceParent outer = startService(parent);
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+  }
+
+  @Test
+  public void testVarargsConstructor() throws Throwable {
+    MockService one = new MockService("one", false, 100);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = new WorkflowSequenceService("test", one, two);
+    parent.init(new Configuration());
+    parent.start();
+    waitForParentToStop(parent);
+    assertStopped(one);
+    assertStopped(two);
+  }
+
+
+  @Test
+  public void testAddChild() throws Throwable {
+    MockService one = new MockService("one", false, 5000);
+    MockService two = new MockService("two", false, 100);
+    ServiceParent parent = startService(one, two);
+    CallableHandler handler = new CallableHandler("hello");
+    WorkflowCallbackService<String> ens =
+        new WorkflowCallbackService<String>("handler", handler, 100, true);
+    parent.addService(ens);
+    waitForParentToStop(parent, 10000);
+    assertStopped(one);
+    assertStopped(two);
+    assertStopped(ens);
+    assertEquals("hello", ens.getScheduledFuture().get());
+  }
+
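+  /**
+   * Create and init, but do not start, a sequence service over the
+   * given children.
+   * @param services child services in execution order
+   * @return the initialized parent service
+   */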
+  public WorkflowSequenceService buildService(Service... services) {
+    WorkflowSequenceService parent =
+        new WorkflowSequenceService("test", services);
+    parent.init(new Configuration());
+    return parent;
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowServiceTerminatingRunnable.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowServiceTerminatingRunnable.java
new file mode 100644
index 0000000..15be1dc
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/TestWorkflowServiceTerminatingRunnable.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.junit.Test;
+
+
+public class TestWorkflowServiceTerminatingRunnable extends WorkflowServiceTestBase {
+
+  @Test
+  public void testNoService() throws Throwable {
+
+    try {
+      new ServiceTerminatingRunnable(null, new SimpleRunnable());
+      fail("unexpected ");
+    } catch (IllegalArgumentException e) {
+
+      // expected 
+    }
+  }
+
+
+  @Test
+  public void testBasicRun() throws Throwable {
+
+    WorkflowCompositeService svc = run(new WorkflowCompositeService());
+    ServiceTerminatingRunnable runnable = new ServiceTerminatingRunnable(svc,
+        new SimpleRunnable());
+
+    // synchronous in-thread execution
+    runnable.run();
+    assertStopped(svc);
+  }
+
+  @Test
+  public void testFailureRun() throws Throwable {
+
+    WorkflowCompositeService svc = run(new WorkflowCompositeService());
+    ServiceTerminatingRunnable runnable =
+        new ServiceTerminatingRunnable(svc, new SimpleRunnable(true));
+
+    // synchronous in-thread execution
+    runnable.run();
+    assertStopped(svc);
+    assertNotNull(runnable.getException());
+  }
+
+}
diff --git a/slider-core/src/test/java/org/apache/slider/server/services/workflow/WorkflowServiceTestBase.java b/slider-core/src/test/java/org/apache/slider/server/services/workflow/WorkflowServiceTestBase.java
new file mode 100644
index 0000000..3049d8f
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/server/services/workflow/WorkflowServiceTestBase.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Test base for workflow service tests.
+ */
+public abstract class WorkflowServiceTestBase extends Assert {
+  private static final Logger
+      log = LoggerFactory.getLogger(WorkflowServiceTestBase.class);
+
+  /**
+   * Set the timeout for every test
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(15000);
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Before
+  public void nameThread() {
+    Thread.currentThread().setName("JUnit");
+  }
+
+
+  protected void assertInState(Service service, Service.STATE expected) {
+    Service.STATE actual = service.getServiceState();
+    if (actual != expected) {
+      fail("Service " + service.getName() + " in state " + actual
+           + " -expected " + expected);
+    }
+  }
+
+  protected void assertStopped(Service service) {
+    assertInState(service, Service.STATE.STOPPED);
+  }
+
+  protected void logState(ServiceParent p) {
+    logService(p);
+    for (Service s : p.getServices()) {
+      logService(s);
+    }
+  }
+
+  protected void logService(Service s) {
+    log.info(s.toString());
+    Throwable failureCause = s.getFailureCause();
+    if (failureCause != null) {
+      log.info("Failed in state {} with {}", s.getFailureState(),
+          failureCause);
+    }
+  }
+
+  /**
+   * Init and start a service
+   * @param svc the service
+   * @return the service
+   */
+  protected <S extends Service> S run(S svc) {
+    svc.init(new Configuration());
+    svc.start();
+    return svc;
+  }
+
+  /**
+   * Handler for callable events
+   */
+  public static class CallableHandler implements Callable<String> {
+    public volatile boolean notified = false;
+    public final String result;
+
+    public CallableHandler(String result) {
+      this.result = result;
+    }
+
+    @Override
+    public String call() throws Exception {
+      log.info("CallableHandler::call");
+      notified = true;
+      return result;
+    }
+  }
+
+  /**
+   * Assert that a string is in an output list. Fails fast if the output
+   * list is empty
+   * @param text text to scan for
+   * @param output list of output lines.
+   */
+  public void assertStringInOutput(String text, List<String> output) {
+    assertTrue("Empty output list", !output.isEmpty());
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    for (String s : output) {
+      builder.append(s).append('\n');
+      if (s.contains(text)) {
+        found = true;
+        break;
+      }
+    }
+
+    if (!found) {
+      String message =
+          "Text \"" + text + "\" not found in " + output.size() + " lines\n";
+      fail(message + builder.toString());
+    }
+  }
+}
diff --git a/slider-core/src/test/java/org/apache/slider/tools/TestUtility.java b/slider-core/src/test/java/org/apache/slider/tools/TestUtility.java
new file mode 100644
index 0000000..a8b14ac
--- /dev/null
+++ b/slider-core/src/test/java/org/apache/slider/tools/TestUtility.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.tools;
+
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+/** Various utility methods */
+public class TestUtility {
+  protected static final Logger log =
+      LoggerFactory.getLogger(TestUtility.class);
+
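+  /**
+   * Recursively add the contents of a directory to a zip stream,
+   * prefixing each entry name with the supplied prefix.
+   */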
+  public static void addDir(File dirObj, ZipArchiveOutputStream zipFile, String prefix) throws IOException {
+    for (File file : dirObj.listFiles()) {
+      if (file.isDirectory()) {
+        addDir(file, zipFile, prefix + file.getName() + File.separator);
+      } else {
+        log.info("Adding to zip - " + prefix + file.getName());
+        zipFile.putArchiveEntry(new ZipArchiveEntry(prefix + file.getName()));
+        FileInputStream in = new FileInputStream(file);
+        try {
+          IOUtils.copy(in, zipFile);
+        } finally {
+          in.close();
+        }
+        zipFile.closeArchiveEntry();
+      }
+    }
+  }
+
+  public static void zipDir(String zipFile, String dir) throws IOException {
+    File dirObj = new File(dir);
+    ZipArchiveOutputStream out = new ZipArchiveOutputStream(new FileOutputStream(zipFile));
+    log.info("Creating : " + zipFile);
+    try {
+      addDir(dirObj, out, "");
+    } finally {
+      out.close();
+    }
+  }
+
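+  /**
+   * Zip the contents of srcPath into pkgName under a fresh temporary
+   * subdirectory.
+   * @return the absolute path of the created zip file
+   */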
+  public static String createAppPackage(
+      TemporaryFolder folder, String subDir, String pkgName, String srcPath) throws IOException {
+    String zipFileName;
+    File pkgPath = folder.newFolder(subDir);
+    File zipFile = new File(pkgPath, pkgName).getAbsoluteFile();
+    zipFileName = zipFile.getAbsolutePath();
+    TestUtility.zipDir(zipFileName, srcPath);
+    log.info("Created temporary zip file at {}", zipFileName);
+    return zipFileName;
+  }
+
+}
diff --git a/slider-core/src/test/python/agent.py b/slider-core/src/test/python/agent.py
index 4be2cd9..4177074 100644
--- a/slider-core/src/test/python/agent.py
+++ b/slider-core/src/test/python/agent.py
@@ -21,21 +21,22 @@
 import sys
 import datetime
 import time
-import argparse
+from optparse import OptionParser
 import os
 
 # A representative Agent code for the embedded agent
 def main():
   print "Executing echo"
-  print 'Argument List: {}'.format(str(sys.argv))
+  print 'Argument List: {0}'.format(str(sys.argv))
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--log', dest='log_folder', help='log destination')
-  parser.add_argument('--config', dest='conf_folder', help='conf folder')
-  args = parser.parse_args()
-  if args.log_folder:
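+  # optparse is used instead of argparse, which is not in the stdlib before Python 2.7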
+  parser = OptionParser()
+  parser.add_option("--log", dest="log_folder", help="log destination")
+  parser.add_option("--config", dest="conf_folder", help="conf folder")
+  (options, args) = parser.parse_args()
+
+  if options.log_folder:
     log_file_name = "echo" + str(datetime.datetime.now()) + ".log"
-    log_file_path = os.path.join(args.log_folder, log_file_name)
+    log_file_path = os.path.join(options.log_folder, log_file_name)
     logging.basicConfig(filename=log_file_path, level=logging.DEBUG)
     print log_file_path
   logging.debug('Starting echo script ...')
diff --git a/slider-core/src/test/python/agent/main.py b/slider-core/src/test/python/agent/main.py
index fd8b262..8b7044e 100755
--- a/slider-core/src/test/python/agent/main.py
+++ b/slider-core/src/test/python/agent/main.py
@@ -21,25 +21,28 @@
 import sys
 import datetime
 import time
-import argparse
+from optparse import OptionParser
 import os
 
 
 def main():
   print "Executing echo"
-  print 'Argument List: {}'.format(str(sys.argv))
+  print 'Argument List: {0}'.format(str(sys.argv))
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--log', dest='log_folder', help='log destination')
-  parser.add_argument('--config', dest='conf_folder', help='conf folder')
-  parser.add_argument('--command', dest='command', help='command to execute')
-  parser.add_argument('--label', dest='label', help='label')
-  parser.add_argument('--host', dest='host', help='port')
-  parser.add_argument('--port', dest='port', help='host')
-  args = parser.parse_args()
-  if args.log_folder:
+  parser = OptionParser()
+  parser.add_option("--log", dest="log_folder", help="log destination")
+  parser.add_option("--config", dest="conf_folder", help="conf folder")
+  parser.add_option('--command', dest='command', help='command to execute')
+  parser.add_option('--label', dest='label', help='label')
+  parser.add_option('--host', dest='host', help='host')
+  parser.add_option('--port', dest='port', help='port')
+  parser.add_option('--secured_port', dest='secured_port', help='secured port')
+
+  (options, args) = parser.parse_args()
+
+  if options.log_folder:
     log_file_name = "echo" + str(datetime.datetime.now()) + ".log"
-    log_file_path = os.path.join(args.log_folder, log_file_name)
+    log_file_path = os.path.join(options.log_folder, log_file_name)
     logging.basicConfig(filename=log_file_path, level=logging.DEBUG)
     print log_file_path
   logging.debug('Starting echo script ...')
diff --git a/slider-core/src/test/python/appdef_1.zip b/slider-core/src/test/python/appdef_1.zip
deleted file mode 100644
index 6ee6af4..0000000
--- a/slider-core/src/test/python/appdef_1.zip
+++ /dev/null
Binary files differ
diff --git a/slider-core/src/test/python/echo.py b/slider-core/src/test/python/echo.py
index 2bcab20..ea5e8ce 100644
--- a/slider-core/src/test/python/echo.py
+++ b/slider-core/src/test/python/echo.py
@@ -21,22 +21,23 @@
 import sys
 import datetime
 import time
-import argparse
+from optparse import OptionParser
 import os
 
 
 def main():
   print "Executing echo"
-  print 'Argument List: {}'.format(str(sys.argv))
+  print 'Argument List: {0}'.format(str(sys.argv))
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--log', dest='log_folder', help='log destination')
-  parser.add_argument('--config', dest='conf_folder', help='conf folder')
-  parser.add_argument('--command', dest='command', help='command to execute')
-  args = parser.parse_args()
-  if args.log_folder:
+  parser = OptionParser()
+  parser.add_option("--log", dest="log_folder", help="log destination")
+  parser.add_option("--config", dest="conf_folder", help="conf folder")
+  parser.add_option('--command', dest='command', help='command to execute')
+  (options, args) = parser.parse_args()
+
+  if options.log_folder:
     log_file_name = "echo" + str(datetime.datetime.now()) + ".log"
-    log_file_path = os.path.join(args.log_folder, log_file_name)
+    log_file_path = os.path.join(options.log_folder, log_file_name)
     logging.basicConfig(filename=log_file_path, level=logging.DEBUG)
     print log_file_path
   logging.debug('Starting echo script ...')
diff --git a/slider-core/src/test/python/metainfo.xml b/slider-core/src/test/python/metainfo.xml
index beefd9c..09b314e 100644
--- a/slider-core/src/test/python/metainfo.xml
+++ b/slider-core/src/test/python/metainfo.xml
@@ -17,41 +17,39 @@
 -->
 <metainfo>
   <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ECHO</name>
-      <comment>
-        Echo
-      </comment>
-      <version>0.1</version>
-      <type>YARN-APP</type>
-      <minHadoopVersion>2.1.0</minHadoopVersion>
-      <components>
-        <component>
-          <name>echo</name>
-          <category>MASTER</category>
-          <minInstanceCount>1</minInstanceCount>
-          <maxInstanceCount>2</maxInstanceCount>
-          <commandScript>
-            <script>echo.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/echo.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
+  <application>
+    <name>ECHO</name>
+    <comment>
+      Echo
+    </comment>
+    <version>0.1</version>
+    <type>YARN-APP</type>
+    <minHadoopVersion>2.1.0</minHadoopVersion>
+    <components>
+      <component>
+        <name>echo</name>
+        <category>MASTER</category>
+        <minInstanceCount>1</minInstanceCount>
+        <maxInstanceCount>2</maxInstanceCount>
+        <commandScript>
+          <script>echo.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/echo.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
 
-    </service>
-  </services>
+  </application>
 </metainfo>
 
diff --git a/slider-core/src/test/resources/example-slider-test.xml b/slider-core/src/test/resources/example-slider-test.xml
index 084557f..a752cfd 100644
--- a/slider-core/src/test/resources/example-slider-test.xml
+++ b/slider-core/src/test/resources/example-slider-test.xml
@@ -98,7 +98,7 @@
 
   <property>
     <name>slider.test.accumulo.tar</name>
-    <value>/home/slider/Projects/accumulo/accumulo-1.5.1-bin.tar</value>
+    <value>/home/slider/Projects/accumulo/accumulo-1.6.0-bin.tar.gz</value>
     <description>Accumulo archive URI</description>
   </property>
 
@@ -114,4 +114,4 @@
     <description>Hadoop home dir on target systems</description>
   </property>
   
-</configuration>
\ No newline at end of file
+</configuration>
diff --git a/slider-core/src/test/resources/org/apache/slider/common/tools/test.zip b/slider-core/src/test/resources/org/apache/slider/common/tools/test.zip
deleted file mode 100644
index 18acf1c..0000000
--- a/slider-core/src/test/resources/org/apache/slider/common/tools/test.zip
+++ /dev/null
Binary files differ
diff --git a/src/site/markdown/architecture/index.md b/slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
similarity index 76%
copy from src/site/markdown/architecture/index.md
copy to slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
index d77a58e..a1d7780 100644
--- a/src/site/markdown/architecture/index.md
+++ b/slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
@@ -1,4 +1,4 @@
-<!---
+<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
@@ -13,15 +13,4 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
--->
-  
-# Architecture
-
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+-->
\ No newline at end of file
diff --git a/slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml b/slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
new file mode 100644
index 0000000..3d24f96
--- /dev/null
+++ b/slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
@@ -0,0 +1,95 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>STORM</name>
+    <comment>Apache Hadoop Stream processing framework</comment>
+    <version>0.9.1.2.1</version>
+    <components>
+
+      <component>
+        <name>NIMBUS</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/nimbus.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>STORM_REST_API</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/rest_api.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>SUPERVISOR</name>
+        <category>SLAVE</category>
+        <commandScript>
+          <script>scripts/supervisor.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>STORM_UI_SERVER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/ui_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>DRPC_SERVER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/drpc_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+    <configuration-dependencies>
+      <config-type>storm-site</config-type>
+      <config-type>global</config-type>
+    </configuration-dependencies>
+  </application>
+</metainfo>
diff --git a/src/site/markdown/architecture/index.md b/slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
similarity index 76%
copy from src/site/markdown/architecture/index.md
copy to slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
index d77a58e..a1d7780 100644
--- a/src/site/markdown/architecture/index.md
+++ b/slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
@@ -1,4 +1,4 @@
-<!---
+<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
@@ -13,15 +13,4 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
--->
-  
-# Architecture
-
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+-->
\ No newline at end of file
diff --git a/src/site/markdown/architecture/index.md b/slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
similarity index 76%
copy from src/site/markdown/architecture/index.md
copy to slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
index d77a58e..a1d7780 100644
--- a/src/site/markdown/architecture/index.md
+++ b/slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
@@ -1,4 +1,4 @@
-<!---
+<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
@@ -13,15 +13,4 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
--->
-  
-# Architecture
-
-* [Overview](architecture.html)
-* [Application Needs](application_needs.html)
-* [Specification](../specification/index.html)
-* [Service Registry](../registry/index.html)
-* [Role history](rolehistory.html) 
-
-
- 
+-->
\ No newline at end of file
diff --git a/slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml b/slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
index 2fcf4cd..3d24f96 100644
--- a/slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
+++ b/slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
@@ -18,80 +18,78 @@
 
 <metainfo>
   <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>STORM</name>
-      <comment>Apache Hadoop Stream processing framework</comment>
-      <version>0.9.1.2.1</version>
-      <components>
+  <application>
+    <name>STORM</name>
+    <comment>Apache Hadoop Stream processing framework</comment>
+    <version>0.9.1.2.1</version>
+    <components>
 
-        <component>
-          <name>NIMBUS</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/nimbus.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
+      <component>
+        <name>NIMBUS</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/nimbus.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
 
-        <component>
-          <name>STORM_REST_API</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/rest_api.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
+      <component>
+        <name>STORM_REST_API</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/rest_api.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
 
-        <component>
-          <name>SUPERVISOR</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/supervisor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
+      <component>
+        <name>SUPERVISOR</name>
+        <category>SLAVE</category>
+        <commandScript>
+          <script>scripts/supervisor.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
 
-        <component>
-          <name>STORM_UI_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/ui_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
+      <component>
+        <name>STORM_UI_SERVER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/ui_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
 
-        <component>
-          <name>DRPC_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/drpc_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
+      <component>
+        <name>DRPC_SERVER</name>
+        <category>MASTER</category>
+        <commandScript>
+          <script>scripts/drpc_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
 
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>tarball</type>
-              <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>tarball</type>
+            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
 
-      <configuration-dependencies>
-        <config-type>storm-site</config-type>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
+    <configuration-dependencies>
+      <config-type>storm-site</config-type>
+      <config-type>global</config-type>
+    </configuration-dependencies>
+  </application>
 </metainfo>
diff --git a/slider-funtest/pom.xml b/slider-funtest/pom.xml
index 529c496..cb16669 100644
--- a/slider-funtest/pom.xml
+++ b/slider-funtest/pom.xml
@@ -17,7 +17,6 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-funtest</artifactId>
-  <version>0.30</version>
   <name>Slider Functional Tests</name>
   <packaging>jar</packaging>
   <description>
@@ -26,8 +25,11 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
   </parent>
+  <properties>
+    <work.dir>package-tmp</work.dir>
+  </properties>
 
   <build>
 
@@ -102,8 +104,8 @@
           </forkedProcessTimeoutInSeconds>
           <threadCount>1</threadCount>
           <argLine>${test.argLine}</argLine>
-          <failIfNoTests>${test.failIfNoTests}</failIfNoTests>
-          
+          <failIfNoTests>${test.funtests.failIfNoTests}</failIfNoTests>
+
           <trimStackTrace>false</trimStackTrace>
           <redirectTestOutputToFile>${build.redirect.test.output.to.file}</redirectTestOutputToFile>
           <systemPropertyVariables>
@@ -114,6 +116,10 @@
             <!-- this property must be supplied-->
             <slider.conf.dir>${slider.conf.dir}</slider.conf.dir>
             <slider.bin.dir>../slider-assembly/target/slider-${project.version}-all/slider-${project.version}</slider.bin.dir>
+            <test.app.pkg.dir>../app-packages/command-logger/slider-pkg/target</test.app.pkg.dir>
+            <test.app.pkg.file>apache-slider-command-logger.zip</test.app.pkg.file>
+            <test.app.resource>../slider-core/src/test/app_packages/test_command_log/resources.json</test.app.resource>
+            <test.app.template>../slider-core/src/test/app_packages/test_command_log/appConfig.json</test.app.template>
           </systemPropertyVariables>
           <includes>
             <include>**/Test*.java</include>
@@ -144,6 +150,45 @@
           </excludes>
         </configuration>
       </plugin>
+
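+      <!-- stage the command logger application package for the functional tests -->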
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>${maven-dependency-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeArtifactIds>apache-slider-command-logger</includeArtifactIds>
+              <includeTypes>zip</includeTypes>
+              <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+            </configuration>
+          </execution>
+          <execution>
+            <id>copy</id>
+            <phase>test</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.slider</groupId>
+                  <artifactId>apache-slider-command-logger</artifactId>
+                  <type>zip</type>
+                  <overWrite>false</overWrite>
+                  <outputDirectory>${project.build.directory}/${work.dir}</outputDirectory>
+                  <destFileName>apache-slider-command-logger.zip</destFileName>
+                </artifactItem>
+              </artifactItems>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
  
     </plugins>
   </build>
@@ -161,7 +206,6 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-core</artifactId>
-      <version>${project.version}</version>
         <exclusions>
           <exclusion>
             <groupId>org.apache.hadoop</groupId>
@@ -173,14 +217,12 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-core</artifactId>
-      <version>${project.version}</version>
       <type>test-jar</type>
     </dependency>
 
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-assembly</artifactId>
-      <version>${project.version}</version>
       <classifier>all</classifier>
       <type>tar.gz</type>
       <scope>test</scope>
@@ -188,13 +230,7 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
+      <artifactId>hadoop-client</artifactId>
     </dependency>
 
     <dependency>
@@ -208,7 +244,14 @@
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>
     </dependency>
-    
+
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>apache-slider-command-logger</artifactId>
+      <version>${project.version}</version>
+      <type>zip</type>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
@@ -253,13 +296,61 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-agent</artifactId>
-      <version>${project.version}</version>
       <scope>test</scope>
       <type>tar.gz</type>
     </dependency>
 
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+    </dependency>
+
+      <dependency>
+        <groupId>org.codehaus.groovy</groupId>
+        <artifactId>groovy-all</artifactId>
+      </dependency>
+
 
   </dependencies>
 
+  <profiles>
+    <profile>
+      <id>tests-on-from-CLI</id>
+      <activation>
+        <property>
+          <name>slider.conf.dir</name>
+        </property>
+      </activation>
+      <properties>
+        <maven.test.skip>false</maven.test.skip>
+      </properties>
+    </profile>
+    <profile>
+      <id>tests-on-from-build.properties</id>
+      <activation>
+        <file>
+          <exists>../build.properties</exists>
+        </file>
+      </activation>
+      <properties>
+        <maven.test.skip>false</maven.test.skip>
+      </properties>
+    </profile>
+    <profile>
+      <id>tests-off</id>
+      <activation>
+        <activeByDefault>true</activeByDefault>
+      </activation>
+      <properties>
+        <maven.test.skip>true</maven.test.skip>
+      </properties>
+    </profile>
+
+  </profiles>
 
 </project>
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
new file mode 100644
index 0000000..26ae2bb
--- /dev/null
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.framework
+
+import groovy.util.logging.Slf4j
+import org.apache.hadoop.fs.Path
+import org.apache.slider.common.SliderExitCodes
+import org.apache.slider.common.params.Arguments
+import org.apache.slider.common.params.SliderActions
+import org.apache.tools.zip.ZipEntry
+import org.apache.tools.zip.ZipOutputStream
+import org.junit.Before
+import org.junit.BeforeClass
+import org.junit.Rule
+import org.junit.rules.TemporaryFolder
+
+@Slf4j
+class AgentCommandTestBase extends CommandTestBase
+implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
+
+  public static final boolean AGENTTESTS_ENABLED
+  private static String TEST_APP_PKG_DIR_PROP = "test.app.pkg.dir"
+  private static String TEST_APP_PKG_FILE_PROP = "test.app.pkg.file"
+  private static String TEST_APP_RESOURCE = "test.app.resource"
+  private static String TEST_APP_TEMPLATE = "test.app.template"
+
+
+  protected String APP_RESOURCE = sysprop(TEST_APP_RESOURCE)
+  protected String APP_TEMPLATE = sysprop(TEST_APP_TEMPLATE)
+  public static final String TEST_APP_PKG_DIR = sysprop(TEST_APP_PKG_DIR_PROP)
+  public static final String TEST_APP_PKG_FILE = sysprop(TEST_APP_PKG_FILE_PROP)
+
+
+  protected static Path agentTarballPath;
+  protected static Path appPkgPath;
+  protected static Path agtIniPath;
+
+  protected static boolean setup_failed
+
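+  // agent tests are opt-in: enabled through the slider test configuration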
+  static {
+    AGENTTESTS_ENABLED = SLIDER_CONFIG.getBoolean(KEY_TEST_AGENT_ENABLED, false)
+  }
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  public static void assumeAgentTestsEnabled() {
+    assumeFunctionalTestsEnabled()
+    assume(AGENTTESTS_ENABLED, "Agent tests disabled")
+  }
+
+  @BeforeClass
+  public static void setupAgent() {
+    assumeAgentTestsEnabled()
+
+  }
+
+  @Before
+  public void uploadAgentTarball() {
+    def agentUploads = new AgentUploads(SLIDER_CONFIG)
+    (agentTarballPath, agtIniPath) =
+        agentUploads.uploadAgentFiles(SLIDER_TAR_DIRECTORY, false)
+  }
+
+
+  @Before
+  public void setupApplicationPackage() {
+    try {
+      AgentUploads agentUploads = new AgentUploads(SLIDER_CONFIG)
+      agentUploads.uploader.mkHomeDir()
+
+      appPkgPath = new Path(clusterFS.homeDirectory, TEST_APP_PKG_FILE)
+      if (clusterFS.exists(appPkgPath)) {
+        clusterFS.delete(appPkgPath, false)
+        log.info "Existing app pkg deleted from $appPkgPath"
+      }
+
+      File zipFileName = new File(TEST_APP_PKG_DIR, TEST_APP_PKG_FILE).canonicalFile
+      agentUploads.uploader.copyIfOutOfDate(zipFileName, appPkgPath, false)
+      assume(clusterFS.exists(appPkgPath), "App pkg not uploaded to $appPkgPath")
+      log.info "App pkg uploaded at $appPkgPath"
+    } catch (Exception e) {
+      setup_failed = true
+      fail("Setup failed "+e)
+    }
+  }
+
+  public static void logShell(SliderShell shell) {
+    for (String str in shell.out) {
+      log.info str
+    }
+  }
+
+  public static void assertComponentCount(String component, int count, SliderShell shell) {
+    log.info("Asserting component count.")
+    String entry = findLineEntry(shell, ["instances", component] as String[])
+    log.info(entry)
+    assert entry != null
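+    // count the occurrences of "container_" in the entry to get the instance count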
+    int instanceCount = 0
+    int index = entry.indexOf("container_")
+    while (index != -1) {
+      instanceCount++;
+      index = entry.indexOf("container_", index + 1)
+    }
+
+    assert instanceCount == count, 'Instance count for component did not match expected. Parsed: ' + entry
+  }
+
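+  /**
+   * Scan the shell output for a chain of quoted locators, matching each
+   * locator in order across successive lines; returns the line that
+   * contains the final locator, or null if the chain never completes.
+   */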
+  public static String findLineEntry(SliderShell shell, String[] locators) {
+    int index = 0;
+    for (String str in shell.out) {
+      if (str.contains("\"" + locators[index] + "\"")) {
+        if (locators.size() == index + 1) {
+          return str;
+        } else {
+          index++;
+        }
+      }
+    }
+
+    return null;
+  }
+
+  public static String findLineEntryValue(SliderShell shell, String[] locators) {
+    String line = findLineEntry(shell, locators);
+
+    if (line != null) {
+      log.info("Parsing {} for value.", line)
+      int dividerIndex = line.indexOf(":");
+      if (dividerIndex > 0) {
+        String value = line.substring(dividerIndex + 1).trim()
+        if (value.endsWith(",")) {
+          value = value.subSequence(0, value.length() - 1)
+        }
+        return value;
+      }
+    }
+    return null;
+  }
+
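+  /**
+   * Check whether the output of the list action for the given application
+   * contains the supplied text, e.g. a YARN state such as "RUNNING".
+   */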
+  public static boolean isApplicationInState(String text, String applicationName) {
+    boolean exists = false
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_LIST,
+            applicationName])
+    for (String str in shell.out) {
+      if (str.contains(text)) {
+        exists = true
+      }
+    }
+
+    return exists
+  }
+
+  protected void ensureApplicationIsUp(String clusterName) {
+    repeatUntilTrue(this.&isApplicationUp, 15, 1000 * 3, ['arg1': clusterName],
+        true, 'Application did not start, aborting test.')
+  }
+
+  boolean isApplicationUp(Map<String, String> args) {
+    String applicationName = args['arg1'];
+    return isApplicationInState("RUNNING", applicationName);
+  }
+
+  public static void addDir(File dirObj, ZipOutputStream zipFile, String prefix) {
+    dirObj.eachFile() { file ->
+      if (file.directory) {
+        addDir(file, zipFile, prefix + file.name + File.separator)
+      } else {
+        log.info("Adding to zip - " + prefix + file.getName())
+        zipFile.putNextEntry(new ZipEntry(prefix + file.getName()))
+        file.eachByte(1024) { buffer, len -> zipFile.write(buffer, 0, len) }
+        zipFile.closeEntry()
+      }
+    }
+  }
+
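+  /**
+   * Invoke the closure with the given argument map until it returns true
+   * or maxAttempts is reached, sleeping sleepDur milliseconds between
+   * attempts; optionally fail the test with the supplied message.
+   */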
+  protected void repeatUntilTrue(Closure c, int maxAttempts, int sleepDur, Map args,
+                                 boolean failIfUnsuccessful = false, String message = "") {
+    int attemptCount = 0
+    while (attemptCount < maxAttempts) {
+      if (c(args)) {
+        break
+      };
+      attemptCount++;
+
+      if (failIfUnsuccessful) {
+        assert attemptCount != maxAttempts, message
+      }
+
+      sleep(sleepDur)
+    }
+  }
+
+  protected void cleanup(String applicationName) throws Throwable {
+    if (setup_failed) {
+      // cleanup probably won't work if setup failed
+      return
+    }
+
+    log.info "Cleaning app instance, if exists, by name " + applicationName
+    teardown(applicationName)
+
+    // sleep till the instance is frozen
+    sleep(1000 * 3)
+
+    SliderShell shell = slider([
+        ACTION_DESTROY,
+        applicationName])
+
+    if (shell.ret != 0 && shell.ret != EXIT_UNKNOWN_INSTANCE) {
+      logShell(shell)
+      assert fail("Old cluster either should not exist or should get destroyed.")
+    }
+  }
+}
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentUploads.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentUploads.groovy
new file mode 100644
index 0000000..2cec5c2
--- /dev/null
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentUploads.groovy
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.framework
+
+import groovy.util.logging.Slf4j
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.security.UserGroupInformation
+import org.apache.hadoop.fs.FileSystem as HadoopFS
+
+@Slf4j
+class AgentUploads implements FuntestProperties {
+  final Configuration conf
+  public final FileUploader uploader
+  public final HadoopFS clusterFS
+  public final Path homeDir
+
+  AgentUploads(Configuration conf) {
+    this.conf = conf
+    uploader = new FileUploader(conf, UserGroupInformation.currentUser)
+    clusterFS = uploader.fileSystem
+    homeDir = clusterFS.homeDirectory
+  }
+
+  /**
+   * Upload the agent tarball and agent.ini to the cluster filesystem
+   * @param tarballDir local directory containing the expanded slider tarball
+   * @param force force the upload even if the destination is up to date
+   * @return the paths of the uploaded tarball and agent.ini
+   */
+  def uploadAgentFiles(File tarballDir, boolean force) {
+    def localAgentTar = new File(tarballDir, AGENT_SLIDER_GZ_IN_SLIDER_TAR)
+    def agentTarballPath = new Path(
+        homeDir,
+        AGENT_TAR_FILENAME)
+
+    //create the home dir or fail
+    uploader.mkHomeDir()
+    // Upload the agent tarball
+    uploader.copyIfOutOfDate(localAgentTar, agentTarballPath, force)
+
+    File localAgentIni = new File(tarballDir, AGENT_INI_IN_SLIDER_TAR)
+    // Upload the agent.ini
+    def agentIniPath = new Path(homeDir, AGENT_INI)
+    uploader.copyIfOutOfDate(localAgentIni, agentIniPath, force)
+    return [agentTarballPath, agentIniPath]
+  }
+
+}
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/CommandTestBase.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/CommandTestBase.groovy
index d0d7fc9..08d352a 100644
--- a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/CommandTestBase.groovy
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/CommandTestBase.groovy
@@ -30,7 +30,6 @@
 import org.apache.slider.api.ClusterDescription
 import org.apache.slider.core.exceptions.SliderException
 import org.apache.slider.common.tools.SliderUtils
-import org.apache.slider.common.params.Arguments
 import org.apache.slider.client.SliderClient
 import org.apache.slider.test.SliderTestUtils
 import org.junit.Before
@@ -51,17 +50,16 @@
       LoggerFactory.getLogger(CommandTestBase.class);
 
   public static final String SLIDER_CONF_DIR = sysprop(SLIDER_CONF_DIR_PROP)
-  public static final String SLIDER_BIN_DIR = sysprop(SLIDER_BIN_DIR_PROP)
-  public static final File SLIDER_BIN_DIRECTORY = new File(
-      SLIDER_BIN_DIR).canonicalFile
+  public static final String SLIDER_TAR_DIR = sysprop(SLIDER_BIN_DIR_PROP)
+  public static final File SLIDER_TAR_DIRECTORY = new File(
+      SLIDER_TAR_DIR).canonicalFile
   public static final File SLIDER_SCRIPT = new File(
-      SLIDER_BIN_DIRECTORY,
+      SLIDER_TAR_DIRECTORY,
       BIN_SLIDER).canonicalFile
   public static final File SLIDER_CONF_DIRECTORY = new File(
       SLIDER_CONF_DIR).canonicalFile
   public static final File SLIDER_CONF_XML = new File(SLIDER_CONF_DIRECTORY,
       CLIENT_CONFIG_FILENAME).canonicalFile
-
   public static final YarnConfiguration SLIDER_CONFIG
   public static final int THAW_WAIT_TIME
   public static final int FREEZE_WAIT_TIME
@@ -105,15 +103,13 @@
       log.debug("Security enabled")
       SliderUtils.forceLogin()
     } else {
-      log.info "Security off, making cluster dirs broadly accessible"
+      log.info "Security is off"
     }
     SliderShell.confDir = SLIDER_CONF_DIRECTORY
     SliderShell.script = SLIDER_SCRIPT
     log.info("Test using ${HadoopFS.getDefaultUri(SLIDER_CONFIG)} " +
              "and YARN RM @ ${SLIDER_CONFIG.get(YarnConfiguration.RM_ADDRESS)}")
 
-    // now patch the settings with the path of the conf direcotry
-
   }
 
   /**
@@ -164,7 +160,7 @@
    * @return
    */
   public static SliderShell slider(int exitCode, Collection<String> commands) {
-    return SliderShell.run(commands, exitCode)
+    return SliderShell.run(exitCode, commands)
   }
 
   /**
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FileUploader.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FileUploader.groovy
new file mode 100644
index 0000000..921adbf
--- /dev/null
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FileUploader.groovy
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.framework
+
+import groovy.transform.CompileStatic
+import groovy.util.logging.Slf4j
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.fs.FileSystem as HadoopFS
+import org.apache.hadoop.fs.FileUtil
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.fs.permission.FsPermission
+import org.apache.hadoop.security.AccessControlException
+import org.apache.hadoop.security.UserGroupInformation
+
+@SuppressWarnings("GroovyOctalInteger")
+@Slf4j
+@CompileStatic
+class FileUploader {
+  final Configuration conf
+  final UserGroupInformation user
+
+  FileUploader(Configuration conf, UserGroupInformation user) {
+    this.conf = conf
+    this.user = user
+  }
+
+  /**
+   * Copy if the file is considered out of date
+   * @param src local source file
+   * @param destPath destination path in the cluster filesystem
+   * @param force copy even when the destination appears up to date
+   * @return true iff the file was copied
+   */
+  public boolean copyIfOutOfDate(File src, Path destPath, boolean force) {
+    if (!src.exists()) {
+      throw new FileNotFoundException("Source file $src not found")
+    }
+    def srcLen = src.length()
+    def fs = getFileSystem(destPath)
+    boolean toCopy = force
+    if (!toCopy) {
+      try {
+        def status = fs.getFileStatus(destPath)
+        toCopy = status.len != srcLen
+      } catch (FileNotFoundException fnfe) {
+        toCopy = true;
+      }
+    }
+    if (toCopy) {
+      log.info("Copying $src to $destPath")
+      def dir = destPath.getParent()
+      try {
+        fs.delete(destPath, true)
+        fs.mkdirs(dir, FsPermission.dirDefault)
+        return FileUtil.copy(src, fs, destPath, false, conf)
+      } catch (AccessControlException ace) {
+        log.error("No write access to destination directory $dir" +
+                  "Ensure home directory exists and has correct permissions. $ace",
+                  ace)
+        throw ace
+      }
+    } else {
+      log.debug(
+          "Skipping copy as the destination $destPath considered up to date")
+      return false;
+    }
+  }
+
+  public HadoopFS getFileSystem(Path dest) {
+    getFileSystem(user, dest)
+  }
+
+  public HadoopFS getFileSystem() {
+    getFileSystem(user, HadoopFS.getDefaultUri(conf))
+  }
+
+
+  public def getFileSystem(
+      UserGroupInformation user, final Path path) {
+    return getFileSystem(user, path.toUri())
+
+  }
+
+  public def getFileSystem(
+      UserGroupInformation user, final URI uri) {
+
+    SudoClosure.sudo(user) {
+      HadoopFS.get(uri, conf);
+    }
+  }
+
+  public def getFileSystemAsUserName(String username) {
+
+    def user = UserGroupInformation.createRemoteUser(username)
+    getFileSystem(user, HadoopFS.getDefaultUri(conf))
+  }
+
+  /**
+   * Create the home dir. If it can't be created as the current user,
+   * retry as the privileged user 'hdfs' (then 'hadoop'), setting the
+   * owner and group back to the original user afterwards.
+   * @return the home dir
+   */
+  public def mkHomeDir() {
+    def fs = fileSystem
+    def home = fs.homeDirectory
+    if (!fs.exists(home)) {
+      try {
+        fs.mkdirs(home)
+      } catch (AccessControlException ace) {
+        log.info("Failed to mkdir $home as $user -impersonating 'hdfs")
+        if (UserGroupInformation.securityEnabled) {
+          // in a secure cluster, we cannot impersonate HDFS, so rethrow
+          throw ace;
+        }
+        //now create as hdfs
+        try {
+          attemptToCreateHomeDir("hdfs", home)
+        } catch (AccessControlException ace2) {
+
+          log.info("Failed to mkdir $home as $user -impersonating 'hadoop'")
+          attemptToCreateHomeDir("hadoop", home)
+
+        }
+      }
+    }
+    return home
+  }
+
+  public void attemptToCreateHomeDir(String username, Path home) {
+    def privilegedFS = getFileSystemAsUserName(username)
+    privilegedFS.mkdirs(home, new FsPermission((short) 00755))
+    privilegedFS.setOwner(home, user.userName, user.primaryGroupName)
+  }
+}
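+
+/* A minimal usage sketch (illustrative, not part of the patch; the paths are
+ * hypothetical): upload a local tarball only when the remote copy is missing
+ * or differs in length.
+ *
+ *   def uploader = new FileUploader(conf, UserGroupInformation.currentUser)
+ *   uploader.copyIfOutOfDate(
+ *       new File("target/slider-agent.tar.gz"),          // local source
+ *       new Path("/slider/agent/slider-agent.tar.gz"),   // cluster destination
+ *       false)                                           // force=false: skip if sizes match
+ */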
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FuntestProperties.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FuntestProperties.groovy
index 8cbc098..9b63c22 100644
--- a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FuntestProperties.groovy
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/FuntestProperties.groovy
@@ -57,8 +57,12 @@
   String ENV_SLIDER_CLASSPATH_EXTRA = "SLIDER_CLASSPATH_EXTRA"
 
   String SCRIPT_NAME = "slider"
-  static final String KEY_TEST_CONF_XML = "slider.test.conf.xml"
-  static final String KEY_TEST_CONF_DIR = "slider.test.conf.dir"
-  static final String BIN_SLIDER = "bin/slider"
-  static final String AGENT_SLIDER_GZ = "agent/slider-agent.tar.gz"
+  String KEY_TEST_CONF_XML = "slider.test.conf.xml"
+  String KEY_TEST_CONF_DIR = "slider.test.conf.dir"
+  String BIN_SLIDER = "bin/slider"
+  String AGENT_INI = "agent.ini"
+  String AGENT_INI_IN_SLIDER_TAR = "agent/conf/" + AGENT_INI
+
+  String AGENT_TAR_FILENAME = "slider-agent.tar.gz"
+  String AGENT_SLIDER_GZ_IN_SLIDER_TAR = "agent/" + AGENT_TAR_FILENAME
 }
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SliderShell.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SliderShell.groovy
index 068c330..804e791 100644
--- a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SliderShell.groovy
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SliderShell.groovy
@@ -18,14 +18,15 @@
 
 package org.apache.slider.funtest.framework
 
-import groovy.util.logging.Slf4j
 import org.apache.bigtop.itest.shell.Shell
 import org.apache.slider.core.exceptions.SliderException
 import org.apache.slider.common.tools.SliderUtils
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
 
-@Slf4j
 
 class SliderShell extends Shell {
+  private static final Logger log = LoggerFactory.getLogger(SliderShell.class);
 
 
   public static final String BASH = '/bin/bash -s'
@@ -86,10 +87,14 @@
    int signCorrectReturnCode() {
      ret = signCorrect(ret)
    }
-  
-  int execute(int expectedExitCode) {
+
+  /**
+   * Execute expecting a specific exit code
+   * @param expectedExitCode the expected exit code
+   */
+  void execute(int expectedExitCode) {
     execute()
-    return assertExitCode(expectedExitCode)
+    assertExitCode(expectedExitCode)
   }
   
   /**
@@ -98,14 +103,19 @@
    * @param commands
    * @return the shell
    */
-  public static SliderShell run(Collection<String> commands, int exitCode) {
+  public static SliderShell run(int exitCode, Collection<String> commands) {
     SliderShell shell = new SliderShell(commands)
     shell.execute(exitCode);
     return shell
   }
 
-  public static int signCorrect(int u) {
-    return (u << 24) >> 24;
+  /**
+   * Sign-correct a process exit code
+   * @param exitCode the incoming exit code
+   * @return the sign-corrected version
+   */
+  public static int signCorrect(int exitCode) {
+    return (exitCode << 24) >> 24;
   }
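+  /* Worked example (illustrative): shells report exit statuses as unsigned
+   * bytes, so a process exiting with -1 surfaces as 255; sign-correcting
+   * restores the signed value, since (255 << 24) >> 24 == -1. */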
   
   @Override
@@ -113,9 +123,13 @@
     return ret + " =>" + command
   }
 
-  public void dump() {
+  /**
+   * Dump the command, return code and outputs to the log.
+   * stdout is logged at info; stderr at error.
+   */
+  public void dumpOutput() {
     log.error(toString())
-    log.error("return code = $ret")
+    log.error("return code = ${signCorrectReturnCode()}")
     if (out.size() != 0) {
       log.info("\n<stdout>\n${out.join('\n')}\n</stdout>");
     }
@@ -123,34 +137,20 @@
       log.error("\n<stderr>\n${err.join('\n')}\n</stderr>");
     }
   }
-  /**
-   * Assert a shell exited with a given error code
-   * if not the output is printed and an assertion is raised
-   * @param shell shell
-   * @param errorCode expected error code
-   */
-  public int assertExitCode(int errorCode) {
-    return assertExitCode(this, errorCode)
-  }
   
   /**
-   * Assert a shell exited with a given error code
+   * Assert the shell exited with a given error code
    * if not the output is printed and an assertion is raised
-   * @param shell shell
    * @param errorCode expected error code
-   * @throws SliderException if the exit code is wrong (the value in the exception
-   * is the exit code received)
    */
-  public static int assertExitCode(SliderShell shell, int errorCode) throws
-      SliderException {
-    assert shell != null
-    if (shell.ret != errorCode) {
-      shell.dump()
-      throw new SliderException(shell.ret,
-          "Expected exit code of command %s : %d - actual=%d",
-          shell.command,
-          errorCode, shell.ret)
+  public void assertExitCode(int errorCode) {
+    if (this.ret != errorCode) {
+      dumpOutput()
+      throw new SliderException(ret,
+          "Expected exit code of command ${command} : ${errorCode} - actual=${ret}")
     }
-    return errorCode
   }
-}
+}
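+
+/* A minimal usage sketch (illustrative; the command list is hypothetical):
+ *
+ *   def shell = SliderShell.run(0, ["slider version"])  // expects exit code 0
+ *   shell.dumpOutput()  // logs command, sign-corrected return code, stdout/stderr
+ */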
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SudoClosure.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SudoClosure.groovy
new file mode 100644
index 0000000..363e1b3
--- /dev/null
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/SudoClosure.groovy
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.framework
+
+import org.apache.hadoop.security.UserGroupInformation
+
+import java.security.PrivilegedExceptionAction
+
+/**
+ * Bridge from Groovy closures to UserGroupInformation.doAs()
+ * @param <T> the return type of the wrapped closure
+ */
+class SudoClosure<T> implements PrivilegedExceptionAction<T> {
+  
+  final Closure<T> closure;
+
+  SudoClosure(Closure<T> closure) {
+    this.closure = closure
+  }
+
+  @Override
+  T run() throws Exception {
+    return closure()
+  }
+
+  /**
+   * Execute a closure as the given user
+   * @param user the user to run the closure as
+   * @param closure the closure to execute
+   * @return the result of the closure
+   */
+  public static <T2> T2 sudo(UserGroupInformation user,
+      Closure<T2> closure) {
+    
+    user.doAs(new SudoClosure<T2>(closure))
+    
+  }
+}
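+
+/* A minimal usage sketch (illustrative; the username and configuration are
+ * assumptions): run a filesystem operation as another user.
+ *
+ *   def ugi = UserGroupInformation.createRemoteUser("hdfs")
+ *   def fs = SudoClosure.sudo(ugi) {
+ *     FileSystem.get(conf)   // executes inside ugi.doAs()
+ *   }
+ */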
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-funtest/src/main/java/org/apache/slider/funtest/accumulo/StubToForceGroovySrcToCompile.java
similarity index 85%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-funtest/src/main/java/org/apache/slider/funtest/accumulo/StubToForceGroovySrcToCompile.java
index 7af463d..eefccbb 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-funtest/src/main/java/org/apache/slider/funtest/accumulo/StubToForceGroovySrcToCompile.java
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.funtest.accumulo;
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+class StubToForceGroovySrcToCompile {
 }
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/basic/TestClusterConnectivity.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/basic/TestClusterConnectivity.groovy
new file mode 100644
index 0000000..b9d768a
--- /dev/null
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/basic/TestClusterConnectivity.groovy
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.basic
+
+import groovy.util.logging.Slf4j
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.net.NetUtils
+import org.apache.hadoop.yarn.conf.YarnConfiguration
+import org.apache.slider.client.SliderYarnClientImpl
+import org.apache.slider.common.SliderXmlConfKeys
+import org.apache.slider.core.zk.ZookeeperUtils
+import org.apache.slider.funtest.framework.CommandTestBase
+import org.junit.BeforeClass
+import org.junit.Test
+
+@Slf4j
+/**
+ * Test basic connectivity with the target cluster, including 
+ * HDFS, YARN and ZK
+ */
+class TestClusterConnectivity extends CommandTestBase {
+
+
+  public static final int CONNECT_TIMEOUT = 2000
+
+  @BeforeClass
+  public static void setup() {
+    assumeFunctionalTestsEnabled()
+  }
+  
+  @Test
+  public void testFileSystemUp() throws Throwable {
+
+    def fs = clusterFS
+    def status = fs.listStatus(new Path("/"))
+    status.each {
+      log.info("${it.path} = ${it}")
+    }
+    
+  }
+
+  @Test
+  public void testZKBinding() throws Throwable {
+    def quorum = SLIDER_CONFIG.getTrimmed(SliderXmlConfKeys.REGISTRY_ZK_QUORUM)
+    assert quorum
+    def tuples = ZookeeperUtils.splitToHostsAndPortsStrictly(quorum);
+    tuples.each {
+      telnet(it.hostText, it.port)
+    }
+    
+  }
+
+  @Test
+  public void testRMTelnet() throws Throwable {
+    def rmAddr = SLIDER_CONFIG.getSocketAddr(YarnConfiguration.RM_ADDRESS, "", 0)
+    telnet(rmAddr.hostName, rmAddr.port)
+  }
+  
+  @Test
+  public void testRMBinding() throws Throwable {
+    testRMTelnet()
+    SliderYarnClientImpl yarnClient = new SliderYarnClientImpl()
+    try {
+      SLIDER_CONFIG.setInt("ipc.client.connect.retry.interval",100)
+      SLIDER_CONFIG.setInt(
+          YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,5000)
+      SLIDER_CONFIG.setInt(
+          YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,50)
+      
+      yarnClient.init(SLIDER_CONFIG)
+      yarnClient.start();
+      def instances = yarnClient.listInstances("")
+      instances.each {it -> log.info("Instance $it.applicationId")}
+    } finally {
+      yarnClient.stop()
+    }
+  }
+  
+  def telnet(String host, int port) {
+    assert host != ""
+    assert port != 0
+    try {
+      def socket = new Socket();
+      def addr = new InetSocketAddress(host, port)
+      socket.connect(addr, CONNECT_TIMEOUT)
+      socket.close()
+    } catch (IOException e) {
+      throw NetUtils.wrapException(host, port, "localhost", 0, e)
+    }
+
+  }
+  
+}
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AgentCommandTestBase.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AgentCommandTestBase.groovy
deleted file mode 100644
index daf091c..0000000
--- a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AgentCommandTestBase.groovy
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.funtest.lifecycle
-
-import groovy.util.logging.Slf4j
-import org.apache.hadoop.fs.Path
-import org.apache.slider.common.SliderExitCodes
-import org.apache.slider.common.params.Arguments
-import org.apache.slider.common.params.SliderActions
-import org.apache.slider.funtest.framework.CommandTestBase
-import org.apache.slider.funtest.framework.FuntestProperties
-import org.apache.slider.funtest.framework.SliderShell
-import org.junit.Before
-import org.junit.BeforeClass
-
-
-@Slf4j
-class AgentCommandTestBase extends CommandTestBase
-    implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
-
-  public static final boolean AGENTTESTS_ENABLED
-  
-  protected static String APP_RESOURCE = "../slider-core/src/test/app_packages/test_command_log/resources.json"
-  protected static String APP_TEMPLATE = "../slider-core/src/test/app_packages/test_command_log/appConfig.json"
-  protected static String APP_PKG = "../slider-core/src/test/app_packages/test_command_log/cmd_log_app_pkg.zip"
-  protected static String AGENT_CONF = "../slider-agent/conf/agent.ini"
-  protected static final File LOCAL_SLIDER_AGENT_TARGZ
-  protected static final File LOCAL_APP_PKZ
-  protected static final File LOCAL_AGENT_CONF
-
-  protected static Path agentTarballPath;
-  protected static Path appPkgPath;
-  protected static Path agtIniPath;
-
-  static {
-    AGENTTESTS_ENABLED = SLIDER_CONFIG.getBoolean(KEY_TEST_AGENT_ENABLED, false)
-    LOCAL_SLIDER_AGENT_TARGZ = new File(
-        SLIDER_BIN_DIRECTORY,
-        AGENT_SLIDER_GZ).canonicalFile
-    LOCAL_APP_PKZ = new File(APP_PKG).canonicalFile
-    LOCAL_AGENT_CONF = new File(AGENT_CONF).canonicalFile
-  }
-
-  @BeforeClass
-  public static void setupAgent() {
-    assumeAgentTestsEnabled()
-
-    // Upload the agent tarball
-    assume(LOCAL_SLIDER_AGENT_TARGZ.exists(), "Slider agent not found at $LOCAL_SLIDER_AGENT_TARGZ")
-    agentTarballPath = new Path(clusterFS.homeDirectory, "/slider/agent/slider-agent.tar.gz")
-    Path localTarball = new Path(LOCAL_SLIDER_AGENT_TARGZ.toURI());
-    clusterFS.copyFromLocalFile(false, true, localTarball, agentTarballPath)
-
-    // Upload the app pkg
-    assume(LOCAL_APP_PKZ.exists(), "App pkg not found at $LOCAL_APP_PKZ")
-    appPkgPath = new Path(clusterFS.homeDirectory, "/slider/cmd_log_app_pkg.zip")
-    Path localAppPkg = new Path(LOCAL_APP_PKZ.toURI());
-    clusterFS.copyFromLocalFile(false, true, localAppPkg, appPkgPath)
-
-    // Upload the agent.ini
-    assume(LOCAL_AGENT_CONF.exists(), "Agent config not found at $LOCAL_AGENT_CONF")
-    agtIniPath = new Path(clusterFS.homeDirectory, "/slider/agent/conf/agent.ini")
-    Path localAgtIni = new Path(LOCAL_AGENT_CONF.toURI());
-    clusterFS.copyFromLocalFile(false, true, localAgtIni, agtIniPath)
-  }
-
-  public static void assumeAgentTestsEnabled() {
-    assumeFunctionalTestsEnabled()
-    assume(AGENTTESTS_ENABLED, "Agent tests disabled")
-  }
-
-  public static void logShell(SliderShell shell) {
-    for (String str in shell.out) {
-      log.info str
-    }
-  }
-
-  public static void assertComponentCount(String component, int count, SliderShell shell) {
-    log.info("Asserting component count.")
-    String entry = findLineEntry(shell, ["instances", component] as String[])
-    log.info(entry)
-    assert entry != null
-    int instanceCount = 0
-    int index = entry.indexOf("container_")
-    while (index != -1) {
-      instanceCount++;
-      index = entry.indexOf("container_", index + 1)
-    }
-
-    assert instanceCount == count, 'Instance count for component did not match expected. Parsed: ' + entry
-  }
-
-  public static String findLineEntry(SliderShell shell, String[] locators) {
-    int index = 0;
-    for (String str in shell.out) {
-      if (str.contains("\"" + locators[index] + "\"")) {
-        if (locators.size() == index + 1) {
-          return str;
-        } else {
-          index++;
-        }
-      }
-    }
-
-    return null;
-  }
-
-  public static boolean isAppRunning(String text, SliderShell shell) {
-    boolean exists = false
-    for (String str in shell.out) {
-      if (str.contains(text)) {
-        exists = true
-      }
-    }
-
-    return exists
-  }
-}
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentClusterLifecycle.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentClusterLifecycle.groovy
index 7769ce1..0d643ca 100644
--- a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentClusterLifecycle.groovy
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentClusterLifecycle.groovy
@@ -27,7 +27,9 @@
 import org.apache.slider.common.SliderXmlConfKeys
 import org.apache.slider.common.params.Arguments
 import org.apache.slider.common.params.SliderActions
+import org.apache.slider.funtest.framework.AgentCommandTestBase
 import org.apache.slider.funtest.framework.FuntestProperties
+import org.apache.slider.funtest.framework.SliderShell
 import org.junit.After
 import org.junit.Before
 import org.junit.Test
@@ -35,20 +37,23 @@
 @CompileStatic
 @Slf4j
 public class TestAgentClusterLifecycle extends AgentCommandTestBase
-    implements FuntestProperties, Arguments, SliderExitCodes {
+  implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
 
 
   static String CLUSTER = "test_agent_cluster_lifecycle"
 
+  static String APP_RESOURCE2 = "../slider-core/src/test/app_packages/test_command_log/resources_no_role.json"
+
 
   @Before
   public void prepareCluster() {
     setupCluster(CLUSTER)
+    describe("Create a 0-role cluster, so testing AM start/stop")
   }
 
   @After
   public void destroyCluster() {
-    teardown(CLUSTER)
+    cleanup(CLUSTER)
   }
 
   @Test
@@ -60,26 +65,27 @@
     def clusterpath = buildClusterPath(CLUSTER)
     assert !clusterFS.exists(clusterpath)
 
-/*
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_CREATE, CLUSTER,
+            ARG_IMAGE, agentTarballPath.toString(),
+            ARG_TEMPLATE, APP_TEMPLATE,
+            ARG_RESOURCES, APP_RESOURCE2
+        ])
 
-    Map<String, Integer> roleMap = createHBaseCluster(CLUSTER,
-                                         0,
-                                         0,
-                                         [],
-                                         [:])
-    
-*/
+    logShell(shell)
+
+    ensureApplicationIsUp(CLUSTER)
 
     //at this point the cluster should exist.
-    assertPathExists(clusterFS,"Cluster parent directory does not exist", clusterpath.parent)
-    
-    assertPathExists(clusterFS,"Cluster directory does not exist", clusterpath)
+    assertPathExists(clusterFS, "Cluster parent directory does not exist", clusterpath.parent)
+
+    assertPathExists(clusterFS, "Cluster directory does not exist", clusterpath)
 
     // assert it exists on the command line
     exists(0, CLUSTER)
 
     //destroy will fail in use
-
     destroy(EXIT_APPLICATION_IN_USE, CLUSTER)
 
     //thaw will fail as cluster is in use
@@ -98,10 +104,10 @@
     File jsonStatus = File.createTempFile("tempfile", ".json")
     try {
       slider(0,
-           [
-               SliderActions.ACTION_STATUS, CLUSTER,
-               ARG_OUTPUT, jsonStatus.canonicalPath
-           ])
+          [
+              SliderActions.ACTION_STATUS, CLUSTER,
+              ARG_OUTPUT, jsonStatus.canonicalPath
+          ])
 
       assert jsonStatus.exists()
       ClusterDescription cd = ClusterDescription.fromFile(jsonStatus)
@@ -122,22 +128,23 @@
       //freeze
       freeze(CLUSTER, [
           ARG_WAIT, Integer.toString(FREEZE_WAIT_TIME),
-          ARG_MESSAGE, "freeze-in-test cluster lifecycle"
+          ARG_MESSAGE, "freeze-in-test-cluster-lifecycle"
       ])
+      describe " >>> Cluster is now frozen."
 
       //cluster exists if you don't want it to be live
       exists(0, CLUSTER, false)
-      // condition returns false if it is required to be live
+      //condition returns false if it is required to be live
       exists(EXIT_FALSE, CLUSTER, true)
 
-
-      // thaw then freeze the cluster
-
+      //thaw then freeze the cluster
       thaw(CLUSTER,
-           [
-               ARG_WAIT, Integer.toString(THAW_WAIT_TIME),
-           ])
+          [
+              ARG_WAIT, Integer.toString(THAW_WAIT_TIME),
+          ])
       exists(0, CLUSTER)
+      describe " >>> Cluster is now thawed."
+
       freeze(CLUSTER,
           [
               ARG_FORCE,
@@ -145,23 +152,28 @@
               ARG_MESSAGE, "forced-freeze-in-test"
           ])
 
+      describe " >>> Cluster is now frozen - 2nd time."
+
       //cluster is no longer live
       exists(0, CLUSTER, false)
-      
-      // condition returns false if it is required to be live
+
+      //condition returns false if it is required to be live
       exists(EXIT_FALSE, CLUSTER, true)
 
-      // thaw with a restart count set to enable restart
-
+      //thaw with a restart count set to enable restart
       describe "the kill/restart phase may fail if yarn.resourcemanager.am.max-attempts is too low"
       thaw(CLUSTER,
-           [
-               ARG_WAIT, Integer.toString(THAW_WAIT_TIME),
-               ARG_DEFINE, SliderXmlConfKeys.KEY_AM_RESTART_LIMIT + "=3"
-           ])
+          [
+              ARG_WAIT, Integer.toString(THAW_WAIT_TIME),
+              ARG_DEFINE, SliderXmlConfKeys.KEY_AM_RESTART_LIMIT + "=3"
+          ])
+
+      describe " >>> Cluster is now thawed - 2nd time."
 
       ClusterDescription status = killAmAndWaitForRestart(sliderClient, CLUSTER)
 
+      describe " >>> Kill AM and wait for restart."
+
       def restarted = status.getInfo(
           StatusKeys.INFO_CONTAINERS_AM_RESTART)
       assert restarted != null
@@ -176,9 +188,5 @@
     } finally {
       jsonStatus.delete()
     }
-
-
   }
-
-
 }
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures.groovy
new file mode 100644
index 0000000..ea58d5f
--- /dev/null
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures.groovy
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.lifecycle
+
+import groovy.transform.CompileStatic
+import groovy.util.logging.Slf4j
+import org.apache.slider.common.SliderExitCodes
+import org.apache.slider.common.params.Arguments
+import org.apache.slider.common.params.SliderActions
+import org.apache.slider.funtest.framework.AgentCommandTestBase
+import org.apache.slider.funtest.framework.FuntestProperties
+import org.apache.slider.funtest.framework.SliderShell
+import org.junit.After
+import org.junit.Test
+
+@CompileStatic
+@Slf4j
+public class TestAgentFailures extends AgentCommandTestBase
+implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
+
+  private static String COMMAND_LOGGER = "COMMAND_LOGGER"
+  private static String APPLICATION_NAME = "one-container-fail-register"
+  private static String APP_TEMPLATE2 =
+    "../slider-core/src/test/app_packages/test_command_log/appConfig_fast_no_reg.json"
+
+
+  @After
+  public void destroyCluster() {
+    cleanup(APPLICATION_NAME)
+  }
+
+  @Test
+  public void testAgentFailRegistrationOnce() throws Throwable {
+    if (!AGENTTESTS_ENABLED) {
+      log.info "TESTS are not run."
+      return
+    }
+
+    cleanup(APPLICATION_NAME)
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_CREATE, APPLICATION_NAME,
+            ARG_IMAGE, agentTarballPath.toString(),
+            ARG_TEMPLATE, APP_TEMPLATE2,
+            ARG_RESOURCES, APP_RESOURCE
+        ])
+
+    logShell(shell)
+
+    ensureApplicationIsUp(APPLICATION_NAME)
+
+    repeatUntilTrue(this.&hasContainerCountExceeded, 15, 1000 * 10, ['arg1': '2']);
+
+    sleep(1000 * 20)
+
+    shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_STATUS,
+            APPLICATION_NAME])
+
+    assertComponentCount(COMMAND_LOGGER, 1, shell)
+    String requested = findLineEntryValue(shell, ["statistics", COMMAND_LOGGER, "containers.requested"] as String[])
+    assert requested != null && requested.isInteger() && requested.toInteger() >= 2,
+        'At least 2 containers must be requested'
+
+    assert isApplicationInState("RUNNING", APPLICATION_NAME), 'App is not running.'
+
+    assertSuccess(shell)
+  }
+
+
+  boolean hasContainerCountExceeded(Map<String, String> args) {
+    int expectedCount = args['arg1'].toInteger();
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_STATUS,
+            APPLICATION_NAME])
+
+    //logShell(shell)
+    String requested = findLineEntryValue(
+        shell, ["statistics", COMMAND_LOGGER, "containers.requested"] as String[])
+    if (requested != null && requested.isInteger() && requested.toInteger() >= expectedCount) {
+      return true
+    }
+
+    return false
+  }
+}
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures2.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures2.groovy
new file mode 100644
index 0000000..7804042
--- /dev/null
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAgentFailures2.groovy
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.funtest.lifecycle
+
+import groovy.transform.CompileStatic
+import groovy.util.logging.Slf4j
+import org.apache.slider.common.SliderExitCodes
+import org.apache.slider.common.params.Arguments
+import org.apache.slider.common.params.SliderActions
+import org.apache.slider.funtest.framework.AgentCommandTestBase
+import org.apache.slider.funtest.framework.FuntestProperties
+import org.apache.slider.funtest.framework.SliderShell
+import org.junit.After
+import org.junit.Test
+
+@CompileStatic
+@Slf4j
+public class TestAgentFailures2 extends AgentCommandTestBase
+implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
+
+  private static String COMMAND_LOGGER = "COMMAND_LOGGER"
+  private static String APPLICATION_NAME = "two-container-fail-heartbeat"
+  private static String APP_TEMPLATE3 =
+    "../slider-core/src/test/app_packages/test_command_log/appConfig_no_hb.json"
+
+
+  @After
+  public void destroyCluster() {
+    cleanup(APPLICATION_NAME)
+  }
+
+  @Test
+  public void testAgentFailHeartbeatingTwice() throws Throwable {
+    if (!AGENTTESTS_ENABLED) {
+      log.info "TESTS are not run."
+      return
+    }
+
+    cleanup(APPLICATION_NAME)
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_CREATE, APPLICATION_NAME,
+            ARG_IMAGE, agentTarballPath.toString(),
+            ARG_TEMPLATE, APP_TEMPLATE3,
+            ARG_RESOURCES, APP_RESOURCE
+        ])
+
+    logShell(shell)
+
+    ensureApplicationIsUp(APPLICATION_NAME)
+
+    repeatUntilTrue(this.&hasContainerCountExceeded, 20, 1000 * 10, ['arg1': '3']);
+
+    sleep(1000 * 20)
+
+    shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_STATUS,
+            APPLICATION_NAME])
+
+    assertComponentCount(COMMAND_LOGGER, 1, shell)
+    String requested = findLineEntryValue(shell, ["statistics", COMMAND_LOGGER, "containers.requested"] as String[])
+    assert requested != null && requested.isInteger() && requested.toInteger() >= 3,
+        'At least 3 containers must be requested'
+
+    assert isApplicationInState("RUNNING", APPLICATION_NAME), 'App is not running.'
+
+    assertSuccess(shell)
+  }
+
+
+  boolean hasContainerCountExceeded(Map<String, String> args) {
+    int expectedCount = args['arg1'].toInteger();
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_STATUS,
+            APPLICATION_NAME])
+
+    //logShell(shell)
+    String requested = findLineEntryValue(
+        shell, ["statistics", COMMAND_LOGGER, "containers.requested"] as String[])
+    if (requested != null && requested.isInteger() && requested.toInteger() >= expectedCount) {
+      return true
+    }
+
+    return false
+  }
+}
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAppsThroughAgent.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAppsThroughAgent.groovy
index c56056c..6b0f678 100644
--- a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAppsThroughAgent.groovy
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestAppsThroughAgent.groovy
@@ -23,115 +23,62 @@
 import org.apache.slider.common.SliderExitCodes
 import org.apache.slider.common.params.Arguments
 import org.apache.slider.common.params.SliderActions
+import org.apache.slider.funtest.framework.AgentCommandTestBase
 import org.apache.slider.funtest.framework.FuntestProperties
 import org.apache.slider.funtest.framework.SliderShell
+import org.junit.After
 import org.junit.Test
 
 @CompileStatic
 @Slf4j
 public class TestAppsThroughAgent extends AgentCommandTestBase
-    implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
+implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
 
   private static String COMMAND_LOGGER = "COMMAND_LOGGER"
-  private static String APPLICATION_NAME = "agenttst"
+  private static String APPLICATION_NAME = "happy-path-with-flex"
 
-  @Test
-  public void testUsage() throws Throwable {
-    SliderShell shell = slider(EXIT_SUCCESS, [ACTION_USAGE])
-    assertSuccess(shell)
+  @After
+  public void destroyCluster() {
+    cleanup(APPLICATION_NAME)
   }
 
   @Test
   public void testCreateFlex() throws Throwable {
-    if (!AGENTTESTS_ENABLED) {
-      log.info "TESTS are not run."
-      return
-    }
+    assumeAgentTestsEnabled()
 
-    cleanup()
-    try {
-      SliderShell shell = slider(EXIT_SUCCESS,
-          [
-          ACTION_CREATE, APPLICATION_NAME,
-          ARG_IMAGE, agentTarballPath.toString(),
-          ARG_TEMPLATE, APP_TEMPLATE,
-          ARG_RESOURCES, APP_RESOURCE
-      ])
+    cleanup(APPLICATION_NAME)
+    SliderShell shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_CREATE, APPLICATION_NAME,
+            ARG_IMAGE, agentTarballPath.toString(),
+            ARG_TEMPLATE, APP_TEMPLATE,
+            ARG_RESOURCES, APP_RESOURCE
+        ])
 
-      logShell(shell)
+    logShell(shell)
 
-      int attemptCount = 0
-      while (attemptCount < 10) {
-        shell = slider(EXIT_SUCCESS, [
-            ACTION_LIST,
+    ensureApplicationIsUp(APPLICATION_NAME)
+
+    //flex
+    slider(EXIT_SUCCESS,
+        [
+            ACTION_FLEX,
+            APPLICATION_NAME,
+            ARG_COMPONENT,
+            COMMAND_LOGGER,
+            "2"])
+
+    // sleep till the new instance starts
+    sleep(1000 * 10)
+
+    shell = slider(EXIT_SUCCESS,
+        [
+            ACTION_STATUS,
             APPLICATION_NAME])
 
-        if (isAppRunning("RUNNING", shell)) {
-          break
-        }
+    assertComponentCount(COMMAND_LOGGER, 2, shell)
 
-        attemptCount++
-        assert attemptCount != 10, 'Application did not start, aborting test.'
-
-        sleep(1000 * 5)
-      }
-
-      //flex
-      slider(EXIT_SUCCESS,
-          [
-          ACTION_FLEX,
-          APPLICATION_NAME,
-          ARG_COMPONENT,
-          COMMAND_LOGGER,
-          "2"])
-
-      // sleep till the new instance starts
-      sleep(1000 * 10)
-
-      shell = slider(EXIT_SUCCESS,
-          [
-          ACTION_STATUS,
-          APPLICATION_NAME])
-
-      assertComponentCount(COMMAND_LOGGER, 2, shell)
-
-      shell = slider(EXIT_SUCCESS,
-          [
-          ACTION_LIST,
-          APPLICATION_NAME])
-
-      assert isAppRunning("RUNNING", shell), 'App is not running.'
-
-      assertSuccess(shell)
-    } finally {
-      cleanup()
-    }
+    assertSuccess(shell)
+    assert isApplicationInState("RUNNING", APPLICATION_NAME), 'App is not running.'
   }
-
-
-  public void cleanup() throws Throwable {
-    log.info "Cleaning app instance, if exists, by name " + APPLICATION_NAME
-    SliderShell shell = slider([
-        ACTION_FREEZE,
-        APPLICATION_NAME])
-
-    if (shell.ret != 0 && shell.ret != EXIT_UNKNOWN_INSTANCE) {
-      logShell(shell)
-      assert fail("Old cluster either should not exist or should get frozen.")
-    }
-
-    // sleep till the instance is frozen
-    sleep(1000 * 5)
-
-    shell = slider([
-        ACTION_DESTROY,
-        APPLICATION_NAME])
-
-    if (shell.ret != 0 && shell.ret != EXIT_UNKNOWN_INSTANCE) {
-      logShell(shell)
-      assert fail("Old cluster either should not exist or should get destroyed.")
-    }
-  }
-
-
 }
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestClusterBuildDestroy.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestClusterBuildDestroy.groovy
index 0c3924f..ead1601 100644
--- a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestClusterBuildDestroy.groovy
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/TestClusterBuildDestroy.groovy
@@ -26,6 +26,7 @@
 import org.apache.slider.common.SliderXmlConfKeys
 import org.apache.slider.common.params.Arguments
 import org.apache.slider.common.params.SliderActions
+import org.apache.slider.funtest.framework.AgentCommandTestBase
 import org.apache.slider.funtest.framework.FuntestProperties
 import org.junit.AfterClass
 import org.junit.BeforeClass
@@ -42,7 +43,7 @@
 
   @BeforeClass
   public static void prepareCluster() {
-    assumeFunctionalTestsEnabled();
     setupCluster(CLUSTER)
   }
 
@@ -60,8 +61,11 @@
         [
             ACTION_BUILD,
             CLUSTER,
+            ARG_IMAGE, agentTarballPath.toString(),
             ARG_ZKHOSTS,
             SLIDER_CONFIG.get(SliderXmlConfKeys.REGISTRY_ZK_QUORUM, DEFAULT_SLIDER_ZK_HOSTS),
+            ARG_TEMPLATE, APP_TEMPLATE,
+            ARG_RESOURCES, APP_RESOURCE
         ])
 
 
diff --git a/slider-install/README.md b/slider-install/README.md
new file mode 100644
index 0000000..a4b7b08
--- /dev/null
+++ b/slider-install/README.md
@@ -0,0 +1,102 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+slider_setup
+============
+
+Tools for installing, starting, and destroying HBase, Accumulo, and Storm slider apps on YARN.
+
+**WARNING: This is intended for POC/sandbox testing and may not be idempotent, so DO NOT use it on an existing Production cluster!!!**
+
+Setup
+-----
+1. Clone the repo
+2. Set the necessary cluster variables in `slider_setup.conf`; it shouldn't be necessary to change any values other than the ones below
+
+
+    # Zookeeper nodes
+    ZK_QUORUM="zk1:2181,zk2:2181,zk3:2181"
+    
+    # Resource Manager address (yarn.resourcemanager.address)
+    RM_ADDRESS="rm1:8050"
+    
+    # Resource Manager scheduler address (yarn.resourcemanager.scheduler.address)
+    RM_SCHED_ADDRESS="rm1:8030"
+    
+    # Default FS (fs.defaultFS)
+    DEFAULT_FS="hdfs://nn1:8020"
+
+Running
+-------
+* slider_setup is the main script and handles the following
+  1. Pulls down slider and extracts the contents to the SLIDER_INST_DIR
+  2. Modifies slider-client.xml with cluster related info
+  3. Pulls down the slider enabled version of the specified product
+  4. Creates necessary directories and copies required files to HDFS
+  5. For HBase, creates the app dir in HDFS
+  6. Submits the slider base application to the YARN cluster
+
+* The following args are required
+  * `-f` - The path to the `slider_setup.conf` that has been modified with cluster info
+  * `-p` - The product to run (hbase, accumulo, and storm are all that are supported at this time)
+  * `-w` - The number of "worker" nodes. This has a different meaning depending on the product:
+    * HBase - number of region servers
+    * Accumulo - number of tablet servers
+    * Storm - number of supervisors
+  * `-n` - The name of the app; this will be the display name in the resource manager and is used by the teardown process
+
+* HBase Example:
+
+
+    ./slider_setup -f slider_setup.conf -p hbase -w 5 -n hbase-slider
+
+* Accumulo Example:
+
+
+    ./slider_setup -f slider_setup.conf -p accumulo -w 3 -n accumulo-slider
+
+* Storm Example:
+
+
+    ./slider_setup -f slider_setup.conf -p storm -w 3 -n storm-slider
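+
+Verify
+------
+
+* A quick way to confirm an app came up (a sketch, assuming the install dir from `slider_setup.conf` and an app name from the examples above):
+
+
+    sudo -u yarn $SLIDER_INST_DIR/bin/slider list
+    sudo -u yarn $SLIDER_INST_DIR/bin/slider status hbase-slider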
+
+Tear Down
+---------
+
+* slider_destroy will do the following
+  1. Freeze the slider application based on the provided name
+  2. Destroy the slider application based on the provided name
+
+* The following args are required
+  * `-f` - The path to the `slider_setup.conf` that has been modified with cluster info
+  * `-n` - The name of the app, this was provided to the slider_setup tool
+
+* HBase Example:
+
+
+    ./slider_destroy -f slider_setup.conf -n hbase-slider
+
+* Accumulo Example:
+
+
+    ./slider_destroy -f slider_setup.conf -n accumulo-slider
+
+* Storm Example:
+
+
+    ./slider_destroy -f slider_setup.conf -n storm-slider
diff --git a/slider-install/pom.xml b/slider-install/pom.xml
new file mode 100644
index 0000000..b08895f
--- /dev/null
+++ b/slider-install/pom.xml
@@ -0,0 +1,111 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>slider-install</artifactId>
+  <name>Slider Install</name>
+  <packaging>pom</packaging>
+  <description>
+    
+    This is downstream of the slider assembly and the app packages, so
+    that anything from these can be installed in the RPM.
+  </description>
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.40</version>
+  </parent>
+
+  <properties>
+    <basedir>/usr/local/slider</basedir>
+    <confdir>${basedir}/conf</confdir>
+    <bindir>${basedir}/bin</bindir>
+  </properties>
+  
+  <build>
+    <plugins>
+      <!--read in a build.properties file if defined-->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>properties-maven-plugin</artifactId>
+        <version>${maven.properties.version}</version>
+        <executions>
+          <execution>
+            <phase>initialize</phase>
+            <goals>
+              <goal>read-project-properties</goal>
+            </goals>
+            <configuration>
+              <quiet>true</quiet>
+              <files>
+                <file>build.properties</file>
+                <file>../build.properties</file>
+              </files>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+    </plugins>
+    
+    
+  </build>
+
+  <reporting>
+    <plugins>
+ 
+
+
+    </plugins>
+  </reporting>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+
+    <!--
+     needed to order the build and ensure the agent tar is found
+     the test scope ensures that it isn't copied into the lib dir
+     -->
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-agent</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+      <type>tar.gz</type>
+    </dependency>
+
+    <dependency>
+      <groupId>com.beust</groupId>
+      <artifactId>jcommander</artifactId>
+    </dependency>
+
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+    </dependency>
+ 
+  </dependencies>
+
+
+</project>
diff --git a/slider-install/src/main/bash/slider-client.xml b/slider-install/src/main/bash/slider-client.xml
new file mode 100644
index 0000000..f7060c1
--- /dev/null
+++ b/slider-install/src/main/bash/slider-client.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!--
+  Properties set here are picked up in the client.
+  They are not passed to the AM -though the filesystem
+  binding details (URL And principal) are added to the
+  hbase-site.xml file when a cluster is created.
+-->
+<configuration>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+  
+
+  <property>
+    <name>slider.yarn.queue</name>
+    <value>default</value>
+    <description>YARN queue for the Application Master</description>
+  </property>
+  
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>@@RM_ADDRESS@@</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>@@RM_SCHED_ADDRESS@@</value>
+  </property>
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value>@@DEFAULT_FS@@</value>
+  </property>
+
+  <property>
+     <name>yarn.application.classpath</name>
+     <value>@@YARN_CP@@</value>
+  </property>
+
+  <property>
+     <name>slider.zookeeper.quorum</name>
+     <value>@@ZK_QUORUM@@</value>
+  </property>
+
+<!--
+  <property>
+    <name>yarn.resourcemanager.principal</name>
+    <value>yarn/master@MINICLUSTER</value>
+  </property>
+
+  <property>
+    <name>slider.security.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>hdfs/master@MINICLUSTER</value>
+  </property>
+-->
+
+
+</configuration>
diff --git a/slider-install/src/main/bash/slider_destroy b/slider-install/src/main/bash/slider_destroy
new file mode 100755
index 0000000..9039751
--- /dev/null
+++ b/slider-install/src/main/bash/slider_destroy
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Base Vars
+#
+SCRIPT_NAME=`basename $0`
+SCRIPT_DIR=`cd $(dirname $0) && pwd`
+
+#
+# Functions
+#
+usage() { 
+    echo "Usage: $SCRIPT_NAME -f </path/to/config> -n <app name>"
+    exit 1
+}
+
+#
+# Parse cmd line args
+#
+while getopts "f:n:" opt; do
+    case "$opt" in
+        f) config=$OPTARG;;
+        n) app_name=$OPTARG;;
+        *) usage;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "$config" ] || [ -z "$app_name" ]; then
+    usage
+fi
+
+#
+# Source the config
+#
+source $config
+
+#
+# Main
+#
+echo -e "\n## Freezing app $app_name"
+sudo -u yarn $SLIDER_INST_DIR/bin/slider freeze $app_name --manager $RM_ADDRESS || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Destroying app $app_name"
+sudo -u yarn $SLIDER_INST_DIR/bin/slider destroy $app_name --manager $RM_ADDRESS || exit 1
+echo "SUCCESS"
+
+exit 0
diff --git a/slider-install/src/main/bash/slider_setup b/slider-install/src/main/bash/slider_setup
new file mode 100755
index 0000000..2c15c95
--- /dev/null
+++ b/slider-install/src/main/bash/slider_setup
@@ -0,0 +1,173 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Base Vars
+#
+SCRIPT_NAME=`basename $0`
+SCRIPT_DIR=`cd $(dirname $0) && pwd`
+
+#
+# Functions
+#
+usage() { 
+    echo "Usage: $SCRIPT_NAME -f </path/to/config> -p <product (hbase or storm)> -w <worker count> -n <app name>"
+    exit 1
+}
+
+#
+# Parse cmd line args
+#
+while getopts "f:p:w:n:" opt; do
+    case "$opt" in
+        f) config=$OPTARG;;
+        p) product=$OPTARG;;
+        w) worker_cnt=$OPTARG;;
+        n) app_name=$OPTARG;;
+        *) usage;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "$config" ] || [ -z "$product" ] || [ -z "$worker_cnt" ] || [ -z "$app_name" ]; then
+    usage
+fi
+
+# Only support known products for now
+if [ "$product" != "hbase" ] && [ "$product" != "storm" ] && [ "$product" != "accumulo" ]; then
+   echo "ERROR: Only HBase, Storm, and Accumulo are currently supported"
+   usage
+fi
+
+#
+# Source the config
+#
+source $config
+
+#
+# Product URLs
+#
+if [ $product = "hbase" ]; then
+    app_url=$HBASE_APP_URL
+elif [ $product = "storm" ]; then
+    app_url=$STORM_APP_URL
+elif [ $product = "accumulo" ]; then
+    app_url=$ACCUMULO_APP_URL
+fi
+
+#
+# Main
+#
+echo -e "\n## Creating slider install dir: $SLIDER_INST_DIR"
+mkdir -p $SLIDER_INST_DIR || exit 1
+chown yarn:hadoop $SLIDER_INST_DIR || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Downloading slider from: $SLIDER_URL"
+if [ -f /tmp/${SLIDER_VER}*tar ]; then
+   rm -f /tmp/${SLIDER_VER}*tar
+fi
+cd /tmp && wget $SLIDER_URL || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Extracting slider to $SLIDER_INST_DIR"
+tar -xf /tmp/${SLIDER_VER}*tar --strip-components=1 -C $SLIDER_INST_DIR || exit 1
+chown -R yarn:hadoop $SLIDER_INST_DIR || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Setting conf values"
+(cd $SCRIPT_DIR && cp slider-client.xml $SLIDER_INST_DIR/conf) || exit 1
+sed -i 's|@@RM_ADDRESS@@|'$RM_ADDRESS'|g' $SLIDER_INST_DIR/conf/slider-client.xml || exit 1
+sed -i 's|@@RM_SCHED_ADDRESS@@|'$RM_SCHED_ADDRESS'|g' $SLIDER_INST_DIR/conf/slider-client.xml || exit 1
+sed -i 's|@@DEFAULT_FS@@|'$DEFAULT_FS'|g' $SLIDER_INST_DIR/conf/slider-client.xml || exit 1
+sed -i 's|@@YARN_CP@@|'$YARN_CP'|g' $SLIDER_INST_DIR/conf/slider-client.xml || exit 1
+sed -i 's|@@ZK_QUORUM@@|'$ZK_QUORUM'|g' $SLIDER_INST_DIR/conf/slider-client.xml || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Setting PATH to include the JDK bin: $JDK_BIN"
+export PATH=$PATH:$JDK_BIN
+echo "SUCCESS"
+
+echo -e "\n## Checking version of Hadoop slider was compiled against"
+hadoop_compiled_ver=`$SLIDER_INST_DIR/bin/slider version | grep "Compiled against Hadoop" | awk '{print $NF}'`
+if [ "$hadoop_compiled_ver" != "2.4.0" ]; then
+   echo "ERROR: Compiled against Hadoop version $hadoop_compiled_ver instead of 2.4.0" && exit 1
+else
+   echo "Compiled against Hadoop version: $hadoop_compiled_ver"
+fi
+echo "SUCCESS"
+
+echo -e "\n## Setting up HDFS directories for slider"
+sudo -u hdfs hdfs dfs -mkdir -p /slider || exit 1
+sudo -u hdfs hdfs dfs -chown yarn:hdfs /slider || exit 1
+sudo -u hdfs hdfs dfs -mkdir -p /user/yarn || exit 1
+sudo -u hdfs hdfs dfs -chown yarn:hdfs /user/yarn || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Loading the Slider agent"
+sudo -u yarn hdfs dfs -mkdir -p /slider/agent/conf || exit 1
+sudo -u yarn hdfs dfs -copyFromLocal $SLIDER_INST_DIR/agent/* /slider/agent || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Downloading $product to $SLIDER_INST_DIR/apps/$product"
+sudo -u yarn mkdir -p $SLIDER_INST_DIR/apps/$product || exit 1
+(cd $SLIDER_INST_DIR/apps/$product && sudo -u yarn wget $app_url) || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Extracting $product in $SLIDER_INST_DIR/apps/$product"
+(cd $SLIDER_INST_DIR/apps/$product && sudo -u yarn unzip -o $product*zip) || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Adding $product to HDFS slider dir"
+sudo -u yarn hdfs dfs -copyFromLocal $SLIDER_INST_DIR/apps/$product/$product*zip /slider || exit 1
+echo "SUCCESS"
+
+echo -e "\n## Setting number of workers in $SLIDER_INST_DIR/apps/$product/resources.json"
+if [ $product = "hbase" ]; then
+   component="HBASE_REGIONSERVER"
+elif [ $product = "storm" ]; then
+   component="SUPERVISOR"
+elif [ $product = "accumulo" ]; then
+   component="ACCUMULO_TSERVER"
+fi
+# Update the resources.json file with worker_cnt
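+# (The heredoc delimiter below is unquoted, so $SLIDER_INST_DIR, $component and
+# $worker_cnt are expanded by the shell before python parses the script.)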
+python << END
+import json
+with open("$SLIDER_INST_DIR/apps/$product/resources.json", "r+") as f:
+    data = json.load(f)
+    data["components"]["$component"]["yarn.component.instances"] = "$worker_cnt"
+
+with open("$SLIDER_INST_DIR/apps/$product/resources.json", "w+") as f:
+    f.write(json.dumps(data, sort_keys = False, indent = 4))
+END
+echo "SUCCESS"
+
+# Handle HBase HDFS dir needs
+if [ $product = "hbase" ]; then
+    echo -e "\n## Creating hbase HDFS dir /apps/hbase"
+    sudo -u hdfs hdfs dfs -mkdir -p /apps/hbase || exit 1
+    sudo -u hdfs hdfs dfs -chown yarn:hdfs /apps/hbase || exit 1
+    echo "SUCCESS"
+fi
+
+echo -e "\n##Starting app $product with $worker_cnt workers via slider"
+sudo -u yarn $SLIDER_INST_DIR/bin/slider create $app_name \
+    --image $DEFAULT_FS/slider/agent/slider-agent.tar.gz \
+    --template $SLIDER_INST_DIR/apps/$product/appConfig.json \
+    --resources $SLIDER_INST_DIR/apps/$product/resources.json || exit 1
+echo "SUCCESS"
diff --git a/slider-install/src/main/bash/slider_setup.conf b/slider-install/src/main/bash/slider_setup.conf
new file mode 100644
index 0000000..fed3f4c
--- /dev/null
+++ b/slider-install/src/main/bash/slider_setup.conf
@@ -0,0 +1,60 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Change values below to match the cluster being deployed on
+#
+
+# Zookeeper nodes
+ZK_QUORUM="zk1:2181,zk2:2181,zk3:2181"
+
+# Resource Manager address (yarn.resourcemanager.address)
+RM_ADDRESS="rm1:8050"
+
+# Resource Manager scheduler address (yarn.resourcemanager.scheduler.address)
+RM_SCHED_ADDRESS="rm1:8030"
+
+# Default FS (fs.defaultFS)
+DEFAULT_FS="hdfs://nn1:8020"
+
+
+#
+# Should not be necessary to change anything below except on new version releases
+#
+
+# Slider version
+SLIDER_VER="slider-0.30"
+
+# Slider URL
+SLIDER_URL="http://public-repo-1.hortonworks.com/slider/0.30/slider-0.30-all.tar"
+
+# HBase Slider App URL
+HBASE_APP_URL="http://public-repo-1.hortonworks.com/slider/0.30/apps/hbase_v096.zip"
+
+# Storm Slider App URL
+STORM_APP_URL="http://public-repo-1.hortonworks.com/slider/0.30/apps/storm_v091.zip"
+
+# Accumulo Slider App URL
+ACCUMULO_APP_URL="http://public-repo-1.hortonworks.com/slider/0.30/apps/accumulo_v151.zip"
+
+# Where to install Slider
+SLIDER_INST_DIR="/usr/lib/slider"
+
+# JDK bin to add to PATH
+JDK_BIN="/usr/jdk64/jdk1.7.0_45/bin"
+
+# Yarn container classpath (yarn.application.classpath)
+YARN_CP="/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*"
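+
+# This file is expected to be sourced by slider_setup.sh before any of the
+# values above are used, e.g. (illustrative):
+#   . ./slider_setup.conf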
+
diff --git a/slider-providers/accumulo/accumulo-funtests/pom.xml b/slider-providers/accumulo/accumulo-funtests/pom.xml
index 3513526..1b25b33 100644
--- a/slider-providers/accumulo/accumulo-funtests/pom.xml
+++ b/slider-providers/accumulo/accumulo-funtests/pom.xml
@@ -18,7 +18,6 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>accumulo-funtests</artifactId>
-  <version>0.30</version>
   <name>Slider Accumulo Provider Functional Tests</name>
   <packaging>jar</packaging>
   <description>
@@ -28,7 +27,7 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
     <relativePath>../../../</relativePath>
   </parent>
 
@@ -97,7 +96,7 @@
           </forkedProcessTimeoutInSeconds>
           <threadCount>1</threadCount>
           <argLine>${test.argLine}</argLine>
-          <failIfNoTests>${test.failIfNoTests}</failIfNoTests>
+          <failIfNoTests>${test.funtests.failIfNoTests}</failIfNoTests>
 
           <trimStackTrace>false</trimStackTrace>
           <redirectTestOutputToFile>${build.redirect.test.output.to.file}</redirectTestOutputToFile>
@@ -184,14 +183,7 @@
     
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-     <scope>test</scope>
+      <artifactId>hadoop-client</artifactId>
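+      <!-- hadoop-client is an aggregate artifact that pulls in the
+           client-side Hadoop jars (hadoop-common, hadoop-hdfs and the
+           MapReduce client) as a single dependency -->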
     </dependency>
 
     <dependency>
@@ -214,12 +206,6 @@
 
     <dependency>
       <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-start</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-trace</artifactId>
       <scope>test</scope>
     </dependency>
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-providers/accumulo/accumulo-funtests/src/test/groovy/org/apache/slider/providers/accumulo/TestStub.groovy
similarity index 79%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-providers/accumulo/accumulo-funtests/src/test/groovy/org/apache/slider/providers/accumulo/TestStub.groovy
index 7af463d..3d9abb7 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-providers/accumulo/accumulo-funtests/src/test/groovy/org/apache/slider/providers/accumulo/TestStub.groovy
@@ -16,10 +16,17 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.providers.accumulo
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+import org.junit.Test
+
+/**
+ *  this is here to ensure there is always a test
+ */
+class TestStub {
+
+  @Test
+  public void testStubTest() throws Throwable {
+
+  }
 }
diff --git a/slider-providers/accumulo/slider-accumulo-provider/pom.xml b/slider-providers/accumulo/slider-accumulo-provider/pom.xml
index 14cbae6..cabea00 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/pom.xml
+++ b/slider-providers/accumulo/slider-accumulo-provider/pom.xml
@@ -18,7 +18,6 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-accumulo-provider</artifactId>
-  <version>0.30</version>
   <name>Slider Accumulo Provider</name>
   <packaging>jar</packaging>
   <description>
@@ -29,7 +28,7 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
     <relativePath>../../../</relativePath>
   </parent>
 
@@ -165,14 +164,7 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
+      <artifactId>hadoop-client</artifactId>
     </dependency>
 
     <dependency>
@@ -182,12 +174,6 @@
     </dependency>
 
     <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <scope>test</scope>
-    </dependency>
-    
-    <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
       <scope>test</scope>
@@ -201,12 +187,6 @@
 
     <dependency>
       <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-start</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-trace</artifactId>
       <scope>test</scope>
     </dependency>
@@ -217,6 +197,33 @@
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+
+
   </dependencies>
 
 
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/main/java/org/apache/slider/providers/accumulo/AccumuloProviderService.java b/slider-providers/accumulo/slider-accumulo-provider/src/main/java/org/apache/slider/providers/accumulo/AccumuloProviderService.java
index d35ed66..c511efb 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/main/java/org/apache/slider/providers/accumulo/AccumuloProviderService.java
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/main/java/org/apache/slider/providers/accumulo/AccumuloProviderService.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.slider.common.SliderKeys;
@@ -39,6 +40,8 @@
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCompleted;
+import org.apache.slider.providers.ProviderCompletedCallable;
 import org.apache.slider.providers.ProviderCore;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
@@ -46,9 +49,8 @@
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.core.zk.BlockingZKWatcher;
 import org.apache.slider.common.tools.ConfigHelper;
-import org.apache.slider.server.services.utility.EventCallback;
-import org.apache.slider.server.services.utility.EventNotifyingService;
-import org.apache.slider.server.services.utility.ForkedProcessService;
+import org.apache.slider.server.services.workflow.ForkedProcessService;
+import org.apache.slider.server.services.workflow.WorkflowCallbackService;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.slf4j.Logger;
@@ -264,9 +266,8 @@
   public boolean exec(AggregateConf instanceDefinition,
                       File confDir,
                       Map<String, String> env,
-                      EventCallback execInProgress) throws
-                                                 IOException,
-      SliderException {
+                      ProviderCompleted execInProgress)
+      throws IOException, SliderException {
 
 
     //now pull in these files and do a bit of last-minute validation
@@ -331,10 +332,17 @@
     
     //callback to AM to trigger cluster review is set up to happen after
     //the init/verify action has succeeded
-    EventNotifyingService notifier = new EventNotifyingService(execInProgress,
-           internalOperations.getGlobalOptions().getOptionInt(
-             OptionKeys.INTERNAL_CONTAINER_STARTUP_DELAY,
-             OptionKeys.DEFAULT_CONTAINER_STARTUP_DELAY));
+    int delay = internalOperations.getGlobalOptions().getOptionInt(
+        OptionKeys.INTERNAL_CONTAINER_STARTUP_DELAY,
+        OptionKeys.DEFAULT_CONTAINER_STARTUP_DELAY);
+    ProviderCompletedCallable completedCallable =
+        new ProviderCompletedCallable(execInProgress, null);
+    Service notifier = new WorkflowCallbackService<>(
+        "accumulo notifier",
+        completedCallable,
+        delay,
+        true);
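+    // WorkflowCallbackService schedules the wrapped callable to run 'delay'
+    // milliseconds after the service starts; the trailing flag presumably
+    // terminates the service once the callback has completed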
+    
     // register the service for lifecycle management; 
     // this service is started after the accumulo process completes
     addService(notifier);
@@ -402,9 +410,13 @@
     String monitorKey = "Active Accumulo Monitor: ";
     String monitorAddr = getInfoAvoidingNull(clusterDesc, AccumuloKeys.MONITOR_ADDRESS);
     if (!StringUtils.isBlank(monitorAddr)) {
+      try {
         HostAndPort hostPort = HostAndPort.fromString(monitorAddr);
         details.put(monitorKey,
             String.format("http://%s:%d", hostPort.getHostText(), hostPort.getPort()));
+      } catch (Exception e) {
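+        // a malformed monitor address is reported as N/A rather than
+        // failing the whole status report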
+        details.put(monitorKey + "N/A", null);
+      }
     } else {
       details.put(monitorKey + "N/A", null);
     }
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/AccumuloTestBase.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/AccumuloTestBase.groovy
index 53ac7f5..1e2e27f 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/AccumuloTestBase.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/AccumuloTestBase.groovy
@@ -40,23 +40,18 @@
 @Slf4j
 public abstract class AccumuloTestBase extends YarnZKMiniClusterTestBase {
 
-  public static final int ACCUMULO_LAUNCH_WAIT_TIME
-  public static final boolean ACCUMULO_TESTS_ENABLED
 
-
-  public static final int ACCUMULO_CLUSTER_STARTUP_TIME = ACCUMULO_LAUNCH_WAIT_TIME
-  public static final int ACCUMULO_CLUSTER_STOP_TIME = 1 * 60 * 1000
+  public final int accumulo_cluster_startup_time = accumuloLaunchWaitTime
 
   /**
    * The time to sleep before trying to talk to the HBase Master and
    * expect meaningful results.
    */
-  public static final int ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME = ACCUMULO_CLUSTER_STARTUP_TIME
+  public final int accumulo_cluster_startup_to_live_time = accumulo_cluster_startup_time
   public static final int ACCUMULO_GO_LIVE_TIME = 60000
-
   @Override
   public String getTestConfigurationPath() {
-    return "src/main/resources/" + CONF_RESOURCE; 
+    return "src/main/resources/" + CONF_RESOURCE;
   }
 
   @Override
@@ -79,7 +74,7 @@
       killAllAccumuloProcesses();
     }
   }
-  
+
   void killAllAccumuloProcesses() {
     killJavaProcesses("org.apache.accumulo.start.Main", SIGKILL)
   }
@@ -103,7 +98,7 @@
    * path is valid -that is expected to be a failure on tests that require
    * HBase home to be set.
    */
-  
+
   public void assumeOtherSettings(YarnConfiguration conf) {
     assumeStringOptionSet(conf, OPTION_ZK_HOME)
   }
@@ -118,13 +113,23 @@
    * @param blockUntilRunning block until the AM is running
    * @return launcher which will have executed the command.
    */
-  public ServiceLauncher<SliderClient> createAccCluster(String clustername, int tablets, List<String> extraArgs, boolean deleteExistingData, boolean blockUntilRunning) {
+  public ServiceLauncher<SliderClient> createAccCluster(
+      String clustername,
+      int tablets,
+      List<String> extraArgs,
+      boolean deleteExistingData,
+      boolean blockUntilRunning) {
     Map<String, Integer> roles = [
         (ROLE_MASTER): 1,
         (ROLE_TABLET): tablets,
     ];
-    return createAccCluster(clustername, roles, extraArgs, deleteExistingData, blockUntilRunning);
-}
+    return createAccCluster(
+        clustername,
+        roles,
+        extraArgs,
+        deleteExistingData,
+        blockUntilRunning);
+  }
 
   /**
    * Create an accumulo cluster
@@ -135,30 +140,43 @@
    * @param blockUntilRunning
    * @return the cluster launcher
    */
-  public ServiceLauncher<SliderClient> createAccCluster(String clustername, Map<String, Integer> roles, List<String> extraArgs, boolean deleteExistingData, boolean blockUntilRunning) {
+  public ServiceLauncher<SliderClient> createAccCluster(
+      String clustername,
+      Map<String, Integer> roles,
+      List<String> extraArgs,
+      boolean deleteExistingData,
+      boolean blockUntilRunning) {
     extraArgs << ARG_PROVIDER << PROVIDER_ACCUMULO;
 
     YarnConfiguration conf = testConfiguration
 
     def clusterOps = [
-        (OPTION_ZK_HOME): conf.getTrimmed(OPTION_ZK_HOME),
-        (OPTION_HADOOP_HOME): conf.getTrimmed(OPTION_HADOOP_HOME),
-        ("site." + AccumuloConfigFileOptions.MONITOR_PORT_CLIENT): AccumuloConfigFileOptions.MONITOR_PORT_CLIENT_DEFAULT,
-        ("site." + AccumuloConfigFileOptions.MASTER_PORT_CLIENT): AccumuloConfigFileOptions.MASTER_PORT_CLIENT_DEFAULT,
+        (OPTION_ZK_HOME)                                         : conf.getTrimmed(
+            OPTION_ZK_HOME),
+        (OPTION_HADOOP_HOME)                                     : conf.getTrimmed(
+            OPTION_HADOOP_HOME),
+        ("site." +
+         AccumuloConfigFileOptions.MONITOR_PORT_CLIENT)          : AccumuloConfigFileOptions.MONITOR_PORT_CLIENT_DEFAULT,
+        ("site." +
+         AccumuloConfigFileOptions.MASTER_PORT_CLIENT)           : AccumuloConfigFileOptions.MASTER_PORT_CLIENT_DEFAULT,
     ]
 
 
-    extraArgs << ARG_RES_COMP_OPT << ROLE_MASTER << ResourceKeys.YARN_MEMORY << YRAM; 
-    extraArgs << ARG_RES_COMP_OPT << ROLE_TABLET << ResourceKeys.YARN_MEMORY << YRAM
-    extraArgs << ARG_RES_COMP_OPT << ROLE_MONITOR << ResourceKeys.YARN_MEMORY << YRAM
-    extraArgs << ARG_RES_COMP_OPT << ROLE_GARBAGE_COLLECTOR << ResourceKeys.YARN_MEMORY << YRAM
+    extraArgs << ARG_RES_COMP_OPT << ROLE_MASTER << ResourceKeys.YARN_MEMORY <<
+    YRAM;
+    extraArgs << ARG_RES_COMP_OPT << ROLE_TABLET << ResourceKeys.YARN_MEMORY <<
+    YRAM
+    extraArgs << ARG_RES_COMP_OPT << ROLE_MONITOR << ResourceKeys.YARN_MEMORY <<
+    YRAM
+    extraArgs << ARG_RES_COMP_OPT << ROLE_GARBAGE_COLLECTOR <<
+    ResourceKeys.YARN_MEMORY << YRAM
 
     return createCluster(clustername,
-                             roles,
-                             extraArgs,
-                             deleteExistingData,
-                             blockUntilRunning, 
-                             clusterOps)
+        roles,
+        extraArgs,
+        deleteExistingData,
+        blockUntilRunning,
+        clusterOps)
   }
 
   def getAccClusterStatus() {
@@ -166,11 +184,11 @@
     instance.getConnector("user", "pass").instanceOperations().tabletServers;
   }
 
-  
+
   public String fetchLocalPage(int port, String page) {
-    String url = "http://localhost:" + port+ page
+    String url = "http://localhost:" + port + page
     return fetchWebPage(url)
-    
+
   }
 
   public ClusterDescription flexAccClusterTestRun(
@@ -178,22 +196,22 @@
     int planCount = plan.size()
     assert planCount > 0
     createMiniCluster(clustername, getConfiguration(),
-                      1,
-                      true);
+        1,
+        true);
     //now launch the cluster
     SliderClient sliderClient = null;
     ServiceLauncher launcher = createAccCluster(clustername,
-                                                 plan[0],
-                                                 [],
-                                                 true,
-                                                 true);
+        plan[0],
+        [],
+        true,
+        true);
     sliderClient = (SliderClient) launcher.service;
     try {
 
       //verify the #of roles is as expected
       //get the hbase status
       waitForRoleCount(sliderClient, plan[0],
-                       ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME);
+          accumulo_cluster_startup_to_live_time);
       sleep(ACCUMULO_GO_LIVE_TIME);
 
       plan.remove(0)
@@ -209,12 +227,12 @@
             flexTarget
         );
         cd = waitForRoleCount(sliderClient, flexTarget,
-                              ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME);
+            accumulo_cluster_startup_to_live_time);
 
         sleep(ACCUMULO_GO_LIVE_TIME);
 
       }
-      
+
       return cd;
 
     } finally {
@@ -222,5 +240,5 @@
     }
 
   }
-  
+
 }
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccCorrectInstanceName.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccCorrectInstanceName.groovy
index 89dc90c..348ccd4 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccCorrectInstanceName.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccCorrectInstanceName.groovy
@@ -65,7 +65,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccFreezeThaw.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccFreezeThaw.groovy
index 3983a5d..143974f 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccFreezeThaw.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccFreezeThaw.groovy
@@ -56,7 +56,7 @@
     addToTeardown(sliderClient);
 
     
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     //now give the cluster a bit of time to actually start work
 
     log.info("Sleeping for a while")
@@ -93,7 +93,7 @@
     ServiceLauncher launcher2 = thawCluster(clustername, [], true);
     SliderClient sliderClient2 = (SliderClient) launcher2.service
     addToTeardown(sliderClient2)
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME, "thawing")
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time, "thawing")
 
 
     sleepForAccumuloClusterLive();
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveHDFSArchive.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveHDFSArchive.groovy
index 2220cdc..2f744bb 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveHDFSArchive.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveHDFSArchive.groovy
@@ -61,7 +61,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveLocalArchive.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveLocalArchive.groovy
index 1e3adbd..98b8b44 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveLocalArchive.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccLiveLocalArchive.groovy
@@ -63,7 +63,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM1T1GC1Mon1.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM1T1GC1Mon1.groovy
index 9d2cee6..7074294 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM1T1GC1Mon1.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM1T1GC1Mon1.groovy
@@ -59,7 +59,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM2T2GC1Mon1.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM2T2GC1Mon1.groovy
index 2494a56..253192b 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM2T2GC1Mon1.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccM2T2GC1Mon1.groovy
@@ -61,7 +61,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccumuloAMWebApp.groovy b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccumuloAMWebApp.groovy
index bd975e4..df40df7 100644
--- a/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccumuloAMWebApp.groovy
+++ b/slider-providers/accumulo/slider-accumulo-provider/src/test/groovy/org/apache/slider/providers/accumulo/live/TestAccumuloAMWebApp.groovy
@@ -62,7 +62,7 @@
 
     waitWhileClusterLive(sliderClient);
     assert sliderClient.applicationReport.yarnApplicationState == YarnApplicationState.RUNNING
-    waitForRoleCount(sliderClient, roles, ACCUMULO_CLUSTER_STARTUP_TO_LIVE_TIME)
+    waitForRoleCount(sliderClient, roles, accumulo_cluster_startup_to_live_time)
     describe("Cluster status")
     ClusterDescription status
     status = sliderClient.getClusterDescription(clustername)
diff --git a/slider-providers/hbase/hbase-funtests/pom.xml b/slider-providers/hbase/hbase-funtests/pom.xml
index 28dba7f..f5a155b 100644
--- a/slider-providers/hbase/hbase-funtests/pom.xml
+++ b/slider-providers/hbase/hbase-funtests/pom.xml
@@ -18,7 +18,6 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hbase-funtests</artifactId>
-  <version>0.30</version>
   <name>Slider HBase Provider Functional Tests</name>
   <packaging>jar</packaging>
   <description>
@@ -28,7 +27,7 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
     <relativePath>../../../</relativePath>
   </parent>
 
@@ -98,7 +97,7 @@
           </forkedProcessTimeoutInSeconds>
           <threadCount>1</threadCount>
           <argLine>${test.argLine}</argLine>
-          <failIfNoTests>${test.failIfNoTests}</failIfNoTests>
+          <failIfNoTests>${test.funtests.failIfNoTests}</failIfNoTests>
           
           <trimStackTrace>false</trimStackTrace>
           <redirectTestOutputToFile>${build.redirect.test.output.to.file}</redirectTestOutputToFile>
@@ -108,7 +107,7 @@
             <java.security.krb5.realm>${slider.test.java.security.krb5.realm}</java.security.krb5.realm>
             <java.security.krb5.kdc>${slider.test.java.security.krb5.kdc}</java.security.krb5.kdc>
             <!-- this property must be supplied-->
-            <slider.conf.dir>${slider.conf.dir}</slider.conf.dir>
+            <slider.conf.dir>../../../src/test/clusters/offline/slider</slider.conf.dir>
             <slider.bin.dir>../../../slider-assembly/target/slider-${project.version}-all/slider-${project.version}</slider.bin.dir>
           </systemPropertyVariables>
           <includes>
@@ -134,13 +133,11 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-hbase-provider</artifactId>
-      <version>${project.version}</version>
     </dependency>
 
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-hbase-provider</artifactId>
-      <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
@@ -148,7 +145,6 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-core</artifactId>
-      <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
@@ -162,7 +158,6 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-assembly</artifactId>
-      <version>${project.version}</version>
       <classifier>all</classifier>
       <type>tar.gz</type>
       <scope>test</scope>
@@ -171,20 +166,12 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-funtest</artifactId>
-      <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-     <scope>test</scope>
+      <artifactId>hadoop-client</artifactId>
     </dependency>
 
     <dependency>
@@ -241,6 +228,24 @@
       <classifier>tests</classifier>
     </dependency>
 
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <scope>test</scope>
+    </dependency>
+
     
   </dependencies>
 
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java b/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/TestStub.groovy
similarity index 79%
copy from slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
copy to slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/TestStub.groovy
index 7af463d..f683ded 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/utility/EventCallback.java
+++ b/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/TestStub.groovy
@@ -16,10 +16,17 @@
  * limitations under the License.
  */
 
-package org.apache.slider.server.services.utility;
+package org.apache.slider.providers.hbase
 
-public interface EventCallback {
-  
-  public void eventCallbackEvent();
-  
+import org.junit.Test
+
+/**
+ *  this is here to ensure there is always a test
+ */
+class TestStub {
+
+  @Test
+  public void testStubTest() throws Throwable {
+
+  }
 }
diff --git a/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/funtest/HBaseCommandTestBase.groovy b/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/funtest/HBaseCommandTestBase.groovy
index fe9370e..8bad590 100644
--- a/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/funtest/HBaseCommandTestBase.groovy
+++ b/slider-providers/hbase/hbase-funtests/src/test/groovy/org/apache/slider/providers/hbase/funtest/HBaseCommandTestBase.groovy
@@ -52,6 +52,7 @@
 
   @BeforeClass
   public static void extendClasspath() {
+    assumeFunctionalTestsEnabled()
     addExtraJar(HBaseClientProvider)
   }
 
diff --git a/slider-providers/hbase/slider-hbase-provider/pom.xml b/slider-providers/hbase/slider-hbase-provider/pom.xml
index 15319cf..381a1b6 100644
--- a/slider-providers/hbase/slider-hbase-provider/pom.xml
+++ b/slider-providers/hbase/slider-hbase-provider/pom.xml
@@ -18,7 +18,6 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>slider-hbase-provider</artifactId>
-  <version>0.30</version>
   <name>Slider HBase Provider</name>
   <packaging>jar</packaging>
   <description>
@@ -30,7 +29,7 @@
   <parent>
     <groupId>org.apache.slider</groupId>
     <artifactId>slider</artifactId>
-    <version>0.30</version>
+    <version>0.40</version>
     <relativePath>../../../</relativePath>
   </parent>
 
@@ -151,13 +150,11 @@
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-core</artifactId>
-      <version>${project.version}</version>
     </dependency>
 
     <dependency>
       <groupId>org.apache.slider</groupId>
       <artifactId>slider-core</artifactId>
-      <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
@@ -170,13 +167,7 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
+      <artifactId>hadoop-client</artifactId>
     </dependency>
 
     <dependency>
@@ -197,6 +188,16 @@
 
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <classifier>tests</classifier>
       <scope>test</scope>
@@ -235,6 +236,18 @@
       <classifier>tests</classifier>
       <scope>test</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
     
   </dependencies>
 
diff --git a/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseProviderService.java b/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseProviderService.java
index fc23970..f9a5628 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseProviderService.java
+++ b/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseProviderService.java
@@ -38,6 +38,7 @@
 import org.apache.slider.core.registry.docstore.PublishedConfiguration;
 import org.apache.slider.core.registry.info.ServiceInstanceData;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderCore;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
@@ -50,7 +51,6 @@
 import org.apache.slider.server.appmaster.web.rest.agent.Register;
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse;
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationStatus;
-import org.apache.slider.server.services.utility.EventCallback;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,6 +60,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 
 import static org.apache.slider.server.appmaster.web.rest.RestPaths.SLIDER_PATH_PUBLISHER;
 
@@ -127,13 +128,21 @@
     // Set the environment
     launcher.putEnv(SliderUtils.buildEnvMap(appComponent));
 
-    launcher.setEnv(HBASE_LOG_DIR, providerUtils.getLogdir());
+    String logDirs = providerUtils.getLogdir();
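+    // the log dir value may be a comma-separated list (presumably one entry
+    // per NM log dir); if so, pick one of the candidates at random below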
+    String logDir;
+    int idx = logDirs.indexOf(",");
+    if (idx > 0) {
+      // randomly choose a log dir candidate
+      String[] segments = logDirs.split(",");
+      Random rand = new Random();
+      logDir = segments[rand.nextInt(segments.length)];
+    } else {
+      logDir = logDirs;
+    }
+    launcher.setEnv(HBASE_LOG_DIR, logDir);
 
     launcher.setEnv(PROPAGATED_CONFDIR,
         ProviderUtils.convertToAppRelativePath(
             SliderKeys.PROPAGATED_CONF_DIR_NAME) );
 
-
     //local resources
 
     //add the configuration resources
@@ -196,9 +205,10 @@
 
   @Override
   public void applyInitialRegistryDefinitions(URL web,
-      ServiceInstanceData instanceData) throws
+                                              URL secureWebAPI,
+                                              ServiceInstanceData instanceData) throws
       IOException {
-    super.applyInitialRegistryDefinitions(web, instanceData);
+    super.applyInitialRegistryDefinitions(web, secureWebAPI, instanceData);
   }
 
   @Override
@@ -240,7 +250,7 @@
   public boolean exec(AggregateConf instanceDefinition,
                       File confDir,
                       Map<String, String> env,
-                      EventCallback execInProgress) throws
+                      ProviderCompleted execInProgress) throws
                                                  IOException,
       SliderException {
 
diff --git a/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseRoles.java b/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseRoles.java
index 552374c..01776f7 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseRoles.java
+++ b/slider-providers/hbase/slider-hbase-provider/src/main/java/org/apache/slider/providers/hbase/HBaseRoles.java
@@ -42,8 +42,7 @@
    */
   static {
     ROLES.add(new ProviderRole(HBaseKeys.ROLE_WORKER, KEY_WORKER));
-    // Master doesn't need data locality
-    ROLES.add(new ProviderRole(HBaseKeys.ROLE_MASTER, KEY_MASTER,PlacementPolicy.NO_DATA_LOCALITY));
+    ROLES.add(new ProviderRole(HBaseKeys.ROLE_MASTER, KEY_MASTER));
   }
 
 
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/failures/TestKilledHBaseAM.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/failures/TestKilledHBaseAM.groovy
index c585dc1..2237c5d 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/failures/TestKilledHBaseAM.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/failures/TestKilledHBaseAM.groovy
@@ -27,8 +27,6 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport
 import org.apache.hadoop.yarn.api.records.YarnApplicationState
 import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler
 import org.apache.slider.core.main.ServiceLauncher
 import org.apache.slider.common.SliderXmlConfKeys
 import org.apache.slider.api.ClusterDescription
@@ -38,6 +36,7 @@
 import org.apache.slider.common.params.ActionAMSuicideArgs
 import org.apache.slider.providers.hbase.minicluster.HBaseMiniClusterTestBase
 import org.junit.Test
+import static org.apache.slider.test.SliderTestUtils.log
 
 /**
  * test create a live region service
@@ -58,8 +57,7 @@
     // patch the configuration for AM restart
     conf.setInt(SliderXmlConfKeys.KEY_AM_RESTART_LIMIT, 3)
 
-    conf.setClass(YarnConfiguration.RM_SCHEDULER,
-        FifoScheduler, ResourceScheduler);
+    conf.set(YarnConfiguration.RM_SCHEDULER, FIFO_SCHEDULER);
     createMiniCluster(clustername, conf, 1, 1, 1, true, false)
     describe(" Kill the AM, expect cluster to die");
 
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/flexing/TestClusterFlex0To1.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/flexing/TestClusterFlex0To1.groovy
index 587e41f..3a66873 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/flexing/TestClusterFlex0To1.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/flexing/TestClusterFlex0To1.groovy
@@ -30,7 +30,7 @@
 
   @Test
   public void testClusterFlex0To1() throws Throwable {
-    assert flexHBaseClusterTestRun("test_cluster_flex_0To1", 1, 1, 0, 1, false)
+    assert flexHBaseClusterTestRun("test_cluster_flex_0to1", 1, 1, 0, 1, false)
   }
 
 }
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
index a58f99a..0140030 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
@@ -88,7 +88,7 @@
     def names = registry.getServiceTypes();
     dumpRegistryServiceTypes(names)
 
-    List<String> instanceIds = sliderClient.listRegistedSliderInstances()
+    List<String> instanceIds = sliderClient.listRegisteredSliderInstances()
 
 
     dumpRegistryInstanceIDs(instanceIds)
diff --git a/src/docs/reports/2014-06-04-report.txt b/src/docs/reports/2014-06-04-report.txt
new file mode 100644
index 0000000..802faee
--- /dev/null
+++ b/src/docs/reports/2014-06-04-report.txt
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+Slider is a collection of tools & technologies to package, deploy, and manage
+long-running applications on Apache Hadoop YARN clusters.
+ 
+There are no Board-level issues at this time.
+
+
+When did the project last make any releases? 
+--------------------------------------------
+
+We made our first ASF-hosted release on June 2.
+
+This was a source-only release; some of the reviewers made suggestions
+which will need to be incorporated into the next one. We plan to do another
+release in a few weeks' time, and so evolve the project rapidly.
+
+
+Describe the overall activity in the project over the past quarter
+-------------------------------------------------------------------
+
+Our main activity has been setting up the incubator-hosted project: JIRA,
+site, mailing list, etc. The Infra team have been very helpful here.
+
+- Making and publishing our site has educated us on site publishing.
+
+- The initial release has taught us about the release process for incubating
+projects, and where we need to improve it.
+
+When were the last committers or PMC members elected?
+-----------------------------------------------------
+
+We have only just been set up, so the committer and PMC membership is
+as covered in the incubator proposal.
+
+PMC and committer diversity
+---------------------------
+
+We're only getting started, and do not have any diversity yet. Our first
+goal will be to attract users, bug reporters and developers. The dev list
+is up and running; we do not have a separate user list, so we hope
+to pull users into coding.
+
+The initial release will help to gain awareness, and should bring in users.
+
+Infrastructure issues or strategic needs
+----------------------------------------
+
+We're still trying to get Jenkins building, but that's a matter of
+handling protobuf versions on the build machines, rather than infra
+involvement.
diff --git a/src/site/markdown/architecture/architecture.md b/src/site/markdown/architecture/architecture.md
deleted file mode 100644
index a08baac..0000000
--- a/src/site/markdown/architecture/architecture.md
+++ /dev/null
@@ -1,142 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Architecture
-
-## Summary
-
-Slider is a YARN application to deploy non-YARN-enabled applications in a YARN cluster.
-
-Slider consists of a YARN application master, the "Slider AM", and a client
-application which communicates with YARN and the Slider AM via remote procedure
-calls and/or REST requests. The client application offers command line access
- as well as low-level API access for test purposes
-
-The deployed application must be a program that can be run across a pool of
-YARN-managed servers, dynamically locating its peers. It is not Slider's
-responsibility to configure up the peer servers, apart from some initial
-application-specific application instance configuration. (The full requirements
-of an application are [described in another document](app_needs.md).
-
-Every application instance is described as a set of one or more *components*; each
-component can have a different program/command, and a different set of configuration
-options and parameters.
-
-The AM takes the details on which roles to start, and requests a YARN container
-for each component; it then monitors the state of the application instance, receiving messages
-from YARN when a remotely executed process finishes. It then deploys another instance of
-that component.
-
-
-## Slider Packaging
-
-A key goal of Slider is to support the deployment of existing applications into
-a YARN application instance, without having to extend Slider itself. 
-
-
-
-## AM Architecture
-
-The application master consists of
-
- 1. The AM engine which handles all integration with external services, specifically YARN and any Slider clients
- 1. A *provider* specific to deploying a class of applications.
- 1. The Application State. 
-
-The Application State is the model of the application instance, containing
-
- 1. A specification of the desired state of the application instance -the number of instances of each role, their YARN and process memory requirements and some other options. 
- 1. A map of the current instances of each role across the YARN cluster, including reliability statistics of each node in the application instance used.
- 1. [The Role History](rolehistory.html) -a record of which nodes roles were deployed on for re-requesting the same nodes in future. This is persisted to disk and re-read if present, for faster application startup times.
- 1. Queues that track outstanding requests, released and starting nodes
-
-The Application Engine integrates with the outside world: the YARN Resource Manager ("the RM"), and the node-specific Node Managers, receiving events from the services, requesting or releasing containers via the RM,  and starting applications on assigned containers.
-
-After any notification of a change in the state of the cluster (or an update to the client-supplied cluster specification), the Application Engine passes the information on to the Application State class, which updates its state and then returns a list of cluster operations to be submitted: requests for containers of different types -potentially on specified nodes, or requests to release containers.
-
-As those requests are met and allocation messages passed to the Application Engine, it works with the Application State to assign them to specific components, then invokes the provider to build up the launch context for that application.
-
-The provider has the task of populating  container requests with the file references, environment variables and commands needed to start the provider's supported programs.  
-
-The core provider deploys a minimal agent on the target containers, then, as the agent checks in to the agent provider's REST API, executes commands issued to it. 
-
-The set of commands this agent executes focuses on downloading archives from HDFS, expanding them, then running Python scripts which perform the
-actual configuration and execution of the target application -primarily through template expansion.
-
-
-To summarize: Slider is not a classic YARN analysis application, which allocates and schedules work across the cluster in short-to-medium life containers with the lifespan of a query or an analytics session, but is instead built for an application with a lifespan of days to months. Slider works to keep the actual state of its application cluster in line with the desired state, while the application has the tasks of recovering from node failure, locating peer nodes and working with data in an HDFS filesystem.
-
-As such it is one of the first applications designed to use YARN as a platform for long-lived services -Samza being the other key example. These applications' needs of YARN are different, and their application manager design is focused around maintaining the distributed application in its desired state rather than the ongoing progress of submitted work.
-
-The clean model-view-controller split was implemented to isolate the model and aid mock testing of large clusters with simulated scale, and hence increase confidence that Slider can scale to work in large YARN clusters and with larger application instances. 
-
-
-
-### Failure Model
-
-The application master is designed to be a [crash-only application](https://www.usenix.org/legacy/events/hotos03/tech/full_papers/candea/candea.pdf), clients are free to terminate
-the application instance by asking YARN directly. 
-
-There is an RPC call to stop the application instance - this is a nicety which includes a message in the termination log, and
-could, in future, perhaps warn the provider that the application instance is being torn down. That is a potentially dangerous feature
-to add -as provider implementors may start to expect the method to be called reliably. Slider is designed to fail without
-warning, to rebuild its state on a YARN-initiated restart, and to be manually terminated without any advance notice.
-
-### RPC Interface
-
-
-The RPC interface allows the client to query the current application state, and to update it by pushing out a new JSON specification. 
-
-The core operations are
-
-* `getJSONClusterStatus()`: get the status of the application instance as a JSON document.
-* `flexCluster()`: update the desired count of role instances in the running application instance.
-* `stopCluster()`: stop the application instance.
-
-There are some other low-level operations for extra diagnostics and testing, but they are of limited importance.
-
-The `flexCluster()` call takes a JSON application instance specification and forwards it to the AM -which extracts the desired counts of each role to update the Application State. A change in the desired size of the application instance is treated like any reported node failure:
-it triggers a re-evaluation of the application state, building up the list of container add and release requests to make of
-the YARN resource manager.
-
-The final operation, `stopCluster()`, stops the application instance. 
-
-### Security and Identity
-
-Slider's security model is described in detail in [an accompanying document](security.html).
-
-A Slider application instance is expected to access data belonging to the user creating the instance. 
-
-In a secure YARN cluster, this is done by acquiring Kerberos tokens in the client when the application instance is updated, tokens which
-are propagated to the Slider AM and thence to the deployed application containers themselves. These
-tokens are valid for a finite time period. 
-
-HBase has always required keytab files to be installed on every node in the Hadoop cluster for it to have secure access -this requirement
-holds for Slider-deployed HBase clusters. Slider does not itself adopt the responsibility of preparing or distributing these files;
-this must be done via another channel.
-
-In Hadoop 2.2, the tokens for communication between the Slider AM and YARN expire after -by default- 72 hours. The
-HDFS tokens will also expire after some time period. This places an upper bound on the lifespan of a Slider application (or any
-other long-lived YARN application) in a secure Hadoop cluster. 
-
-
-
-In an insecure Hadoop cluster, the Slider AM and its containers are likely to run in a different OS account from the submitting user.
-To enable access to the database files as that submitting user, the identity of the user is provided when the AM is created; the
-AM will pass this same identity down to the created containers. This information *identifies* the user -but does not *authenticate* them: they are trusted to be who they claim to be.
-
- 
diff --git a/src/site/markdown/architecture/rolehistory.md b/src/site/markdown/architecture/rolehistory.md
deleted file mode 100644
index 83f2010..0000000
--- a/src/site/markdown/architecture/rolehistory.md
+++ /dev/null
@@ -1,1010 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Role History: how Slider brings back nodes in the same location
-
-### Last updated  2013-12-06
-
-*This document uses the pre-Slider terminology of role/cluster, not
-component and application instance.*
-
-
-## Outstanding issues
-
-1. Can we use the history to implement anti-affinity: for any role with this flag,
-use our knowledge of the cluster to ask for all nodes that aren't in use already
-
-1. How to add blacklisting here? We are tracking failures and startup failures
-per node (not persisted), but not using this in role placement requests yet.
-
-## Introduction
-
-Slider needs to bring up instances of a given role on the machine(s) on which
-they last ran -it should remember, after shrinking or freezing a cluster, which
-servers were last used for a role -and use this (persisted) data to select
-the same servers next time.
-
-It does this on the basis that the role instances prefer node-local
-access to data previously persisted to HDFS. This is precisely the case
-for Apache HBase, which can use Unix Domain Sockets to talk to the DataNode
-without using the TCP stack. The HBase master persists to HDFS the tables
-assigned to specific Region Servers, and when HBase is restarted its master
-tries to reassign the same tables back to Region Servers on the same machine.
-
-For this to work in a dynamic cluster, Slider needs to bring up Region Servers
-on the previously used hosts, so that the HBase Master can re-assign the same
-tables.
-
-Note that it does not need to care about the placement of other roles, such
-as the HBase masters -for those, anti-affinity between instances is
-the key requirement.
-
-### Terminology
-
-* **Role Instance**: a single instance of a role.
-* **Node**: A server in the YARN physical (or potentially virtual) cluster of servers.
-* **Slider Cluster**: The set of role instances deployed by Slider so as to
- create a single aggregate application.
-* **Slider AM**: The Application Master of Slider: the program deployed by YARN to
-manage its Slider Cluster.
-* **RM**: The YARN Resource Manager.
-
-### Assumptions
-
-Here are some assumptions in Slider's design:
-
-1. Instances of a specific role should preferably be deployed onto different
-servers. This enables Slider to only remember the set of server nodes onto
-which instances were created, rather than more complex facts such as "two Region
-Servers were previously running on Node #17". On restart Slider can simply request
-one instance of a Region Server on a specific node, leaving the other instance
-to be arbitrarily deployed by YARN. This strategy should help reduce the *affinity*
-of the role deployment, and so increase its resilience to failure.
-
-1. There is no need to make sophisticated choices on which nodes to request
-re-assignment -such as recording the amount of data persisted by a previous
-instance and prioritizing nodes based on such data. More succinctly: the
-only priority needed when asking for nodes is *ask for the most recently used*.
-
-1. Different roles are independent: it is not an issue if instances of two
- different roles (for example, an Accumulo Monitor and an Accumulo Tablet Server)
- are on the same host. This assumption allows Slider to only worry about affinity
- issues within a specific role, rather than across all roles.
- 
-1. After a cluster has been started, the rate of change of the cluster is
-low: both node failures and cluster flexing happen at the rate of every few
-hours, rather than every few seconds. This allows Slider to avoid needing
-data structures and layout persistence code designed for regular and repeated changes.
-
-1. Instance placement is best-effort: if the previous placement cannot be satisfied,
-the application will still perform adequately with role instances deployed
-onto new servers. More specifically, if a previous server is unavailable
-for hosting a role instance due to lack of capacity or availability, Slider
-will not decrement the number of instances to deploy: instead it will rely
-on YARN to locate a new node -ideally on the same rack.
-
-1. If two instances of the same role do get assigned to the same server, it
-is not a failure condition. (This may be problematic for some roles
--we may need a role-by-role policy here, so that master nodes can be anti-affine.)
-[Specifically, more than one HBase master will not come up on the same host.]
-
-1. If a role instance fails on a specific node, asking for a container on
-that same node for the replacement instance is a valid recovery strategy.
-This contains assumptions about failure modes -some randomness here may
-be a valid tactic, especially for roles that do not care about locality.
-
-1. Tracking failure statistics of nodes may be a feature to add in future;
-designing the Role History datastructures to enable future collection
-of rolling statistics on recent failures would be a first step to this.
-
-### The Role History
-
-The `RoleHistory` is a datastructure which models the role assignment, and
-can persist it to and restore it from the (shared) filesystem.
-
-* For each role, there is a list of cluster nodes which have hosted instances
-of this role in the past.
-
-* This history is used when selecting a node for a role.
-
-* This history remembers when nodes were allocated. These are re-requested
-when thawing a cluster.
-
-* It must also remember when nodes were released -these are re-requested
-when returning the cluster size to a previous size during flex operations.
-
-* It has to track nodes for which Slider has an outstanding container request
-with YARN. This ensures that the same node is not requested more than once
-due to outstanding requests.
-
-* It does not retain a complete history of the role -and does not need to.
-All it needs to retain is the recent history for every node onto which a role
-instance has been deployed. Specifically, the last allocation or release
-operation on a node is all that needs to be persisted.
-
-* On AM startup, all nodes in the history are considered candidates, even those nodes currently marked
-as active -as they were from the previous instance.
-
-* On AM restart, nodes in the role history marked as active have to be considered
-still active -the YARN RM will have to provide the full list of those which are not.
-
-* During cluster flexing, nodes marked as released -and for which there is no
-outstanding request - are considered candidates for requesting new instances.
-
-* When choosing a candidate node for hosting a role instance, it is taken from
-the head of the time-ordered list of nodes that last ran an instance of that role.
-
-### Persistence
-
-The state of the Role History is persisted to HDFS on changes -but not on cluster
-termination.
-
-1. When nodes are allocated, the Role History is marked as dirty
-1. When container release callbacks are received, the Role History is marked as dirty
-1. When nodes are requested or a release request made, the Role History is *not*
- marked as dirty. This information is not relevant on AM restart.
-
-As, at startup, a large number of allocations may arrive in a short period of time,
-the Role History may be updated very rapidly -yet as the containers are
-only recently activated, it is not likely that an immediately restarted Slider
-cluster would gain by re-requesting containers on them -their historical
-value is more important than their immediate past.
-
-Accordingly, the role history may be persisted to HDFS asynchronously, with
-the dirty bit triggering a flush of the state to HDFS. The datastructure
-will still need to be synchronized for cross-thread access, but the
-sync operation will not be a major bottleneck, compared to saving the file on every
-container allocation response (which will actually be the initial implementation).
-
-There's no need to persist the history in a human-readable form; while protobuf
-might seem the approach most consistent with the rest of YARN, it's not
-an easy structure to work with.
-
-The initial implementation will use Apache Avro as the persistence format,
-with the data saved in JSON or compressed format.
-
-
-## Weaknesses in this design
-
-**Blacklisting**: even if a node fails repeatedly, this design will still try to re-request
-instances on this node; there is no blacklisting. As a central blacklist
-for YARN has been proposed, it is hoped that this issue will be addressed centrally,
-without Slider having to remember which nodes are unreliable *for that particular
-Slider cluster*.
-
-**Anti-affinity**: If multiple role instances are assigned to the same node,
-Slider has to choose on restart or flexing whether to ask for multiple
-instances on that node again, or to pick other nodes. The assumed policy is
-"only ask for one instance per node".
-
-**Bias towards recent nodes over most-used**: re-requesting the most
-recent nodes, rather than those with the most history of use, may
-push Slider to request nodes that were only briefly in use -and so have
-only a small amount of local state- over nodes that have had long-lived instances.
-This is a problem that could perhaps be addressed by preserving more
-history of a node -maintaining some kind of moving average of
-node use and picking the heaviest used, or some other more-complex algorithm.
-This may be possible, but we'd need evidence that the problem existed before
-trying to address it.
-
-# The NodeMap: the core of the Role History
-
-The core data structure, the `NodeMap`, is a map of every known node in the cluster, tracking
-how many containers are allocated to specific roles in it, and, when there
-are no active instances, when it was last used. This history is used to
-choose where to request new containers. Because of the asynchronous
-allocation and release of containers, the Role History also needs to track
-outstanding release requests -and, more critically, outstanding allocation
-requests. If Slider has already requested a container for a specific role
-on a host, then asking for another container of that role would break
-anti-affinity requirements. Note that not tracking outstanding requests would
-radically simplify some aspects of the design, especially the complexity
-of correlating allocation responses with the original requests -and so the
-actual hosts originally requested.
-
-1. Slider builds up a map of which nodes have recently been used.
-1. Every node counts the number of active containers in each role.
-1. Nodes are only chosen for allocation requests when there are no
-active or requested containers on that node.
-1. When choosing which instances to release, Slider could pick the node with the
-most containers on it. This would spread the load.
-1. When there are no empty nodes to request containers on, a request would
-let YARN choose.
-
-#### Strengths
-
-* Handles the multi-container on one node problem
-* By storing details about every role, cross-role decisions could be possible
-* Simple counters can track the state of pending add/release requests
-* Scales well to a rapidly flexing cluster
-* Simple to work with and persist
-* Easy to view and debug
-* Would support cross-role collection of node failures in future
-
-#### Weaknesses
-
-* Size of the data structure is `O(nodes * role-instances)`. This
-could be mitigated by regular cleansing of the structure. For example, at
-thaw time (or intermittently) all unused nodes > 2 weeks old could be dropped.
-* Locating a free node could take `O(nodes)` lookups -and if the criterion of "newest"
-is included, will take exactly `O(nodes)` lookups. As an optimization, a list
-of recently explicitly released nodes can be maintained.
-* Need to track outstanding requests against nodes, so that if a request
-was satisfied on a different node, the original node's request count is
- decremented, *not that of the node actually allocated*. 
-* In a virtual cluster, the map may fill with node entries that are no longer in the cluster.
-Slider should query the RM (or topology scripts?) to determine if nodes are still
-part of the YARN cluster.
-
-## Data Structures
-
-### RoleHistory
-
-    startTime: long
-    saveTime: long
-    dirty: boolean
-    nodemap: NodeMap
-    roles: RoleStatus[]
-    outstandingRequests: transient OutstandingRequestTracker
-    availableNodes: transient List<NodeInstance>[]
-
-This is the aggregate data structure that is persisted to/from file.
-
-### NodeMap
-
-    clusterNodes: Map: NodeId -> NodeInstance
-    clusterNodes(): Iterable<NodeInstance>
-    getOrCreate(NodeId): NodeInstance
-
-Maps a YARN NodeID record to a Slider `NodeInstance` structure.
-
-### NodeInstance
-
-Every node in the cluster is modeled as a ragged array of `NodeEntry` instances,
-indexed by role index:
-
-    NodeEntry[roles]
-    get(roleId): NodeEntry or null
-    create(roleId): NodeEntry
-    getNodeEntries(): NodeEntry[roles]
-    getOrCreate(roleId): NodeEntry
-    remove(roleId): NodeEntry
-
-This could be implemented in a map or an indexed array; the array is more
-efficient, but it does mandate that the number of roles is bounded and fixed.
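-
-As an illustration only -names here are assumed, not the actual implementation-
-a map-backed version might look like this in Java, using the `NodeEntry` class
-sketched in the next section:
-
-    import java.util.HashMap;
-    import java.util.Map;
-
-    // Hypothetical sketch: role-indexed NodeEntry records for one node
-    public class NodeInstance {
-      private final Map<Integer, NodeEntry> entries = new HashMap<>();
-
-      public NodeEntry get(int roleId) { return entries.get(roleId); }
-
-      public NodeEntry getOrCreate(int roleId) {
-        return entries.computeIfAbsent(roleId, id -> new NodeEntry());
-      }
-
-      public NodeEntry remove(int roleId) { return entries.remove(roleId); }
-    }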
-
-### NodeEntry
-
-Records the details about all of a role's containers on a node. The
-`active` field records the number of containers currently active.
-
-    active: int
-    requested: transient int
-    releasing: transient int
-    last_used: long
-
-    NodeEntry.available(): boolean = active - releasing == 0 && requested == 0
-
-The two fields `releasing` and `requested` are used to track the ongoing
-state of YARN requests; they do not need to be persisted across freeze/thaw
-cycles. They may be relevant across AM restart, but without other data
-structures in the AM, there is not enough information to track what the AM was
-up to before it was restarted. The strategy will be to ignore unexpected allocation
-responses (which may come from pre-restart requests), while treating
-unexpected container release responses as failures.
-
-The `active` counter is only decremented after a container release response
-has been received.
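-
-A minimal Java sketch of such an entry (all names assumed, not the actual class):
-
-    // Hypothetical sketch of the per-(node, role) record
-    public class NodeEntry {
-      public int active;               // containers currently live
-      public transient int requested;  // outstanding allocation requests
-      public transient int releasing;  // outstanding release requests
-      public long lastUsed;            // time of last allocation or release
-
-      // a node is a candidate for a new request only when it has no
-      // live-and-staying containers and no outstanding request
-      public boolean available() {
-        return (active - releasing) == 0 && requested == 0;
-      }
-    }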
-
-### RoleStatus
-
-This is the existing `org.apache.hoya.yarn.appmaster.state.RoleStatus` class
-
-### RoleList
-
-A list mapping role to int enum is needed to index NodeEntry elements in
-the NodeInstance arrays. Although such an enum is already implemented in the Slider
-Providers, explicitly serializing and deserializing it would make
-the persistent structure easier to parse in other tools, and resilient
-to changes in the number or position of roles.
-
-This list could also retain information about recently used/released nodes,
-so that the selection of containers to request could shortcut a search.
-
-
-### ContainerPriority
-
-The container priority field (a 32-bit integer) is used by Slider (0.5.x)
-to index the specific role in a container so as to determine which role
-has been offered in a container allocation message, and which role has
-been released on a release event.
-
-The Role History needs to track outstanding requests, so that
-when an allocation comes in, it can be mapped back to the original request.
-Simply looking up the nodes on the provided container and decrementing
-its request counter is not going to work -the container may be allocated
-on a different node from that requested.
-
-**Proposal**: The priority field of a request is divided by Slider into 8 bits for
-`roleID` and 24 bits for `requestID`. The request ID will be a simple
-rolling integer -Slider will assume that after 2^24 requests per role it can be rolled
--though as we will be retaining a list of outstanding requests, a clash should not occur.
-The main requirement is to never have more than 2^24 outstanding requests for
-instances of a specific role, which places an upper bound on the size of a Slider cluster.
-
-The splitting and merging will be implemented in a ContainerPriority class,
-for uniform access.
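-
-A minimal sketch of such a class, assuming the 8/24-bit split described above
-(method names are illustrative, not the actual API):
-
-    // Hypothetical sketch: pack/unpack roleId (low 8 bits) and
-    // requestId (upper 24 bits) into a YARN priority value
-    public final class ContainerPriority {
-      public static int buildPriority(int roleId, int requestId) {
-        return ((requestId & 0xffffff) << 8) | (roleId & 0xff);
-      }
-
-      public static int extractRole(int priority) {
-        return priority & 0xff;
-      }
-
-      public static int extractRequestId(int priority) {
-        return (priority >>> 8) & 0xffffff;
-      }
-    }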
-
-### OutstandingRequest ###
-
-Tracks an outstanding request. This is used to correlate an allocation response
-(whose Container Priority field is used to locate this request), with the
-node and role used in the request.
-
-      roleId:  int
-      requestID :  int
-      node: string (may be null)
-      requestedTime: long
-      priority: int = requestID << 8 | roleId
-
-The node identifier may be null -which indicates that a request was made without
-a specific target node.
-
-### OutstandingRequestTracker ###
-
-Contains a map from requestID to the specific `OutstandingRequest` made,
-and generates the request IDs.
-
-    nextRequestId: int
-    requestMap(RequestID) -> OutstandingRequest
-
-Operations
-
-    addRequest(NodeInstance, RoleId): OutstandingRequest
-        (and an updated request Map with a new entry)
-    lookup(RequestID): OutstandingRequest
-    remove(RequestID): OutstandingRequest
-    listRequestsForNode(ClusterID): [OutstandingRequest]
-
-The list operation can be implemented inefficiently unless it is found
-to be important -if so a more complex structure will be needed.
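-
-A minimal Java sketch of the tracker, assuming the fields above and the
-priority packing from the ContainerPriority proposal (all names illustrative):
-
-    import java.util.HashMap;
-    import java.util.Map;
-
-    // Hypothetical sketch of the tracker and its request records
-    public class OutstandingRequestTracker {
-
-      public static final class OutstandingRequest {
-        public final int roleId;
-        public final int requestId;
-        public final String node;   // may be null: no specific target node
-        public final int priority;
-
-        OutstandingRequest(int roleId, int requestId, String node) {
-          this.roleId = roleId;
-          this.requestId = requestId;
-          this.node = node;
-          this.priority = (requestId << 8) | roleId;
-        }
-      }
-
-      private int nextRequestId;
-      private final Map<Integer, OutstandingRequest> requestMap = new HashMap<>();
-
-      public synchronized OutstandingRequest addRequest(String node, int roleId) {
-        int id = nextRequestId++ & 0xffffff;   // 24-bit rolling counter
-        OutstandingRequest request = new OutstandingRequest(roleId, id, node);
-        requestMap.put(id, request);
-        return request;
-      }
-
-      public synchronized OutstandingRequest lookup(int requestId) {
-        return requestMap.get(requestId);
-      }
-
-      public synchronized OutstandingRequest remove(int requestId) {
-        return requestMap.remove(requestId);
-      }
-    }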
-
-### AvailableNodes
-
-This is a field in `RoleHistory`
-
-    availableNodes: List<NodeInstance>[]
-
-
-For each role, lists nodes that are available for data-local allocation,
-ordered most-recently-released first, to accelerate node selection.
-
-The performance benefit is most significant when requesting multiple nodes,
-as the scan for M locations from N nodes is reduced from `M*N` comparisons
-to one sort plus M list lookups.
-
-Each list can be created off the Node Map by building for each role a sorted
-list of all Nodes which are available for an instance of that role, 
-using a comparator that places the most recently released node ahead of older
-nodes.
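-
-A sketch of that comparator, in terms of the hypothetical NodeEntry and
-NodeInstance classes sketched earlier (names assumed):
-
-    import java.util.List;
-
-    // Hypothetical sketch: order one role's candidates so that the
-    // most recently released node comes first
-    public final class AvailableNodeSorter {
-      public static void sortAvailable(List<NodeInstance> nodes, int roleId) {
-        nodes.sort((a, b) -> Long.compare(
-            b.getOrCreate(roleId).lastUsed,
-            a.getOrCreate(roleId).lastUsed));
-      }
-    }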
-
-This list is not persisted -when a Slider Cluster is frozen it is moot, and when
-an AM is restarted this structure will be rebuilt.
-
-1. When a node is needed for a new request, this list is consulted first.
-1. After the request is issued it can be removed from the list
-1. Whenever a container is released, if the node is now available for
-requests for that role, it should be added to the front
-of the list for that role.
-
-If the list is empty during a container request operation, it means
-that the Role History does not know of any nodes
-in the cluster that have hosted instances of that role and which are not
-in use. There are then two possible strategies to select a node:
-
-1. Ask for an instance anywhere in the cluster (policy in Slider 0.5)
-1. Search the node map to identify other nodes which are (now) known about,
-but which are not hosting instances of a specific role -this can be used
-as the target for the next resource request.
-
-Strategy #1 is simpler; Strategy #2 *may* decrease the affinity in the cluster,
-as the AM will be explicitly requesting an instance on a node which it knows
-is not running an instance of that role.
-
-
-#### ISSUE: What to do about failing nodes?
-
-Should a node whose container just failed be placed at the
-top of the stack, ready for the next request? 
-
-If the container failed due to an unexpected crash in the application, asking
-for that container back *is the absolute right strategy* -it will bring
-back a new role instance on that machine. 
-
-If the container failed because the node is now offline, the container request 
-will not be satisfied by that node.
-
-If there is a problem with the node, such that containers repeatedly fail on it,
-then re-requesting containers on it will amplify the damage.
-
-## Actions
-
-### Bootstrap
-
-1. Persistent Role History file not found; empty data structures created.
-
-### Thaw
-
-When thawing, the Role History should be loaded -if it is missing Slider
-must revert to the bootstrap actions.
-
-If found, the Role History will contain Slider's view of the Slider Cluster's
-state at the time the history was saved, explicitly recording the last-used
-time of all nodes no longer hosting a role's container. By noting which nodes
-were actually hosting containers, it implicitly notes which nodes have a `last_used`
-value greater than any of the `last_used` fields persisted in the file. That is:
-all node entries listed as having active nodes at the time the history was
-saved must have more recent data than those nodes listed as inactive.
-
-When rebuilding the data structures, the fact that nodes were active at
-save time must be converted into the data that indicates that the nodes
-were at least in use *at the time the data was saved*. The state of the cluster
-after the last save is unknown.
-
-1: Role History loaded; failure => Bootstrap.
-2: Future: if the persisted role list differs from the current role enum,
-remapping could take place. Until then: fail.
-3: Set the last_used time of every node entry active at save time to the save
-time itself, and purge entries older than a threshold:
-
-    //define a threshold, e.g. one week before the save
-    threshold = rolehistory.saveTime - 7 * 24 * 60 * 60 * 1000
-
-
-    for (clusterId, clusterNode) in rolehistory.clusterNodes().entries() :
-      for (role, nodeEntry) in clusterNode.getNodeEntries():
-        nodeEntry.requested = 0
-        nodeEntry.releasing = 0
-        if nodeEntry.active > 0 :
-          nodeEntry.last_used = rolehistory.saveTime
-        nodeEntry.active = 0
-        if nodeEntry.last_used < threshold :
-          clusterNode.remove(role)
-        else:
-          availableNodes[role].add(clusterId)
-      if clusterNode.getNodeEntries() isEmpty :
-        rolehistory.clusterNodes.remove(clusterId)
-
-
-    for availableNode in availableNodes:
-      sort(availableNode, new last_used_comparator())
-
-After this operation, the structures have been purged of all out-of-date entries,
-and the available node list contains a sorted list of the remainder.
-
-### AM Restart
-
-
-1: Create the initial data structures as in the thaw operation.
-2: Update the structure with the list of live containers, removing their nodes
-from the list of available nodes:
-
-    now = time()
-    activeContainers = RM.getActiveContainers()
-
-    for container in activeContainers:
-       nodeId = container.nodeId
-       clusterNode = roleHistory.nodemap.getOrCreate(nodeId)
-       role = extractRoleId(container.getPriority())
-       nodeEntry = clusterNode.getOrCreate(role)
-       nodeEntry.active++
-       nodeEntry.last_used = now
-       availableNodes[role].remove(nodeId)
-
-There's no need to resort the available node list -all that has happened
-is that some entries have been removed.
-
-
-**Issue**: what if requests come in for a `(role, requestID)` from
-the previous instance of the AM? Could we just always set the initial
-requestId counter to a random number and hope the collision rate is very, very
-low (roughly #(outstanding_requests)/2^24)? If YARN-1041 ensures that
-a restarted AM does not receive outstanding requests, this issue goes away.
-
-
-### Teardown
-
-1. If dirty, save role history to its file.
-1. Issue release requests.
-1. Maybe update data structures on responses, but do not mark Role History
-as dirty or flush it to disk.
-
-This strategy is designed to eliminate the expectation that there will ever
-be a clean shutdown -and so the startup-time code should not expect
-the Role History to have been written during shutdown. Instead the code
-should assume that the history was saved to disk at some point during the life
-of the Slider Cluster -ideally after the most recent change- and that the information
-in it is only an approximation of what the previous state of the cluster was.
-
-### Flex: Requesting a container in role `role`
-
-
-    node = availableNodes[roleId].pop() 
-    if node != null :
-      node.nodeEntry[roleId].requested++;
-    outstanding = outstandingRequestTracker.addRequest(node, roleId)
-    request.node = node
-    request.priority = outstanding.priority
-      
-    //update existing Slider role status
-    roleStatus[roleId].incRequested();
-      
-
-There is a bias here towards previous nodes, even if the number of nodes
-in the cluster has changed. This is why a node is picked where
-`active - releasing == 0 and requested == 0`, rather than where it simply has the lowest
-value of `active + requested - releasing`: if there is no node in the nodemap that
-is not running an instance of that role, it is left to the RM to decide where
-the role instance should be instantiated.
-
-This bias towards previously used nodes also means that (lax) requests
-will be made of nodes that are currently unavailable either because they
-are offline or simply overloaded with other work. In such circumstances,
-the node will have an active count of zero -so the search will find these
-nodes and request them -even though the requests cannot be satisfied.
-As a result, the request will be downgraded to a rack-local or cluster-wide
-request -an acceptable degradation on a cluster where all the other entries
-in the nodemap have instances of that specific role -but not when there are
-empty nodes.
-
-
-#### Solutions
-
-1. Add some randomness in the search of the datastructure, rather than simply
-iterate through the values. This would prevent the same unsatisfiable
-node from being requested first.
-
-1. Keep track of requests, perhaps through a last-requested counter -and use
-this in the selection process. This would radically complicate the selection
-algorithm, and would not even distinguish "node recently released that was
-also the last requested" from "node that has not recently satisfied requests
-even though it was recently requested".
-  
-1. Keep track of requests that weren't satisfied, so as to identify a node that
-isn't currently satisfying requests.
-
-
-#### History Issues 
-
-Without using that history, there is a risk that a very old assignment
-is used in place of a recent one, and the value of locality is decreased.
-
-But there are consequences:
-
-**Performance**:
-
-Using the history to pick a recent node may increase selection times on a
-large cluster, as for every instance needed, a scan of all nodes in the
-nodemap is required (unless there is some clever bulk assignment list being built
-up), or a sorted version of the nodemap is maintained, with a node placed
-at the front of this list whenever it is updated.
-
-**Thaw-time problems**
-
-There is also the risk that while thawing, the `rolehistory.saved`
-flag may be updated while the cluster flex is in progress, so making the saved
-nodes appear out of date. Perhaps the list of recently released nodes could
-be rebuilt at thaw time.
-
-The proposed `recentlyReleasedList` addresses this, though it creates
-another data structure to maintain and rebuild at cluster thaw time
-from the last-used fields in the node entries.
-
-### AM Callback: onContainersAllocated
-
-    void onContainersAllocated(List<Container> allocatedContainers) 
-
-This is the callback received when containers have been allocated.
-Due to (apparently) race conditions, the AM may receive duplicate
-container allocations -Slider already has to recognize this and 
-currently simply discards any surplus.
-
-If the AM tracks outstanding requests made for specific hosts, it
-will need to correlate allocations with the original requests, so as to decrement
-the node-specific request count. Decrementing the request count
-on the allocated node will not work, as the allocation may not be
-to the node originally requested.
-
-    assignments = []
-    operations =  []
-    for container in allocatedContainers:
-      cid = container.getId();
-      roleId = container.priority & 0xff
-      nodeId = container.nodeId
-      outstanding = outstandingRequestTracker.remove(container.priority)
-      roleStatus = lookupRoleStatus(container);
-      roleStatus.decRequested();
-      allocated = roleStatus.incActual();
-      if outstanding == null || allocated > desired :
-        operations.add(new ContainerReleaseOperation(cid))
-        surplusNodes.add(cid);
-        surplusContainers++
-        roleStatus.decActual();
-      else:
-        assignments.add(new ContainerAssignment(container, roleId))
-        node = nodemap.getOrCreate(nodeId)
-        nodeentry = node.get(roleId)
-        if nodeentry == null :
-          nodeentry = new NodeEntry()
-          node[roleId] = nodeentry
-          nodeentry.active = 1
-        else:
-          if nodeentry.requested > 0 :
-            nodeentry.requested--
-          nodeentry.active++
-        nodemap.dirty = true
-    
-        // work back from the outstanding request to the node
-        // on which the request was originally made (outstanding is
-        // non-null here; its node may be null for untargeted requests)
-        requestedNode = outstanding.node
-        if requestedNode != null:
-          reqNode = nodemap.get(requestedNode)
-          reqNodeEntry = reqNode.get(roleId)
-          reqNodeEntry.requested--
-          if reqNodeEntry.available() :
-            availableNodes[roleId].insert(reqNode)
-
-
- 
-1. At end of this, there is a node in the nodemap, which has recorded that
-there is now an active node entry for that role. The outstanding request has
-been removed.
-
-1. If a callback comes in for which there is no outstanding request, it is rejected
-(logged, ignored, etc). This handles duplicate responses as well as any
-other sync problem.
-
-1. The node selected for the original request has its request for a role instance
-decremented, so that it may be viewed as available again. The node is also
-re-inserted into the AvailableNodes list -not at its head, but at its position
-in the total ordering of the list.
- 
-### NMClientAsync Callback: onContainerStarted()
-
-
-    onContainerStarted(ContainerId containerId)
- 
-The AM uses this as a signal to remove the container from the list
-of starting containers, moving it into the map of live nodes; the counters
-in the associated `RoleInstance` are updated accordingly, and the node entry
-is adjusted to indicate it has one more live node and one less starting node.
-
- 
-### NMClientAsync Callback: onContainerStartFailed()
-
-
-The AM uses this as a signal to remove the container from the list
-of starting containers -the count of starting containers for the relevant
-NodeEntry is decremented. If the node is now available for instances of this
-role, it is returned to the queue of available nodes.
-
-
-### Flex: Releasing a role instance from the cluster
-
-Simple strategy: find a node with at least one active container
-
-    select a node N in nodemap where for NodeEntry[roleId]: active > releasing; 
-    nodeentry = N.get(roleId)
-    nodeentry.releasing++;
-
-Advanced Strategy:
-
-    Scan through the map looking for a node where active > 1 && active > releasing.
-    If none are found, fall back to the previous strategy.
-
-This is guaranteed to release a container on any node with >1 container in use,
-if such a node exists. If not, the scan time has increased to #(nodes).
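-
-A sketch of that scan, reusing the hypothetical NodeEntry/NodeInstance classes
-from earlier sections (not the actual implementation):
-
-    // Hypothetical sketch: prefer a node running more than one instance
-    // of the role; otherwise fall back to any node with a releasable one
-    public final class ReleaseSelector {
-      public static NodeInstance pickNodeToRelease(int roleId,
-          Iterable<NodeInstance> nodes) {
-        NodeInstance fallback = null;
-        for (NodeInstance node : nodes) {
-          NodeEntry entry = node.get(roleId);
-          if (entry == null) continue;
-          int releasable = entry.active - entry.releasing;
-          if (entry.active > 1 && releasable > 0) return node;
-          if (fallback == null && releasable > 0) fallback = node;
-        }
-        return fallback;  // simple strategy: first node with a releasable container
-      }
-    }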
-
-Once a node has been identified:
-
-1. A container on it is located (via the existing container map). This container
-must be of the target role, and not already be queued for release.
-1. A release operation is queued, triggering a request to the RM.
-1. The (existing) `containersBeingReleased` Map has the container inserted into it.
-
-After the AM processes the request, it triggers a callback.
- 
-### AM callback: onContainersCompleted
-
-    void onContainersCompleted(List<ContainerStatus> completedContainers)
-
-This callback returns a list of containers that have completed.
-
-These need to be split into successful completion of a release request
-and containers which have failed. 
-
-This is currently done by tracking which containers have been queued
-for release, as well as which were rejected as surplus before even having
-any role allocated onto them.
-
-A container is considered to have failed if it was an active container which
-has completed even though it wasn't on the list of containers to release.
-
-    shouldReview = false
-    for container in completedContainers:
-      containerId = container.containerId
-      nodeId = container.nodeId
-      node = nodemap.get(nodeId)
-      if node == null :
-        // unknown node
-        continue
-      // the role must be looked up via the container (e.g. from the
-      // existing containerId -> RoleInstance map); pseudocode:
-      roleId = roleIdOf(containerId)
-      nodeentry = node.get(roleId)
-      nodeentry.active--
-      nodemap.dirty = true
-      if getContainersBeingReleased().containsKey(containerId) :
-        // handle container completion
-        nodeentry.releasing--
-         
-        // update existing Slider role status
-        roleStatus[roleId].decReleasing();
-        containersBeingReleased.remove(containerId)
-      else: 
-        //failure of a live node
-        roleStatus[roleId].decActual();
-        shouldReview = true
-            
-      if nodeentry.available():
-        nodeentry.last_used = now()
-        availableNodes[roleId].insert(node)      
-      //trigger a comparison of actual vs desired
-    if shouldReview :
-      reviewRequestAndReleaseNodes()
-
-By calling `reviewRequestAndReleaseNodes()` the AM triggers
-a re-evaluation of how many instances of each role the cluster has, and how many
-it needs. If a container has failed and that freed up all role instances
-on that node, it will have been inserted at the front of the `availableNodes` list.
-As a result, it is highly likely that a new container will be requested on
-the same node. (The only way a node in the list would be newer is
-if other containers were completed in the same callback.)
-
-
-
-### Implementation Notes ###
-
-Notes made while implementing the design.
-
-`OutstandingRequestTracker` should also track requests made with
-no target node; this makes seeing what is going on easier. `AMRMClientImpl`
-is doing something similar, on a priority-by-priority basis -if many
-requests are made, each with their own priority, that base class's hash tables
-may get overloaded (it assumes a limited set of priorities).
-
-Access to the role history datastructures was restricted to avoid
-synchronization problems. Protected access is permitted so that a
-test subclass can examine (and change?) the internals.
-
-`NodeEntry` instances need to add a `launching` value separate from `active`, so that
-when looking for nodes to release, no attempt is made to release
-a node that has been allocated but is not yet live.
-
-We can't reliably map from a request to a response. Does that matter?
-If we issue a request for a host and it comes in on a different port, do we
-care? Yes -but only because we are trying to track nodes which have requests
-outstanding so as not to issue new ones. But if we just pop the entry
-off the available list, that becomes moot.
-
-Proposal: don't track the requesting numbers in the node entries, just
-in the role status fields.
-
-But: this means that we never re-insert nodes onto the available list if a
-node on them was requested but not satisfied.
-
-Other issues: should we place nodes on the available list as soon as all the entries
-have been released? i.e. before YARN has replied.
-
-RoleStats were removed -left in app state. Although the rolestats would
-belong here, leaving them where they were reduced the amount of change
-in the `AppState` class, and so the risk of something breaking.
-
-## MiniYARNCluster node IDs
-
-Mini YARN cluster NodeIDs all share the same hostname, at least when running
-against file://; so mini tests with >1 NM don't have a 1:1 mapping of
-`NodeId:NodeInstance`. What will happen is that
-`NodeInstance getOrCreateNodeInstance(Container container)`
-will always return the same (now shared) `NodeInstance`.
-
-## Releasing Containers when shrinking a cluster
-
-When identifying instances to release in a bulk downscale operation, the full
-list of targets must be identified together. This is not just to eliminate
-multiple scans of the data structures, but because the containers are not
-released until the queued list of actions are executed -the nodes' release-in-progress
-counters will not be incremented until after all the targets have been identified.
-
-It also needs to handle the scenario where there are many role instances on a
-single server -it should prioritize those servers.
-
-
-The NodeMap/NodeInstance/NodeEntry structure is adequate for identifying nodes,
-at least provided there is a 1:1 mapping of hostname to NodeInstance. But it
-is not enough to track containers in need of release: the AppState needs
-to be able to work backwards from a NodeEntry to container(s) stored there.
-
-The `AppState` class currently stores this data in a `ConcurrentMap<ContainerId, RoleInstance>`
-
-To map from NodeEntry/NodeInstance to containers to delete, means that either
-a new datastructure is created to identify containers in a role on a specific host
-(e.g. a list of ContainerIds under each NodeEntry), or we add an index reference
-in a RoleInstance that identifies the node. We already effectively have that
-in the container.
-
-### Dropping any available nodes that are busy
-
-When scanning the available list, any nodes that are no longer idle for that
-role should be dropped from the list.
-
-This can happen when an instance was allocated on a different node from
-that requested.
-
-### Finding a node when a role has instances in the cluster but nothing known to be available
-
-One condition found during testing is the following: 
-
-1. A role has one or more instances running in the cluster
-1. A role has no entries in its available list: there is no history of the
-role ever being on nodes other than those currently in use.
-1. A new instance is requested.
-
-In this situation, the `findNodeForNewInstance` method returns null: there
-is no recommended location for placement. However, this is untrue: all
-nodes in the cluster *other* than those in use are the recommended nodes.
-
-It would be possible to build up a list of all known nodes in the cluster that
-are not running this role and use that in the request, effectively telling the
-RM to pick one of the idle nodes. By not doing so, we increase the probability
-that another instance of the same role will be allocated on a node in use,
-a probability which (were there capacity on these nodes and placement random) would be
-`1/(clustersize-roleinstances)`. The smaller the cluster and the bigger the
-application, the higher the risk.
-
-This could be revisited, if YARN does not support anti-affinity between new
-requests at a given priority and existing ones: the solution would be to
-issue a relaxed placement request listing all nodes that are in the NodeMap and
-which are not running an instance of the specific role. [To be even more rigorous,
-the request would have to omit those nodes for which an allocation has already been
-made off the available list and yet for which no container has yet been
-granted]. 
-
-
-## Reworked Outstanding Request Tracker
-
-The reworked request tracker behaves as follows:
-
-1. Outstanding requests with specific placements are tracked by `(role, hostname)`.
-1. Container assignments are resolved against the same parameters.
-1. If found: that request is considered satisfied, *irrespective of whether or not
-the request that satisfied the allocation was the one that requested that location*.
-1. When all instances of a specific role have been allocated, the hostnames of
-all outstanding requests are returned to the available node list, on the basis
-that they have been satisfied elsewhere in the YARN cluster. This list is
-then sorted.
-
-This strategy returns unused hosts to the list of possible hosts, while retaining
-the ordering of that list in most-recent-first.
-
-### Weaknesses
-
-If one or more container requests cannot be satisfied, then all the hosts in
-the set of outstanding requests will be retained, so all these hosts
-will be considered unavailable for new location-specific requests.
-This may imply that new requests that could be explicitly placed will now only
-be randomly placed -however, it is moot on the basis that if there are outstanding
-container requests, it means the RM cannot grant resources: new requests at the
-same priority (i.e. same Slider Role ID) will not be granted either.
-
-The only scenario where this would be different is if the resource requirements
-of instances of the target role were decreased during a cluster flex such that
-the placement could now be satisfied on the target host. This is not considered
-a significant problem.
-
-# Persistence
-
-The initial implementation uses the Avro JSON encoding; while significantly
-less efficient than a binary format, it is human-readable.
-
-Here is a sequence of entries from a test run on a single-node cluster, running one HBase Master
-and two Region Servers.
-
-Initial save; the instance of Role 1 (HBase master) is live, Role 2 (RS) is not.
-
-    {"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183475949,"savedx":"14247c3aeed","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":0}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}}
-  
-At least one RS is live: 
-  
-    {"entry":{"org.apache.hoya.avro.RoleHistoryFooter":{"count":2}}}{"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183476010,"savedx":"14247c3af2a","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":0}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":true,"last_used":0}}}
-
-Another entry is saved -presumably the second RS is now live, which triggered another write.
-  
-    {"entry":{"org.apache.hoya.avro.RoleHistoryFooter":{"count":2}}}{"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183476028,"savedx":"14247c3af3c","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":0}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":true,"last_used":0}}}
-
-At this point the cluster was frozen and thawed. Slider does not save the cluster state
-at freeze time, but does so as the state is rebuilt at thaw time.
-
-When the cluster is restarted, every node that was active for a role at the time the file was saved `1384183476028`
-is given a last_used timestamp of that time. 
-
-When the history is next saved, the master has come back up on the (single) node;
-it is active, while its `last_used` timestamp is that of the previous file.
-No Region Servers are yet live.
-
-    {"entry":{"org.apache.hoya.avro.RoleHistoryFooter":{"count":2}}}{"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183512173,"savedx":"14247c43c6d","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":1384183476028}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":1384183476028}}}
-
-Here a Region Server is live:
-
-    {"entry":{"org.apache.hoya.avro.RoleHistoryFooter":{"count":2}}}{"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183512199,"savedx":"14247c43c87","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":1384183476028}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":true,"last_used":1384183476028}}}
-
-And here, another Region Server has started. This does not actually change the contents of the file:
-
-    {"entry":{"org.apache.hoya.avro.RoleHistoryFooter":{"count":2}}}{"entry":{"org.apache.hoya.avro.RoleHistoryHeader":{"version":1,"saved":1384183512217,"savedx":"14247c43c99","roles":3}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":true,"last_used":1384183476028}}}
-    {"entry":{"org.apache.hoya.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":true,"last_used":1384183476028}}}
-
-The `last_used` timestamps will not be changed until the cluster is shrunk or thawed, as the `active` flag being set
-implies that the server is running both roles at the save time of `1384183512217`.
-
-## Resolved issues
-
-> How best to distinguish at thaw time from nodes used just before thawing
-from nodes used some period before? Should the RoleHistory simply forget
-about nodes which are older than some threshold when reading in the history?
-
-We just track last-used times.
-
-
-> Is there a way to avoid tracking the outstanding requests?
- 
-No.
- 
-> What will the strategy of picking the most-recently-used node do if
-that node creates the container and then fails to start it up. Do we need
-to add blacklisting too? Or actually monitor the container start time, and
-if a container hasn't been there for very long, don't pick it.
-
-Startup failures drop the node from the ready-to-use list; the node is no longer
-trusted. We don't blacklist it (yet).
-
-
-> Should we prioritise a node that was used for a long session ahead of
-a node that was used more recently for a shorter session? Maybe, but
-it complicates selection as generating a strict order of nodes gets
-significantly harder.
-
-No: you would need to start tracking aggregate execution time for the last session.
-In a stable state, all servers recorded in the history will have spread the
-data amongst them, so it's irrelevant.
diff --git a/src/site/markdown/client-configuration.md b/src/site/markdown/client-configuration.md
deleted file mode 100644
index 9b13021..0000000
--- a/src/site/markdown/client-configuration.md
+++ /dev/null
@@ -1,310 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Client Configuration
-
-This document covers how the client application is itself configured.
-
-## Summary
-
-The client application can be configured:
-
-1. On the command line, which can set client options and JVM system properties.
-1. With Hadoop-style configuration options in the file `slider-client.xml`
- in the configuration directory `conf/`.
-1. Or, if the environment variable `SLIDER_CONF_DIR` is set, in the
- file `$SLIDER_CONF_DIR/slider-client.xml`.
-1. Logging is defined in the `log4j.properties` file in the same configuration
-directory.
-1. JVM options can be defined in `SLIDER_JVM_OPTS`.
-
-The options defined in a Slider cluster configuration are only used by the client
-when creating a cluster -not for the actual client itself.
-
-## Introduction
-
-The Slider client needs to be configured to talk to a Hadoop filesystem and a
-YARN resource manager ("the RM"). In a secure cluster it needs to be told the Kerberos
-identity, the *principal* of both the HDFS namenode and the YARN RM -and it may
-also need some JVM options set in order for Java's Kerberos module to
-correctly identify itself to these services.
-
-It cannot rely on local `HADOOP_PREFIX/conf/hadoop-site.xml` and
-`$YARN_PREFIX/conf/yarn-site.xml` files -because it is designed to
-work on client machines that may not have Hadoop and YARN installed.
-
-Instead all client-side (non-JVM) options can be predefined in the
-configuration file `slider-client.xml`. 
-
-## Setting Slider JVM options
-
-Core JVM options can be set in the environment variable `SLIDER_JVM_OPTS`;
-if unset, the `bin/slider` script will use the default values that were
-current when that version of Slider was released. These values may change
-across versions.
-
-At the time of writing, the default values were:
-
-    "-Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Xmx256m -Dslider.confdir=${confdir}"
-
-To allow some Java system properties to be set without editing this
-environment variable, such system properties may be set on the Slider command
-line through the `-S` parameter. For example, the following two operations are
-equivalent in terms of setting the system property `java.security.krb5.realm`
-to the value `LOCAL`.
-
-    export SLIDER_JVM_OPTS="-Djava.security.krb5.realm=LOCAL"
-
-and
-
-    slider -S java.security.krb5.realm=LOCAL
-
-Note that the first declaration invalidates all default JVM options; if any of
-those were desired, they should be included in the new definition.
-
-Multiple system property declarations are allowed on the command line -including
-duplicate declarations. In such a case the order of assignment is undefined.
-
-For any system property that the user expects to have to issue on every command
--including any Kerberos-related properties- adding them to the JVM options
-environment variable guarantees that they are always set.
-
-## Setting Slider client options on the command line with the `-D` parameter
-
-The slider client is configured via Hadoop-style configuration options. 
-To be precise, all standard hadoop-common, hadoop-hdfs client and hadoop-yarn
-client-side options control how Slider communicates with the Hadoop YARN cluster.
-
-There are extra options specific to Slider itself, options which
-are again set as Hadoop configuration parameters.
-
-All Hadoop and Slider options can be set on the command line using the `-D`
-parameter followed by the appropriate `key=value` argument.
-
-
-For example, here is a definition of the default Hadoop filesystem:
-
-    -D fs.defaultFS=hdfs://namenode:9000
-    
-Multiple definitions are of course allowed on the command line:
- 
-    -D fs.defaultFS=hdfs://namenode:9000 -D dfs.namenode.kerberos.principal=hdfs/namenode@LOCAL
-
-Slider-specific options can be set the same way:
-
-    -D slider.kerberos.principal=
-
-If duplicate declarations are made the order of assignment is undefined.
-
-## Setting common options through specific command-line arguments
-
-Some Hadoop and Slider options are so common that they have specific
-shortcut arguments to aid their use:
-
-`-m`, `--manager`: sets the YARN resource manager. Equivalent to setting the
-`yarn.resourcemanager.address` option
-
-`--fs`, `--filesystem`: defines the filesystem. Equivalent to setting the
-`fs.defaultFS` option
-
-If these shortcuts are used and the options are also defined via `-D`
-declarations, the order of assignment is undefined.
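-
-For example (the cluster endpoints shown are illustrative):
-
-    slider list --manager resourcemanager:8032 --fs hdfs://namenode:9000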
-    
-## Defining Hadoop and Slider Options in the `slider-client.xml` file
-
-In the Slider installation, alongside the `bin/slider` script is
-a configuration directory `conf`. This contains the files:
-
-1. `log4j.properties`
-1. `slider-client.xml`
-
-The `log4j.properties` file is not covered here -it is a standard Log4J file.
-At the time of writing, this log configuration file is used on both the
-client and the server.
-
-The `slider-client.xml` file is a hadoop-formatted XML options file, which
-is read by the Slider client -but not by the Slider Application Master.
-
-Here is an example file:
-
-    <property>
-      <name>yarn.resourcemanager.address</name>
-      <value>namenode:8033</value>
-    </property>
-    
-    <property>
-      <name>fs.defaultFS</name>
-      <value>hdfs://namenode:9000</value>
-    </property>
- 
-    <property>
-      <name>ipc.client.fallback-to-simple-auth-allowed</name>
-      <value>false</value>
-    </property>
-
-
-This defines both the filesystem and the YARN RM, and so obviates the need
-to declare either on the command line.
-
-If an option is defined both in the `slider-client.xml` file and on the command line
--be it by a `-D key=value` declaration or a `--manager` or `--filesystem`
-definition- the command-line value takes priority
-(this holds even if the XML value is declared with `<final>true</final>`).
-
-## Selecting an alternate Slider configuration directory
-
-The environment variable `SLIDER_CONF_DIR` can be used to declare an alternate
-configuration directory. If set, the directory it identifies will be used
-as the source of the `log4j.properties` and `slider-client.xml` files.
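-
-For example (the directory shown is illustrative):
-
-    export SLIDER_CONF_DIR=/etc/slider/conf
-    bin/slider list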
-
-## Slider Client Configuration options
-
-As well as standard YARN and Hadoop configuration options, Slider supports
-a limited number of slider-specific configuration parameters.
-
-    <property>
-      <name>slider.zookeeper.quorum</name>
-      <value>localhost:2181,zookeeper2:4545</value>
-    </property>
-    
-    <property>
-      <name>slider.yarn.queue</name>
-      <value>default</value>
-    </property>
-    
-    <property>
-      <name>slider.security.enabled</name>
-      <value>false</value>
-    </property>
-    
-    <property>
-      <name>slider.yarn.queue.priority</name>
-      <value>1</value>
-    </property>
-
-    <property>
-      <name>slider.yarn.restart.limit</name>
-      <value>5</value>
-      <description>How many times to start/restart the Slider AM</description>
-    </property>
-    
-    <property>
-      <name>slider.cluster.directory.permissions</name>
-      <value>750</value>
-    </property>
-    
-    <property>
-      <name>slider.data.directory.permissions</name>
-      <value>750</value>
-    </property>
-
-### `slider.zookeeper.quorum` - the zookeeper quorum.
-
-This defines the zookeeper quorum for this YARN cluster. 
-
-It is used to locate the service registry, to enable running instances to publish
-information about their application, and to let clients query this.
-
-It is also used as the default zookeeper binding for any application that
-uses zookeeper in its configuration -the value set when the application is
-defined will be copied into the instance definition file.
-
-### `"slider.registry.path"` - the zookeeper path for the service registry
-
-This declares the the zookeeper path for the service registry. 
-
-### `slider.security.enabled` - enable security.
-
-This turns security on; consult [Security](security.html) for more information.
-
-
-### `slider.yarn.restart.limit` - set limit on Application Master Restarts
-
-This limits how many times YARN should start a failed application master.
-
-A short restart limit is useful when initially creating a cluster, as it
-ensures that YARN does not repeatedly try to restart a failing application.
-
-In production, however, a large number prevents YARN from halting a Slider
-application merely because failures in the underlying YARN cluster have
-triggered restarts.
-
-*Important:* The cluster-wide limit of `yarn.resourcemanager.am.max-attempts`
-places an upper limit on the number of retries that any application can request.
-If the application fails after fewer restarts than requested, check this cluster
-setting.
-
-### `slider.yarn.queue` - the name of the YARN queue for the cluster.
-
-This identifies the queue to submit the application creation request to, which can
-define the priority, resource limits and other values of an application. All
-containers created in the Slider cluster will share this same queue.
-
-Default value: `default`.
-
-### `slider.yarn.queue.priority` - the priority of the application within the queue
-
-This identifies the priority within the queue. The lower the value, the higher the
-priority.
-
-Default value: `1`.
-
-    bin/slider thaw cl1 -D slider.yarn.queue.priority=5
-
-
-
-### `slider.cluster.directory.permissions`
-
-An octal-format (`chmod`-style) permissions mask for the directory
-that contains the cluster specification `${user.home}/.slider/clusters/${clustername}`
-
-    <property>
-      <name>slider.cluster.directory.permissions</name>
-      <value>750</value>
-    </property>
-
-### `slider.data.directory.permissions`
-
-An octal-format (`chmod`-style) permissions mask for the directory
-that contains the application data `${user.home}/.slider/clusters/${clustername}/database`
-
-    <property>
-      <name>slider.data.directory.permissions</name>
-      <value>750</value>
-    </property>
-
-
-## Debugging configuration issues
-
-If the slider packages are set to log at debug level in the log4j configuration
-file, details on properties will be part of the copious output.
-
-
-## How client options are passed down to created clusters.
-
-Apart from the filesystem bindings, client configuration options are
-not passed down to the XML site specification of the created cluster.
-
-The sole options passed down are the HDFS bindings: `fs.defaultFS`,
-which is passed down both as that property and as `fs.default.name`,
-and, in a secure cluster, the security flag (`slider.security.enabled`)
-and the HDFS Kerberos principal.
-
diff --git a/src/site/markdown/configuration/core.md b/src/site/markdown/configuration/core.md
deleted file mode 100644
index 46e4b63..0000000
--- a/src/site/markdown/configuration/core.md
+++ /dev/null
@@ -1,407 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Core Configuration Specification
-
-
-## Terminology
-
-
-*Application* A single application, such as an HBase cluster. An application
-is distributed across the YARN cluster.
-
-*Component* A single executable part of the larger application. An application
-may have multiple components, and multiple instances of each component. 
-
-*YARN* Yet Another Resource Negotiator
-
-*YARN Resource Requirements* The requirements for a YARN resource request.
-Currently this consists of RAM and CPU requirements.
-
-*YARN Container*. An allocated portion of a server's resources granted
-to satisfy the requested YARN resource requirements. A process can be deployed
-to a container.
-
-
-*`resources.json`*: A file that describes the
-size of the application in terms of its component requirements: how many,
-and what their resource requirements are. 
-
-*`application.json`*: A file that describes the configuration of the
-application: the options applied to the application as a whole and to
-each of its components.
-
-## Structure
-
-Configurations are stored in well-formed JSON files.
-
-1. Text MUST be saved in the UTF-8 format.
-1. Duplicate entries MUST NOT occur in any section.
-1. The ordering of elements is NOT significant.
-
-The JSON specification files all have a similar structure:
-
-1. A `schema` string indicating the version. Currently this is temporarily set to
-
-        "http://example.org/specification/v2.0.0"
-   
-        
-1. A global section, `/global` containing string properties
-1. A components section, `/components`.
-1. 0 or more sections under `/components` for each component, identified by component name,
- containing string properties.
-1. 0 or 1 section `/metadata` containing arbitrary metadata (such as a description,
-author, or any other information that is not parsed or processed directly).
-
-
-The simplest valid specification file is 
-    
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "global": {
-      },
-      "components": {
-      }
-    }
-
-
-## Property inheritance model and *resolution*
-
-
-There is a simple global to component inheritance model.
-
-1. Properties defined in `/global` define parameters across the entire application.
-1. Properties defined in a section under `/components` define parameters for
-a specific component in the application.
-1. All global properties are propagated to each component.
-1. A component section may override any global property.
-1. The final set of configuration properties for a component is the global
-properties extended and overridden by the component's own set.
-1. The process of expanding the properties is termed *resolution*; the *resolved*
-specification is the outcome.
-1. There is NO form of explicitly cross-referencing another attribute. This
-MAY be added in future.
-1. There is NO sharing of information between the different `.json` files in
-an application configuration.
-
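-A minimal Java sketch of this resolution step (an illustrative helper, not
-Slider's actual implementation), assuming each section is just a map of
-strings:
-
-    import java.util.HashMap;
-    import java.util.Map;
-
-    public class Resolver {
-      /**
-       * Resolve one component: start from the global entries, then apply
-       * the component's own entries, which override any global value.
-       */
-      public static Map<String, String> resolve(Map<String, String> global,
-          Map<String, String> component) {
-        Map<String, String> resolved = new HashMap<>(global);
-        resolved.putAll(component);  // component values win over global ones
-        return resolved;
-      }
-    }
-
-Since every global entry is already present in a resolved component, applying
-`resolve` to an already-resolved section returns the same map -the idempotence
-noted at the end of the example below.
-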
-### Example
-
-Here is an example configuration
-
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "global": {
-        "g1": "a",
-        "g2": "b"
-      },
-      "components": {
-        "simple": {
-        },
-        "master": {
-          "name": "m",
-          "g1": "overridden"
-    
-        },
-        "worker": {
-          "name": "w",
-          "g1": "overridden-by-worker",
-          "timeout": "1000"
-        }
-      }
-    }
-    
-The `/global` section defines two properties
-
-    g1="a"
-    g2="b"
- 
-These are the values visible to any part of the application which is
-not itself one of the components. 
-
-
-There are three components defined, `simple`, `master` and `worker`.
- 
-
-#### component `simple`:
- 
-    g1="a"
-    g2="b"
-
-
-No settings have been defined specifically for the component; the global
-settings are applied.
-
-#### component `master`:
- 
-    name="m",
-    g1="overridden"
-    g2="b"
-
-A new attribute, `name`, has been defined with the value `"m"`, and the 
-global property `g1` has been overridden with the new value, `"overridden"`.
-The global property `g2` is passed down unchanged.
-
-
-#### component `worker`:
- 
-    name="w",
-    g1="overridden-by-worker"
-    g2="b"
-    timeout: "1000"
-    
-A new attribute, `name`, has been defined with the value `"w"`, and another,
-`timeout`, with the value `"1000"`.
-
-The global property `g1` has been overridden with the new value, `"overridden-by-worker"`.
-
-The global property `g2` is passed down unchanged.
-
-This example shows some key points about the design
-
-* each component gets its own map of properties, which is independent from
-  that of other components.
-* all global properties are either present or overridden by a new value.
-  They cannot be "undefined".
-* new properties defined in a component are not visible to any other component.
- 
-The final *resolved* model is as follows
-    
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "global": {
-        "g1": "a",
-        "g2": "b"
-      },
-      "components": {
-        "simple": {
-          "g1": "a",
-          "g2": "b"
-        },
-        "master": {
-          "name": "m",
-          "g1": "overridden",
-          "g2": "b"
-        },
-        "worker": {
-          "name": "m",
-          "g1": "overridden-by-worker",
-          "g2": "b",
-          "timeout": "1000"
-        }
-      }
-    }
-
-This is the specification JSON that would generate exactly the same result as
-in the example, without any propagation of data from the global section
-to individual components. 
-
-Note that a resolved specification can still have the resolution operation applied
-to it -it just does not have any effect.
- 
-## Metadata
-
-The metadata section can contain arbitrary string values for use in diagnostics
-and by other applications.
-
-To avoid conflicts with other applications, please use unique key names,
-such as Java-style package names.
-  
-# Resource Requirements: `resources.json`
-
-This file declares the resource requirements for YARN for the components
-of an application.
-
-* `instances`: the number of instances of a role desired.
-* `yarn.vcores`: the number of "virtual cores" required by a component.
-* `yarn.memory`: the number of megabytes required by a component.
-
-  
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "metadata": {
-        "description": "example of a resources file"
-      },
-      
-      "global": {
-        "yarn.vcores": "1",
-        "yarn.memory": "512"
-      },
-      
-      "components": {
-        "master": {
-          "instances": "1",
-          "yarn.memory": "1024"
-        },
-        "worker": {
-          "instances":"5"
-        }
-      }
-    }
-
-The resolved file would be
-  
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "metadata": {
-        "description": "example of a resources file"
-      },
-      
-      "global": {
-        "yarn.vcores": "1",
-        "yarn.memory": "512"
-      },
-      
-      "components": {
-        "master": {
-          "instances": "1",
-          "yarn.vcores": "1",
-          "yarn.memory": "1024"
-        },
-        "worker": {
-          "instances":"5",
-          "yarn.vcores": "1",
-          "yarn.memory": "512"
-        }
-      }
-    }
-
-This declares this deployment of the application to consist of one instance of
-the master component, using 1 vcore and 1024MB of RAM, and five instances of
-the worker component, each using one vcore and 512MB of RAM -a total of six
-component containers, 6 vcores and 3584MB, in addition to whatever the
-application master itself consumes.
-
-
-## Internal information, `internal.json`
- 
-This contains internal data related to the deployment -it is not
-intended for manual editing.
-
-There MAY be a component, `diagnostics`. If defined, its content contains
-diagnostic information for support calls, and MUST NOT be interpreted
-during application deployment (though it may be included in the generation
-of diagnostics reports).
-
-
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "metadata": {
-        "description": "Internal configuration DO NOT EDIT"
-      },
-      "global": {
-        "name": "small_cluster",
-        "application": "hdfs://cluster:8020/apps/hbase/v/1.0.0/application.tar"
-      },
-      "components": {
-    
-        "diagnostics": {
-          "create.hadoop.deployed.info": "(release-2.3.0) @dfe463",
-          "create.hadoop.build.info": "2.3.0",
-          "create.time.millis": "1393512091276",
-          "create.time": "27 Feb 2014 14:41:31 GMT"
-        }
-      }
-    }
-
-
-## Deployment specification: `app_configuration.json`
-
-
-This defines parameters that are to be used when creating the instance of the
-application, and instances of the individual components.
-    
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "global": {
-    
-        "zookeeper.port": "2181",
-        "zookeeper.path": "/yarnapps_small_cluster",
-        "zookeeper.hosts": "zoo1,zoo2,zoo3",
-        "env.MALLOC_ARENA_MAX": "4",
-        "site.hbase.master.startup.retainassign": "true",
-        "site.fs.defaultFS": "hdfs://cluster:8020",
-        "site.fs.default.name": "hdfs://cluster:8020",
-        "site.hbase.master.info.port": "0",
-        "site.hbase.regionserver.info.port": "0"
-      },
-      "components": {
-    
-        "worker": {
-          "jvm.heapsize": "512M"
-        },
-        "master": {
-          "jvm.heapsize": "512M"
-        }
-      }
-    }
-      
-The resolved specification defines the values that are passed to the
-different components.
-
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-
-      "global": {
-        "zookeeper.port": "2181",
-        "zookeeper.path": "/yarnapps_small_cluster",
-        "zookeeper.hosts": "zoo1,zoo2,zoo3",
-        "env.MALLOC_ARENA_MAX": "4",
-        "site.hbase.master.startup.retainassign": "true",
-        "site.fs.defaultFS": "hdfs://cluster:8020",
-        "site.fs.default.name": "hdfs://cluster:8020",
-        "site.hbase.master.info.port": "0",
-        "site.hbase.regionserver.info.port": "0"
-      },
-      "components": {
-    
-        "worker": {
-          "zookeeper.port": "2181",
-          "zookeeper.path": "/yarnapps_small_cluster",
-          "zookeeper.hosts": "zoo1,zoo2,zoo3",
-          "env.MALLOC_ARENA_MAX": "4",
-          "site.hbase.master.startup.retainassign": "true",
-          "site.fs.defaultFS": "hdfs://cluster:8020",
-          "site.fs.default.name": "hdfs://cluster:8020",
-          "site.hbase.master.info.port": "0",
-          "site.hbase.regionserver.info.port": "0",
-          "jvm.heapsize": "512M"
-        },
-        "master": {
-          "zookeeper.port": "2181",
-          "zookeeper.path": "/yarnapps_small_cluster",
-          "zookeeper.hosts": "zoo1,zoo2,zoo3",
-          "env.MALLOC_ARENA_MAX": "4",
-          "site.hbase.master.startup.retainassign": "true",
-          "site.fs.defaultFS": "hdfs://cluster:8020",
-          "site.fs.default.name": "hdfs://cluster:8020",
-          "site.hbase.master.info.port": "0",
-          "site.hbase.regionserver.info.port": "0",
-          "jvm.heapsize": "512M"
-        }
-      }
-    }
-    
-The `site.` properties have been passed down to each component; their
-templates may then generate local site configurations. The override model
-does not prevent any component from overriding the global configuration so
-as to create a local configuration incompatible with the global state (i.e.,
-there is no way to declare an attribute as final). It is the responsibility
-of the author of the configuration file (and their tools) to detect such issues.
diff --git a/src/site/markdown/configuration/example-app_configuration-resolved.json b/src/site/markdown/configuration/example-app_configuration-resolved.json
deleted file mode 100644
index 5b90ba9..0000000
--- a/src/site/markdown/configuration/example-app_configuration-resolved.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "global": {
-    "zookeeper.port": "2181",
-    "zookeeper.path": "/yarnapps_small_cluster",
-    "zookeeper.hosts": "zoo1,zoo2,zoo3",
-    "env.MALLOC_ARENA_MAX": "4",
-    "site.hbase.master.startup.retainassign": "true",
-    "site.fs.defaultFS": "hdfs://cluster:8020",
-    "site.fs.default.name": "hdfs://cluster:8020",
-    "site.hbase.master.info.port": "0",
-    "site.hbase.regionserver.info.port": "0"
-  },
-  "components": {
-
-    "worker": {
-      "zookeeper.port": "2181",
-      "zookeeper.path": "/yarnapps_small_cluster",
-      "zookeeper.hosts": "zoo1,zoo2,zoo3",
-      "env.MALLOC_ARENA_MAX": "4",
-      "site.hbase.master.startup.retainassign": "true",
-      "site.fs.defaultFS": "hdfs://cluster:8020",
-      "site.fs.default.name": "hdfs://cluster:8020",
-      "site.hbase.master.info.port": "0",
-      "site.hbase.regionserver.info.port": "0",
-      "jvm.heapsize": "512M"
-    },
-    "master": {
-      "zookeeper.port": "2181",
-      "zookeeper.path": "/yarnapps_small_cluster",
-      "zookeeper.hosts": "zoo1,zoo2,zoo3",
-      "env.MALLOC_ARENA_MAX": "4",
-      "site.hbase.master.startup.retainassign": "true",
-      "site.fs.defaultFS": "hdfs://cluster:8020",
-      "site.fs.default.name": "hdfs://cluster:8020",
-      "site.hbase.master.info.port": "0",
-      "site.hbase.regionserver.info.port": "0",
-      "jvm.heapsize": "512M"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-app_configuration.json b/src/site/markdown/configuration/example-app_configuration.json
deleted file mode 100644
index 489acda..0000000
--- a/src/site/markdown/configuration/example-app_configuration.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "global": {
-
-    "zookeeper.port": "2181",
-    "zookeeper.path": "/yarnapps_small_cluster",
-    "zookeeper.hosts": "zoo1,zoo2,zoo3",
-    "env.MALLOC_ARENA_MAX": "4",
-    "site.hbase.master.startup.retainassign": "true",
-    "site.fs.defaultFS": "hdfs://cluster:8020",
-    "site.fs.default.name": "hdfs://cluster:8020",
-    "site.hbase.master.info.port": "0",
-    "site.hbase.regionserver.info.port": "0"
-  },
-  "components": {
-
-    "worker": {
-      "jvm.heapsize": "512M"
-    },
-    "master": {
-      "jvm.heapsize": "512M"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-empty.json b/src/site/markdown/configuration/example-empty.json
deleted file mode 100644
index 5c05163..0000000
--- a/src/site/markdown/configuration/example-empty.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-      
-  "global": {
-  },
-  "components": {
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-internal.json b/src/site/markdown/configuration/example-internal.json
deleted file mode 100644
index 8617d1f..0000000
--- a/src/site/markdown/configuration/example-internal.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "metadata": {
-    "description": "Internal configuration DO NOT EDIT"
-  },
-  "global": {
-    "application.name": "small_cluster",
-    "application.type": "hbase",
-    "application": "hdfs://cluster:8020/apps/hbase/v/1.0.0/application.tar"
-  },
-  "components": {
-
-    "diagnostics": {
-      "create.hadoop.deployed.info": "(release-2.3.0) @dfe463",
-      "create.hadoop.build.info": "2.3.0",
-      "create.time.millis": "1393512091276",
-      "create.time": "27 Feb 2014 14:41:31 GMT"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-overridden-resolved.json b/src/site/markdown/configuration/example-overridden-resolved.json
deleted file mode 100644
index 2b810b5..0000000
--- a/src/site/markdown/configuration/example-overridden-resolved.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "global": {
-    "g1": "a",
-    "g2": "b"
-  },
-  "components": {
-    "simple": {
-      "g1": "a",
-      "g2": "b"
-    },
-    "master": {
-      "name": "m",
-      "g1": "overridden",
-      "g2": "b"
-    },
-    "worker": {
-      "name": "m",
-      "g1": "overridden-by-worker",
-      "g2": "b",
-      "timeout": "1000"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-overridden.json b/src/site/markdown/configuration/example-overridden.json
deleted file mode 100644
index 9a74143..0000000
--- a/src/site/markdown/configuration/example-overridden.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "global": {
-    "g1": "a",
-    "g2": "b"
-  },
-  "components": {
-    "simple": {
-    },
-    "master": {
-      "name": "m",
-      "g1": "overridden"
-
-    },
-    "worker": {
-      "name": "m",
-      "g1": "overridden-by-worker",
-      "timeout": "1000"
-
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/example-resources.json b/src/site/markdown/configuration/example-resources.json
deleted file mode 100644
index 06c3b54..0000000
--- a/src/site/markdown/configuration/example-resources.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "metadata": {
-    "description": "example of a resources file"
-  },
-  
-  "global": {
-    "yarn.vcores": "1",
-    "yarn.memory": "512"
-  },
-  
-  "components": {
-    "master": {
-      "instances": "1",
-      "yarn.vcores": "1",
-      "yarn.memory": "1024"
-    },
-    "worker": {
-      "instances":"5",
-      "yarn.vcores": "1",
-      "yarn.memory": "512"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/index-markdown.md b/src/site/markdown/configuration/index-markdown.md
deleted file mode 100644
index 0616dfb..0000000
--- a/src/site/markdown/configuration/index-markdown.md
+++ /dev/null
@@ -1,30 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-# Specification of an application instance, revision 2.0
-
-The specification of an application comprises
-
-1. The persistent description of an application's configuration
-1. The persistent description of the desired topology and YARN resource
-requirements.
-1. The dynamic description of the running application, including information
-on the location of components and aggregated statistics. 
-
-
-1. [Redesign](redesign.md)
-1. [Specification](specification.md)
-1. [Example: current](original-hbase.json)
-1. [Example: proposed](proposed-hbase.json)
diff --git a/src/site/markdown/configuration/index.md b/src/site/markdown/configuration/index.md
deleted file mode 100644
index ad81b4f..0000000
--- a/src/site/markdown/configuration/index.md
+++ /dev/null
@@ -1,38 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Specification of an application instance, revision 2.0
-
-The specification of an application comprises
-
-1. The persistent description of an application's configuration
-1. The persistent description of the desired topology and YARN resource
-requirements.
-1. The dynamic description of the running application, including information
-on the location of components and aggregated statistics. 
-
-The specifics of this are covered in the [Core Configuration Specification](core.md)
-
-
-## Historical References
-
-1. [Specification](specification.html)
-1. [Redesign](redesign.html)
-
-
-1. [Example: current](original-hbase.json)
-1. [Example: proposed](proposed-hbase.json)
-
diff --git a/src/site/markdown/configuration/original-hbase.json b/src/site/markdown/configuration/original-hbase.json
deleted file mode 100644
index 257caeb..0000000
--- a/src/site/markdown/configuration/original-hbase.json
+++ /dev/null
@@ -1,139 +0,0 @@
-{
-  "version": "1.0",
-  "name": "test_cluster_lifecycle",
-  "type": "hbase",
-  "state": 3,
-  "createTime": 1393512091276,
-  "updateTime": 1393512117286,
-  "originConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot",
-  "generatedConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/generated",
-  "dataPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database",
-  "options": {
-    "zookeeper.port": "2181",
-    "site.hbase.master.startup.retainassign": "true",
-    "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
-    "site.fs.defaultFS": "hdfs://sandbox:8020",
-    "slider.container.failure.threshold": "5",
-    "site.fs.default.name": "hdfs://sandbox:8020",
-    "slider.cluster.directory.permissions": "0770",
-    "slider.am.monitoring.enabled": "false",
-    "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
-    "slider.tmp.dir": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/tmp/am",
-    "slider.data.directory.permissions": "0770",
-    "zookeeper.hosts": "sandbox",
-    "slider.container.failure.shortlife": "60"
-  },
-  "info": {
-    "create.hadoop.deployed.info": "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
-    "create.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-    "create.hadoop.build.info": "2.3.0",
-    "create.time.millis": "1393512091276",
-    "create.time": "27 Feb 2014 14:41:31 GMT",
-    "slider.am.restart.supported": "false",
-    "live.time": "27 Feb 2014 14:41:56 GMT",
-    "live.time.millis": "1393512116881",
-    "status.time": "27 Feb 2014 14:42:08 GMT",
-    "status.time.millis": "1393512128726",
-    "yarn.vcores": "32",
-    "yarn.memory": "2048",
-    "status.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-    "status.hadoop.build.info": "2.3.0",
-    "status.hadoop.deployed.info": "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
-  },
-  "statistics": {
-    "worker": {
-      "containers.start.started": 0,
-      "containers.live": 0,
-      "containers.start.failed": 0,
-      "containers.active.requests": 0,
-      "containers.failed": 0,
-      "containers.completed": 0,
-      "containers.desired": 0,
-      "containers.requested": 0
-    },
-    "slider": {
-      "containers.unknown.completed": 0,
-      "containers.start.started": 0,
-      "containers.live": 1,
-      "containers.start.failed": 0,
-      "containers.failed": 0,
-      "containers.completed": 0,
-      "containers.surplus": 0
-    },
-    "master": {
-      "containers.start.started": 0,
-      "containers.live": 0,
-      "containers.start.failed": 0,
-      "containers.active.requests": 0,
-      "containers.failed": 0,
-      "containers.completed": 0,
-      "containers.desired": 0,
-      "containers.requested": 0
-    }
-  },
-  "status": {
-  },
-  "instances": {
-    "slider": [ "container_1393511571284_0002_01_000001" ]
-  },
-  "roles": {
-    "worker": {
-      "yarn.memory": "768",
-      "env.MALLOC_ARENA_MAX": "4",
-      "role.instances": "0",
-      "role.requested.instances": "0",
-      "role.name": "worker",
-      "role.failed.starting.instances": "0",
-      "role.actual.instances": "0",
-      "jvm.heapsize": "512M",
-      "yarn.vcores": "1",
-      "role.releasing.instances": "0",
-      "role.failed.instances": "0",
-      "app.infoport": "0"
-    },
-    "slider": {
-      "yarn.memory": "256",
-      "env.MALLOC_ARENA_MAX": "4",
-      "role.instances": "1",
-      "role.requested.instances": "0",
-      "role.name": "slider",
-      "role.failed.starting.instances": "0",
-      "role.actual.instances": "1",
-      "jvm.heapsize": "256M",
-      "yarn.vcores": "1",
-      "role.releasing.instances": "0",
-      "role.failed.instances": "0"
-    },
-    "master": {
-      "yarn.memory": "1024",
-      "env.MALLOC_ARENA_MAX": "4",
-      "role.instances": "0",
-      "role.requested.instances": "0",
-      "role.name": "master",
-      "role.failed.starting.instances": "0",
-      "role.actual.instances": "0",
-      "jvm.heapsize": "512M",
-      "yarn.vcores": "1",
-      "role.releasing.instances": "0",
-      "role.failed.instances": "0",
-      "app.infoport": "0"
-    }
-  },
-  "clientProperties": {
-    "fs.defaultFS": "hdfs://sandbox:8020",
-    "hbase.cluster.distributed": "true",
-    "hbase.master.info.port": "0",
-    "hbase.master.port": "0",
-    "hbase.master.startup.retainassign": "true",
-    "hbase.regionserver.hlog.tolerable.lowreplication": "1",
-    "hbase.regionserver.info.port": "0",
-    "hbase.regionserver.port": "0",
-    "hbase.rootdir": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database",
-    "hbase.tmp.dir": "./hbase-tmp",
-    "hbase.zookeeper.property.clientPort": "2181",
-    "hbase.zookeeper.quorum": "sandbox",
-    "slider.template.origin": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot/hbase-site.xml",
-    "slider.unused.option": "1",
-    "zookeeper.znode.parent": "/yarnapps_slider_stevel_test_cluster_lifecycle"
-  }
-}
diff --git a/src/site/markdown/configuration/proposed-hbase.json b/src/site/markdown/configuration/proposed-hbase.json
deleted file mode 100644
index c4f637f..0000000
--- a/src/site/markdown/configuration/proposed-hbase.json
+++ /dev/null
@@ -1,273 +0,0 @@
-{
-  "version": "2.0.0",
-  "name": "test_cluster_lifecycle",
-  "valid`": true,
-  
-  "slider-internal":{
-    "type": "hbase",
-    "createTime": 1393512091276,
-    "updateTime": 1393512117286,
-    "originConfigurationPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/snapshot",
-    "generatedConfigurationPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/generated",
-    "dataPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/database",
-    "slider.tmp.dir": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/tmp/am",
-    "slider.cluster.directory.permissions": "0770",
-    "slider.data.directory.permissions": "0770"
-  },
-  
-  "options": {
-    "slider.am.monitoring.enabled": "false",
-    "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
-    "slider.container.failure.threshold": "5",
-    "slider.container.failure.shortlife": "60",
-    "zookeeper.port": "2181",
-    "zookeeper.path": "/yarnapps_slider_slider_test_cluster_lifecycle",
-    "zookeeper.hosts": "sandbox",
-    "site.hbase.master.startup.retainassign": "true",
-    "site.fs.defaultFS": "hdfs://sandbox:8020",
-    "site.fs.default.name": "hdfs://sandbox:8020",
-    "env.MALLOC_ARENA_MAX": "4",
-    "site.hbase.master.info.port": "0",
-    "site.hbase.regionserver.info.port": "0"
-  },
-  
-  "diagnostics": {
-    "create.hadoop.deployed.info": "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
-    "create.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by slider",
-    "create.hadoop.build.info": "2.3.0",
-    "create.time.millis": "1393512091276",
-    "create.time": "27 Feb 2014 14:41:31 GMT"
-  },
-  
-  "info": {
-    "slider.am.restart.supported": "false",
-    "live.time": "27 Feb 2014 14:41:56 GMT",
-    "live.time.millis": "1393512116881",
-    "status.time": "27 Feb 2014 14:42:08 GMT",
-    "status.time.millis": "1393512128726",
-    "yarn.vcores": "32",
-    "yarn.memory": "2048",
-    "status.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by slider",
-    "status.hadoop.build.info": "2.3.0",
-    "status.hadoop.deployed.info": "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
-  },
-
-  "statistics": {
-
-    "cluster": {
-      "containers.unknown.completed": 0,
-      "containers.start.completed": 3,
-      "containers.live": 1,
-      "containers.start.failed": 0,
-      "containers.failed": 0,
-      "containers.completed": 0,
-      "containers.surplus": 0
-
-    },
-    "roles": {
-      "worker": {
-        "containers.start.completed": 0,
-        "containers.live": 2,
-        "containers.start.failed": 0,
-        "containers.active.requests": 0,
-        "containers.failed": 0,
-        "containers.completed": 0,
-        "containers.desired": 2,
-        "containers.requested": 0
-      },
-      "master": {
-        "containers.start.completed": 0,
-        "containers.live": 1,
-        "containers.start.failed": 0,
-        "containers.active.requests": 0,
-        "containers.failed": 0,
-        "containers.completed": 0,
-        "containers.desired": 1,
-        "containers.requested": 0
-      }
-    }
-  },
-
-  "instances": {
-    "slider": [ "container_1393511571284_0002_01_000001" ],
-    "master": [ "container_1393511571284_0002_01_000003" ],
-    "worker": [ 
-      "container_1393511571284_0002_01_000002",
-      "container_1393511571284_0002_01_000004"
-    ]
-  },
-  
-  "roles": {
-    "worker": {
-      "yarn.memory": "768",
-      "role.instances": "0",
-      "role.name": "worker",
-      "jvm.heapsize": "512M",
-      "yarn.vcores": "1"
-    },
-    "slider": {
-      "yarn.memory": "256",
-      "role.instances": "1",
-      "role.name": "slider",
-      "jvm.heapsize": "256M",
-      "yarn.vcores": "1"
-    },
-    "master": {
-      "yarn.memory": "1024",
-      "role.instances": "0",
-      "role.name": "master",
-      "jvm.heapsize": "512M",
-      "yarn.vcores": "1"
-    }
-  },
-
-
-  "clientProperties": {
-    "fs.defaultFS": "hdfs://sandbox:8020",
-    "hbase.cluster.distributed": "true",
-    "hbase.master.info.port": "0",
-    "hbase.master.port": "0",
-    "hbase.master.startup.retainassign": "true",
-    "hbase.regionserver.hlog.tolerable.lowreplication": "1",
-    "hbase.regionserver.info.port": "0",
-    "hbase.regionserver.port": "0",
-    "hbase.rootdir": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/database",
-    "hbase.tmp.dir": "./hbase-tmp",
-    "hbase.zookeeper.property.clientPort": "2181",
-    "hbase.zookeeper.quorum": "sandbox",
-    "zookeeper.znode.parent": "/yarnapps_slider_slider_test_cluster_lifecycle"
-  },
-
-
-  "clientfiles": {
-    "hbase-site.xml": "site information for HBase",
-    "log4.properties": "log4.property file"
-  },
-
-  "provider":{
-    "load":0.4,
-    "urls": {
-      "master": ["http://node4:28209"],
-      "worker": ["http://node4:28717", "http://node6:31268"]
-    }
-  },
-
-  "status": {
-    "live": {
-      "worker": {
-        "container_1394032374441_0001_01_000003": {
-          "name": "container_1394032374441_0001_01_000003",
-          "role": "worker",
-          "roleId": 1,
-          "createTime": 1394032384451,
-          "startTime": 1394032384503,
-          "released": false,
-          "host": "192.168.1.88",
-          "state": 3,
-          "exitCode": 0,
-          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
-          "diagnostics": "",
-          "environment": [
-            "HADOOP_USER_NAME=\"slider\"",
-            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
-            "HBASE_HEAPSIZE=\"256\"",
-            "MALLOC_ARENA_MAX=\"4\"",
-            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
-          ]
-        },
-        "container_1394032374441_0001_01_000002": {
-          "name": "container_1394032374441_0001_01_000002",
-          "role": "worker",
-          "roleId": 1,
-          "createTime": 1394032384451,
-          "startTime": 1394032384552,
-          "released": false,
-          "host": "192.168.1.86",
-          "state": 3,
-          "exitCode": 0,
-          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
-          "diagnostics": "",
-          "environment": [
-            "HADOOP_USER_NAME=\"slider\"",
-            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
-            "HBASE_HEAPSIZE=\"256\"",
-            "MALLOC_ARENA_MAX=\"4\"",
-            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
-          ]
-        }
-      },
-      "slider": {
-        "container_1394032374441_0001_01_000001": {
-          "name": "container_1394032374441_0001_01_000001",
-          "role": "slider",
-          "roleId": 0,
-          "createTime": 0,
-          "startTime": 0,
-          "released": false,
-          "host": "slider-8.local",
-          "state": 3,
-          "exitCode": 0,
-          "command": "",
-          "diagnostics": ""
-        }
-      },
-      "master": {
-        "container_1394032374441_0001_01_000004": {
-          "name": "container_1394032374441_0001_01_000004",
-          "role": "master",
-          "roleId": 2,
-          "createTime": 1394032384451,
-          "startTime": 1394032384573,
-          "released": false,
-          "host": "192.168.1.86",
-          "state": 3,
-          "exitCode": 0,
-          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR master start 1><LOG_DIR>/master.txt 2>&1 ; ",
-          "diagnostics": "",
-          "environment": [
-            "HADOOP_USER_NAME=\"slider\"",
-            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
-            "HBASE_HEAPSIZE=\"256\"",
-            "MALLOC_ARENA_MAX=\"4\"",
-            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
-          ]
-        }
-      }
-    },
-    "failed": {
-      
-    },
-
-    "rolestatus": {
-      "worker": {
-        "role.instances": "2",
-        "role.requested.instances": "0",
-        "role.failed.starting.instances": "0",
-        "role.actual.instances": "2",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "1"
-      },
-      "slider": {
-        "role.instances": "1",
-        "role.requested.instances": "0",
-        "role.name": "slider",
-        "role.actual.instances": "1",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "0"
-      },
-      "master": {
-        "role.instances": "1",
-        "role.requested.instances": "1",
-        "role.name": "master",
-        "role.failed.starting.instances": "0",
-        "role.actual.instances": "0",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "0"
-      }
-    }
-  }
-
-
-
-
-}
diff --git a/src/site/markdown/configuration/redesign.md b/src/site/markdown/configuration/redesign.md
deleted file mode 100644
index e2f7046..0000000
--- a/src/site/markdown/configuration/redesign.md
+++ /dev/null
@@ -1,478 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Cluster Specification
-
-### Notation: 
-
-In this document, a full path to a value is represented as a path
-`options/zookeeper.port`; an assignment as `options/zookeeper.port=2181`.
-
-A wildcard indicates all entries matching a path: `options/zookeeper.*`
-or `/roles/*/yarn.memory`
-
-
-## History
-
-The Slider cluster specification was implicitly defined in the Java class
-`org.apache.slider.api.ClusterDescription`. It had a number of roles
-
-1. Persistent representation of cluster state
-1. Internal model of desired cluster state within the Application Master.
-1. Dynamic representation of current cluster state when the AM
-was queried, marshalled over the network as JSON.
-1. Description of updated state when reconfiguring a running cluster.
-
-Initially the dynamic status included a complete history of all containers
--this soon highlighted some restrictions on the maximum size of a JSON-formatted
-string in Hadoop's "classic" RPC: 32K, after which the string was silently
-truncated. Accordingly, this history was dropped.
-
-Having moved to Protocol Buffers as the IPC wire format, with a web view
-alongside, this history could be reconsidered.
-
-The initial design placed most values into the root entry, and relied
-on Jackson introspection to set and retrieve the values -it was a
-Java-first specification, with no external specification or regression tests.
-
-As the number of entries in the root increased, the design switched to storing
-more attributes into specific sections *under* the root path:
-
-* `info`: read-only information about the cluster.
-* `statistics`: Numeric statistics about the cluster
-
-# Sections
-
-## Root
-
-Contains various string and integer values
-
-    "version": "1.0",
-    "name": "test_cluster_lifecycle",
-    "type": "hbase",
-    "state": 3,
-    "createTime": 1393512091276,
-    "updateTime": 1393512117286,
-    "originConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot",
-    "generatedConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/generated",
-    "dataPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database",
-
-
-* `version`: version of the JSON file. Not currently used
-to validate version compatibility; at this point in time
-releases may not be able to read existing .json files.
-
-* `name`: cluster name
-* `type`: reference to the provider type -this triggers a Hadoop configuration
-property lookup to find the implementation classes.
-* `state`: an enumeration value of the cluster state.
-
-        int STATE_INCOMPLETE = 0;
-        int STATE_SUBMITTED = 1;
-        int STATE_CREATED = 2;
-        int STATE_LIVE = 3;
-        int STATE_STOPPED = 4;
-        int STATE_DESTROYED = 5;
-        
-  Only two states are persisted, "incomplete" and "created", though more
-  are used internally.
-  The `incomplete` state is used during cluster create/build,
-   allowing an incomplete JSON file to be written
-  -so minimising the window for race conditions on cluster construction.
-        
-* `createTime` and `updateTime`: timestamps, informative only.
- The `createTime` value is duplicated in `/info/createTimeMillis`
-* `originConfigurationPath`, `generatedConfigurationPath`, `dataPath` paths
-used internally -if changed the cluster may not start.
-
-*Proposed*: 
-1. Move all state bar `name` and cluster state
-into a section `/slider-internal`.
-1. The cluster state is moved from an enum to a simple
- boolean, `valid`, set to true when the cluster JSON
- has been fully constructed.
-
-## `/info`
-
-Read-only list of information about the application. Generally this is
-intended to be used for debugging and testing.
-
-### Persisted values: static information about the file history
- 
-    "info" : {
-      "create.hadoop.deployed.info" : "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
-      "create.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-      "create.hadoop.build.info" : "2.3.0",
-      "create.time.millis" : "1393512091276",
-    },
- 
-*Proposed*: move persisted info K-V pairs to a section `/diagnostics`.
- 
-### Dynamic values: 
- 
- 
- whether the AM supports service restart without killing all the containers hosting
- the role instances:
- 
-    "slider.am.restart.supported" : "false",
-    
- timestamps of the cluster going live, and when the status query was made
-    
-    "live.time" : "27 Feb 2014 14:41:56 GMT",
-    "live.time.millis" : "1393512116881",
-    "status.time" : "27 Feb 2014 14:42:08 GMT",
-    "status.time.millis" : "1393512128726",
-    
-  yarn data provided to the AM
-    
-    "yarn.vcores" : "32",
-    "yarn.memory" : "2048",
-  
-  information about the application and hadoop versions in use. Here
-  the application was built using Hadoop 2.3.0, but is running against the version
-  of Hadoop built for HDP-2.
-  
-    "status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-    "status.hadoop.build.info" : "2.3.0",
-    "status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
- 
- 
-## `instances`
-
-Information about the live containers in a cluster
-
-     "instances": {
-       "slider": [ "container_1393511571284_0002_01_000001" ],
-       "master": [ "container_1393511571284_0002_01_000003" ],
-       "worker": [ 
-         "container_1393511571284_0002_01_000002",
-         "container_1393511571284_0002_01_000004"
-       ]
-     },
-
-There's no information about location, nor is there any history about containers
-that are no longer part of the cluster (i.e. failed & released containers). 
-
-It could be possible to include a list of previous containers,
-though Slider would need to be selective about how many to store
-(or how much detail to retain) on those previous containers.
-
-Perhaps the list could be allowed to grow without limit, but detail
-only preserved on the last 100. If more containers fail than that,
-there is likely to be a problem which the most recent containers
-will also display.
-
-*Proposed* 
-
-1. Return to the full serialization of container state -but only for running containers.
-1. Have a list of failed containers, but only include the last 8; make it a
-rolling buffer. This prevents a rapidly failing role from overloading the
-status document.
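-
-A minimal Java sketch of such a rolling buffer (illustrative only, not Slider
-code; the class and names are invented):
-
-    import java.util.ArrayDeque;
-    import java.util.Deque;
-
-    /** Keeps only the most recent failure records, evicting the oldest. */
-    class FailureBuffer<T> {
-      private final int limit;
-      private final Deque<T> entries = new ArrayDeque<>();
-
-      FailureBuffer(int limit) { this.limit = limit; }
-
-      synchronized void add(T entry) {
-        if (entries.size() == limit) {
-          entries.removeFirst();  // drop the oldest failure record
-        }
-        entries.addLast(entry);
-      }
-    }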
-
- 
-## `statistics`
-
-Statistics on each role.
-
-They can be divided into counters that only increase
-
-    "containers.start.completed": 0,
-    "containers.start.failed": 0,
-    "containers.failed": 0,
-    "containers.completed": 0,
-    "containers.requested": 0
-
-and those that vary depending upon the current state
-
-    "containers.live": 0,
-    "containers.active.requests": 0,
-    "containers.desired": 0,
-
-
-*Proposed*: move these values out of `statistics` into some other section, as
-they are state, not statistics.
-
-
-       "statistics": {
-         "worker": {
-           "containers.start.completed": 0,
-           "containers.live": 2,
-           "containers.start.failed": 0,
-           "containers.active.requests": 0,
-           "containers.failed": 0,
-           "containers.completed": 0,
-           "containers.desired": 2,
-           "containers.requested": 0
-         },
-         "slider": {
-           "containers.unknown.completed": 0,
-           "containers.start.completed": 3,
-           "containers.live": 1,
-           "containers.start.failed": 0,
-           "containers.failed": 0,
-           "containers.completed": 0,
-           "containers.surplus": 0
-         },
-         "master": {
-           "containers.start.completed": 0,
-           "containers.live": 1,
-           "containers.start.failed": 0,
-           "containers.active.requests": 0,
-           "containers.failed": 0,
-           "containers.completed": 0,
-           "containers.desired": 1,
-           "containers.requested": 0
-         }
-       },
-    
-The `/statistics/slider` section is unusual in that it provides the aggregate statistics
-of the cluster -this is not obvious. A different name could be used -but
-again, there's a risk of a clash or confusion with a role name.
-
-Better to have a specific `/statistics/cluster` element, 
-and to move the roles' statistics under `/statistics/roles`:
-
-    "statistics": {
-      "cluster": {
-        "containers.unknown.completed": 0,
-        "containers.start.completed": 3,
-        "containers.live": 1,
-        "containers.start.failed": 0,
-        "containers.failed": 0,
-        "containers.completed": 0,
-        "containers.surplus": 0
-  
-      },
-      "roles": {
-        "worker": {
-          "containers.start.completed": 0,
-          "containers.live": 2,
-          "containers.start.failed": 0,
-          "containers.active.requests": 0,
-          "containers.failed": 0,
-          "containers.completed": 0,
-          "containers.desired": 2,
-          "containers.requested": 0
-        },
-        "master": {
-          "containers.start.completed": 0,
-          "containers.live": 1,
-          "containers.start.failed": 0,
-          "containers.active.requests": 0,
-          "containers.failed": 0,
-          "containers.completed": 0,
-          "containers.desired": 1,
-          "containers.requested": 0
-        }
-      }
-    },
-
-This approach allows extra statistics sections to be added (perhaps
-by providers), without any changes to the toplevel section.
-
-## Options
-
-A list of options used by Slider and its providers to build up the AM
-and the configurations of the deployed service components.
-
-
-    "options": {
-      "zookeeper.port": "2181",
-      "site.hbase.master.startup.retainassign": "true",
-      "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
-      "site.fs.defaultFS": "hdfs://sandbox:8020",
-      "slider.container.failure.threshold": "5",
-      "site.fs.default.name": "hdfs://sandbox:8020",
-      "slider.cluster.directory.permissions": "0770",
-      "slider.am.monitoring.enabled": "false",
-      "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
-      "slider.tmp.dir": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/tmp/am",
-      "slider.data.directory.permissions": "0770",
-      "zookeeper.hosts": "sandbox",
-      "slider.container.failure.shortlife": "60"
-    },
-  
-Some of these options have been created by Slider itself (`slider.tmp.dir`)
-for internal use -and are cluster specific. If/when the ability to use
-an existing JSON file as a template for a new cluster is added, having these
-options in the configuration will create problems.
-
-
-# Proposed Changes
-
-
-## Move Slider internal state to `/slider-internal`
-
-Move all slider "private" data to an internal section,`/slider-internal`
-including those in the toplevel directory and in `/options`
-  
-## Allow `/options` and `roles/*/` options entries to take the value "null".
-
-A `null` entry would declare that a real value must be supplied before the
-cluster can start. Provider templates could declare this.
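-
-For example, a provider template might ship with an entry such as the
-following (the option name here is purely illustrative):
-
-    "options": {
-      "site.fs.defaultFS": null
-    }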
-  
-## Make client configuration retrieval hierarchical -and maybe move it out of the status
-
-The current design assumes that it is a -site.xml file being served up. This
-does not work for alternate file formats generated by the Provider.
-
-## Role Options
-
-The `/roles/$ROLENAME/` clauses each provide options for a
-specific role.
-
-This includes
-1. `role.instances`: defines the number of instances of a role to create
-1. `env.` environment variables for launching the container
-1. `yarn.` properties to configure YARN requests.
-1. `jvm.heapsize`: an option supported by some providers to 
-fix the heap size of a component.
-1. `app.infoport`: an option supported by some providers (e.g. HBase)
-to fix the port to which a role (master or worker) binds its web UI.
-
-
-
-      "worker": {
-        "yarn.memory": "768",
-        "env.MALLOC_ARENA_MAX": "4",
-        "role.instances": "0",
-        "role.name": "worker",
-        "jvm.heapsize": "512M",
-        "yarn.vcores": "1",
-        "app.infoport": "0"
-      },
-
-In a live cluster, the role information also includes status information
-about the cluster.
-
-      "master": {
-        "yarn.memory": "1024",
-        "env.MALLOC_ARENA_MAX": "4",
-        "role.instances": "0",
-        "role.requested.instances": "0",
-        "role.name": "master",
-        "role.failed.starting.instances": "0",
-        "role.actual.instances": "0",
-        "jvm.heapsize": "512M",
-        "yarn.vcores": "1",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "0",
-        "app.infoport": "0"
-      }
-
-The role `slider` represents the Slider Application Master itself.
-
-      
-      "slider": {
-        "yarn.memory": "256",
-        "env.MALLOC_ARENA_MAX": "4",
-        "role.instances": "1",
-        "role.name": "slider",
-        "jvm.heapsize": "256M",
-        "yarn.vcores": "1",
-      },
-
-### Proposed: 
-1. move all dynamic role status to its own clauses.
-1. use a simple inheritance model from `/options`
-1. don't allow role entries to alter the cluster state. 
-  
-### Proposed: `/clientProperties` continues to return key-val pairs
-
-The `/clientProperties` section will remain, with key-val pairs of type
-string, the expectation being this is where providers can insert specific
-single attributes for client applications.
-
-These values can be converted to application-specific files on the client,
-in code -as done today in the Slider CLI- or via template expansion (beyond
-the scope of this document).
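-
-A minimal client-side sketch of that conversion in Java, using Hadoop's
-`Configuration` class (illustrative; the class name, file name and map
-contents are assumptions):
-
-    import java.io.OutputStream;
-    import java.nio.file.Files;
-    import java.nio.file.Paths;
-    import java.util.Map;
-    import org.apache.hadoop.conf.Configuration;
-
-    public class ClientConfWriter {
-      /** Write the key-val pairs from /clientProperties as a -site.xml file. */
-      public static void write(Map<String, String> clientProperties,
-          String filename) throws Exception {
-        Configuration conf = new Configuration(false);  // no default resources
-        clientProperties.forEach(conf::set);
-        try (OutputStream out = Files.newOutputStream(Paths.get(filename))) {
-          conf.writeXml(out);
-        }
-      }
-    }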
-
-
-
-### Proposed: alongside `/clientProperties`  comes `/clientfiles` 
-
-This section will list all files that an application instance can generate
-for clients, along with a description.
-
-    "/clientfiles/hbase-site.xml": "site information for HBase"
-    "/clientfiles/log4.properties": "log4.property file"
-
-A new CLI command would be added to retrieve a client file.
-1. The specific file must be named.
-1. If it is not present, an error must be raised.
-1. If it is present, it is downloaded and output to the console/to a named
-destination file/directory `--outfile <file>` and `--outdir <dir>`
-1. If the `--list` argument is provided, the list of available files is
-returned (e.g.) 
-
-    hbase-site.xml: site information for HBase
-    log4j.properties: log4j properties file
-    
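-A hypothetical invocation (the command name is illustrative only; the
-document does not fix one):
-
-    bin/slider getclientfile cluster1 --list
-    bin/slider getclientfile cluster1 hbase-site.xml --outfile /tmp/hbase-site.xml
-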
-*No attempt will be made to parse or process the body of the retrieved files.*
-
-In a REST implementation of the client API, /clientconf would be a path
-to the list of options; each file a path underneath.
-
-Client configuration file retrieval moves outside the status completely;
-the status just lists the possible values; a separate call returns them.
-
-This will permit binary content to be retrieved, and avoid any marshalling
-problems and inefficiencies.
-
-With this change, there will now be two ways to generate client configuration
-files
-
-* Client-side: as today
-* Server-side: via the provider
-
-Client-side is more extensible, as it allows for arbitrary clients; server-side
-is restricted to those files which the application provider is capable of
-generating. The advantage of the server-side option is that the files the
-provider is aware of will be visible through the REST and Web UIs, and so
-trivially retrieved.
-
-### Stop intermixing role specification with role current state
-
-Create a new section, `rolestatus`, which lists the current status
-of the roles: how many are running vs requested, how many are being
-released.
-
-There's some overlap here with the `/statistics` field, so we should
-either merge them or clearly separate the two. Only the `role.failed`
-properties match entries in the statistics -perhaps they should be cut.
-
-#### provider-specific status
-
-Allow providers to publish information to the status, in their
-own section.
-
-There already is support for providers updating the cluster status
-in Slider 12.1 and earlier, but it has flaws.
-
-A key one is that it is done synchronously on a `getStatus()` call;
-as providers may perform a live query of their status (example, the HBase
-provider looks up the Web UI ports published by HBase to zookeeper),
-there's overhead, and if the operation blocks (example: when HBase hasn't
-ever been deployed and the zookeeper path is empty), then the status
-call blocks.
-
-*Proposed:*
-
-1. There is a specific `/provider` section
-1. There's no restriction on what JSON is permitted in this section.
-1. Providers may make their own updates to the application state to read and
-write this block -operations that are asynchronous to any status queries.
diff --git a/src/site/markdown/configuration/resolved-resources.json b/src/site/markdown/configuration/resolved-resources.json
deleted file mode 100644
index 5299897..0000000
--- a/src/site/markdown/configuration/resolved-resources.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-
-  "metadata": {
-    "description": "example of a resources file"
-  },
-  
-  "global": {
-    "yarn.vcores": "1",
-    "yarn.memory": "512"
-  },
-  
-  "components": {
-    "master": {
-      "instances": "1",
-      "yarn.memory": "1024"
-    },
-    "worker": {
-      "instances":"5"
-    }
-  }
-}
\ No newline at end of file
diff --git a/src/site/markdown/configuration/specification.md b/src/site/markdown/configuration/specification.md
deleted file mode 100644
index ccc49fe..0000000
--- a/src/site/markdown/configuration/specification.md
+++ /dev/null
@@ -1,512 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Specification of the "Cluster Description"
-
-* This is partially obsolete. Slider still returns the Slider Cluster Description
-as changing it will break most of the unit tests -once these are updated
-this document will be completely obsolete and replaced with a new one.
-
-
-### Notation: 
-
-In this document, a full path to a value is represented as a path
-`options/zookeeper.port`; an assignment as `options/zookeeper.port=2181`.
-
-A wildcard indicates all entries matching a path: `options/zookeeper.*`
-or `/roles/*/yarn.memory`
-
-
-## Core Concepts
-
-The specification of an application instance is defined in the file
-`${user.home}/.slider/clusters/${clustername}/cluster.json`.
-
-
-## Sections for specifying and describing cluster state
-
-The cluster description is hierarchical, with standardized sections.
-
-Different sections have one of three roles.
-
-1. Storage and specification of internal properties used to define a cluster -properties
-that should not be modified by users -doing so is likely to render the
-cluster undeployable.
-
-1. Storage and specification of the components deployed by Slider.
-These sections define options for the deployed application, the size of
-the deployed application, attributes of the deployed roles, and customizable
-aspects of the Slider application master. 
-
-  This information defines the *desired state* of a cluster.
-   
-  Users may edit these sections, either via the CLI, or by directly editing the `cluster.json` file of
-  a frozen cluster.
-
-1. Status information provided by a running cluster. These include:
- information about the cluster, statistics, information about each role in
- the cluster -as well as other aspects of the deployment.
- 
- This information describes the *actual state* of a cluster.
-  
-Using a common format for both the specification and description of a cluster
-may be confusing, but it is designed to unify the logic needed to parse
-and process cluster descriptions. There is only one JSON file to parse
--merely different sections of relevance at different times.
-
-## Role-by-role subsections
-
-A slider-deployed application consists of the single Slider application master,
-and one or more roles -specific components in the actual application.
-
-The `/roles` section contains a listing for each role, 
-declaring the number of instances of each role desired,
-possibly along with some details defining the actual execution of the application.
-
-The `/statistics/roles/` section returns statistics on each role,
-while `/instances` has a per-role entry listing the YARN
-containers hosting instances. 
-
-
-## Cluster information for applications
-
-The AM/application provider may generate information for use by client applications.
-
-There are three ways to provide this:
-
-1. A section in which simple key-value pairs are provided for interpretation
-by client applications -usually to generate configuration documents
-2. A listing of files that may be provided directly to a client. The API to provide these files is not covered by this document.
-3. A provider-specific section in which arbitrary values and structures may be defined. This allows greater flexibility in the information that a provider can publish -though it does imply custom code to process this data on the client.
-
-
-# Persistent Specification Sections
-
-## "/" : root
-
-The root contains a limited number of key-value pairs:
-
-* `version`: string; required.
-The version of the JSON file, as an `x.y.z` version string.
-    1. Applications MUST verify that they can process a specific version.
-    1. The version number SHOULD be incremented in the final "z" value
-    after enhancements that are considered backwards compatible.
-    Incompatible updates MUST be marked with a new "y" value.
-    The first, "x", number is reserved for major reworkings
-    of the cluster specification itself (this document or its
-    successors).
-
-* `name`: string; required. The cluster name.
-* `type`: string; required.
-Reference to the provider type -this triggers a Hadoop configuration
-property lookup to find the implementation classes.
-* `valid`: boolean; required.
-Flag to indicate whether or not a specification is considered valid.
-If false, the rest of the document is in an unknown state.
-
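-A minimal sketch of the root-level entries (the values shown are illustrative):
-
-    {
-      "version": "1.0.0",
-      "name": "test_cluster_lifecycle",
-      "type": "hbase",
-      "valid": true
-    }
-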
-## `/slider-internal`: internal configuration
-
-Stores internal configuration options. These parameters
-are not defined in this document.
-
-## `/diagnostics`: diagnostics sections
-
-Persisted list of information about Slider. 
-
-Static information about the file history:
- 
-    "diagnostics" : {
-      "create.hadoop.deployed.info" : 
-       "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
-      "create.application.build.info" : 
-       "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-      "create.hadoop.build.info" : "2.3.0",
-      "create.time.millis" : "1393512091276"
-    },
- 
-This information is not intended to provide anything other
-than diagnostics to an application; the values and their meaning
-are not defined. All applications MUST be able to process
-an empty or absent `/diagnostics` section.
-
-## `/options`: cluster options
-
-A persisted list of options used by Slider and its providers to build up the AM
-and the configurations of the deployed service components:
-
-  
-    "options": {
-      "slider.am.monitoring.enabled": "false",
-      "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
-      "slider.container.failure.threshold": "5",
-      "slider.container.failure.shortlife": "60",
-      "zookeeper.port": "2181",
-      "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
-      "zookeeper.hosts": "sandbox",
-      "site.hbase.master.startup.retainassign": "true",
-      "site.fs.defaultFS": "hdfs://sandbox:8020",
-      "site.fs.default.name": "hdfs://sandbox:8020",
-      "env.MALLOC_ARENA_MAX": "4",
-      "site.hbase.master.info.port": "0",
-      "site.hbase.regionserver.info.port": "0"
-    },
-
-Many of the properties are automatically set by Slider when a cluster is constructed.
-They may be edited afterwards.
-
-
-### Standard Option types
-
-All option values MUST be strings.
-
-#### `slider.`
-All options that begin with `slider.` are intended for use by slider and 
-providers to configure the Slider application master itself, and the
-application. For example, `slider.container.failure.threshold` defines
-the number of times a container must fail before the role (and hence the cluster)
-is considered to have failed. As another example, the zookeeper bindings
-such as `zookeeper.hosts` are read by the HBase and Ambari providers, and
-used to modify the applications' site configurations with application-specific
-properties.
-
-#### `site.`
- 
-These are properties that are expected to be propagated to an application's
- `site` configuration -if such a configuration is created. For HBase, the 
- site file is `hbase-site.xml`; for Accumulo it is `accumulo-site.xml`.
-
-1. The destination property is taken by removing the prefix `site.`, and
-setting the shortened key with the defined value.
-1. Not all applications have the notion of a site file; these applications MAY
-ignore the settings.
-1. Providers MAY validate site settings to recognise invalid values. This
-aids identifying and diagnosing startup problems.
-
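-As an illustrative example of this mapping, using an option value shown earlier,
-the option
-
-    "site.hbase.master.info.port": "0"
-
-would surface in the generated `hbase-site.xml` as:
-
-    <property>
-      <name>hbase.master.info.port</name>
-      <value>0</value>
-    </property>
-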
-#### `env.`
-
-These are options to configure environment variables in the roles. When
-a container is started, all `env.` options have the prefix removed, and
-are then set as environment variables in the target context.
-
-1. The Slider AM uses these values to configure itself, after following the
-option/role merge process.
-1. Application providers SHOULD follow the same process.
-
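-For example, the option
-
-    "env.MALLOC_ARENA_MAX": "4"
-
-results in the environment variable `MALLOC_ARENA_MAX=4` being set in the
-launched container.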
-
-## `/roles`: role declarations
-
-The `/roles/$ROLENAME/` clauses each provide options for a
-specific role.
-
-This includes:
-1. `role.instances`: defines the number of instances of a role to create
-1. `env.` environment variables for launching the container
-1. `yarn.` properties to configure YARN requests.
-1. `jvm.heapsize`: an option supported by some providers to 
-fix the heap size of a component.
-
-
-      "worker": {
-        "yarn.memory": "768",
-        "env.MALLOC_ARENA_MAX": "4",
-        "role.instances": "0",
-        "role.name": "worker",
-        "role.failed.starting.instances": "0",
-        "jvm.heapsize": "512M",
-        "yarn.vcores": "1"
-      },
-
-
-The role `slider` represents the Slider Application Master itself.
-
-      
-      "slider": {
-        "yarn.memory": "256",
-        "env.MALLOC_ARENA_MAX": "4",
-        "role.instances": "1",
-        "role.name": "slider",
-        "jvm.heapsize": "256M",
-        "yarn.vcores": "1"
-      },
-
-Providers may support a fixed number of roles -or they may support a dynamic
-number of roles defined at run-time, potentially from other data sources.
-
-## How `/options` and role options are merged
-
-The options declared for a specific role are merged with the cluster-wide options
-to define the final options for a role. This is implemented in a simple
-override model: role-specific options can override any site-wide options.
-
-1. The options defined in `/options` are used to create the initial option
-map for each role.
-1. The role's options are then applied to the map -this may overwrite definitions
-from the `/options` section.
-1. There is no way to "undefine" a cluster option, merely overwrite it. 
-1. The merged map is then used by the provider to create the component.
-1. The special `slider` role is used in the CLI to define the attributes of the AM.
-
-Options set on a role do not affect any site-wide options: they
-are specific to the individual role being created.
-
-As such, overwriting a `site.` option may have no effect -or it may
-change the value of a site configuration document *in that specific role instance*.
-
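-As a worked example of the merge (the role's `env.` value here is illustrative),
-given the cluster-wide options
-
-    "options": {
-      "env.MALLOC_ARENA_MAX": "4",
-      "zookeeper.port": "2181"
-    }
-
-and the role options
-
-    "worker": {
-      "env.MALLOC_ARENA_MAX": "8",
-      "yarn.memory": "768"
-    }
-
-the final option map for `worker` instances is
-
-    {
-      "env.MALLOC_ARENA_MAX": "8",
-      "zookeeper.port": "2181",
-      "yarn.memory": "768"
-    }
-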
-### Standard role options
-
-* `role.instances` : number; required.
-  The number of instances of that role desired in the application.
-* `yarn.vcores` : number.
-  The number of YARN "virtual cores" to request for each role instance.
-  The larger the number, the more CPU allocation -and potentially the longer
-  time to satisfy the request and so instantiate the node. 
-  If the value `"-1"` is used -for any role but `slider`- the maximum value
-  available to the application is requested.
-* `yarn.memory` : number.
-  The number in Megabytes of RAM to request for each role instance.
-  The larger the number, the more memory allocation -and potentially the longer
-  time to satisfy the request and so instantiate the node. 
-  If the value `"-1"` is used -for any role but `slider`- the maximum value
-  available to the application is requested.
- 
-* `env.` environment variables.
-String environment variables to use when setting up the container.
-
-### Provider-specific role options
-  
-* `jvm.heapsize` -the amount of memory for a provider to allocate for
- a process's JVM. Example: "512M". This option MAY be implemented by a provider.
- 
-
-
-
-
-# Dynamic Information Sections
-
-These are the parts of the document that provide dynamic run-time
-information about an application. They are provided by the
-Slider Application Master when a request for the cluster status is issued.
-
-## `/info`
-
-Dynamic set of string key-value pairs containing
-information about the running application -as provided by the Slider Application Master.
-
-The values in this section are not normatively defined. 
-
-Here are some standard values:
- 
-* `slider.am.restart.supported`: whether the AM supports service restart without killing all the containers hosting
- the role instances:
- 
-        "slider.am.restart.supported" : "false",
-    
-* timestamps of the cluster going live, and when the status query was made
-    
-        "live.time" : "27 Feb 2014 14:41:56 GMT",
-        "live.time.millis" : "1393512116881",
-        "status.time" : "27 Feb 2014 14:42:08 GMT",
-        "status.time.millis" : "1393512128726",
-    
-* yarn data provided to the AM
-    
-        "yarn.vcores" : "32",
-        "yarn.memory" : "2048",
-      
-*  information about the application and hadoop versions in use. Here
-  the application was built using Hadoop 2.3.0, but is running against the version
-  of Hadoop built for HDP-2.
-  
-        "status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-        "status.hadoop.build.info" : "2.3.0",
-        "status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
-     
- 
-As with the `/diagnostics` section, this area is primarily intended
-for debugging.
-
-## `/instances`: instance list
-
-Information about the live containers in a cluster:
-
-     "instances": {
-       "slider": [ "container_1393511571284_0002_01_000001" ],
-       "master": [ "container_1393511571284_0002_01_000003" ],
-       "worker": [ 
-         "container_1393511571284_0002_01_000002",
-         "container_1393511571284_0002_01_000004"
-       ]
-     },
-
-
-## `/status`: detailed dynamic state
-
-This provides more detail on the application, including live and failed instances.
-
-### `/status/live`: live role instances by container
-
-    "cluster": {
-      "live": {
-        "worker": {
-          "container_1394032374441_0001_01_000003": {
-            "name": "container_1394032374441_0001_01_000003",
-            "role": "worker",
-            "roleId": 1,
-            "createTime": 1394032384451,
-            "startTime": 1394032384503,
-            "released": false,
-            "host": "192.168.1.88",
-            "state": 3,
-            "exitCode": 0,
-            "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
-            "diagnostics": "",
-            "environment": [
-              "HADOOP_USER_NAME=\"slider\"",
-              "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
-              "HBASE_HEAPSIZE=\"256\"",
-              "MALLOC_ARENA_MAX=\"4\"",
-              "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
-            ]
-          }
-        }
-      },
-      "failed" : {}
-    }
-
-All live instances MUST be described in `/status/live`.
-
-Failed instances MAY be listed in the `/status/failed` section; specifically,
-a limited set of recently failed instances SHOULD be provided.
-
-Future versions of this document may introduce more sections under `/status`.
-        
-### `/status/rolestatus`: role status information
-
-This lists the current status of the roles:
-how many are running vs. requested, and how many are being
-released.
- 
-      
-    "rolestatus": {
-      "worker": {
-        "role.instances": "2",
-        "role.requested.instances": "0",
-        "role.failed.starting.instances": "0",
-        "role.actual.instances": "2",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "1"
-      },
-      "slider": {
-        "role.instances": "1",
-        "role.requested.instances": "0",
-        "role.name": "slider",
-        "role.actual.instances": "1",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "0"
-      },
-      "master": {
-        "role.instances": "1",
-        "role.requested.instances": "1",
-        "role.name": "master",
-        "role.failed.starting.instances": "0",
-        "role.actual.instances": "0",
-        "role.releasing.instances": "0",
-        "role.failed.instances": "0"
-      }
-    }
-
-
-### `/status/provider`: provider-specific information
-
-Providers MAY publish information to the `/status/provider` section.
-
-1. There's no restriction on what JSON is permitted in this section.
-1. Providers may make their own updates to the application state to read and
-write this block; these operations are asynchronous to any status queries.
-
-
-
-## `/statistics`: aggregate statistics 
- 
-Statistics on the cluster and on each role in the cluster.
-
-A specific `/statistics/cluster` element holds the cluster-wide statistics,
-while the per-role statistics sit under `/statistics/roles`:
-
-    "statistics": {
-      "cluster": {
-        "containers.unknown.completed": 0,
-        "containers.start.completed": 3,
-        "containers.live": 1,
-        "containers.start.failed": 0,
-        "containers.failed": 0,
-        "containers.completed": 0,
-        "containers.surplus": 0
-      },
-      "roles": {
-        "worker": {
-          "containers.start.completed": 0,
-          "containers.live": 2,
-          "containers.start.failed": 0,
-          "containers.active.requests": 0,
-          "containers.failed": 0,
-          "containers.completed": 0,
-          "containers.desired": 2,
-          "containers.requested": 0
-        },
-        "master": {
-          "containers.start.completed": 0,
-          "containers.live": 1,
-          "containers.start.failed": 0,
-          "containers.active.requests": 0,
-          "containers.failed": 0,
-          "containers.completed": 0,
-          "containers.desired": 1,
-          "containers.requested": 0
-        }
-      }
-    },
-
-`/statistics/cluster` provides aggregate statistics for the entire cluster.
-
-Under `/statistics/roles` MUST come an entry for each role in the cluster.
-
-All simple values in the statistics section are integers.
-
-
-### `/clientProperties` 
-
-The `/clientProperties` section contains key-value pairs of type
-string; the expectation is that providers can insert specific
-single attributes here for client applications.
-
-These values can be converted to application-specific files on the client,
-in code -as done today in the Slider CLI- or via template expansion (beyond
-the scope of this document).
-
-
-### `/clientfiles` 
-
-This section lists all files that an application instance MAY generate
-for clients, along with a description.
-
-    "/clientfiles/hbase-site.xml": "site information for HBase"
-    "/clientfiles/log4j.properties": "log4j properties file"
-
-Client configuration file retrieval is by other means; this
-status operation merely lists the files that are available.
-
-
diff --git a/src/site/markdown/debugging.md b/src/site/markdown/debugging.md
deleted file mode 100644
index b98af1d..0000000
--- a/src/site/markdown/debugging.md
+++ /dev/null
@@ -1,92 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Debugging Slider
-There are a number of options available to you for debugging Slider applications.  They include:
-
-* Using Slider logging
-* IDE-based remote debugging of the Application Master
-
-## Using Slider logging
-There are a number of options for viewing the generated log files:
-
-1. Using a web browser
-2. Accessing the host machine
-  
-### Using a web browser
-
-The log files are accessible via the Yarn Resource Manager UI.  From the main page (e.g. `http://${YARN_RESOURCE_MGR_HOST}:8088`),
-click on the link for the application instance of interest, and then click on the `logs` link.
-This will present you with a page with links to the `slider-err.txt` file and the `slider-out.txt` file.
-The former is the file you should select -it is where the application logs go.
-Once the log page is presented, click on the link at the top of the page ("Click here for full log") to view the entire file.
-
-If the file `slider-out.txt` is empty, then examine  `slider-err.txt` -an empty
-output log usually means that the java process failed to start -this should be
-logged in the error file.
-     
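-If log aggregation is enabled on the cluster, the same logs can also be
-retrieved with the YARN CLI (the application ID below is illustrative):
-
-    yarn logs -applicationId application_1398372047522_0009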
-
-### Accessing the host machine
-
-If access to other log files is required, there is the option of logging in
- to the host machine on which the application component is running
-  -provided you have the correct permissions.
-  
-The root directory for all YARN associated files is the value of `yarn.nodemanager.log-dirs` in `yarn-site.xml` - e.g. `/hadoop/yarn/log`.
-Below the root directory you will find an application and container sub-directory (e.g. `/application_1398372047522_0009/container_1398372047522_0009_01_000001/`).
-Below the container directory you will find any log files associated with the processes running in the given Yarn container.
-
-Within a container's log directory, the following files are useful when debugging the application.
-
-**agent.log** 
-  
-E.g. `application_1398098639743_0024/container_1398098639743_0024_01_000003/infra/log/agent.log`
-This file contains the logs from the Slider-Agent.
-
-**application component log**
-
-E.g. `./log/application_1398098639743_0024/container_1398098639743_0024_01_000003/app/log/hbase-yarn-regionserver-c6403.ambari.apache.org.log`
-
-The location of the application log is defined by the application. "${AGENT_LOG_ROOT}" is a symbol available to the app developers to use as a root folder for logging.
-
-**agent operations log**
-
-E.g. `./log/application_1398098639743_0024/container_1398098639743_0024_01_000003/app/command-log/`
-
-The command logs produced by the slider-agent are available in the `command-log` folder relative to `${AGENT_LOG_ROOT}/app`
-
-Note that the *fish* shell is convenient for debugging, as `cat log/**/slider-out.txt` will find the relevant output file
-irrespective of the path leading to it.
-
-## IDE-based remote debugging of the Application Master
-
-For situations in which the logging does not yield enough information to debug an issue,
-the user has the option of specifying JVM command line options for the
-Application Master that enable attaching to the running process with a debugger
-(e.g. the remote debugging facilities in Eclipse or Intellij IDEA). 
-In order to specify the JVM options, edit the application configuration file
-(the file specified as the `--template` argument value on the command line for cluster creation)
-and specify the `jvm.opts` property for the `slider-appmaster` component:
-
-    "components": {
-      "slider-appmaster": {
-        "jvm.heapsize": "256M",
-        "jvm.opts": "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
-      },
-      ...
- 		
-You may specify `suspend=y` in the line above if you wish to have the application master process wait for the debugger to attach before beginning its processing.
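-
-To verify that the debug port is listening before attaching your IDE, you can
-also use the JDK's command-line debugger, `jdb` (the host name is illustrative):
-
-    jdb -attach AM_HOST:5005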
diff --git a/src/site/markdown/developing/building.md b/src/site/markdown/developing/building.md
deleted file mode 100644
index 8bf3954..0000000
--- a/src/site/markdown/developing/building.md
+++ /dev/null
@@ -1,374 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Building Slider
-
-
-Here's how to set this up.
-
-## Before you begin
-
-### Networking
-
-The network on the development system must be functional, with hostname lookup
-of the local host working. Tests will fail without this.
-
-### Java 7
-
-Slider is built on Java 7 -please have a JDK for Java 7 or 8 set up.
-
-### Maven
-
-You will need a version of Maven 3.0+, set up with enough memory:
-
-    export MAVEN_OPTS="-Xms256m -Xmx512m -Djava.awt.headless=true"
-
-
-*Important*: As of October 6, 2013, Maven 3.1 is not supported due to
-[version issues](https://cwiki.apache.org/confluence/display/MAVEN/AetherClassNotFound).
-
-### Protoc
-
-You need a copy of the `protoc` compiler for protobuf compilation:
-
-1. OS/X: `brew install protobuf`
-1. Others: consult the [Building Hadoop documentation](http://wiki.apache.org/hadoop/HowToContribute).
-
-The version of `protoc` installed must be the same as that used by Hadoop itself.
-This is absolutely critical to prevent JAR version problems.
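-
-You can check the installed version with:
-
-    protoc --version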
-
-## Building a compatible Hadoop version
-
-
-Slider is built against Hadoop 2 -you can download and install
-a copy from the [Apache Hadoop Web Site](http://hadoop.apache.org).
-
-
-During development, it's convenient (but not mandatory)
-to have a local version of Hadoop -so that we can find and fix bugs/add features in
-Hadoop as well as in Slider.
-
-
-To build and install locally, check out apache svn/github, branch `release-2.4.0`,
-and create a branch off that tag:
-
-    git clone git://git.apache.org/hadoop-common.git 
-    cd hadoop-common
-    git remote rename origin apache
-    git fetch --tags apache
-    git checkout release-2.4.0
-    git checkout -b release-2.4.0
-
-
-For the scripts below, set the `HADOOP_VERSION` variable to the version
-
-    export HADOOP_VERSION=2.4.0
-    
-or, for building against a pre-release version of Hadoop 2.4
- 
-    git checkout branch-2
-    export HADOOP_VERSION=2.4.0-SNAPSHOT
-
-To build and install it locally, skipping the tests:
-
-    mvn clean install -DskipTests
-
-To make a tarball for use in test runs:
-
-    # On OS/X
-    mvn clean install package -Pdist -Dtar -DskipTests -Dmaven.javadoc.skip=true 
-    
-    # on linux
-    mvn clean package -Pdist -Pnative -Dtar -DskipTests -Dmaven.javadoc.skip=true 
-
-Then expand this:
-
-    pushd hadoop-dist/target/
-    gunzip hadoop-$HADOOP_VERSION.tar.gz 
-    tar -xvf hadoop-$HADOOP_VERSION.tar 
-    popd
-
-This creates an expanded version of Hadoop. You can now actually run Hadoop
-from this directory. Do note that unless you have the native code built for
-your target platform, Hadoop will be slower. 
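-
-As a quick sanity check of the expanded build (the path assumes the
-`hadoop-dist` layout above):
-
-    hadoop-dist/target/hadoop-$HADOOP_VERSION/bin/hadoop version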
-
-## Building a compatible HBase version
-
-If you need to build a version of HBase -rather than use a released version-
-here are the instructions (for the hbase-0.98 release branch).
-
-Check out HBase from apache svn/github:
-
-    
-    git clone git://git.apache.org/hbase.git
-    cd hbase
-    git remote rename origin apache
-    git fetch --tags apache
-
-then
-
-    git checkout -b 0.98 apache/0.98
-or
-
-    git checkout tags/0.98.1
-    
-If you have already been building versions of HBase, remove the existing
-set of artifacts for safety:
-
-    rm -rf ~/.m2/repository/org/apache/hbase/
-    
-The maven command for building hbase artifacts against this hadoop version is:
-
-    mvn clean install assembly:single -DskipTests -Dmaven.javadoc.skip=true
-
-To use a different version of Hadoop from that defined in the `hadoop-two.version`
-property of `/pom.xml`:
-
-    mvn clean install assembly:single -DskipTests -Dmaven.javadoc.skip=true -Dhadoop-two.version=$HADOOP_VERSION
-
-This will create an hbase `tar.gz` file in the directory `hbase-assembly/target/`
-in the hbase source tree. 
-
-    export HBASE_VERSION=0.98.1
-    
-    pushd hbase-assembly/target
-    gunzip hbase-$HBASE_VERSION-bin.tar.gz 
-    tar -xvf hbase-$HBASE_VERSION-bin.tar
-    gzip hbase-$HBASE_VERSION-bin.tar
-    popd
-
-This will create an untarred directory containing
-hbase. Both the `.tar.gz` and untarred file are needed for testing. Most
-tests just work directly with the untarred file as it saves time uploading
-and downloading then expanding the file.
-
-(and if you set `HBASE_VERSION` to something else, you can pick up that version
--making sure that slider is in sync)
-
-For more information (including recommended Maven memory configuration options),
-see [HBase building](http://hbase.apache.org/book/build.html)
-
-For building just the JAR files:
-
-    mvn clean install -DskipTests -Dhadoop.profile=2.0 -Dhadoop-two.version=$HADOOP_VERSION
-
-*Tip:* you can force set a version in Maven by having it update all the POMs:
-
-    mvn versions:set -DnewVersion=0.98.1-SNAPSHOT
-
-## Building Accumulo
-
-Clone accumulo from apache:
-
-    git clone http://git-wip-us.apache.org/repos/asf/accumulo.git
-
-
-Check out branch 1.5.1-SNAPSHOT.
-
-
-
-In the accumulo project directory, build it:
-
-    mvn clean install -Passemble -DskipTests -Dmaven.javadoc.skip=true \
-     -Dhadoop.profile=2 
-     
-The default Hadoop version for accumulo-1.5.1 is hadoop 2.4.0; to build
-against a different version use the command
-     
-    mvn clean install -Passemble -DskipTests -Dmaven.javadoc.skip=true \
-     -Dhadoop.profile=2  -Dhadoop.version=$HADOOP_VERSION
-
-This creates an accumulo tar.gz file in `assemble/target/`. Unzip then untar
-this, to create a `.tar` file and an expanded directory:
-
-    accumulo/assemble/target/accumulo-1.5.1-SNAPSHOT-bin.tar
-
-This can be done with the command sequence:
-    
-    export ACCUMULO_VERSION=1.5.1-SNAPSHOT
-    
-    pushd assemble/target/
-    gunzip -f accumulo-$ACCUMULO_VERSION-bin.tar.gz 
-    tar -xvf accumulo-$ACCUMULO_VERSION-bin.tar 
-    popd
-    
-Note that the final location of the accumulo files is needed for the configuration;
-it may be directly under `target/` or it may be in a subdirectory, with
-a path such as `target/accumulo-$ACCUMULO_VERSION-dev/accumulo-$ACCUMULO_VERSION/`.
-
-
-## Testing
-
-### Configuring Slider to locate the relevant artifacts
-
-You must have the file `src/test/resources/slider-test.xml` (this
-is ignored by git), declaring where HBase, accumulo, Hadoop and zookeeper are:
-
-    <configuration>
-    
-      <property>
-        <name>slider.test.hbase.home</name>
-        <value>/home/slider/hbase/hbase-assembly/target/hbase-0.98.0-SNAPSHOT</value>
-        <description>HBASE Home</description>
-      </property>
-    
-      <property>
-        <name>slider.test.hbase.tar</name>
-        <value>/home/slider/hbase/hbase-assembly/target/hbase-0.98.0-SNAPSHOT-bin.tar.gz</value>
-        <description>HBASE archive URI</description>
-      </property> 
-         
-      <property>
-        <name>slider.test.accumulo.home</name>
-        <value>/home/slider/accumulo/assemble/target/accumulo-1.5.1-SNAPSHOT/</value>
-        <description>Accumulo Home</description>
-      </property>
-    
-      <property>
-        <name>slider.test.accumulo.tar</name>
-        <value>/home/slider/accumulo/assemble/target/accumulo-1.5.1-SNAPSHOT-bin.tar.gz</value>
-        <description>Accumulo archive URI</description>
-      </property>
-      
-      <property>
-        <name>zk.home</name>
-        <value>/home/slider/Apps/zookeeper</value>
-        <description>Zookeeper home dir on target systems</description>
-      </property>
-    
-      <property>
-        <name>hadoop.home</name>
-        <value>/home/slider/hadoop-common/hadoop-dist/target/hadoop-2.3.0</value>
-        <description>Hadoop home dir on target systems</description>
-      </property>
-      
-    </configuration>
-    
-
-## Debugging a failing test
-
-1. Locate the directory `target/$TESTNAME`, where TESTNAME is the name of the
-test case and/or test method. This directory contains the Mini YARN Cluster
-logs. For example, `TestLiveRegionService` stores its data under 
-`target/TestLiveRegionService`
-
-1. Look under that directory for `-logdir` directories, then an application
-and container containing logs. There may be more than one node being simulated;
-every node manager creates its own logdir.
-
-1. Look for the `out.txt` and `err.txt` files for stdout and stderr log output.
-
-1. Slider uses SLF4J to log to `out.txt`; remotely executed processes may use
-either stream for logging
-
-Example:
-
-    target/TestLiveRegionService/TestLiveRegionService-logDir-nm-1_0/application_1376095770244_0001/container_1376095770244_0001_01_000001/out.txt
-
-1. The actual test log from JUnit itself goes to the console and into 
-`target/surefire/`; this shows the events happening in the YARN services as well
- as (if configured) HDFS and Zookeeper. It is noisy -everything after the *teardown*
- message happens during cluster teardown, after the test itself has been completed.
- Exceptions and messages here can generally be ignored.
- 
-This is all a bit complicated -debugging is simpler if a single test is run at a
-time, which is straightforward:
-
-    mvn clean test -Dtest=TestLiveRegionService
-
-
-### Building the JAR file
-
-You can create the JAR file and set up its directories with:
-
-     mvn package -DskipTests
-
-# Development Notes
-
-<!---
-## Git branch model
-
-
-The git branch model used is
-[Git Flow](http://nvie.com/posts/a-successful-git-branching-model/).
-
-This is a common workflow model for Git, and built in to
-[Atlassian Source Tree](http://sourcetreeapp.com/).
- 
-The command line `git-flow` tool is easy to install 
- 
-    brew install git-flow
- 
-or
-
-    apt-get install git-flow
- 
-You should then work on all significant features in their own branch and
-merge them back in when they are ready.
-
- 
-    # until we get a public JIRA we're just using an in-house one. sorry
-    git flow feature start BUG-8192
-    
-    # finishes merges back in to develop/
-    git flow feature finish BUG-8192
-    
-    # release branch
-    git flow release start 0.4.0
-    
-    git flow release finish 0.4.0
--->
-
-## Attention OS/X developers
-
-YARN on OS/X doesn't terminate subprocesses the way it does on Linux, so
-HBase Region Servers created by the hbase shell script remain running
-even after the tests terminate.
-
-This causes some tests -especially those related to flexing down- to fail, 
-and test reruns may be very confused. If ever a test fails because there
-are too many region servers running, this is the likely cause.
-
-After every test run: do a `jps -v` to look for any leftover HBase services
--and kill them.
-
-Here is a handy bash command to do this:
-
-    jps -l | grep HRegion | awk '{print $1}' | xargs kill -9
-
-
-## Groovy 
-
-Slider uses Groovy 2.x as its language for writing tests -for better assertions
-and easier handling of lists and closures. Although the first prototype
-used Groovy on the production source, this was dropped in favor of
-a Java-only production codebase.
-
-## Maven utils
-
-
-Here are some handy aliases to make working with Maven easier:
-
-    alias mci='mvn clean install -DskipTests'
-    alias mi='mvn install -DskipTests'
-    alias mvct='mvn clean test'
-    alias mvnsite='mvn site:site -Dmaven.javadoc.skip=true'
-    alias mvt='mvn test'
-
-
diff --git a/src/site/markdown/developing/functional_tests.md b/src/site/markdown/developing/functional_tests.md
deleted file mode 100644
index d58c0ea..0000000
--- a/src/site/markdown/developing/functional_tests.md
+++ /dev/null
@@ -1,416 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Testing
-
-     The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
-      NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED",  "MAY", and
-      "OPTIONAL" in this document are to be interpreted as described in
-      RFC 2119.
-
-# Functional Tests
-
-The functional test suite is designed to test slider against
-a live cluster. 
-
-For these to work you need:
-
-1. A YARN Cluster -secure or insecure
-1. A `slider-client.xml` file configured to interact with the cluster
-1. Agent
-1. HBase tests: HBase `.tar.gz` uploaded to HDFS, and a local or remote HBase conf
-directory
-1. Accumulo tests: Accumulo `.tar.gz` uploaded to HDFS, and a local or remote Accumulo conf
-directory
-
-## Configuration of functional tests
-
-Maven needs to be given:
-
-1. A path to the expanded test archive
-1. A path to a slider configuration directory for the cluster
-
-The path to the expanded test archive is automatically calculated as being the directory under
-`../slider-assembly/target` where an untarred slider distribution can be found.
-If it is not present, the tests will fail.
-
-The path to the configuration directory must be supplied in the property
-`slider.conf.dir` which can be set on the command line
-
-    mvn test -Dslider.conf.dir=src/test/configs/sandbox/slider
-
-It can also be set in the (optional) file `slider-funtest/build.properties`:
-
-    slider.conf.dir=src/test/configs/sandbox/slider
-
-This file is loaded whenever a slider build or test run takes place.
-
-## Configuration of `slider-client.xml`
-
-The `slider-client.xml` must have extra configuration options for both the HBase and
-Accumulo tests, as well as a common set for actually talking to a YARN cluster.
-
-## Disabling the functional tests entirely
-
-All functional tests which require a live YARN cluster
-can be disabled through the property `slider.funtest.enabled`
-  
-    <property>
-      <name>slider.funtest.enabled</name>
-      <value>false</value>
-    </property>
-
-There is a configuration to do exactly this in
-`src/test/configs/offline/slider`:
-
-    slider.conf.dir=src/test/configs/offline/slider
-
-Tests which do not require a live YARN cluster will still run;
-these verify that the `bin/slider` script works.
-
-### Non-mandatory options
-
-The following test options may be added to `slider-client.xml` if the defaults
-need to be changed
-                   
-    <property>
-      <name>slider.test.zkhosts</name>
-      <description>comma separated list of ZK hosts</description>
-      <value>localhost</value>
-    </property>
-       
-    <property>
-      <name>slider.test.thaw.wait.seconds</name>
-      <description>Time to wait in seconds for a thaw to result in a running AM</description>
-      <value>60000</value>
-    </property>
-    
-    <property>
-      <name>slider.test.freeze.wait.seconds</name>
-      <description>Time to wait in seconds for a freeze to halt the cluster</description>
-      <value>60000</value>
-    </property>
-            
-     <property>
-      <name>slider.test.timeout.millisec</name>
-      <description>Time out in milliseconds before a test is considered to have failed.
-      There are some maven properties which also define limits and may need adjusting</description>
-      <value>180000</value>
-    </property>
-
-     <property>
-      <name>slider.test.yarn.ram</name>
-      <description>Size in MB to ask for containers</description>
-      <value>192</value>
-    </property>
-
-    
-Note that while the same properties need to be set in
-`slider-core/src/test/resources/slider-client.xml`, those tests take a file in the local
-filesystem; here a URI to a path visible across all nodes in the cluster is required,
-as the tests do not copy the `.tar`/`.tar.gz` files over. The application configuration
-directories may be local or remote -they are copied into the `.slider` directory
-during cluster creation.
-
-##  Provider-specific parameters
-
-An individual provider can pick up settings from their own
-`src/test/resources/slider-client.xml` file, or the one in `slider-core`.
-We strongly advise placing all the values in the `slider-core` file.
-
-1. All uncertainty about which file is picked up on the class path first goes
-away
-2. There's one place to keep all the configuration values in sync.
-
-### Agent Tests
-
-Agent tests are executed through the following mvn command, run in `slider/slider-funtest`:
-
-```
-cd slider-funtest
-mvn test -Dslider.conf.dir=../src/test/clusters/remote/slider -Dtest=TestAppsThroughAgent -DfailIfNoTests=false
-```
-
-**Enable/Execute the tests**
-
-To enable the test ensure that *slider.test.agent.enabled* is set to *true*.
-
-    <property>
-      <name>slider.test.agent.enabled</name>
-      <description>Flag to enable/disable Agent tests</description>
-      <value>true</value>
-    </property>
-        
-**Test setup**
-
-Edit the config file `src/test/clusters/remote/slider/slider-client.xml` and ensure that the host names are accurate for the test cluster.
-
-**User setup**
-
-Ensure that the user running the test is present on the cluster against which you are running the tests. The user must be a member of the hadoop group.
-
-E.g. `adduser testuser -d /home/testuser -G hadoop -m`
-
-**HDFS Setup**
-
-Set up hdfs folders for slider and the test user:
-
-    su hdfs
-    hdfs dfs -mkdir /slider
-    hdfs dfs -chown testuser:hdfs /slider
-    hdfs dfs -mkdir /user/testuser
-    hdfs dfs -chown testuser:hdfs /user/testuser
-
-Load up the agent package and config:
-
-    su testuser
-    hdfs dfs -mkdir /slider/agent
-    hdfs dfs -mkdir /slider/agent/conf
-    hdfs dfs -copyFromLocal SLIDER_INSTALL_LOC/agent/conf/agent.ini /slider/agent/conf
-
-Ensure the correct host name is provided for the agent tarball:
-        
-    <property>
-      <name>slider.test.agent.tar</name>
-      <description>Path to the Agent Tar file in HDFS</description>
-      <value>hdfs://NN_HOSTNAME:8020/slider/agent/slider-agent.tar.gz</value>
-    </property>
-
-
-
-### HBase Tests
-
-The HBase tests can be enabled or disabled
-    
-    <property>
-      <name>slider.test.hbase.enabled</name>
-      <description>Flag to enable/disable HBase tests</description>
-      <value>true</value>
-    </property>
-        
-Mandatory test parameters must be added to `slider-client.xml`
-
-    <property>
-      <name>slider.test.hbase.tar</name>
-      <description>Path to the HBase Tar file in HDFS</description>
-      <value>hdfs://sandbox:8020/user/slider/hbase.tar.gz</value>
-    </property>
-    
-    <property>
-      <name>slider.test.hbase.appconf</name>
-      <description>Path to the directory containing the HBase application config</description>
-      <value>file://${user.dir}/src/test/configs/sandbox/hbase</value>
-    </property>
-    
-Optional parameters:  
-  
-     <property>
-      <name>slider.test.hbase.launch.wait.seconds</name>
-      <description>Time to wait in seconds for HBase to start</description>
-      <value>1800</value>
-    </property>  
-
-#### Accumulo configuration options
-
-Enable/disable the tests
-
-     <property>
-      <name>slider.test.accumulo.enabled</name>
-      <description>Flag to enable/disable Accumulo tests</description>
-      <value>true</value>
-     </property>
-         
-Optional parameters
-         
-     <property>
-      <name>slider.test.accumulo.launch.wait.seconds</name>
-      <description>Time to wait in seconds for Accumulo to start</description>
-      <value>1800</value>
-     </property>
-
-### Configuring the YARN cluster for tests
-
-Here are the configuration options we use in `yarn-site.xml` for testing:
-
-These tell YARN to ignore memory requirements when allocating containers, and
-to keep the log files around after an application run.
-
-      <property>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-        <value>1</value>
-      </property>
-      <property>
-        <description>Whether physical memory limits will be enforced for
-          containers.
-        </description>
-        <name>yarn.nodemanager.pmem-check-enabled</name>
-        <value>false</value>
-      </property>
-      <!-- we really don't want checking here-->
-      <property>
-        <name>yarn.nodemanager.vmem-check-enabled</name>
-        <value>false</value>
-      </property>
-      
-      <!-- how long after a failure to see what is left in the directory-->
-      <property>
-        <name>yarn.nodemanager.delete.debug-delay-sec</name>
-        <value>60000</value>
-      </property>
-    
-      <!--thirty seconds before the process gets a -9 -->
-      <property>
-        <name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name>
-        <value>30000</value>
-      </property>
-
-
-## Testing against a secure cluster
-
-To test against a secure cluster:
-
-1. `slider-client.xml` must be configured as per [Security](../security.html).
-1. the client must have the kerberos tokens issued so that the user running
-the tests has access to HDFS and YARN.
-
-If there are problems authenticating (including the cluster being offline)
-the tests will appear to hang.
-
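-For example, to obtain and then verify a ticket before a test run (the
-principal is illustrative):
-
-    kinit tester@EXAMPLE.COM
-    klist
-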
-### Validating the configuration
-
-    mvn test -Dtest=TestBuildSetup
-
-### Using relative paths in test configurations
-
-When you are sharing configurations across machines via SCM or similar,
-it's impossible to have absolute paths in the configuration options to
-the location of items in the local filesystem (e.g. configuration directories).
-
-There are two techniques:
-
-1. Keep the data in HDFS and refer to it there. This works if there is a shared,
-persistent HDFS cluster.
-
-1. Use the special property `slider.test.conf.dir` that is set to the path
-of the directory, and which can then be used to create an absolute path
-from paths relative to the configuration dir:
-
-        <property>
-          <name>slider.test.hbase.appconf</name>
-          <description>Path to the directory containing the HBase application config</description>
-          <value>file://${slider.test.conf.dir}/../hbase</value>
-        </property>
-
-
-If the actual XML file path is required, a similar property
-`slider.test.conf.xml` is set.
-
-
-## Parallel execution
-
-Attempts to run test cases in parallel failed -even with a configuration
-to run methods in a class sequentially, but separate classes independently.
-
-Even after identifying and eliminating some unintended sharing of static
-mutable variables, trying to run test cases in parallel seemed to hang
-tests and produce timeouts.
-
-For this reason parallel tests have been disabled. To accelerate test runs
-through parallelization, run different tests on different hosts instead.
-
-## Other constraints
-
-* Port assignments SHOULD NOT be fixed, as this will cause clusters to fail if
-there are too many instances of a role on a same host, or if other tests are
-using the same port.
-* If a test does need to fix a port, it MUST be for a single instance of a role,
-and it must be different from all others. The assignment should be set in 
-`org.apache.slider.funtest.itest.PortAssignments` so as to ensure uniqueness
-over time. Otherwise: use the value of `0` to allow the OS to assign free ports
-on demand.
-
-## Test Requirements
-
-
-1. Test cases should be written so that each class works with exactly one
-Slider-deployed cluster
-1. Every test MUST have its own cluster name -preferably derived from the
-classname.
-1. This cluster should be deployed in an `@BeforeClass` method.
-1. The `@AfterClass` method MUST tear this cluster down.
-1. Tests must skip their execution if functional tests -or the 
-specific hbase or accumulo categories- are disabled.
-1. Tests within the suite (i.e. class) must be designed to be independent
--to work irrespective of the ordering of other tests.
-
-## Running and debugging the functional tests
-
-Running the functional tests involves the following steps:
-
-1. In the root `slider` directory, build a complete Slider release
-
-        mvn install -DskipTests
-1. Start the YARN cluster/set up proxies to connect to it, etc.
-
-1. In the `slider-funtest` dir, run the tests
-
-        mvn test 
-        
-A common mistake during development is to rebuild the `slider-core` JARs
-then the `slider-funtest` tests without rebuilding the `slider-assembly`.
-In this situation, the tests are in sync with the latest build of the code
--including any bug fixes- but the scripts executed by those tests are
-of a previous build of `slider-core.jar`. As a result, the fixes are not picked
-up.
-
-**To propagate changes in slider-core through to the funtest classes for
-testing, you must build/install all the slider packages from the root assembly:**
-
-    mvn clean install -DskipTests
-
-## Limitations of slider-funtest
-
-1. All tests run from a single client -workload can't scale
-1. Output from failed AMs and containers isn't collected
-
-## Troubleshooting the functional tests
-
-1. If application instances fail to come up as there are still outstanding
-requests, it means that YARN didn't have the RAM/cores to spare for the number
-of containers. Edit the `slider.test.yarn.ram` to make it smaller.
-
-1. If you are testing in a local VM and it stops responding, it will have been
-swapped out to disk. Rebooting can help, but for a long-term fix go through
-all the Hadoop configurations (HDFS, YARN, Zookeeper) and set their heaps to
-smaller numbers, like 256M each. Also: turn off unused services (hcat, oozie,
-webHDFS)
-
-1. The YARN UI will list the cluster launches -look for the one
-with a name close to the test and view its logs
-
-1. Container logs will appear "elsewhere". The log lists
-the containers used -you may be able to track the logs
-down from the specific nodes.
-
-1. If you browse the filesystem, look for the specific test clusters
-in `~/.slider/cluster/$testname`
-
-1. If you are using a secure cluster, make sure that the clocks
-are synchronized, and that you have a current token -`klist` will
-tell you this. In a VM: install and enable `ntp`, and consider rebooting if there
-are any problems. Check also that it has the same time zone settings
-as the host OS.
diff --git a/src/site/markdown/developing/index.md b/src/site/markdown/developing/index.md
deleted file mode 100644
index 7d8cf93..0000000
--- a/src/site/markdown/developing/index.md
+++ /dev/null
@@ -1,35 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Developing Slider
-
-Slider is an open source project -anyone is free to contribute, and we
-strongly encourage people to do so.
-
-Here are documents covering how to go about building, testing and releasing
-Slider:
-
-* [Building](building.html)
-* [Debugging](debugging.html)
-* [Testing](testing.html)
-* [Functional Testing](functional_tests.html)
-* [Manual Testing](manual_testing.html)
-* [Agent test setup](agent_test_setup.html)
-* [Releasing](releasing.html)
-
-
- 
diff --git a/src/site/markdown/developing/manual_testing.md b/src/site/markdown/developing/manual_testing.md
deleted file mode 100644
index aa32446..0000000
--- a/src/site/markdown/developing/manual_testing.md
+++ /dev/null
@@ -1,53 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Manual Testing
-
-Manual testing involves using the Slider package and an AppPackage to perform basic
-cluster functions such as create/destroy, flex up/down, and freeze/thaw.
-A python helper script is provided that can be used to automatically test an app package.
-
-## `SliderTester.py`
-Details to be added.
-
-## `SliderTester.ini`
-The various config parameters are:
-
-### slider
-* `package`: location of the slider package
-* `jdk.path`: jdk path on the test hosts
-
-### app
-* `package`: location of the app package
-
-### cluster
-* `yarn.application.classpath`: yarn application classpaths
-* `slider.zookeeper.quorum`: the ZK quorum hosts
-* `yarn.resourcemanager.address`:
-* `yarn.resourcemanager.scheduler.address`:
-* `fs.defaultFS`: e.g. `hdfs://NN_HOST:8020`
-
-### test
-* `app.user`: user to use for app creation
-* `hdfs.root.user`: hdfs root user
-* `hdfs.root.dir`: HDFS root, default /slidertst
-* `hdfs.user.dir`: HDFS user dir, default /user
-* `test.root`: local test root folder, default /test
-* `cluster.name`: name of the test cluster, default tst1
-* `cluster.type`: cluster type to build and test, e.g. hbase,storm,accumulo
-
-### agent
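-
-As a sketch, a minimal `SliderTester.ini` assembled from the parameters above
-might look like the following; the host names, paths and user are placeholders,
-and the `agent` section is left empty as above:
-
-    [slider]
-    package = /path/to/slider-package.tar.gz
-    jdk.path = /usr/jdk/latest
-
-    [app]
-    package = /path/to/app-package.zip
-
-    [cluster]
-    slider.zookeeper.quorum = ZK_HOST:2181
-    fs.defaultFS = hdfs://NN_HOST:8020
-
-    [test]
-    app.user = testuser
-    hdfs.root.user = hdfs
-    hdfs.root.dir = /slidertst
-    hdfs.user.dir = /user
-    test.root = /test
-    cluster.name = tst1
-    cluster.type = hbase
-
-    [agent]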
diff --git a/src/site/markdown/developing/releasing.md b/src/site/markdown/developing/releasing.md
deleted file mode 100644
index 8c4ca19..0000000
--- a/src/site/markdown/developing/releasing.md
+++ /dev/null
@@ -1,195 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-
-# Release Process
-
-Here is our release process.
-
-
-## IMPORTANT: THIS IS OUT OF DATE WITH THE MOVE TO THE ASF ## 
-
-### Before you begin
-
-Check out the latest version of the develop branch and
-run the tests.
-version of the code that is not the one you are developing on
-(ideally, a clean VM), to ensure that you aren't releasing a slightly
-modified version of your own, and that you haven't accidentally
-included passwords or other test run details into the build resource
-tree.
-
-The `slider-funtest` functional test package is used to run functional
-tests against a running Hadoop YARN cluster. It needs to be configured
-according to the instructions in [testing](testing.html) to
-create HBase and Accumulo clusters in the YARN cluster.
-
-*Make sure that the functional tests are passing (and not being skipped) before
-starting to make a release*
-
-
-
-**Step #1:** Create a JIRA for the release, estimate 3h
-(so you don't try to skip the tests)
-
-    export SLIDER_RELEASE_JIRA=SLIDER-13927
-    
-**Step #2:** Check everything in. Git flow won't let you progress without this.
-
-**Step #3:** Git flow: create a release branch
-
-    export SLIDER_RELEASE=0.5.2
-    
-    git flow release start slider-$SLIDER_RELEASE
-
-**Step #4:** In the new branch, increment those version numbers using [the maven
-versions plugin](http://mojo.codehaus.org/versions-maven-plugin/):
-
-    mvn versions:set -DnewVersion=$SLIDER_RELEASE
-
-
-**Step #5:** commit the changed POM files
-  
-    git add <changed files>
-    git commit -m "$SLIDER_RELEASE_JIRA updating release POMs for $SLIDER_RELEASE"
-
-  
-**Step #6:** Do a final test run to make sure nothing is broken
-
-In the `slider` directory, run:
-
-    mvn clean install -DskipTests
-
-Once everything is built -including .tar files- run the tests:
-
-    mvn test
-
-This will run the functional tests as well as the `slider-core` tests.
-
-It is wise to reset any VMs here, and on live clusters kill all running jobs.
-This stops functional tests failing because the job doesn't get started before
-the tests time out.
-
-As the test run takes 30-60+ minutes, now is a good time to consider
-finalizing the release notes.
-
-
-**Step #7:** Build the release package
-
-Run
-    
-    mvn clean site:site site:stage package -DskipTests
-
-
-
-**Step #8:** Validate the tar file
-
-Look in `slider-assembly/target` to find the `.tar.gz` file, and the
-expanded version of it. Inspect that expanded version to make sure that
-everything looks good -and that the versions of all the dependent artifacts
-look good too: there must be no `-SNAPSHOT` dependencies.
-
-
-**Step #9:** Build the release notes
-
-Create a one-line plain text release note for commits and tags,
-and a multi-line markdown release note, which will be used for artifacts.
-
-
-    Release against hadoop 2.4.0, HBase-0.98.1 and Accumulo 1.5.1 artifacts. 
-
-The multi-line release notes should go into `slider/src/site/markdown/release_notes`.
-
-
-These should be committed:
-
-    git add --all
-    git commit -m "$SLIDER_RELEASE_JIRA updating release notes"
-
-**Step #10:** End the git flow
-
-Finish the git flow release, either in the SourceTree GUI or
-the command line:
-
-    
-    git flow release finish slider-$SLIDER_RELEASE
-    
-
-On the command line you have to enter the one-line release description
-prepared earlier.
-
-You will now be back on the `develop` branch.
-
-**Step #11:** update mvn versions
-
-Switch back to `develop` and update its version number past
-the release number
-
-
-    export SLIDER_RELEASE=0.6.0-SNAPSHOT
-    mvn versions:set -DnewVersion=$SLIDER_RELEASE
-    git commit -a -m "$SLIDER_RELEASE_JIRA updating development POMs to $SLIDER_RELEASE"
-
-**Step #12:** Push the release and develop branches to github 
-
-    git push origin master develop 
-
-(assuming that `origin` maps to `git@github.com:hortonworks/slider.git`;
- you can check this with `git remote -v`)
-
-
-The `git-flow` program automatically pushes up the `release/slider-X.Y` branch,
-before deleting it locally.
-
-If you are planning on any release work of more than a single test run,
-consider having your local release branch track the master.
-
-
-**Step #13:** Release small artifacts on github
-
-Browse to https://github.com/hortonworks/slider/releases/new
-
-Create a new release on the site by following the instructions
-
-Files under 5GB can be published directly. Otherwise, follow step #14.
-
-**Step #14:**  For releasing via an external CDN (e.g. Rackspace Cloud)
-
-Using the web GUI for your particular distribution network, upload the
-`.tar.gz` artifact
-
-After doing this, edit the release notes on github to point to the
-tar file's URL.
-
-Example: 
-    [Download slider-0.10.1-all.tar.gz](http://dffeaef8882d088c28ff-185c1feb8a981dddd593a05bb55b67aa.r18.cf1.rackcdn.com/slider-0.10.1-all.tar.gz)
-
-**Step #15:** Announce the release 
-
-**Step #16:** Finish the JIRA
-
-Log the time, close the issue. This should normally be the end of a 
-sprint -so wrap that up too.
-
-**Step #17:** Get back to developing!
-
-Check out the develop branch and purge all release artifacts
-
-    git checkout develop
-    git pull origin
-    mvn clean
-    
diff --git a/src/site/markdown/developing/testing.md b/src/site/markdown/developing/testing.md
deleted file mode 100644
index 2ad2bdf..0000000
--- a/src/site/markdown/developing/testing.md
+++ /dev/null
@@ -1,182 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Testing
-
-     The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
-      NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED",  "MAY", and
-      "OPTIONAL" in this document are to be interpreted as described in
-      RFC 2119.
-
-## Standalone Tests
-
-Slider core contains a suite of tests that are designed to run on the local machine,
-using Hadoop's `MiniDFSCluster` and `MiniYARNCluster` classes to create small,
-one-node test clusters. All the YARN/HDFS code runs in the JUnit process; the
-AM and spawned processes run independently.
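-
-To run just these local tests, maven's module selector can be used (a sketch):
-
-    mvn test -pl slider-core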
-
-
-
-### For HBase Tests in `slider-providers/hbase`
-
-Requirements
-* A copy of `hbase.tar.gz` in the local filesystem
-* An expanded `hbase.tar.gz` in the local filesystem
-
-
-### For Accumulo Tests in `slider-providers/accumulo`
-* A copy of `accumulo.tar.gz` in the local filesystem, 
-* An expanded `accumulo.tar.gz` in the local filesystem, 
-* an expanded Zookeeper installation
-
-All of these need to be defined in the file `slider-core/src/test/resources/slider-test.xml`
-
-Example:
-  
-    <configuration>
-    
-      <property>
-        <name>slider.test.hbase.enabled</name>
-        <description>Flag to enable/disable HBase tests</description>
-        <value>true</value>
-      </property>
-      
-      <property>
-        <name>slider.test.hbase.home</name>
-        <value>/home/slider/hbase-0.98.0</value>
-        <description>HBASE Home</description>
-      </property>
-    
-      <property>
-        <name>slider.test.hbase.tar</name>
-        <value>/home/slider/Projects/hbase-0.98.0-bin.tar.gz</value>
-        <description>HBASE archive URI</description>
-      </property>
-    
-      <property>
-        <name>slider.test.accumulo.enabled</name>
-        <description>Flag to enable/disable Accumulo tests</description>
-        <value>true</value>
-      </property>
-    
-      <property>
-        <name>slider.test.accumulo.home</name>
-        <value>
-          /home/slider/accumulo-1.6.0-SNAPSHOT/</value>
-        <description>Accumulo Home</description>
-      </property>
-    
-      <property>
-        <name>slider.test.accumulo.tar</name>
-        <value>/home/slider/accumulo-1.6.0-SNAPSHOT-bin.tar</value>
-        <description>Accumulo archive URI</description>
-      </property>
-
-      <property>
-        <name>slider.test.am.restart.time</name>
-        <description>Time in millis to await an AM restart</description>
-        <value>30000</value>
-      </property>
-
-      <property>
-        <name>zk.home</name>
-        <value>/home/slider/zookeeper</value>
-        <description>Zookeeper home dir on target systems</description>
-      </property>
-    
-      <property>
-        <name>hadoop.home</name>
-        <value>/home/slider/hadoop-2.2.0</value>
-        <description>Hadoop home dir on target systems</description>
-      </property>
-      
-    </configuration>
-
-*Important:* For the local tests, a simple local filesystem path is used for
-all the values. 
-
-For the functional tests, the accumulo and hbase tar properties will
-need to be set to a URL of a tar file that is accessible to all the
-nodes in the cluster -which usually means HDFS, and so an `hdfs://` URL.
-
-
-##  Provider-specific parameters
-
-An individual provider can pick up settings from their own
-`src/test/resources/slider-client.xml` file, or the one in `slider-core`.
-We strongly advise placing all the values in the `slider-core` file:
-
-1. All uncertainty about which file is picked up on the class path first goes
-away.
-2. There's one place to keep all the configuration values in sync.
-
-### Agent Tests
-
-
-### HBase Tests
-
-The HBase tests can be enabled or disabled
-    
-    <property>
-      <name>slider.test.hbase.enabled</name>
-      <description>Flag to enable/disable HBase tests</description>
-      <value>true</value>
-    </property>
-        
-Mandatory test parameters must be added to `slider-client.xml`
-
-  
-    <property>
-      <name>slider.test.hbase.tar</name>
-      <description>Path to the HBase Tar file in HDFS</description>
-      <value>hdfs://sandbox:8020/user/slider/hbase.tar.gz</value>
-    </property>
-    
-    <property>
-      <name>slider.test.hbase.appconf</name>
-      <description>Path to the directory containing the HBase application config</description>
-      <value>file://${user.dir}/src/test/configs/sandbox/hbase</value>
-    </property>
-    
-Optional parameters:  
-  
-    <property>
-      <name>slider.test.hbase.launch.wait.seconds</name>
-      <description>Time to wait in seconds for HBase to start</description>
-      <value>180000</value>
-    </property>
-
-
-#### Accumulo configuration options
-
-Enable/disable the tests
-
-    <property>
-      <name>slider.test.accumulo.enabled</name>
-      <description>Flag to enable/disable Accumulo tests</description>
-      <value>true</value>
-    </property>
-         
-         
-Optional parameters
-         
-    <property>
-      <name>slider.test.accumulo.launch.wait.seconds</name>
-      <description>Time to wait in seconds for Accumulo to start</description>
-      <value>180000</value>
-    </property>
-
diff --git a/src/site/markdown/examples.md b/src/site/markdown/examples.md
deleted file mode 100644
index 4b79a10..0000000
--- a/src/site/markdown/examples.md
+++ /dev/null
@@ -1,159 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Examples
-
- 
-## Setup
- 
-### Setting up a YARN cluster
- 
-For simple local demos, a Hadoop pseudo-distributed cluster will suffice -if on a VM then
-its configuration should be changed to use a public (machine public) IP.
-
-The examples below all assume there is a cluster node called `master`, which
-hosts the HDFS NameNode and the YARN Resource Manager.
-
-
-# preamble
-
-    export HADOOP_CONF_DIR=~/conf
-    export PATH=~/hadoop/bin:~/hadoop/sbin:~/zookeeper-3.4.5/bin:$PATH
-    
-    hdfs namenode -format master
-  
-
-
-
-# start all the services
-
-    nohup hdfs --config $HADOOP_CONF_DIR namenode & 
-    nohup hdfs --config $HADOOP_CONF_DIR datanode &
-    
-    
-    nohup yarn --config $HADOOP_CONF_DIR resourcemanager &
-    nohup yarn --config $HADOOP_CONF_DIR nodemanager &
-    
-# using hadoop/sbin service launchers
-    
-    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
-    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
-    yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
-    yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
-    
-    ~/zookeeper/bin/zkServer.sh start
-    
-    
-# stop them
-
-    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
-    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
-    
-    yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
-    yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
-    
-
-
-NN up on [http://master:50070/dfshealth.jsp](http://master:50070/dfshealth.jsp);
-the RM web UI is on port 8088 of the resource manager host.
-
-# zookeeper
-
-    # startup
-    ~/zookeeper/bin/zkServer.sh start
-
-    # shutdown
-    ~/zookeeper/bin/zkServer.sh stop
-
-
-Tip: after a successful run on a local cluster, do a quick `rm -rf $HADOOP_HOME/logs`
-to keep the log bloat under control.
-
-## Get HBase in
-
-Copy the HBase tar file from the local filesystem into HDFS:
-
-    hdfs dfs -rm hdfs://master:9090/hbase.tar
-    hdfs dfs -copyFromLocal hbase-0.98.0-bin.tar hdfs://master:9090/hbase.tar
-
-or
-    
-    hdfs dfs -copyFromLocal hbase-0.96.0-bin.tar hdfs://master:9090/hbase.tar
-    hdfs dfs -ls hdfs://master:9090/
-    
-
-### Optional: point bin/slider at your chosen cluster configuration
-
-    export SLIDER_CONF_DIR=~/Projects/slider/slider-core/src/test/configs/ubuntu-secure/slider
-
-### Optional: Clean up any existing slider cluster details
-
-This is for demos only -on a live system it would lose the clusters and their databases.
-
-    hdfs dfs -rm -r hdfs://master:9090/user/home/stevel/.slider
-
-## Create a Slider Cluster
- 
- 
-    slider  create cl1 \
-    --component worker 1  --component master 1 \
-     --manager master:8032 --filesystem hdfs://master:9090 \
-     --zkhosts localhost:2181 --image hdfs://master:9090/hbase.tar
-    
-    # create the cluster
-    
-    slider create cl1 \
-     --component worker 4 --component master 1 \
-      --manager master:8032 --filesystem hdfs://master:9090 --zkhosts localhost \
-      --image hdfs://master:9090/hbase.tar \
-      --appconf file:///Users/slider/Hadoop/configs/master/hbase \
-      --compopt master jvm.heap 128 \
-      --compopt master env.MALLOC_ARENA_MAX 4 \
-      --compopt worker jvm.heap 128 
-
-    # freeze the cluster
-    slider freeze cl1 \
-    --manager master:8032 --filesystem hdfs://master:9090
-
-    # thaw a cluster
-    slider thaw cl1 \
-    --manager master:8032 --filesystem hdfs://master:9090
-
-    # destroy the cluster
-    slider destroy cl1 \
-    --manager master:8032 --filesystem hdfs://master:9090
-
-    # list clusters
-    slider list cl1 \
-    --manager master:8032 --filesystem hdfs://master:9090
-    
-    slider flex cl1 \
-    --manager master:8032 --filesystem hdfs://master:9090 \
-    --component worker 5
-    
-## Create an Accumulo Cluster
-
-    slider create accl1 --provider accumulo \
-    --component master 1 --component tserver 1 --component gc 1 --component monitor 1 --component tracer 1 \
-    --manager localhost:8032 --filesystem hdfs://localhost:9000 \
-    --zkhosts localhost:2181 --zkpath /local/zookeeper \
-    --image hdfs://localhost:9000/user/username/accumulo-1.6.0-SNAPSHOT-bin.tar \
-    --appconf hdfs://localhost:9000/user/username/accumulo-conf \
-    -O zk.home /local/zookeeper -O hadoop.home /local/hadoop \
-    -O site.monitor.port.client 50095 -O accumulo.password secret 
-    
diff --git a/src/site/markdown/exitcodes.md b/src/site/markdown/exitcodes.md
deleted file mode 100644
index 3a78920..0000000
--- a/src/site/markdown/exitcodes.md
+++ /dev/null
@@ -1,161 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Client Exit Codes
-
-Here are the exit codes returned by the Slider client.
-
-Exit code values 1 and 2 are interpreted by YARN -in particular, converting the
-"1" value from an error into a successful shut down. Slider
-converts the -1 error code from a forked process into `EXIT_PROCESS_FAILED`,
-no. 72.
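-
-Scripts can branch on these codes; for example (the instance name is illustrative):
-
-    slider exists instance1 --live
-    if [ $? -eq 0 ]; then echo "instance1 is running"; fi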
-
-
-    /**
-     * 0: success
-     */
-    int EXIT_SUCCESS                    =  0;
-    
-    /**
-     * -1: generic "false" response. The operation worked but
-     * the result was not true
-     */
-    int EXIT_FALSE                      = -1;
-    
-    /**
-     * Exit code when a client requested service termination:
-     */
-    int EXIT_CLIENT_INITIATED_SHUTDOWN  =  1;
-    
-    /**
-     * Exit code when targets could not be launched:
-     */
-    int EXIT_TASK_LAUNCH_FAILURE        =  2;
-    
-    /**
-     * Exit code when an exception was thrown from the service:
-     */
-    int EXIT_EXCEPTION_THROWN           = 32;
-    
-    /**
-     * Exit code when a usage message was printed:
-     */
-    int EXIT_USAGE                      = 33;
-    
-    /**
-     * Exit code when something happened but we can't be specific:
-     */
-    int EXIT_OTHER_FAILURE              = 34;
-    
-    /**
-     * Exit code when a control-C, kill -3, signal was picked up:
-     */
-                                  
-    int EXIT_INTERRUPTED                = 35;
-    
-    /**
-     * Exit code when the command line doesn't parse, or
-     * when it is otherwise invalid.
-     */
-    int EXIT_COMMAND_ARGUMENT_ERROR     = 36;
-    
-    /**
-     * Exit code when the configuration is invalid or incomplete:
-     */
-    int EXIT_BAD_CONFIGURATION          = 37;
-    
-    /**
-     * Exit code when a connectivity problem prevents the operation:
-     */
-    int EXIT_CONNECTIVTY_PROBLEM        = 38;
-    
-    /**
-     * internal error: {@value}
-     */
-    int EXIT_INTERNAL_ERROR = 64;
-    
-    /**
-     * Unimplemented feature: {@value}
-     */
-    int EXIT_UNIMPLEMENTED =        65;
-  
-    /**
-     * service entered the failed state: {@value}
-     */
-    int EXIT_YARN_SERVICE_FAILED =  66;
-  
-    /**
-     * service was killed: {@value}
-     */
-    int EXIT_YARN_SERVICE_KILLED =  67;
-  
-    /**
-     * timeout on monitoring client: {@value}
-     */
-    int EXIT_TIMED_OUT =            68;
-  
-    /**
-     * service finished with an error: {@value}
-     */
-    int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 69;
-  
-    /**
-     * the application instance is unknown: {@value}
-     */
-    int EXIT_UNKNOWN_INSTANCE = 70;
-  
-    /**
-     * the application instance is in the wrong state for that operation: {@value}
-     */
-    int EXIT_BAD_STATE =    71;
-  
-    /**
-     * A spawned master process failed 
-     */
-    int EXIT_PROCESS_FAILED = 72;
-  
-    /**
-     * The cluster failed -too many containers were
-     * failing or some other threshold was reached
-     */
-    int EXIT_DEPLOYMENT_FAILED = 73;
-  
-    /**
-     * The application is live -and the requested operation
-     * does not work if the cluster is running
-     */
-    int EXIT_APPLICATION_IN_USE = 74;
-  
-    /**
-     * There already is an application instance of that name
-     * when an attempt is made to create a new instance
-     */
-    int EXIT_INSTANCE_EXISTS = 75;
-    
-    /**
-     * The resource was not found
-     */
-    int EXIT_NOT_FOUND = 77;
-    
-## Other exit codes
-
-YARN itself can fail containers, here are some of the causes we've seen
-
-
-    143: (128 + SIGTERM) appears to be triggered by the container exceeding
-    its cgroup memory limits
- 
diff --git a/src/site/markdown/getting_started.md b/src/site/markdown/getting_started.md
deleted file mode 100644
index f73f3c2..0000000
--- a/src/site/markdown/getting_started.md
+++ /dev/null
@@ -1,509 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Project Slider: Getting Started
-
-
-## Introduction
-
-The following provides the steps required for setting up a cluster and deploying a YARN hosted application using Slider.
-
-* [Prerequisites](#sysreqs)
-
-* [Setup the Cluster](#setup)
-
-* [Download Slider Packages](#download)
-
-* [Build Slider](#build)
-
-* [Install Slider](#install)
-
-* [Deploy Slider Resources](#deploy)
-
-* [Download Sample Application Packages](#downsample)
-
-* [Install, Configure, Start and Verify Sample Application](#installapp)
-
-* [Appendix A: Storm Sample Application Specifications](#appendixa)
-
-* [Appendix B: HBase Sample Application Specifications](#appendixb)
-
-## <a name="sysreqs"></a>System Requirements
-
-The Slider deployment has the following minimum system requirements:
-
-* Hadoop 2.4+
-
-* Required Services: HDFS, YARN, MapReduce2 and ZooKeeper
-
-* Oracle JDK 1.7 (64-bit)
-
-## <a name="setup"></a>Setup the Cluster
-
-After setting up your Hadoop cluster (using Ambari or other means) with the 
-services listed above, modify your YARN configuration to allow for multiple
-containers on a single host. In `yarn-site.xml` make the following modifications:
-
-<table>
-  <tr>
-    <td>Property</td>
-    <td>Value</td>
-  </tr>
-  <tr>
-    <td>yarn.scheduler.minimum-allocation-mb</td>
-    <td>>= 256</td>
-  </tr>
-  <tr>
-    <td>yarn.nodemanager.delete.debug-delay-sec</td>
-    <td>>= 3600 (to retain for an hour)</td>
-  </tr>
-</table>
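-
-In `yarn-site.xml` form, with the suggested minimum values:
-
-    <property>
-      <name>yarn.scheduler.minimum-allocation-mb</name>
-      <value>256</value>
-    </property>
-
-    <property>
-      <name>yarn.nodemanager.delete.debug-delay-sec</name>
-      <value>3600</value>
-    </property>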
-
-
-There are other options detailed in the troubleshooting page available <a href="troubleshooting.html">here</a>.
-
-
-## <a name="download"></a>Download Slider Packages
-
-The Slider package is available at:
-
-[http://public-repo-1.hortonworks.com/slider/slider-0.22.0-all.tar.gz](http://public-repo-1.hortonworks.com/slider/slider-0.22.0-all.tar.gz)
-
-## <a name="build"></a>Build Slider
-
-* From the top level directory, execute `mvn clean install -DskipTests`
-* Use the generated compressed tar file in slider-assembly/target directory (e.g. slider-0.22.0-all.tar.gz) for the subsequent steps
-
-## <a name="install"></a>Install Slider
-
-Follow these steps to expand/install Slider:
-
-    mkdir ${slider-install-dir}
-
-    cd ${slider-install-dir}
-
-Login as the "yarn" user (assuming this is a host associated with the installed cluster), e.g. `su yarn`.
-*This assumes that all apps are run as the "yarn" user. Any other user can be used to run the apps -ensure that the required file permissions are granted.*
-
-Expand the tar file:  `tar -xvf slider-0.22.0-all.tar.gz`
-
-Browse to the Slider directory: `cd slider-0.22.0/bin`
-
-      export PATH=$PATH:/usr/jdk64/jdk1.7.0_45/bin 
-    
-(or the path to the JDK bin directory)
-
-Modify Slider configuration file `${slider-install-dir}/slider-0.22.0/conf/slider-client.xml` to add the following properties:
-
-      <property>
-          <name>yarn.application.classpath</name>
-          <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-      </property>
-      
-      <property>
-          <name>slider.zookeeper.quorum</name>
-          <value>yourZooKeeperHost:port</value>
-      </property>
-
-
-In addition, specify the scheduler and HDFS addresses as follows:
-
-    <property>
-        <name>yarn.resourcemanager.address</name>
-        <value>yourResourceManagerHost:8050</value>
-    </property>
-    <property>
-        <name>yarn.resourcemanager.scheduler.address</name>
-        <value>yourResourceManagerHost:8030</value>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://yourNameNodeHost:8020</value>
-    </property>
-
-
-Execute:
- 
-    ${slider-install-dir}/slider-0.22.0/bin/slider version
-
-Ensure there are no errors and you can see "Compiled against Hadoop 2.4.0"
-
-## <a name="deploy"></a>Deploy Slider Resources
-
-Ensure that all file folders are accessible to the user creating the application instance. The example assumes "yarn" to be that user.
-
-### Create HDFS root folder for Slider
-
-Perform the following steps to create the Slider root folder with the appropriate permissions:
-
-    su hdfs
-    
-    hdfs dfs -mkdir /slider
-    
-    hdfs dfs -chown yarn:hdfs /slider
-    
-    hdfs dfs -mkdir /user/yarn
-    
-    hdfs dfs -chown yarn:hdfs /user/yarn
-
-### Load Slider Agent
-
-    su yarn
-    
-    hdfs dfs -mkdir /slider/agent
-    
-    hdfs dfs -mkdir /slider/agent/conf
-    
-    hdfs dfs -copyFromLocal ${slider-install-dir}/slider-0.22.0/agent/slider-agent-0.22.0.tar.gz /slider/agent
-
-### Create and deploy Slider Agent configuration
-
-Create an agent config file (agent.ini) based on the sample available at:
-
-    ${slider-install-dir}/slider-0.22.0/agent/conf/agent.ini
-
-The sample agent.ini file can be used as is (see below). Some of the parameters of interest are:
-
-* `log_level` = INFO or DEBUG, to control the verbosity of the log
-* `app_log_dir` = the relative location of the application log file
-* `log_dir` = the relative location of the agent and command log file
-
-    [server]
-    hostname=localhost
-    port=8440
-    secured_port=8441
-    check_path=/ws/v1/slider/agents/
-    register_path=/ws/v1/slider/agents/{name}/register
-    heartbeat_path=/ws/v1/slider/agents/{name}/heartbeat
-
-    [agent]
-    app_pkg_dir=app/definition
-    app_install_dir=app/install
-    app_run_dir=app/run
-    app_task_dir=app/command-log
-    app_log_dir=app/log
-    app_tmp_dir=app/tmp
-    log_dir=infra/log
-    run_dir=infra/run
-    version_file=infra/version
-    log_level=INFO
-
-    [python]
-
-    [command]
-    max_retries=2
-    sleep_between_retries=1
-
-    [security]
-
-    [heartbeat]
-    state_interval=6
-    log_lines_count=300
-
-
-Once created, deploy the agent.ini file to HDFS:
-
-    su yarn
-    
-    hdfs dfs -copyFromLocal agent.ini /slider/agent/conf
-
-## <a name="downsample"></a>Download Sample Application Packages
-
-There are three sample application packages available for download to use with Slider:
-
-<table>
-  <tr>
-    <td>Application</td>
-    <td>Version</td>
-    <td>URL</td>
-  </tr>
-  <tr>
-    <td>Apache HBase</td>
-    <td>0.96.0</td>
-    <td>http://public-repo-1.hortonworks.com/slider/hbase_v096.tar</td>
-  </tr>
-  <tr>
-    <td>Apache Storm</td>
-    <td>0.9.1</td>
-    <td>http://public-repo-1.hortonworks.com/slider/storm_v091.tar</td>
-  </tr>
-  <tr>
-    <td>Apache Accumulo</td>
-    <td>1.5.1</td>
-    <td>http://public-repo-1.hortonworks.com/slider/accumulo_v151.tar</td>
-  </tr>
-</table>
-
-
-Download the packages and deploy one of these sample applications to YARN via Slider using the steps below.
-
-## <a name="installapp"></a>Install, Configure, Start and Verify Sample Application
-
-* [Load Sample Application Package](#load)
-
-* [Create Application Specifications](#create)
-
-* [Start the Application](#start)
-
-* [Verify the Application](#verify)
-
-* [Manage the Application Lifecycle](#manage)
-
-### <a name="load"></a>Load Sample Application Package
-
-    hdfs dfs -copyFromLocal <sample-application-package> /slider
-
-If necessary, create HDFS folders needed by the application. For example, HBase requires the following HDFS-based setup:
-
-    su hdfs
-    
-    hdfs dfs -mkdir /apps
-    
-    hdfs dfs -mkdir /apps/hbase
-    
-    hdfs dfs -chown yarn:hdfs /apps/hbase
-
-### <a name="create"></a>Create Application Specifications
-
-Configuring a Slider application consists of two parts: the [Resource Specification](#resspec)
-and the [Application Configuration](#appconfig). Below are guidelines for creating these files.
-
-*Note: There are sample Resource Specification (`resources.json`) and Application Configuration
-(`appConfig.json`) files in the [Appendix](#appendixa) and also in the root directory of the
-sample application packages (e.g. `hbase-v096/resources.json` and `hbase-v096/appConfig.json`).*
-
-#### <a name="resspec"></a>Resource Specification
-
-Slider needs to know what components (and how many components) are in an application package to deploy. For example, in HBase, the components are **_master_** and **_worker_** -- the latter hosting **HBase RegionServers**, and the former hosting the **HBase Master**. 
-
-As Slider creates each instance of a component in its own YARN container, it also needs to know what to ask YARN for in terms of **memory** and **CPU** for those containers. 
-
-All this information goes into the **Resources Specification** file ("Resource Spec") named `resources.json`. The Resource Spec tells Slider how many instances of each component in the application (such as an HBase RegionServer) to deploy and the parameters for YARN.
-
-Sample Resource Spec files are available in the Appendix:
-
-* [Appendix A: Storm Sample Resource Specification](#appendixa)
-
-* [Appendix B: HBase Sample Resource Specification](#appendixb)
-
-Store the Resource Spec file on your local disk (e.g. `/tmp/resources.json`).
-
-#### <a name="appconfig"></a>Application Configuration
-
-Alongside the Resource Spec there is the **Application Configuration** file ("App Config") which includes parameters that are specific to the application, rather than YARN. The App Config is a file that contains all application configuration. This configuration is applied to the default configuration provided by the application definition and then handed off to the associated component agent.
-
-The App Config defines the configuration details **specific to the application and component** instances. For HBase, this includes any values for the *to-be-generated* hbase-site.xml file, as well as options for individual components, such as the heap sizes of their JVMs.
-
-Sample App Configs are available in the Appendix:
-
-* [Appendix A: Storm Sample Application Configuration](#appendixa)
-
-* [Appendix B: HBase Sample Application Configuration](#appendixb)
-
-Store the appConfig.json file on your local disk and a copy in HDFS:
-
-    su yarn
-    
-    hdfs dfs -mkdir /slider/appconf
-    
-    hdfs dfs -copyFromLocal appConfig.json /slider/appconf
-
-### <a name="start"></a>Start the Application
-
-Once the steps above are completed, the application can be started through the **Slider Command Line Interface (CLI)**.
-
-Change directory to the "bin" directory under the slider installation
-
-    cd ${slider-install-dir}/slider-0.22.0/bin
-
-Execute the following command:
-
-    ./slider create cl1 --manager yourResourceManagerHost:8050 --image hdfs://yourNameNodeHost:8020/slider/agent/slider-agent-0.22.0.tar.gz --template appConfig.json --resources resources.json
-
-### <a name="verify"></a>Verify the Application
-
-The successful launch of the application can be verified via the YARN Resource Manager Web UI. In most instances, this UI is accessible via a web browser at port 8088 of the Resource Manager Host:
-
-![image alt text](images/image_0.png)
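-
-The same information is available from the Resource Manager's REST API; for
-example (assuming the default web port):
-
-    curl "http://yourResourceManagerHost:8088/ws/v1/cluster/apps"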
-
-The specific information for the running application is accessible via the "ApplicationMaster" link that can be seen in the far right column of the row associated with the running application (probably the top row):
-
-![image alt text](images/image_1.png)
-
-### <a name="manage"></a>Manage the Application Lifecycle
-
-Once started, applications can be frozen/stopped, thawed/restarted, and destroyed/removed as follows:
-
-#### Frozen:
-
-    ./slider freeze cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
-
-#### Thawed: 
-
-    ./slider thaw cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
-
-#### Destroyed: 
-
-    ./slider destroy cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
-
-#### Flexed:
-
-    ./slider flex cl1 --component worker 5 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
-
-# <a name="appendixa"></a>Appendix A: Apache Storm Sample Application Specifications
-
-## Storm Resource Specification Sample
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-      },
-      "components" : {
-        "slider-appmaster" : {
-        },
-        "NIMBUS" : {
-            "yarn.role.priority" : "1",
-            "yarn.component.instances" : "1"
-        },
-        "STORM_REST_API" : {
-            "yarn.role.priority" : "2",
-            "yarn.component.instances" : "1"
-        },
-        "STORM_UI_SERVER" : {
-            "yarn.role.priority" : "3",
-            "yarn.component.instances" : "1"
-        },
-        "DRPC_SERVER" : {
-            "yarn.role.priority" : "4",
-            "yarn.component.instances" : "1"
-        },
-        "SUPERVISOR" : {
-            "yarn.role.priority" : "5",
-            "yarn.component.instances" : "1"
-        }
-      }
-    }
-
-
-## Storm Application Configuration Sample
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-          "A site property for type XYZ with name AA": "its value",
-          "site.XYZ.AA": "Value",
-          "site.hbase-site.hbase.regionserver.port": "0",
-          "site.core-site.fs.defaultFS": "${NN_URI}",
-          "Using a well known keyword": "Such as NN_HOST for name node host",
-          "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070",
-          "a global property used by app scripts": "not affiliated with any site-xml",
-          "site.global.app_user": "yarn",
-          "Another example of available keywords": "Such as AGENT_LOG_ROOT",
-          "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
-          "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-      }
-    }
-
-
-# <a name="appendixb"></a>Appendix B:  Apache HBase Sample Application Specifications
-
-## HBase Resource Specification Sample
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-      },
-      "components" : {
-        "HBASE_MASTER" : {
-            "yarn.role.priority" : "1",
-            "yarn.component.instances" : "1"
-        },
-        "slider-appmaster" : {
-        },
-        "HBASE_REGIONSERVER" : {
-            "yarn.role.priority" : "2",
-            "yarn.component.instances" : "1"
-        }
-      }
-    }
-
-
-## HBase Application Configuration Sample
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-        "agent.conf": "/slider/agent/conf/agent.ini",
-        "agent.version": "/slider/agent/version",
-        "application.def": "/slider/hbase_v096.tar",
-        "config_types": "core-site,hdfs-site,hbase-site",
-        "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "package_list": "files/hbase-0.96.1-hadoop2-bin.tar",
-        "site.global.app_user": "yarn",
-        "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
-        "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-0.96.1-hadoop2",
-        "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
-        "site.global.hbase_master_heapsize": "1024m",
-        "site.global.hbase_regionserver_heapsize": "1024m",
-        "site.global.user_group": "hadoop",
-        "site.global.security_enabled": "false",
-        "site.hbase-site.hbase.hstore.flush.retries.number": "120",
-        "site.hbase-site.hbase.client.keyvalue.maxsize": "10485760",
-        "site.hbase-site.hbase.hstore.compactionThreshold": "3",
-        "site.hbase-site.hbase.rootdir": "${NN_URI}/apps/hbase/data",
-        "site.hbase-site.hbase.stagingdir": "${NN_URI}/apps/hbase/staging",
-        "site.hbase-site.hbase.regionserver.handler.count": "60",
-        "site.hbase-site.hbase.regionserver.global.memstore.lowerLimit": "0.38",
-        "site.hbase-site.hbase.hregion.memstore.block.multiplier": "2",
-        "site.hbase-site.hbase.hregion.memstore.flush.size": "134217728",
-        "site.hbase-site.hbase.superuser": "yarn",
-        "site.hbase-site.hbase.zookeeper.property.clientPort": "2181",
-        "site.hbase-site.hbase.regionserver.global.memstore.upperLimit": "0.4",
-        "site.hbase-site.zookeeper.session.timeout": "30000",
-        "site.hbase-site.hbase.tmp.dir": "${AGENT_WORK_ROOT}/work/app/tmp",
-        "site.hbase-site.hbase.local.dir": "${hbase.tmp.dir}/local",
-        "site.hbase-site.hbase.hregion.max.filesize": "10737418240",
-        "site.hbase-site.hfile.block.cache.size": "0.40",
-        "site.hbase-site.hbase.security.authentication": "simple",
-        "site.hbase-site.hbase.defaults.for.version.skip": "true",
-        "site.hbase-site.hbase.zookeeper.quorum": "${ZK_HOST}",
-        "site.hbase-site.zookeeper.znode.parent": "/hbase-unsecure",
-        "site.hbase-site.hbase.hstore.blockingStoreFiles": "10",
-        "site.hbase-site.hbase.hregion.majorcompaction": "86400000",
-        "site.hbase-site.hbase.security.authorization": "false",
-        "site.hbase-site.hbase.cluster.distributed": "true",
-        "site.hbase-site.hbase.hregion.memstore.mslab.enabled": "true",
-        "site.hbase-site.hbase.client.scanner.caching": "100",
-        "site.hbase-site.hbase.zookeeper.useMulti": "true",
-        "site.hbase-site.hbase.regionserver.info.port": "0",
-        "site.hbase-site.hbase.master.info.port": "60010",
-        "site.hbase-site.hbase.regionserver.port": "0",
-        "site.core-site.fs.defaultFS": "${NN_URI}",
-        "site.hdfs-site.dfs.namenode.https-address": "${NN_HOST}:50470",
-        "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070"
-      }
-    }
-
-
diff --git a/src/site/markdown/index.md b/src/site/markdown/index.md
deleted file mode 100644
index 7a5d6e2..0000000
--- a/src/site/markdown/index.md
+++ /dev/null
@@ -1,83 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-
-# Slider: Dynamic YARN Applications
-
-
-
-Slider is a YARN application to deploy existing distributed applications on YARN, 
-monitor them and make them larger or smaller as desired -even while 
-the application is running.
-
-Applications can be stopped, "frozen" and restarted, "thawed" later; the distribution
-of the deployed application across the YARN cluster is persisted -enabling
-a best-effort placement close to the previous locations on a cluster thaw.
-Applications which remember the previous placement of data (such as HBase)
-can exhibit fast start-up times from this feature.
-
-YARN itself monitors the health of "YARN containers" hosting parts of
-the deployed application -it notifies the Slider manager application of container
-failure. Slider then asks YARN for a new container, into which Slider deploys
-a replacement for the failed component. As a result, Slider can keep the
-size of managed applications consistent with the specified configuration, even
-in the face of failures of servers in the cluster -as well as parts of the
-application itself.
-
-Some of the features are:
-
-* Allows users to create on-demand applications in a YARN cluster
-
-* Allows different users/applications to run different versions of the application
-
-* Allows users to configure different application instances differently
-
-* Stop / Suspend / Resume application instances as needed
-
-* Expand / shrink application instances as needed
-
-The Slider tool is a Java command line application.
-
-The tool persists the information as JSON documents in HDFS.
-
-Once the cluster has been started, the cluster can be made to grow or shrink
-using the Slider commands. The cluster can also be stopped, *frozen*
-and later resumed, *thawed*.
-      
-Slider implements all its functionality through YARN APIs and the existing
-application shell scripts. The goal of the application was to have minimal
-code changes and as of this writing, it has required few changes.
-
-## Using 
-
-* [Getting Started](getting_started.html)
-* [Man Page](manpage.html)
-* [Examples](examples.html)
-* [Client Configuration](client-configuration.html)
-* [Client Exit Codes](exitcodes.html)
-* [Security](security.html)
-* [Logging](logging.html)
-* [How to define a new slider-packaged application](slider_specs/index.html)
-* [Application configuration model](configuration/index.html)
-
-
-## Developing 
-
-* [Architecture](architecture/index.html)
-* [Developing](developing/index.html)
-* [Application Needs](app_needs.html)
-* [Service Registry](registry/index.html)
diff --git a/src/site/markdown/manpage.md b/src/site/markdown/manpage.md
deleted file mode 100644
index e701db2..0000000
--- a/src/site/markdown/manpage.md
+++ /dev/null
@@ -1,483 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# slider: YARN-hosted applications
-
-## NAME
-
-slider -YARN-hosted applications
-
-## SYNOPSIS
-
-Slider enables applications to be dynamically created on a YARN-managed datacenter.
-The program can be used to create, pause, and shutdown the application. It can also be used to list current active
-and existing but not running "frozen" application instances.
- 
-## CONCEPTS
-
-1. A *Slider application* is an application packaged to be deployed by Slider. It consists of one or more distributed *components* 
-
-1. A *Slider application instance*  is a slider application configured to be deployable on a specific YARN cluster, with a specific configuration. An instance can be *live* -actually running- or *frozen*. When frozen all its configuration details and instance-specific data are preserved on HDFS.
-
-1. An *image* is a *tar.gz* file containing binaries used to create the application.
-
-1. Images are kept in the HDFS filesystem and identified by their path names; filesystem permissions can be used to share images amongst users.
-
-1. An *image configuration* is a directory that is overlaid file-by-file onto the conf/ directory inside the HBase image.
-
-1. Users can have multiple image configurations -they too are kept in HDFS, identified by their path names, and can be shared by setting the appropriate permissions, along with a configuration template file.
-
-1. Only those files provided in the image configuration directory overwrite the default values contained in the image; all other configuration files are retained.
-
-1. Late-binding properties can also be provided at create time.
-
-1. Slider can overwrite some of the configuration properties to enable the dynamically created components to bind correctly to each other.
-
-1. An *instance directory* is a directory created in HDFS describing the application instance; it records the configuration -both user-specified and application-default- and any dynamically created by slider.
-
-1. A user can create an application instance.
-
-1. A live instance can be *frozen*, saving its final state to its application instance state directory. All running components are shut down.
-
-1. A frozen instance can be *thawed* -its components are started on or near the servers where they were previously running.
-
-1. A frozen instance can be *destroyed*. 
-
-1. Running instances can be listed. 
-
-1. An instance consists of a set of components
-
-1. The supported component types depend upon the slider application.
-
-1. The count of each component must initially be specified when an application instance is created.
-
-1. Users can flex an application instance: adding or removing components dynamically.
-If the application instance is live, the changes will have immediate effect. If not, the changes will be picked up when the instance is next thawed.
-
-
-<!--- ======================================================================= -->
-
-## Invoking Slider
-
- 
-    slider <ACTION> [<name>] [<OPTIONS>]
-
-
-<!--- ======================================================================= -->
-
-## COMMON COMMAND-LINE OPTIONS
-
-### `--conf configuration.xml`
-
-Configure the Slider client. This allows the filesystem, zookeeper instance and other properties to be picked up from the configuration file, rather than on the command line.
-
-Important: *this configuration file is not propagated to the application. It is purely for configuring the client itself.*
-
-### `-D name=value`
-
-Define a Hadoop configuration option which overrides any options in the client configuration XML files.
-
-
-### `-m, --manager url`
-
-URL of the YARN resource manager
-
-
-### `--fs filesystem-uri`
-
-Use the specific filesystem URI as an argument to the operation.
-
-
-
-
-
-<!--- ======================================================================= -->
-
-
-<!--- ======================================================================= -->
-
-## Actions
-
-COMMANDS
-
-
-
-### `build <name>`
-
-Build an instance of the given name, with the specific options.
-
-It is not started; this can be done later with a `thaw` command.
-
-### `create <name>`
-
-Build and run an application instance of the given name.
-
-The `--wait` parameter, if provided, specifies the time to wait until the YARN application is actually running. Even after the YARN application has started, there may be some delay for the instance to start up.
-
-### Arguments for `build` and `create` 
-
-
-
-##### `--package <uri-to-package>`  
-
-This defines the slider application package to be deployed.
-
-
-##### `--option <name> <value>`  
-
-Set an application instance option.
-
-Example:
-
-Set an option to be passed into the `-site.xml` file of the target system; here,
-setting the HDFS block size to 128MB:
-
-    --option site.dfs.blocksize 128m
-    
-Increase the number of YARN containers which must fail before the Slider application instance
-itself fails.
-    
-    -O slider.container.failure.threshold 16
-
-##### `--appconf dfspath`
-
-A URI path to the configuration directory containing the template application specification. The path must be on a filesystem visible to all nodes in the YARN cluster.
-
-1. Only one configuration directory can be specified.
-1. The contents of the directory will only be read when the application instance is created/built.
-
-Example:
-
-    --appconf hdfs://namenode/users/slider/conf/hbase-template
-    --appconf file:///users/accumulo/conf/template
-
-
-
-##### `--apphome localpath`
-
-A path to the home dir of a pre-installed application. If set when a Slider
-application instance is created, the instance will run with the binaries pre-installed
-on the nodes at this location
-
-*Important*: this is a path in the local filesystem which must be present
-on all hosts in the YARN cluster
-
-Example
-
-    --apphome /usr/hadoop/hbase
-
-##### `--template <filename>`
-
-Filename for the template application instance configuration. This
-will be merged with -and can overwrite- the built-in configuration options, and can
-then be overwritten by any command line `--option` and `--compopt` arguments to
-generate the final application configuration.
-
-##### `--resources <filename>`
-
-Filename for the resources configuration. This
-will be merged with -and can overwrite- the built-in resource options, and can
-then be overwritten by any command line `--resopt`, `--rescompopt` and `--component`
-arguments to generate the final resource configuration.
-
-
-##### `--image path`
-
-The full path in Hadoop HDFS to a `.tar` or `.tar.gz` file containing
-the binaries needed to run the target application.
-
-Example
-
-    --image hdfs://namenode/shared/binaries/hbase-0.96.tar.gz
-
-##### `--component <name> <count>`
-
-The desired number of instances of a component
-
-
-Example
-
-    --component worker 16
-
-This just sets the `component.instances` value of the named component's resource configuration;
-it is exactly equivalent to
-
-    --rco worker component.instances 16
-
-
-
-#### `--compopt <component> <option> <value>` 
-
-Provide a specific application configuration option for a component
-
-Example
-
-    --compopt master env.TIMEOUT 10000
-
-These options are saved into the `app_conf.json` file; they are not used to configure the YARN Resource
-allocations, which must use the `--rco` parameter
-
-#### Resource Component option `--rescompopt` `--rco`
-
-`--rescompopt <component> <option> <value>` 
-
-Set any role-specific option, such as its YARN memory requirements.
-
-Example
-
-    --rco master yarn.memory 2048
-    --rco worker yarn.memory max
-
-
-##### `--zkhosts host1:port1[,host2:port2,host3:port3,...]`
-
-The zookeeper quorum.
-
-Example
-
-    --zkhosts zk1:2181,zk2:2181,zk3:4096
-
-If unset, the zookeeper quorum defined in the property `slider.zookeeper.quorum`
-is used
-
-### `destroy <name>`
-
-Destroy a (stopped) application instance.
-
-Important: This deletes all persistent data
-
-Example
-
-    slider destroy instance1
-
-### `exists <name> [--live]`
-
-Probe the existence of the named Slider application instance. If the `--live` flag is set, the instance
-must actually be running
-
-If not, an error code is returned.
-
-When the `--live` flag is unset, the command looks for the application instance to be
-defined in the filesystem -its operation state is not checked.
-
-Return codes
-
-     0 : application instance is defined in the filesystem
-    70 : application instance is unknown
-
-Example:
-
-    slider exists instance4
-
-#### Live Tests
-
-When the `--live` flag is set, the application instance must be running for the command
-to succeed
-
-1. The probe does not check the status of any Slider-deployed services, merely that an application instance has been deployed
-1. An application instance that is finished or failed is not considered to be live.
-
-Return codes
-
-     0 : application instance is running
-    -1 : application instance exists but is not running
-    70 : application instance is unknown
-
-
-Example:
-
-    slider exists instance4 --live
-
-### `flex <name> [--component component count]* `
-
-Flex the number of workers in an application instance to the new value. If greater than before, new copies of the component will be requested. If less, component instances will be destroyed.
-
-This operation has a return value of 0 if the size of a running instance was changed. 
-
-It returns -1 if there is no running application instance, or the size of the flexed instance matches that of the original -in which case its state does not change.
-
-Example
-
-    slider flex instance1 --component worker 8 --filesystem hdfs://host:port
-    slider flex instance1 --component master 2 --filesystem hdfs://host:port
-    
-
-### `freeze <name>  [--force] [--wait time] [--message text]`
-
-Freeze the application instance. The running application is stopped. Its settings are retained in HDFS.
-
-The `--wait` argument can specify a time in seconds to wait for the application instance to be frozen.
-
-The `--force` flag causes YARN to be asked directly to terminate the application instance.
-The `--message` argument supplies an optional text message to be used in
-the request: this will appear in the application's diagnostics in the YARN RM UI.
-
-If an unknown (or already frozen) application instance is named, no error is returned.
-
-Examples
-
-    slider freeze instance1 --wait 30
-    slider freeze instance2 --force --message "maintenance session"
-
-
-### `list <name>`
-
-List running Slider application instances visible to the user.
-
-If an instance name is given and there is no running instance with that name, an error is returned. 
-
-Example
-
-    slider list
-    slider list instance1
-
-### `registry (--list | --listconf | --getconf <conf>) [--name <name>] [--servicetype <servicetype>] [--verbose]`
-
-List registered application instances visible to the user. This is slightly
-different from the `slider list` command in that it does not make use of the
-YARN application list. Instead it communicates with Zookeeper -and works
-with any application which has registered itself with the "service registry".
-
-The `--name <name>` option names the registry entry to work with. For slider applications,
-this is the application instance name.
-
-
-The `--servicetype <servicetype>` option allows a different service type to be chosen.
-The default is `org.apache.slider`.
-
-The `--verbose` flag triggers more verbose output on the operations
-
-The `--internal` flag indicates the configurations to be listed and retrieved
-are from the "internal" list of configuration data provided for use within a
-deployed application. 
-
-There are two common exit codes, the exact values being documented
-in [Exit Codes](exitcodes.html).
-
-1. If there is no matching service then the operation fails with the
-`EXIT_NOT_FOUND` code (77).
-2. If there are no configurations in a listing, or the named configuration
-is not found, the command returns the exit code `EXIT_NOT_FOUND` (77)
-
-Operations:
-
-#### `registry --list  [--servicetype <servicetype>] [--name <name>] [--verbose]`
-
-List all services of the service type and optionally the name. 
- 
- 
- 
-#### `registry --listconf [--name <name>]  [--internal] [--servicetype <servicetype>]`
-
-List the configurations exported by a named application.
-
-
-
-#### `registry --getconf <configuration> [--format (xml|json|properties)] [--dest <path>] [--internal]`
-
-Get a named configuration in a chosen format. Default: XML.
-
-* `--dest <path>`: the filename or directory to save a configuration to
-* `--format (xml|json|properties)`: defines the output format
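-
-For example (the instance and configuration names are illustrative):
-
-    slider registry --getconf hbase-site --name instance1 --format xml --dest /tmp/hbase-site.xml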
-  
- 
- 
- 
-
-
-
-### `status <name> [--out <filename>]`
-
-Get the status of the named application instance in JSON format. A filename can be used to 
-specify the destination.
-
-Examples:
-
-    slider status instance1 --manager host:port
-    
-    slider status instance2 --manager host:port --out status.json
-
-
-
-### `thaw <name> [--wait time]`
-
-Resume a frozen application instance, recreating it from its previously saved state. This will include a best-effort attempt to create the same number of nodes as before, though their locations may be different.
-
-Examples:
-
-    slider thaw instance2
-    slider thaw instance1 --wait 60
-
-
-If the application instance is already running, this command will not affect it.
-
-
-### `version`
-
-The command `slider version` prints out information about the compiled
-Slider application, the version of Hadoop against which it was built -and
-the version of Hadoop that is currently on its classpath.
-
-Note that this is the client-side Hadoop version, not that running on the server, though
-that can be obtained in the status operation
-
-
-
-## Commands for testing
-
-
-These are clearly abnormal operations; they are here primarily for testing
--and documented for completeness.
-
-### `kill-container <name> --id container-id`
-
-Kill a YARN container belonging to the application. This is useful primarily for testing the
-resilience to failures.
-
-Container IDs can be determined from the application instance status JSON document.
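-
-For example (the container ID below is illustrative; real IDs come from the status document):
-
-    slider status instance1 --out status.json
-    slider kill-container instance1 --id container_1400000000000_0001_01_000002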
-
-
-### `am-suicide <name> [--exitcode code] [--message message] [--wait time]`
-
-This operation is purely for testing Slider Application Master restart;
-it triggers an asynchronous self-destruct operation in the AM -an 
-operation that does not make any attempt to cleanly shut down the process. 
-
-If the application has not exceeded its restart limit (as set by
-`slider.yarn.restart.limit`), YARN will attempt to restart the failed application.
-
-Example
-
-    slider am-suicide instance1 --exitcode 1 --wait 5000 --message "test"
-
-<!--- ======================================================================= -->
-
-
-## Instance Naming
-
-Application instance names must:
-
-1. be at least one character long
-1. begin with a lower case letter
-1. All other characters must be in the range \[a-z,0-9,_]
-1. All upper case characters are converted to lower case
- 
-Example valid names:
-
-    slider1
-    storm4
-    hbase_instance
-    accumulo_m1_tserve4
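-
-These rules reduce to a simple pattern; a hedged shell sketch for
-pre-validating a name, assuming lower-casing has already been applied:
-
-    [[ "$name" =~ ^[a-z][a-z0-9_]*$ ]] && echo "valid" || echo "invalid"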
-
diff --git a/src/site/markdown/registry/a_YARN_service_registry.md b/src/site/markdown/registry/a_YARN_service_registry.md
deleted file mode 100644
index fa58d9c..0000000
--- a/src/site/markdown/registry/a_YARN_service_registry.md
+++ /dev/null
@@ -1,227 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# A YARN Service Registry
-
-## April 2014
-
-# Introduction
-
-This document looks at the needs and options of a service registry.
-
-The core issue is that as the location(s) of a dynamically deployed application are unknown, the standard Hadoop and Java configuration model of some form of text files containing hostnames, ports and URLS no longer works. You cannot define up-front where a service will be.
-
-Some Hadoop applications -HBase and Accumulo- have solved this with custom ZK bindings. This works for the specific clients, but requires the HBase and Accumulo client JARs in order to be able to work with the content (or a re-implementation with knowledge of the non-standard contents of the ZK nodes).
-
-Other YARN applications will need to publish their bindings -this includes, but is not limited to, Slider-deployed applications. Again, these applications could use their own registration and binding model, which would again require custom clients to locate the registry information and parse the contents.
-
-YARN provides some minimal publishing of AM remote endpoints: a URL to what is assumed to be a Web UI (not a REST API), and an IPC port. The URL is displayed in the YARN UI -in which case it is accessed via a proxy which (currently) only supports HTTP GET operations. The YARN API call to list all applications can be used to locate a named instance of an application by (user, application-type, name), and then obtain the raw URL and IPC endpoints. This enumeration process is an O(apps) operation on the YARN RM and only provides access to those two endpoints. Even with the raw URL, REST operations have proven "troublesome", due to a web filter which redirects all direct requests to the proxy -unless they come from the same host as the proxy.
-
-Hadoop client applications tend to retrieve all their configuration information from files in the local filesystem, hadoop-site.xml, hdfs-site.xml, hbase-site.xml, etc. This requires the configuration files to be present on all systems. Tools such as Ambari can keep the files in the server up to date -assuming a low rate of change- but these tools do nothing for the client applications themselves. It is up to the cluster clients to (somehow) retrieve these files, and to keep their copies up to date. *This is a problem that exists with today's applications*.
-
-As an example, if a YARN client does not know the value of "yarn.application.classpath", it cannot successfully deploy any application in the YARN cluster which needs the cluster-side Hadoop and YARN JARs on its application master's classpath. This is not a theoretical problem, as some clusters have a different classpath from the default: without a correct value the Slider AM does not start. And, as it is designed to be run remotely, it cannot rely on a local installation of YARN to provide the correct values ([YARN-973](https://issues.apache.org/jira/browse/YARN-973)).
-
-# What do we need?
-
-**Discovery**: An IPC and URL discovery system for service-aware applications to use to look up a service to which they wish to talk. This is not an ORB -it's not doing redirection- but it is something that needs to be used before starting IPC or REST communications.
-
-**Configuration**: A way for clients of a service to retrieve more configuration data than simply the service endpoints. For example: everything needed to create a site.xml document.
-
-## Client-side
-
-* Allow clients of a YARN application to locate the service instance and its service ports (web, IPC, REST...) efficiently even on a large YARN cluster. 
-
-* Allow clients to retrieve configuration values which can be processed client-side into the configuration files and options which the application needs
-
-* Give clients confidence that the service with which they interact is the one they expect to interact with -not another potentially malicious service deployed by a different user. 
-
-* Allow clients to watch a service and retrieve notification of changes
-
-* Provide cross-language support.
-
-## For all Services
-
-* Allow services to publish their binding details for the AM and for code running in the containers (which may be published by the containers themselves)
-
-* Use entries in registry as a way of enforcing uniqueness of the instance (app, owner, name)? 
-
-* Values to update when a service is restarted on a different host
-
-* Values to indicate when a service is not running. This may be implicit "no entry found" or explicit "service exists but not running"
-
-* Services to be able to act as clients to other services
-
-## For Slider Services (and presumably others)
-
-* Ability to publish information about configuration documents that can be retrieved -and URLs
-
-* Ability to publish facts internal to the application (e.g. agent reporting URLs)
-
-* Ability to use service paths as a way to ensure a single instance of a named service can be deployed by a user
-
-## Management and PaaS UIs
-
-* Retrieve lists of web UI URLs of AM and of deployed components
-
-* Enumerate components and their status
-
-* Retrieve dynamic assignments of IPC ports
-
-* Retrieve dynamic assignments of JMX ports
-
-* Retrieve any health URLs for regular probes
-
-* Listen to changes in the service mix -the arrival and departure of service instances, as well as changes in their contents.
-
-
-
-## Other Needs
-
-* Registry-configured applications. In-cluster applications should be able to subscribe to part of the registry
-to pick up changes that affect them -both for their own application configuration, and for details about
-applications on which they themselves depend.
-
-* Knox: get URLs that need to be converted into remote paths
-
-* Cloud-based deployments: work on virtual infrastructures where hostnames are unpredictable.
-
-# Open Source Registry code
-
-What can we use to implement this from ASF and ASF-compatible code? 
-
-## Zookeeper
-
-We'd need a good reason not to use this. There are still some issues:
-
-1. Limits on amount of published data?
-
-2. Load limits, especially during cluster startup, or if a 500-mapper job all wants to do a lookup.
-
-3. Security story
-
-4. Impact of other ZK load on the behaviour of the service registry -will it cause problems if overloaded -and are they recoverable?
-
-## Apache Curator
-
-Netflix's curator framework -now [Apache Curator](http://curator.apache.org/)- adds a lot to make working with ZK easier, including pluggable retry policies, binding tools and other things.
-
-There is also its "experimental" [service discovery framework](http://curator.apache.org/curator-x-discovery-server/index.html), which
-
-1. Allows a service to register a URL with a name and unique ID (and custom metadata). Multiple services of a given name can be registered.
-
-2. Allows a service to register >1 URL.
-
-3. Has a service client which performs lookup and can cache results.
-
-4. Has a REST API (see the sketch below)
-
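-A hedged sketch of enumerating instances via that REST API; the
-`/v1/service/{name}` path follows the curator-x-discovery-server
-documentation but should be treated as an assumption, as should the host
-and port:
-
-    # list registered instances of the service named "slider"
-    curl http://discovery-host:8080/v1/service/slider
-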
-Limitations
-
-* The service discovery web UI and client do not work with the version of
-Jackson (1.8.8) in Hadoop 2.4. The upgraded version in Hadoop 2.5 is compatible ([HADOOP-10104](https://issues.apache.org/jira/browse/HADOOP-10104)).
-
-* The per-entry configuration payload attempts to get Jackson to perform Object/JSON mapping with the classname provided as an attribute in the JSON. This destroys all ability of arbitrary applications to parse the published data, as well as cross-language clients -it is brittle and morally wrong from a data-sharing perspective.
-
-    {
-    
-      "name" : "name",
-      "id" : "service",
-      "address" : "localhost",
-      "port" : 8080,
-      "sslPort" : 443,
-      "payload" : {
-        "@class" : "org.apache.slider.core.registry.ServiceInstanceData",
-        "externalView" : {
-          "key" : "value"
-        }
-      },
-      "registrationTimeUTC" : 1397249829062,
-      "serviceType" : "DYNAMIC",
-      "uriSpec" : {
-        "parts" : [ {
-          "value" : "http:",
-          "variable" : false
-        }, {
-          "value" : ":",
-          "variable" : false
-        } ]
-      }
-    }
-
-
-
-## [Helix Service Registry](http://helix.apache.org/0.7.0-incubating-docs/recipes/service_discovery.html)
-
-This is inside Helix somewhere, used at LinkedIn in production at scale -and worth looking at. LinkedIn separate their Helix Zookeeper quorum from their application-layer quorum, to isolate load.
-
-Notable features
-
-1. The registry is also the liveness view of the deployed application. Clients aren't watching the service registry for changes; they are watching Helix's model of the deployed application.
-1. The deployed application can pick up changes to its state the same way, allowing for live application manipulation.
-1. Tracks nodes that continually join/leave the group and drops them as unreliable.
-
-## Twill Service Registry
-
-Twill's [service registry code](http://twill.incubator.apache.org/apidocs/index.html) lets applications register a [(hostname, port)](http://twill.incubator.apache.org/apidocs/org/apache/twill/discovery/Discoverable.html) pair in the registry under a name, by which clients can look up and enumerate all services with that name.
-
-Clients can subscribe to changes in the list of services with a specific name -so picking up the arrival and departure of instances, and probe to see if a previously discovered entity is still registered.
-
-Zookeeper- and in-memory registry implementations are provided.
-
-One nice feature about this architecture -and Twill in general- is that its general single-method callback model means that it segues nicely into Java-8 lambda-expressions. This is something to retain in a YARN-wide service registry.
-
-Comparing it to Curator, it offers a proper subset of Curator's registered services [ServiceInstance](http://curator.apache.org/apidocs/org/apache/curator/x/discovery/ServiceInstance.html) -implying that you could publish and retrieve Curator-registered services via a new implementation of Twill's DiscoveryService. This would require extensions to the Curator service discovery client to allow ZK nodes to be watched for changes. This is a feature that would be useful in many use cases -such as watching service availability across a cluster, or simply blocking until a dependent service was launched.
-
-As with Curator, the amount of information that can be published isn't enough for management tools to make effective use of the service registration, while for Slider there's no way to publish configuration data. However, a YARN registry will inevitably be a superset of the Twill client's enumerated and retrieved data -so if its registration API were sufficient to register a minimal service, supporting the YARN registry via Twill's existing API should be straightforward.
-
-## Twitter Commons Service Registration
-
-[Twitter Commons](https://github.com/twitter/commons) has a service registration library, which allows for registration of sets of servers, [publishing the hostname and port of each](http://twitter.github.io/commons/apidocs/com/twitter/common/service/registration/package-tree.html), along with a map of string properties.
-
-Zookeeper-based, it suffices if all servers are identical and only publish single (hostname, port) pairs for callers.
-
-## AirBnB Smartstack
-
-SmartStack is [Air BnB's cloud-based service discovery system](http://nerds.airbnb.com/smartstack-service-discovery-cloud/).
-
-It has two parts, *Nerve* and *Synapse*:
-
-[**Nerve**](https://github.com/airbnb/nerve) is a ruby agent designed to monitor processes and register healthy instances in ZK (or to a mock reporter). It includes [probes for TCP ports, HTTP and rabbitMQ](https://github.com/airbnb/nerve/tree/master/lib/nerve/service_watcher). It's [a fairly simple liveness monitor](https://github.com/airbnb/nerve/blob/master/lib/nerve/service_watcher.rb).
-
-[**Synapse**](https://github.com/airbnb/synapse) takes the data and uses it to configure [HAProxy instances](http://haproxy.1wt.eu/). HAProxy handles the load balancing, queuing and integrating liveness probes into the queues. Synapse generates all the configuration files for an instance -but also tries to reconfigure the live instances via their socket APIs.
-
-Alongside these, AirBnB have another published project on Github, [Optica](https://github.com/airbnb/optica), which is a web application for nodes to register themselves with (POST) and for others to query. It publishes events to RabbitMQ, and again uses ZK to store state.
-
-AirBnB do complain a bit about ZK and its brittleness. They do mention that they suspect it is due to bugs in the Ruby ZK client library. This may be exacerbated by in-cloud deployments. Hard-coding the list of ZK nodes may work for a physical cluster, but in a virtualized cluster, the hostnames/IP Addresses of those nodes may change -leading to a meta-discovery problem: how to find the ZK quorum -especially if you can't control the DNS servers.
-
-## [Apache Directory](http://directory.apache.org/apacheds/)
-
-This is an embeddable LDAP server.
-
-* Embeddable inside Java apps
-
-* Supports Kerberos alongside X.500 auth. It can actually act as a Key server and TGT if desired.
-
-* Supports DNS and DHCP queries.
-
-* Accessible via classic LDAP APIs.
-
-This isn't a registry service directly, though LDAP queries do make enumeration of services *and configuration data* straightforward. As LDAP libraries are common across languages -even built in to the Java runtime- LDAP support makes publishing information to arbitrary clients relatively straightforward.
-
-If service information were to be published via LDAP, then it should allow IT-managed LDAP services to both host this information, and publish configuration data. This would be relevant for classic Hadoop applications if we were to move the Configuration class to support back-end configuration sources beyond XML files on the classpath.
-
-# Proposal
diff --git a/src/site/markdown/registry/index.md b/src/site/markdown/registry/index.md
deleted file mode 100644
index 8131fd4..0000000
--- a/src/site/markdown/registry/index.md
+++ /dev/null
@@ -1,47 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Service Registry
-
-The service registry is a core part of the Slider Architecture -it is how
-dynamically generated configurations are published for clients to pick up.
-
-The need for a service registry goes beyond Slider, however. We effectively
-have application-specific registries for HBase and Accumulo, and explicit
-registries in Apache Helix and Apache Twill, as well as re-usable registry
-code in Apache Curator.
-
-[YARN-913](https://issues.apache.org/jira/browse/YARN-913) covers the need
-for YARN itself to have a service registry. This would be the ideal ultimate
-solution -it would operate at a fixed location/ZK path, and would be guaranteed
-to be on all YARN clusters, so code could be written expecting it to be there.
-
-It could also be used to publish binding data from static applications,
-including HBase, Accumulo, Oozie, -applications deployed by management tools.
-Unless/until these applications self-published their binding data, it would
-be the duty of the management tools to do the registration.
-
-
-
-## Contents
-
-1. [YARN Application Registration and Binding: the Problem](the_YARN_application_registration_and_binding_problem.html)
-1. [A YARN Service Registry](a_YARN_service_registry.html)
-1. [April 2014 Initial Registry Design](initial_registry_design.html)
-1. [Service Registry End-to-End Scenarios](service_registry_end_to_end_scenario.html)
-1. [P2P Service Registries](p2p_service_registries.html)
-1. [References](references.html)
\ No newline at end of file
diff --git a/src/site/markdown/registry/initial_registry_design.md b/src/site/markdown/registry/initial_registry_design.md
deleted file mode 100644
index a816d84..0000000
--- a/src/site/markdown/registry/initial_registry_design.md
+++ /dev/null
@@ -1,110 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# April 2014 Initial Registry Design
-
-This is the plan for the initial registry design.
-
-1. Use Apache Curator [service discovery code](http://curator.apache.org/curator-x-discovery/index.html). 
-
-2. AMs to register as (user, name). Maybe "service type" if we add that as an option in the slider configs
-
-3. Lift "external view" term from Helix -concept that this is the public view, not internal.
-
-4. application/properties section to list app-wide values
-
-5. application/services section to list public service URLs; publish each as unique-ID -> (human name, URL, human text). Code can resolve from the unique ID; UIs can use the human-readable data.
-
-6. String Template 2 templates for generation of output (rationale: libraries exist for Python, Java and .NET)
-
-7. Java CLI to retrieve values from ZK and apply named template (local, hdfs). Include ability to restrict to list of named properties (pattern match).
-
-8. AM to serve up curator service (later -host in RM? elsewhere?)
-
-### Forwards compatibility
-
-1. This initial design will hide the fact that Apache Curator is being used to discover services,
-by storing information in the payload, `ServiceInstanceData`, rather than in the (minimal) curator
-service entries themselves. If we move to an alternate registry, provided we
-can use the same datatype -or map to it- changes should not be visible.
-
-1. The first implementation will not support watching for changes.
-
-### Initial templates 
-
-* hadoop XML conf files
-
-* Java properties file
-
-* HTML listing of services
-
-
-
-## Example Curator Service Entry
-
-This is the prototype's content
-
-Top-level entry:
-
-    service CuratorServiceInstance{name='slider', id='stevel.test_registry_am', address='192.168.1.101', port=62552, sslPort=null, payload=org.apache.slider.core.registry.info.ServiceInstanceData@4e9af21b, registrationTimeUTC=1397574073203, serviceType=DYNAMIC, uriSpec=org.apache.curator.x.discovery.UriSpec@ef8dacf0} 
-
-Slider payload:
-
-    payload=
-    {
-      "internalView" : {
-        "endpoints" : {
-          "/agents" : {
-            "value" : "http://stevel-8.local:62552/ws/v1/slider/agents",
-            "protocol" : "http",
-            "type" : "url",
-            "description" : "Agent API"
-          }
-        },
-        "settings" : { }
-      },
-    
-      "externalView" : {
-        "endpoints" : {
-          "/mgmt" : {
-            "value" : "http://stevel-8.local:62552/ws/v1/slider/mgmt",
-            "protocol" : "http",
-            "type" : "url",
-            "description" : "Management API"
-          },
-    
-          "slider/IPC" : {
-            "value" : "stevel-8.local/192.168.1.101:62550",
-            "protocol" : "org.apache.hadoop.ipc.Protobuf",
-            "type" : "address",
-            "description" : "Slider AM RPC"
-          },
-          "registry" : {
-            "value" : "http://stevel-8.local:62552/ws/registry",
-            "protocol" : "http",
-            "type" : "url",
-            "description" : "Registry"
-          }
-        },
-        "settings" : { }
-      }
-    }
-
- 
-
-   
-
diff --git a/src/site/markdown/registry/p2p_service_registries.md b/src/site/markdown/registry/p2p_service_registries.md
deleted file mode 100644
index 6b2724a..0000000
--- a/src/site/markdown/registry/p2p_service_registries.md
+++ /dev/null
@@ -1,99 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# P2P Service Registries
-
-Alongside the centralized service registries, there's much prior work on P2P discovery systems, especially for mobile and consumer devices.
-
-They perform some multicast- or distributed hash table-based lookup, and tend to have common limitations:
-
-* scalability
-
-* the bootstrapping problem
-
-* security: can you trust the results to be honest?
-
-* consistency: can you trust the results to be complete and current?
-
-Bootstrapping is usually done via multicast, possibly then switching to unicast for better scale. As multicasting doesn't work in cloud infrastructures, none of the services work unmodified in public clouds. There are multiple anecdotes of [Amazon's SimpleDB service](http://aws.amazon.com/simpledb/) being used as a registry for in-EC2 applications. At the very least, this service and its equivalents in other cloud providers could be used to bootstrap ZK client bindings in cloud environments.
-
-## Service Location Protocol 
-
-Service Location Protocol is a protocol for discovering services that came out of Sun, Novell and others -it is still used for printer discovery and suchlike.
-
-It supports both a multicast discovery mechanism, and a unicast protocol to talk to a Directory Agent -an agent that is itself discovered by multicast requests, or by listening for the agent's intermittent multicast announcements.
-
-There's an extension to DHCP, RFC2610, which added the ability for DHCP to advertise Directory Agents -this was designed to solve the bootstrap problem (though not necessarily security or in-cloud deployment). Apart from a few mentions in Windows Server technical notes, it does not appear to exist.
-
-* [[RFC2608](http://www.ietf.org/rfc/rfc2608.txt)] *Service Location Protocol, Version 2* , IEEE, 1999
-
-* [[RFC3224](http://www.ietf.org/rfc/rfc3224.txt)] *Vendor Extensions for Service Location Protocol, Version 2*, IETF, 2003
-
-* [[RFC2610](http://www.ietf.org/rfc/rfc2610.txt)] *DHCP Options for Service Location Protocol, IETF, 1999*
-
-## [Zeroconf](http://www.zeroconf.org/)
-
-The multicast discovery service implemented in Apple's Bonjour system -multicasting DNS lookups to all peers in the subnet.
-
-This allows for URLs and hostnames to be dynamically positioned, with DNS domain searches allowing for enumeration of service groups. 
-
-This protocol scales very badly; the load on *every* client in the subnet is O(DNS-queries-across-subnet), hence implicitly `O(devices)*O(device-activity)`.
-
-The special domains "_tcp", "_udp" and below can also be served up via a normal DNS server.
-
-##  [Jini/Apache River](http://river.apache.org/doc/specs/html/lookup-spec.html)
-
-Attribute-driven service enumeration, which drives the Java-client-only model of downloading client-side code. There's no requirement for the remote services to be in Java, only that the drivers are.
-
-## [Serf](http://www.serfdom.io/)  
-
-This is a library that implements the [SWIM protocol](http://www.cs.cornell.edu/~asdas/research/dsn02-swim.pdf) to propagate information around a cluster. Apparently works in virtualized clusters too. It's already been used in a Flume-on-Hoya provider.
-
-## [Anubis](http://sourceforge.net/p/smartfrog/svn/HEAD/tree/trunk/core/components/anubis/)
-
-An HP Labs-built [High Availability tuple-space](http://sourceforge.net/p/smartfrog/svn/HEAD/tree/trunk/core/components/anubis/doc/HPL-2005-72.pdf?format=raw) in SmartFrog; used in production in some of HP's telco products. An agent publishes facts into the T-Space, and within one heartbeat all other agents have it. One heart-beat later, unless there's been a change in the membership, the publisher knows the others have it. One heartbeat later the agents know the publisher knows it, etc.
-
-Strengths: 
-
-* The shared knowledge mechanism permits reasoning and mathematical proofs.
-
-* Strict ordering between heartbeats implies an ordering in receipt. This is stronger than ZK's guarantees.
-
-* Lets you share a moderate amount of data (the longer the heartbeat interval, the more data you can publish).
-
-* Provided the JVM hosting the Anubis agent is also hosting the service, liveness is implicit
-
-* Secure to the extent that it can be locked down to allow only nodes with mutual trust of HTTPS certificates to join the tuple-space.
-
-Weaknesses:
-
-* (Currently) bootstraps via multicast discovery.
-
-* Brittle to timing, especially on virtualized clusters where clocks are unpredictable.
-
-It proved good for workload sharing -tasks can be published to it, any agent can say "I'm working on it" and take up the work. If the process fails, the task becomes available again. We used this for distributed scheduling in a rendering farm.
-
-## [Carmen](http://www.hpl.hp.com/techreports/2002/HPL-2002-257)
-
-This was another HP Labs project, related to the Cooltown "ubiquitous computing" work, which was a decade too early to be relevant. It was also positioned by management as a B2B platform, so ended up competing with -and losing against- WS-* and UDDI.
-
-Carmen aimed to provide service discovery with both fixed services, and with highly mobile client services that will roam around the network -they are assumed to be wireless devices.
-
-Services were published with, and searched for by, attributes; locality was considered a key attribute, with local instances of a service prioritized. Those services with a static location and low rate of change became the stable caches of service information -becoming, as with Skype, "supernodes".
-
-Bootstrapping the cluster relied on multicast, though alternatives based on DHCP and DNS were proposed.
-
diff --git a/src/site/markdown/registry/references.md b/src/site/markdown/registry/references.md
deleted file mode 100644
index bf0f068..0000000
--- a/src/site/markdown/registry/references.md
+++ /dev/null
@@ -1,49 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# References
-
-Service registration and discovery is a problem in distributed computing that has been explored for over thirty years, with
-[Birrell81]'s *Grapevine* system the first known implementation.
-
-# Papers
-
-* **[Birrell81]** Birrell, A. et al, [*Grapevine: An exercise in distributed computing*](http://research.microsoft.com/apps/pubs/default.aspx?id=63661). Comm. ACM 25, 4 (Apr 1982), pp260-274. 
-The first documented directory service; relied on service shutdown to resolve update operations.
-
-* **[Das02]** [*SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol*](http://www.cs.cornell.edu/~asdas/research/dsn02-swim.pdf)
-P2P gossip-style data sharing protocol with random liveness probes to address scalable liveness checking. Ceph uses similar liveness checking.
-
-* **[Marti02]** Marti S. and Krishnam V., [*Carmen: A Dynamic Service Discovery Architecture*](http://www.hpl.hp.com/techreports/2002/HPL-2002-257).
-
-* **[Lampson86]** Lampson, B. [*Designing a Global Naming Service*](http://research.microsoft.com/en-us/um/people/blampson/36-GlobalNames/Acrobat.pdf). DEC. 
-Distributed; includes an update protocol and the ability to add links to other parts of the tree. Also refers to [*Xerox Clearinghouse*](http://bitsavers.informatik.uni-stuttgart.de/pdf/xerox/parc/techReports/OPD-T8103_The_Clearinghouse.pdf), which apparently shipped.
-
-* **[Mockapetris88]** Mockapetris, P. [*Development of the domain name system*](http://bnrg.eecs.berkeley.edu/~randy/Courses/CS268.F08/papers/31_dns.pdf). The history of DNS.
-
-* **[Schroeder84]** Schroeder, M.D. et al, [*Experience with Grapevine: The Growth of a Distributed System*](http://research.microsoft.com/apps/pubs/default.aspx?id=61509). Xerox.
-Writeup of the experiences of using Grapevine, with its eventual consistency and lack of idempotent message delivery called out -along with coverage of operations issues.
-
-* **[van Renesse08]**  van Renesse, R. et al, [*Astrolabe: A Robust and Scalable Technology For Distributed System Monitoring, Management, and Data Mining*](http://www.cs.cornell.edu/home/rvr/papers/astrolabe.pdf). ACM Transactions on Computer Systems.
-Grandest P2P management framework to date; the work that earned Werner Vogels his CTO position at Amazon.
- 
-* **[van Steen86]** van Steen, M. et al, [*A Scalable Location Service for Distributed Objects*](http://www.cs.vu.nl/~ast/publications/asci-1996a.pdf).
-Vrije Universiteit, Amsterdam. Probably the first Object Request Broker.
-
-
-
- 
diff --git a/src/site/markdown/registry/registry-model.md b/src/site/markdown/registry/registry-model.md
deleted file mode 100644
index 31bd592..0000000
--- a/src/site/markdown/registry/registry-model.md
+++ /dev/null
@@ -1,75 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Registry
-
-The service registry model is designed to support dynamically
-deployed Slider applications, *and* statically deployed versions
-of the same application -provided the latter also registers itself,
-its public network services, and any configurations and files
-that it wishes clients to be able to retrieve.
-
-The architecture and implementation of this registry are not defined
-here -instead, this is the view of it seen by clients.
-
-1. A 'service registry' exists in the YARN cluster into which
-services can be registered. 
-
-1. There is no restriction on the number of services that can be registered in
-the registry, the type of service that may register, or on how many
-services an application running in the YARN cluster may register.
-
-1. Services are registered by their type, owner and name. As an example,
-Alice's slider-managed HBase cluster `ingress` would have a type `org.apache.hbase`,
-owner `alice` and name `ingress`. 
-
-1. In the case of Slider-managed services, there is a separate slider instance
-registration which publishes information about slider itself. In the example
-above, this would be (`org.apache.slider`,`alice`,`ingress`).
-
-1. Services can publish information about themselves, with common entries being:
-
-    * service name and description.
-    * URLs of published web UIs and web service APIs
-    * network address of other protocols
-    
-1. Services may also publish:
-    
-    * URLs to configuration details
-    * URLs documents published for use as client-side configuration data -either
-      directly or through some form of processing.
-    * public service-specific data, for use by applications that are aware of
-      the specific service type.
-    * internal service-specific data -for use by the components that comprise
-      an application. This allows the registry to be used to glue together
-      the application itself.
-      
-1. Services can be listed and examined.
-
-1. Service-published configuration key-value pairs can be retrieved by clients
-
-1. Service-published documents (and packages of such documents) can be
-retrieved by clients.
-
-1. There's no requirement for service instances to support any standard protocols.
-
-1. Some protocols are defined which they MAY implement. For example, the protocol
-to enumerate and retrieve configuration documents is designed to be implemented
-by any service that wishes to publish such content.
-
-1. In a secure cluster, the registry data may be restricted, along with any
-service protocols offered by the registered services. 
diff --git a/src/site/markdown/registry/service_registry_end_to_end_scenario.md b/src/site/markdown/registry/service_registry_end_to_end_scenario.md
deleted file mode 100644
index ebc32c9..0000000
--- a/src/site/markdown/registry/service_registry_end_to_end_scenario.md
+++ /dev/null
@@ -1,156 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Service Registry End-to-End Scenarios
-
-## AM startup
-
-1. AM starts, reads in configuration, creates provider
-
-2. AM builds its web site, involving the provider in the process *(there's a possible race condition here, due to the AM registration sequence)*
-
-3. AM registers self with RM, including web and IPC ports, and receives list of existing containers; container loss notifications come in asynchronously *(which is why the AM startup process is in a synchronized block)*
-
-4. AM inits its `ApplicationState` instance with the config, instance description and RM-supplied container list.
-
-5. AM creates service registry client using ZK quorum and path provided when AM was started
-
-6. AM registers standard endpoints: RPC, WebUI, REST APIs
-
-7. AM registers standard content it can serve (e.g `yarn-site.xml`)
-
-8. AM passes registry to provider in `bind()` operation.
-
-9. AM triggers review of application state, requesting/releasing nodes as appropriate
-
-## Agent Startup: standalone
-
-1. Container is issued to AM
-
-2. AM chooses component, launches agent on it -with the URL of the AM as a parameter (TODO: Add registry bonding of ZK quorum and path)
-
-3. Agent starts up.
-
-4. Agent locates AM via URL/ZK info
-
-5. Agent heartbeats in with state
-
-6. AM gives agent next state command.
-
-## AM gets state from agent:
-
-1. Agent heartbeats in
-
-2. AM decides if it wants to receive config 
-
-3. AM issues request for state information -all (dynamic) config data
-
-4. Agent receives it
-
-5. Agent returns all config state, including: hostnames, allocated ports, generated values (e.g. database connection strings, URLs) - as two-level (allows agent to define which config options are relevant to which document)
-
-## AM saves state for serving
-
-1. AM saves state in RAM (assumptions: small, will rebuild on restart)
-
-2. AM updates service registry with list of content that can be served up and URLs to retrieve them.
-
-3. AM fields HTTP GET requests on content
-
-## AM Serves content
-
-A simple REST service serves up content on paths published to the service registry. It is also possible to enumerate published documents via GET operations on parent paths.
-
-1. On GET command, AM locates referenced agent values
-
-2. AM builds up response document from K-V pairs. This can be in a limited set of formats: Hadoop XML, Java properties, YAML, CSV, HTML, JSON -chosen via a `?type` query parameter. (This generation is done from template processing in the AM using the slider.core.template module.)
-
-3. Response is streamed with `content-type` and `content-length` headers, marked as do-not-cache in the proxy, with an expiry header *(expiry date chosen as ??)*
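-
-A hedged sketch of such a retrieval -the path and query parameter are
-illustrative of the scheme described above, not a defined API:
-
-    curl "http://am-host:port/ws/v1/slider/conf/yarn-site?type=json"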
-
-# Slider Client
-
-Currently the slider client enumerates the YARN registry looking for slider instances -including checking for any running instance of the same application before launching a cluster.
-
-This:
-
-* has race conditions
-* has scale limitations `O(apps-in-YARN-cluster)` + `O(completed-apps-in-RM-memory)`
-* only retrieves configuration information from slider-deployed application instances. *We do not need to restrict ourselves here.*
-
-## Slider Client lists applications
-
-    slider registry --list [--servicetype <application-type>]
-
-1. Client starts
-
-2. Client creates a service registry client using the ZK quorum and path provided in the client config properties (slider-client.xml)
-
-3. Client enumerates registered services and lists them
-
-## Slider Client lists content published by an application instance
-
-    slider registry <instance> --listconf [--servicetype <application-type>]
-
-1. Client starts
-
-2. Client creates a service registry client using the ZK quorum and path provided in the client config properties (slider-client.xml)
-
-3. Client locates registered service entry -or fails
-
-4. Client retrieves service data, specifically the listing of published documents
-
-5. Client displays list of content
-
-## Slider Client retrieves content published by an application instance
-
-    slider registry <instance> --getconf <document> [--format (xml|properties|text|html|csv|yaml|json,...)] [--dest <file>] [--servicetype <application-type>]
-
-1. Client starts
-
-2. Client creates a service registry client using the ZK quorum and path provided in the client config properties (slider-client.xml)
-
-3. Client locates registered service entry -or fails
-
-4. Client retrieves service data, specifically the listing of published documents
-
-5. Client locates URL of content
-
-6. Client builds GET request including format
-
-7. Client executes command, follows redirects, validates content length against supplied data.
-
-8. Client prints response to console or saves to output file. This is the path specified as a destination, or, if that path refers to a directory, to
-a file underneath.
-
-## Slider Client retrieves content set published by an application instance
-
-Here a set of published documents is retrieved in the format desired by the application.
-
-## Slider Client retrieves document and applies template to it
-
-Here a published document is retrieved and then converted to its final form by applying a template.
-
-    slider registry <instance> --source <document> [--template <path-to-template>] [--outfile <file>]  [--servicetype <application-type>]
-
-1. The document is retrieved as before, using a simple format such as JSON.
-
-2. The document is parsed and converted back into K-V pairs
-
-3. A template, using a common/defined template library, is applied to the content, generating the final output.
-
-Template paths may include local filesystem paths or (somehow) something in a package file.
-
diff --git a/src/site/markdown/registry/the_YARN_application_registration_and_binding_problem.md b/src/site/markdown/registry/the_YARN_application_registration_and_binding_problem.md
deleted file mode 100644
index 805e69b..0000000
--- a/src/site/markdown/registry/the_YARN_application_registration_and_binding_problem.md
+++ /dev/null
@@ -1,145 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# YARN Application Registration and Binding: the Problem
-
-## March 2014
-
-# How do client applications bind to the services of dynamically placed applications?
-
-
-There are some constraints here
-
-1. The clients may be running outside the cluster -potentially over long-haul links.
-
-1. The location of an application deployed in a YARN cluster cannot be predicted.
-
-1. The ports used for application service endpoints cannot be hard-coded or predicted. (Alternatively: if they are hard-coded, then Socket-In-Use exceptions may occur.)
-
-1. As components fail and get re-instantiated, their location may change. The rate of this depends on cluster and application stability; the longer lived the application, the more common it is.
-
-Existing Hadoop client apps have a configuration problem of their own: how are the settings in files such as `yarn-site.xml` picked up by today's applications? This is an issue which has historically been out of scope for Hadoop clusters -but if we are looking at registration and binding of YARN applications, there should be no reason why
-static applications cannot be discovered and bound to using the same mechanisms.
-
-# Other constraints:
-
-1. Reduce the amount of change needed in existing applications to a minimum -ideally none, though some pre-launch setup may be acceptable.
-
-2. Prevent malicious applications from registering service endpoints.
-
-3. Scale with the number of applications and of clients; do not overload on a cluster partitioning.
-
-4. Offer a design that works with apps that are deployed in a YARN cluster outside of Slider. Rationale: we want a mechanism that works with pure-YARN apps.
-
-## Possible Solutions:
-
-### ZK
-
-Client applications use ZK to find services (addresses #1, #2 and #3). Requires location code in the client.
-
-HBase and Accumulo do this as part of a failover-ready design.
-
-### DNS
-
-Client apps use DNS to find services, with custom DNS server for a subdomain representing YARN services. Addresses #1; with a shortened TTL and no DNS address caching, #3. #2 addressed only if other DNS entries are used to publish service entries. 
-
-Should support existing applications, with a configuration that is stable over time. It does require the clients to not cache DNS addresses forever (this must be explicitly set on Java applications,
-irrespective of the published TTL). It generates a load on the DNS servers that is `O(clients/TTL)`.
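-
-For example, the JVM-wide cache lifetime can be capped via the standard
-`networkaddress.cache.ttl` Java security property -the 60 second value here
-is illustrative:
-
-    # in $JAVA_HOME/jre/lib/security/java.security
-    networkaddress.cache.ttl=60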
-
-Google Chubby offers a DNS service to handle this. ZK does not -yet.
-
-### Floating IP Addresses
-
-If the clients know/cache IP addresses of services, these addresses could be floated across service instances. Linux HA has floating IP address support, while Docker containers can make use of them, especially if an integrated DHCP server handles the assignment of IP addresses to specific containers. 
-
-ARP caching is the inevitable problem here, but it is still less brittle than relying on applications to know not to cache IP addresses -nor does it place as much load on DNS servers as short-TTL DNS entries do.
-
-### LDAP
-
-Enterprise Directory services are used to publish/locate services. Requires lookup into the directory on binding (#1, #2), re-lookup on failure (#3). LDAP permissions can prevent untrusted applications registering.
-
-* Works well with Windows registries.
-
-* Less common Java-side, though possible -and implemented in the core Java libraries. Spring-LDAP is focused on connection to an LDAP server -not LDAP-driven application config.
-
-### Registration Web Service
-
- Custom web service registration services used. 
-
-* The sole WS-* one, UDDI, does not have a REST equivalent -DNS is assumed to take on that role.
-
-* Requires new client-side code anyway.
-
-### Zookeeper URL Schema
-
-Offer our own `zk://` URL; Java & .NET implementations (others?) to resolve it, plus browser plugins.
-
-* Would address requirements #1 & #3
-
-* Cost: non-standard; needs an extension for every application/platform, and will not work with tools such as curl or web browsers
-
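-A purely hypothetical example of the form such a URL might take -the quorum
-hosts and registry path are invented for illustration:
-
-    zk://zk1.example.org:2181,zk2.example.org:2181/services/slider/alice/ingress
-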
-### AM-side config generation
-
-App-side config generation: YARN applications generate client-side configuration files for launch-time information (#1, #2). The AM can dynamically create these, and as the storage load is all in the AM, this does not consume resources in a central server the way publishing it all to such a server would.
-
-* Requires the application to know of the client-side applications to support -and to be able to generate their configuration information (i.e. formatted files).
-
-* Requires the AM to get all information from deployed application components needed to generate bindings. Unless the AM can resolve YARN App templates, need a way to get one of the components in the app to generate settings for the entire cluster, and push them back.
-
-* Needs to be repeated for all YARN apps, however deployed.
-
-* Needs something similar for statically deployed applications.
-
-
-### Client-side config generation
-
-YARN app to publish attributes as key-val pairs, with client-side code to read them and generate configs (#1, #2). Example configuration generators could create: Hadoop-client XML, Spring, Tomcat, Guice configs, something for .NET.
-
-* Not limited to Hoya application deployments only.
-
-* K-V pairs need to be published "somewhere". A structured section in the ZK tree per app is the obvious location -though potentially expensive. An alternative is AM-published data.
-
-* Needs client-side code capable of extracting information from YARN cluster to generate client-specific configuration.
-
-* Assumes (key, value) pairs sufficient for client config generation. Again, some template expansion will aid here (this time: client-side interpretation).
-
-* Client config generators need to find and bind to the target application themselves.
-
- 
-
-Multiple options:
-
-* Standard ZK structure for YARN applications (maybe: YARN itself to register paths in ZK & set up child permissions, so enforcing security).
-
-* Agents to push to ZK dynamic information as K-V pairs
-
-* Agent provider on AM to fetch K-V pairs and include in status requests
-
-* CLI to fetch app config keys and echo out responses (needs client log4j settings to route all slf4j/log4j output to stderr, so that `app > results.txt` would save results explicitly)
-
-* Client-side code per app to generate specific binding information
-
-### Load-balancer YARN app
-
-Spread requests around a set of registered handlers, e.g. web servers. Support plugins for session binding/sharding.
-
-Some web servers can do this already; a custom YARN app could use embedded Grizzly. The binding problem exists, but this would support scalable dispatch of values.
-
-* Could be offered as an AM extension (in provider, ...): scales well with the number of apps in the cluster, but adds initial location/failover problems.
-
-* If offered as a core-YARN service, location is handled via a fixed URL. This could place a high load on the service, even if it were just issuing 302 redirects.
-
diff --git a/src/site/markdown/release_notes/release-0.22.0.md b/src/site/markdown/release_notes/release-0.22.0.md
deleted file mode 100644
index b68aafd..0000000
--- a/src/site/markdown/release_notes/release-0.22.0.md
+++ /dev/null
@@ -1,48 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Slider Release 0.22.0
-
-April 2014
-
-This release is built against the Apache Hadoop 2.4.0, HBase-0.98.0RC1
-and Accumulo 1.5.0 artifacts. 
-
-Download: [slider-0.22.0-all.tar.gz](http://public-repo-1.hortonworks.com/slider/slider-0.22.0-all.tar.gz)
-
-
-## Key changes
-
-### Added support for Slider App Package
-
-Slider AppPackages are a declarative definition of an application for application management. Slider can deploy and manage any application described in the App Package format.
-
-
-### Added support for Slider Agent
-
-Slider agent is a generic provider that can process Slider App Packages.
-
-### Added documentation on developing and using Slider App Packages
-
-### Added Service Registry and associated REST resources
-
-### Enhanced Agent Provider AM Web UI with additional application info
-
-Added links to the WADL listing that exposes the AM REST management resources, as well as the Registry REST resources.
-
-
-
diff --git a/src/site/markdown/release_notes/release-0.30.0.md b/src/site/markdown/release_notes/release-0.30.0.md
deleted file mode 100644
index e88a709..0000000
--- a/src/site/markdown/release_notes/release-0.30.0.md
+++ /dev/null
@@ -1,36 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Slider Release 0.30.0
-
-May 2014
-
-This release is built against the Apache Hadoop 2.4.0, HBase-0.98.1
-and Accumulo 1.5.1 artifacts. 
-
-Download: []()
-
-
-## Key changes
-1. Slider application registry that allows registration and discovery of application configuration and URLs (such as JMX endpoints and management UIs) for client consumption.
-2. Move to a .zip packaging for Slider application packages.
-3. Richer metainfo support to provide start ordering and arbitrary template that can be published.
-4. Updated application definition packages for HBase, Accumulo, and Storm. 
-
-## Other changes
-
-1. [SLIDER-13](https://issues.apache.org/jira/browse/SLIDER-13) switch build to be java7+ only.
\ No newline at end of file
diff --git a/src/site/markdown/security.md b/src/site/markdown/security.md
deleted file mode 100644
index 51665f7..0000000
--- a/src/site/markdown/security.md
+++ /dev/null
@@ -1,197 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Security
-
-This document discusses the design, implementation and use of Slider
-to deploy secure applications on a secure Hadoop cluster.
-
-### Important:
- 
-This document does not cover Kerberos, how to secure a Hadoop cluster, Kerberos
-command line tools or how Hadoop uses delegation tokens to delegate permissions
-round a cluster. These are assumed, though some links to useful pages are
-listed at the bottom. 
-
-
-## Concepts
-
-Slider runs in secure clusters, but with restrictions:
-
-1. The keytabs to allow a worker to authenticate with the master must
-   be distributed in advance: Slider does not attempt to pass these around.
-1. Until the location of Slider node instances can be strictly limited to
-  a set of nodes (a future YARN feature), the keytabs must be passed to
-  all the nodes in the cluster in advance, *and made available to the
-  user creating the cluster*
-1. Due to the way that HBase and Accumulo authenticate worker nodes to
-  the masters, any HBase node running on a server must authenticate as
-  the same principal, and so have equal access rights to the HBase cluster.
-1. As the data directories for a slider cluster are created under the home
-  directories of that user, the principals representing all role instances
-  in the clusters *MUST* have read/write access to these files. This can be
-  done with a shortname that matches that of the user, or by requesting
-  that Slider create a directory with group write permissions -and using LDAP
-  to identify the application principals as members of the same group
-  as the user.
-
-
-## Security Requirements
-
-
-### Needs
-*  Slider and deployed applications to work against secure HDFS
-*  Slider to work with secure YARN.
-*  Slider to start secure applications
-*  Kerberos and ActiveDirectory to perform the authentication.
-*  Slider to only allow cluster operations by authenticated users -command line and direct RPC. 
-*  Any Slider Web UI and REST API for Ambari to only allow access to authenticated users.
-*  The Slider database in `~/.slider/clusters/$name/data` to be writable by HBase
-
-
-### Short-lived Clusters
-*  Cluster to remain secure for the duration of the Kerberos tokens issued to Slider.
-
-
-### Long-lived Clusters
-
-*  Slider application instance and HBase instance to remain functional and secure over an indefinite period of time.
-
-### Initial Non-requirements
-*  secure audit trail of cluster operations.
-*  multiple authorized users being granted rights to a Slider Cluster (YARN admins can always kill the Slider application instance).
-*  More than one HBase cluster in the YARN cluster belonging to a single user (irrespective of how they are started).
-*  Any way to revoke certificates/rights of running containers.
-
-### Assumptions
-*  Kerberos is running, and HDFS and YARN are running Kerberized.
-*  LDAP cannot be assumed. 
-*  Credentials needed for HBase can be pushed out into the local filesystems of 
-  the worker nodes via some external mechanism (e.g. scp), and protected by
-  the access permissions of the native filesystem. Any user with access to these
-  credentials is considered to have been granted such rights.
-*  These credentials can outlive the duration of the HBase containers.
-*  The user running HBase has the same identity as that of the HBase cluster.
-
-## Design
-
-
-1. The user is expected to have their own Kerberos principal, and have used `kinit`
-  or equivalent to authenticate with Kerberos and gain a (time-bounded) TGT
-1. The user is expected to have their own principals for every host in the cluster of the form
-  username/hostname@REALM
-1. A keytab must be generated which contains all these principals -and distributed
-  to all the nodes in the cluster with read access permissions to the user.
-1. When the user creates a secure cluster, they provide the standard HBase kerberos options
-  to identify the principals to use and the keytab location.
-
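-As an illustration, such a keytab might be generated with the MIT Kerberos
-admin tools -the principal names, realm and keytab path below are examples,
-not values Slider prescribes:
-
-    kadmin.local -q "addprinc -randkey alice/node1.example.com@EXAMPLE.COM"
-    kadmin.local -q "addprinc -randkey alice/node2.example.com@EXAMPLE.COM"
-    kadmin.local -q "ktadd -k alice.keytab alice/node1.example.com@EXAMPLE.COM"
-    kadmin.local -q "ktadd -k alice.keytab alice/node2.example.com@EXAMPLE.COM"
-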
-The Slider Client will talk to HDFS and YARN authenticating itself with the TGT,
-talking to the YARN and HDFS principals which it has been configured to expect.
-
-This can be done as described in [Client Configuration](client-configuration.html) on the command line as
-
-     -D yarn.resourcemanager.principal=yarn/master@LOCAL 
-     -D dfs.namenode.kerberos.principal=hdfs/master@LOCAL
-
-The Slider Client will create the cluster data directory in HDFS with `rwx` permissions for
-the user, `r-x` for the group and `---` for others (these can be configured as part of the cluster options).
-
-It will then deploy the AM, which will (somehow? for how long?) retain the access
-rights of the user that created the cluster.
-
-The Application Master will read in the JSON cluster specification file, and instantiate the
-relevant number of components.
-
-
-## Securing communications between the Slider Client and the Slider AM.
-
-When the AM is deployed in a secure cluster,
-it automatically uses Kerberos-authorized RPC channels. The client must acquire a
-token to talk to the AM.
-
-This is provided by the YARN Resource Manager when the client application
-wishes to talk with the SliderAM -a token which is only provided after
-the caller authenticates itself as the user that has access rights
-to the cluster.
-
-To allow the client to freeze a Slider application instance when it is unable to acquire
-a token to authenticate with the AM, use the `--force` option.
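-
-For example (the instance name `cl1` here is illustrative):
-
-    ./slider freeze cl1 --force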
-
-### How to enable a secure Slider client
-
-Slider can be placed into secure mode by setting the Hadoop security options:
-
-This can be done in `slider-client.xml`:
-
-
-    <property>
-      <name>hadoop.security.authorization</name>
-      <value>true</value>
-    </property>
-    
-    <property>
-      <name>hadoop.security.authentication</name>
-      <value>kerberos</value>
-    </property>
-
-
-Or it can be done on the command line
-
-    -D hadoop.security.authorization=true -D hadoop.security.authentication=kerberos
-
-### Adding Kerberos binding properties to the Slider Client JVM
-
-The Java Kerberos library needs to know the Kerberos controller and
-realm to use. This should happen automatically if this is set up as the
-default Kerberos binding (on a Unix system this is done in `/etc/krb5.conf`).
-
-If it is not set up, a stack trace with Kerberos classes at the top and
-the message `java.lang.IllegalArgumentException: Can't get Kerberos realm`
-will be printed -and the client will then fail.
-
-The realm and controller can be defined in the Java system properties
-`java.security.krb5.realm` and `java.security.krb5.kdc`. These can be fixed
-in the JVM options, as described in the [Client Configuration](client-configuration.html)
-documentation.
-
-They can also be set on the Slider command line itself, using the `-S` parameter.
-
-    -S java.security.krb5.realm=MINICLUSTER  -S java.security.krb5.kdc=hadoop-kdc
-
-### Java Cryptography Exceptions 
-
-
-When trying to talk to a secure cluster, you may see the message:
-
-    No valid credentials provided (Mechanism level: Illegal key size)]
-
-This means that the JRE does not have the extended cryptography package
-needed to work with the keys that Kerberos needs. This must be downloaded
-from Oracle (or other supplier of the JVM) and installed according to
-its accompanying instructions.
-
-## Useful Links
-
-1. [Adding Security to Apache Hadoop](http://hortonworks.com/wp-content/uploads/2011/10/security-design_withCover-1.pdf)
-1. [The Role of Delegation Tokens in Apache Hadoop Security](http://hortonworks.com/blog/the-role-of-delegation-tokens-in-apache-hadoop-security/)
-1. [Chapter 8. Secure Apache HBase](http://hbase.apache.org/book/security.html)
-1. Hadoop Operations p135+
-1. [Java Kerberos Requirements](http://docs.oracle.com/javase/7/docs/technotes/guides/security/jgss/tutorials/KerberosReq.html)
-1. [Troubleshooting Kerberos on Java](http://docs.oracle.com/javase/7/docs/technotes/guides/security/jgss/tutorials/Troubleshooting.html)
-1. For OS/X users, the GUI ticket viewer is `/System/Library/CoreServices/Ticket\ Viewer.app`
-
-
diff --git a/src/site/markdown/slider_specs/application_configuration.md b/src/site/markdown/slider_specs/application_configuration.md
deleted file mode 100644
index 2b50dde..0000000
--- a/src/site/markdown/slider_specs/application_configuration.md
+++ /dev/null
@@ -1,82 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Application Configuration
-
-App Configuration captures the default configuration associated with the application. *Details of configuration management are discussed in a separate spec*. The default configuration is modified based on user-provided InstanceConfiguration, cluster specific details (e.g. HDFS root, local dir root), container allocated resources (port and hostname), and dependencies (e.g. ZK quorum hosts), and handed to the component instances.
-
-App Configuration is a folder containing all configuration needed by the application. Config files include any site.xml, log4j properties file, etc. 
-
-In addition, an application may have configuration parameters that do not necessarily go into config files. Such configurations may be used during template expansion (parameters in env.sh files), as environment variables (e.g. JAVA_HOME), or to customize user names (for runas). These configurations can be provided as user input or automatically inferred from the environment. Such configurations are stored in a file named "app_config.xml".
-
-![Image](../../resources/images/app_config_folders_01.png?raw=true)
-
-A config file is of the form:
-
-
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-      ...
-      </property>
-    </configuration>
-
-
-
-Each configuration property is specified as follows:
-
-
-    <property>
-        <name>storm.zookeeper.session.timeout</name>
-        <value>20000</value>
-        <description>The session timeout for clients to ZooKeeper.</description>
-        <required>false</required>
-        <valueRestriction>0-30000</valueRestriction>
-      </property>
-      <property>
-        <name>storm.zookeeper.root</name>
-        <value>/storm</value>
-        <description>The root location at which Storm stores data in ZK.</description>
-        <required>true</required>
-      </property>
-      <property>
-        <name>jvm.heapsize</name>
-        <value>256</value>
-        <description>The default JVM heap size for any component instance.</description>
-        <required>true</required>
-      </property>
-      <property>
-        <name>nimbus.host</name>
-        <value>localhost</value>
-        <description>The host that the master server is running on.</description>
-        <required>true</required>
-        <clientVisible>true</clientVisible>
-      </property>
-      
-
-
-* name: name of the parameter
-
-* value: default value of the parameter
-
-* description: a short description of the parameter
-
-* required: whether the parameter is mandatory, in which case it must have a value - default is "false"
-
-* clientVisible: whether the property must be exported to a client
-
diff --git a/src/site/markdown/slider_specs/application_definition.md b/src/site/markdown/slider_specs/application_definition.md
deleted file mode 100644
index 6e417d2..0000000
--- a/src/site/markdown/slider_specs/application_definition.md
+++ /dev/null
@@ -1,182 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Application Definition
-
-App definition is a declarative definition of a YARN application describing its content. The AppDefinition is used in conjunction with the [AppPackage](application_package.md). Application definition is an XML file and is packaged as metainfo.xml.
-
-## Structure
-
-*Non-mandatory fields are described in **italics**.*
-
-The fields to describe an application are as follows:
-
-* **name**: the name of the application
-
-* **version**: the version of the application. Name and version together uniquely identify an application.
-
-* **type**: the type of the application. "YARN-APP" identifies an application definition suitable for YARN.
-
-* **minHadoopVersion**: the minimum version of Hadoop on which the app can run
-
-* **components**: the list of components that the application is composed of
-
-* **osSpecifics**: OS specific package information for the application
-
-* *commandScript*: application-wide commands may also be defined. The command is executed on a component instance that is a client
-
-* *dependencies*: an application can define a list of dependencies. Dependencies can be on base services such as HDFS, ZOOKEEPER, and YARN, which are infrastructure services, or GANGLIA, NAGIOS, etc., which are monitoring/alert services. The dependencies are parsed by the management infrastructure to provide the necessary configurations to allow the app to access the services. For example, an HDFS folder could be requested by the app to store its data, or a ZOOKEEPER node to co-ordinate among components.
-
-An application contains several components. The fields associated with a component are:
-
-* **name**: name of the component
-
-* **category**: type of the component - MASTER, SLAVE, and CLIENT
-
-* **minInstanceCount**: the minimum number of instances required for this component
-
-* *maxInstanceCount*: maximum number of instances allowed for a component
-
-* **commandScript**: the script that implements the commands.
-
- * **script**: the script location - relative to the AppPackage root
-
- * **scriptType**: type of the script
-
- * **timeout**: default timeout of the script
-
-* *customCommands*: any additional commands available for the component and their implementation
-
-An application definition also includes the package used to install the application. It's typically a tarball or some other form of package that does not require root access to install. The details of what happens during install are captured in the command script.
-
-* **osSpecific**: details on a per-OS basis
-
-* **osType**: "any" refers to any OS - typical for tarballs
-
-* **packages**: list of packages that need to be deployed
-
-* **type**: type of package
-
-* **name**: name of the package
-
-* **location**: location of the package (can be a relative folder within the parent AppPackage)
-
-An application can define an order of activation, which dictates whether some component's activation must follow the successful activation of other components.
-
-* **command**: specifies the component and the command in the form component-command *currently, START is the only valid command*
-
-* **requires**: specifies component and their state that the command depends on, provided in the form component-state *currently, STARTED is the only valid state*
-
-Applications can also advertise a set of properties (typically URLs) that can only be bound when the application components are active. One such item can be the JMX endpoint. The properties to be advertised are organized as export groups (exportGroup) and each group can export one or more properties organized as a property bag. These values are visible through the registry service.
-
-* **name**: specifies the name of the export group
-
-Each exportGroup contains one or more exports.
-
-* **name**: the name of the export
-
-* **value**: the template that will be populated by Slider and then exported
-
-
-      <metainfo>
-        <schemaVersion>2.0</schemaVersion>
-        <application>
-          <name>HBASE</name>
-          <version>0.96.0.2.1.1</version>
-          <type>YARN-APP</type>
-          <minHadoopVersion>2.1.0</minHadoopVersion>
-          <components>
-            <component>
-              <name>HBASE_MASTER</name>
-              <category>MASTER</category>
-              <minInstanceCount>1</minInstanceCount>
-              <maxInstanceCount>2</maxInstanceCount>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-              <customCommands>
-                <customCommand>
-                  <name>GRACEFUL_STOP</name>
-                  <commandScript>
-                    <script>scripts/hbase_master.py</script>
-                    <scriptType>PYTHON</scriptType>
-                    <timeout>1800</timeout>
-                  </commandScript>
-              </customCommand>
-            </customCommands>
-            </component>
-    
-            <component>
-              <name>HBASE_REGIONSERVER</name>
-              <category>SLAVE</category>
-              <minInstanceCount>1</minInstanceCount>
-              ...
-            </component>
-    
-            <component>
-              <name>HBASE_CLIENT</name>
-              <category>CLIENT</category>
-              ...
-            </component>
-          </components>
-    
-          <osSpecifics>
-            <osSpecific>
-              <osType>any</osType>
-              <packages>
-                <package>
-                  <type>tarball</type>
-                  <name>hbase-0.96.1-tar.gz</name>
-                  <location>package/files</location>
-                </package>
-              </packages>
-            </osSpecific>
-          </osSpecifics>
-          
-          <commandOrders>
-            <commandOrder>
-              <command>HBASE_REGIONSERVER-START</command>
-              <requires>HBASE_MASTER-STARTED</requires>
-            </commandOrder>
-          </commandOrders>
-          
-          <exportGroups>
-            <exportGroup>
-              <name>QuickLinks</name>
-                <exports>
-                  <export>
-                    <name>JMX_Endpoint</name>
-                    <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/jmx</value>
-                  </export>
-                  <export>
-                    <name>Master_Status</name>
-                    <value>http://${HBASE_MASTER_HOST}:${site.hbase-site.hbase.master.info.port}/master-status</value>
-                  </export>
-               </exports>
-            </exportGroup>
-          </exportGroups>
-    
-          <commandScript>
-            <script>scripts/app_health_check.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>300</timeout>
-          </commandScript>
-    
-        </application>
-      </metainfo>
-
diff --git a/src/site/markdown/slider_specs/application_instance_configuration.md b/src/site/markdown/slider_specs/application_instance_configuration.md
deleted file mode 100644
index 764d3ce..0000000
--- a/src/site/markdown/slider_specs/application_instance_configuration.md
+++ /dev/null
@@ -1,104 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# App Instance Configuration
-
-App Instance Configuration is the configuration override provided by the application owner when creating an application instance using Slider. These configuration values override the default configuration available in the App Package.
-
-Instance configuration is a JSON formatted doc in the following form:
-
-
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-      "metadata": {
-      },
-      "global": {            
-      },
-    }
-
-An appConfig.json contains the application configuration. The sample below shows configuration for HBase.
-
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-          "config_types": "core-site,hdfs-site,hbase-site",
-          
-          "java_home": "/usr/jdk64/jdk1.7.0_45",
-          "package_list": "files/hbase-0.96.1-hadoop2-bin.tar",
-          
-          "site.global.app_user": "yarn",
-          "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
-          "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-          "site.global.security_enabled": "false",
-  
-          "site.hbase-site.hbase.hstore.flush.retries.number": "120",
-          "site.hbase-site.hbase.client.keyvalue.maxsize": "10485760",
-          "site.hbase-site.hbase.hstore.compactionThreshold": "3",
-          "site.hbase-site.hbase.rootdir": "${NN_URI}/apps/hbase/data",
-          "site.hbase-site.hbase.tmp.dir": "${AGENT_WORK_ROOT}/work/app/tmp",
-          "site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}",
-          "site.hbase-site.hbase.regionserver.port": "0",
-  
-          "site.core-site.fs.defaultFS": "${NN_URI}",
-          "site.hdfs-site.dfs.namenode.https-address": "${NN_HOST}:50470",
-          "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070"
-      }
-    }
-
-appConfig.json allows you to pass in an arbitrary set of configuration properties that Slider will forward to the application component instances.
-
-**Variable naming convention**
-In order to understand how the naming convention works, let's look at how the config is passed on to component commands. The Slider agent receives a structured bag of commands as input for all commands: INSTALL, CONFIGURE, START, etc. The command includes a section "configuration" which has config properties arranged into named property bags.
-
-* Variables of the form `site.xx.yy` translate to variables by the name `yy` within the group `xx` and are typically converted to site config files by the name `xx` containing variable `yy`. For example, `"site.hbase-site.hbase.regionserver.port":""` will be sent to the Slider-Agent as `"hbase-site" : { "hbase.regionserver.port": ""}` and app definition scripts can access all variables under `hbase-site` as a single property bag.
-* Similarly, `site.core-site.fs.defaultFS` allows you to pass in the default fs. *This specific variable is automatically made available by Slider but its shown here as an example.*
-* Variables of the form `site.global.zz` are sent in the same manner as other site variables, except these variables are not expected to get translated to a site xml file. Usually, variables needed for templates or other filter conditions (such as security_enabled = true/false) can be sent in as "global variables".
-
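-As a sketch of the mechanics -illustrative code only, not the actual agent
-implementation- the translation from `site.xx.yy` keys into named property
-bags can be pictured as:
-
-    # Illustrative sketch: group "site.xx.yy" properties into per-file bags.
-    def to_property_bags(conf):
-        bags = {}
-        for key, value in conf.items():
-            if key.startswith("site."):
-                _, bag, prop = key.split(".", 2)  # "site", "xx", "yy"
-                bags.setdefault(bag, {})[prop] = value
-        return bags
-
-    # prints {'hbase-site': {'hbase.regionserver.port': '0'}}
-    print(to_property_bags({"site.hbase-site.hbase.regionserver.port": "0"}))
-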
-**slider variables**
-
-* Any config not of the form `site.xx.yy` is consumed by Slider itself. Some of the mandatory configurations are:
-  * `agent.conf`: location of the agent config file (typically, "/slider/agent/conf/agent.ini")
-  * `application.def`: location of the application definition package (typically, "/slider/hbase_v096.zip")
-  * `config_types`: list of config types sent to the containers (e.g. "core-site,hdfs-site,hbase-site")
-  * `java_home`: java home path (e.g. "/usr/jdk64/jdk1.7.0_45")
-  * `package_list`: location of the package relative to the root where the AppPackage is installed (e.g. "files/hbase-0.96.1-hadoop2-bin.tar.gz")
-
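-For instance, these Slider-consumed properties sit alongside the `site.*`
-entries in the `global` section of appConfig.json (the paths shown are
-examples only):
-
-    {
-      "schema": "http://example.org/specification/v2.0.0",
-      "metadata": { },
-      "global": {
-        "agent.conf": "/slider/agent/conf/agent.ini",
-        "application.def": "/slider/hbase_v096.zip",
-        "config_types": "core-site,hdfs-site,hbase-site",
-        "java_home": "/usr/jdk64/jdk1.7.0_45",
-        "package_list": "files/hbase-0.96.1-hadoop2-bin.tar.gz"
-      }
-    }
-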
-**dynamically allocated ports**
-
-Apps can ask for ports to be dynamically assigned by Slider, or they can leave them as "0". If there is a need to advertise any listening endpoint, the ports can be marked as such.
-
-For example, the HBase master info port needs to be advertised so that the JMX endpoint can be accessed. This is indicated by using a special value of the form `${COMPONENT_NAME.ALLOCATED_PORT}`, e.g. `"site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}"`.
-
-[Application Definition](application_definition.md) describes how to advertise an arbitrary set of properties that are dynamically finalized when the application is activated.
-
-**configuring an app for Ganglia metrics**
-
-There is no set guideline for doing so. How an application emits metrics and how the metrics are emitted to the right place is completely defined by the application. In the following example, we show how the HBase app is configured to emit metrics to a Ganglia server.
-
-The Ganglia server lifecycle is not controlled by the app instance, so the app instance only needs to know where to emit the metrics. This is achieved by three global variables:
-
-* "site.global.ganglia_server_host": "gangliaserver.my.org"
-* "site.global.ganglia_server_port": "8663"
-* "site.global.ganglia_server_id": "HBaseApplicationCluster3"
-
-All three variable values are user provided. It is also expected that a gmond server is available on host gangliaserver.my.org, listening for metrics at port 8663, and is named "HBaseApplicationCluster3". It's the responsibility of the Ganglia server admin to ensure that the server is unique and is only receiving metrics from the application instance.
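-
-In appConfig.json these appear as ordinary `site.global.*` entries, e.g.:
-
-    "global": {
-      "site.global.ganglia_server_host": "gangliaserver.my.org",
-      "site.global.ganglia_server_port": "8663",
-      "site.global.ganglia_server_id": "HBaseApplicationCluster3"
-    }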
-
-
-
diff --git a/src/site/markdown/slider_specs/application_needs.md b/src/site/markdown/slider_specs/application_needs.md
deleted file mode 100644
index 536a880..0000000
--- a/src/site/markdown/slider_specs/application_needs.md
+++ /dev/null
@@ -1,140 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Slider's needs of an application
-
-Slider installs and runs applications in a YARN cluster -applications that
-do not need to be written for YARN. 
-
-What they do need to be is deployable by Slider, which means installable by YARN,
-configurable by Slider, and, finally, executable by YARN. YARN will kill the
-executed process when destroying a container, so the deployed application
-must expect this to happen and be able to start up from a kill-initiated
-shutdown without any manual recovery process.
-
-They need to locate each other dynamically, both at startup and during execution,
-because the location of processes will be unknown at startup, and may change
-due to server and process failures. 
-
-## Must
-
-* Install and run from a tarball -and be run by a user that is not root.
-
-* Be self contained or have all dependencies pre-installed.
-
-* Support dynamic discovery of nodes -such as via ZK.
-
-* Nodes to rebind themselves dynamically -so if nodes are moved, the application
-can continue
-
-* Handle kill as a normal shutdown mechanism.
-
-* Support multiple instances of the application running in the same cluster,
-  with processes from different application instances sharing
-the same servers.
-
-* Operate correctly when more than one role instance in the application is
-deployed on the same physical host. (If YARN adds anti-affinity options in 
-container requests this will no longer be a requirement)
-
-* Dynamically allocate any RPC or web ports -such as supporting 0 as the number
-of the port to listen on in configuration options (see the sketch after this list).
-
-* Be trusted. YARN does not run code in a sandbox.
-
-* If it talks to HDFS or other parts of Hadoop, be built against/ship with
-libraries compatible with the version of Hadoop running on the cluster.
-
-* Store persistent data in HDFS (directly or indirectly) with the exact storage location
-configurable. Specifically: not to the local filesystem, and not in a hard coded location
-such as `hdfs://app/data`. Slider creates per-instance directories for
-persistent data.
-
-* Be configurable as to where any configuration directory is (or simply relative
-to the tarball). The application must not require it to be in a hard-coded
-location such as `/etc`.
-
-* Not have a fixed location for log output -such as `/var/log/something`
-
-* Run until explicitly terminated. Slider treats an application termination
-(which triggers a container release) as a failure -and reacts to it by restarting
-the container.
-
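-A minimal sketch of the port-0 idiom referenced above -the OS picks a free
-port, which the process can then publish for discovery:
-
-    import socket
-
-    # Bind to port 0: the OS assigns a free port, which the process can
-    # then advertise (e.g. via ZK) for dynamic discovery.
-    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    sock.bind(("0.0.0.0", 0))
-    sock.listen(1)
-    print("listening on port", sock.getsockname()[1])
-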
-
-
-## MUST NOT
-
-* Require human intervention at startup or termination.
-
-## SHOULD
-
-These are the features that we'd like from a service:
-
-* Publish the actual RPC and HTTP ports in a way that can be picked up, such as via ZK
-or an admin API.
-
-* Be configurable via the standard Hadoop mechanisms: text files and XML configuration files.
-If not, custom parsers/configuration generators will be required.
-
-* Support an explicit parameter to define the configuration directory.
-
-* Take late-binding params via -D args or similar
-
-* Be possible to exec without running a complex script, so that process inheritance works everywhere, including (for testing) OS/X
-
-* Provide a way for Slider to get the list of nodes in the cluster and their status. This will let Slider detect failed worker nodes and react to them.
-
-* FUTURE: If a graceful decommissioning is preferred, have an RPC method that a Slider provider can call to invoke this.
-
-* Be location aware from startup. Example: worker nodes to be allocated tables to serve based on which tables are
-stored locally/in-rack, rather than just randomly. This will accelerate startup time.
-
-* Support simple liveness probes (such as an HTTP GET operation).
-
-* Return a well documented set of exit codes, so that failures can be propagated
-  and understood.
-
-* Support cluster size flexing: the dynamic addition and removal of nodes.
-
-
-* Support a management platform such as Apache Ambari -so that the operational
-state of a Slider application can be monitored.
-
-## MAY
-
-* Include a single process that will run at a fixed location and whose termination
-can trigger application termination. Such a process will be executed
-in the same container as the Slider AM, and so known before all other containers
-are requested. If a live cluster is unable to handle restart/migration of 
-such a process, then the Slider application will be unable to handle
-Slider AM restarts.
-
-* Ideally: report on load/cost of decommissioning.
-  E.g. amount of data; app load.
-
-
-## MAY NOT
-
-* Be written for YARN.
-
-* Be (pure) Java. If the tarball contains native binaries for the cluster's hardware & OS,
-  they should be executable.
-
-* Be dynamically reconfigurable, except for the special requirement of handling
-movement of manager/peer containers in an application-specific manner.
-
-
diff --git a/src/site/markdown/slider_specs/application_package.md b/src/site/markdown/slider_specs/application_package.md
deleted file mode 100644
index 7039181..0000000
--- a/src/site/markdown/slider_specs/application_package.md
+++ /dev/null
@@ -1,147 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# App Package
-
-All application artifacts -app definition, app configuration, scripts- are packaged into a single structured package that can be handed off to any YARN application deployment tool, including Slider.
-
-## Overall Structure
-
-App package is a zip package containing all application artifacts. App package contains the following items:
-
-* **app definition file**
-application structure, content, definition, supported platforms, version, etc.
-
-* **default configurations folder**
-various configurations and configuration files associated with the application
-
-* **cmd_impl folder**
-management operations for the application/component
-
- * **scripts folder**
-various scripts that implement management operations
-
- * **templates folder**
-various templates used by the application
-
- * **files folder**
-other scripts, txt files, tarballs, etc.
-
-
-![Image](../../resources/images/app_package_sample_04.png)
-
-The example above shows a semi-expanded view of an application "HBASE-YARN-APP" and the package structure for OOZIE command scripts.
-
-## app definition
-
-App definition is a file named "metainfo.xml". The file contains application definition as described in [Application Definition](application_definition.md). 
-
-## default configurations
-
-This folder consists of various config files containing default configuration as described in [App Configuration](application_configuration.md).
-
-## package folder
-
-package includes the "implementation" of all management operations. The folders within are divided into scripts, templates, and files.
-
-### scripts folder
-
-Scripts are the implementation of management operations. There are five default operations and a composite operation. "restart" can be redefined to have a custom implementation.
-
-1. install
-
-2. configure
-
-3. start
-
-4. stop
-
-5. status
-
-6. restart (by default calls stop + start)
-
-The script specified in the metainfo is expected to understand the command. It can choose to call other scripts based on how the application author organizes the code base. For example:
-
-
-    # Script and check_process_status come from the slider-agent's
-    # resource_management library; oozie and oozie_service are assumed to be
-    # helper functions defined in sibling modules of this package.
-    from resource_management import *
-    from oozie import oozie
-    from oozie_service import oozie_service
-
-    class OozieServer(Script):
-      def install(self, env):
-        self.install_packages(env)
-        
-      def configure(self, env):
-        import params
-        env.set_params(params)
-        oozie(is_server=True)
-        
-      def start(self, env):
-        import params
-        env.set_params(params)
-        self.configure(env)
-        oozie_service(action='start')
-        
-      def stop(self, env):
-        import params
-        env.set_params(params)
-        oozie_service(action='stop')
-    
-      def status(self, env):
-        import status_params
-        env.set_params(status_params)
-        check_process_status(status_params.pid_file)
-
-
-
-The scripts are invoked in the following manner:
-
-    python SCRIPT COMMAND JSON_FILE PACKAGE_ROOT STRUCTURED_OUT_FILE
-
-* SCRIPT is the top level script that implements the commands for the component. 
-
-* COMMAND is one of the six commands listed above or can be a custom command as defined in Application Definition
-
-* JSON_FILE includes all configuration parameters and the values
-
-* PACKAGE_ROOT is the root folder of the package. From this folder, it's possible to access files, scripts, templates, packages (e.g. tarballs), etc. The app author has complete control over the structure of the package.
-
-* STRUCTURED_OUT_FILE is the file where the script can output structured data. The management infrastructure is expected to automatically report back STDOUT and STDERR.
-
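-For example, a START command for a hypothetical HBase master script (all
-paths here are illustrative) would be invoked as:
-
-    python package/scripts/hbase_master.py START /tmp/command-1.json \
-        /slider/app/definition /tmp/structured-out-1.json
-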
-A separate document (link TBD) discusses how the scripts are developed and the structure of the JSON_FILE containing the parameters.
-
-### templates folder
-
-Templates are configurable text files that are NOT regular config files. *A library has been developed that can materialize a complete site configuration file from a property bag, so site files are not generated from templates.* Other files such as env.sh files, log4j properties files, etc. may be derived from a template. Again, the implementor can choose to create these files from scratch and not use templates. The following operations are allowed during template expansion:
-
-* variable expansion
-
-* if condition
-
-* for loop
-
-* ...
-
-A sample template file for the dfs.exclude file, listing excluded/decommissioned hosts. `hdfs_exclude_file` is the property defined in params.py, which is populated from config parameters defined in the JSON_FILE.
-
-    {% if hdfs_exclude_file %} 
-    {% for host in hdfs_exclude_file %}
-    {{host}}
-    {% endfor %}
-    {% endif %}
-
-
-### files folder
-
-files is a directory to store any other files that are needed for management operations. Sample files stored here include tarballs used to install the application and shell scripts used by various operations.
-
diff --git a/src/site/markdown/slider_specs/canonical_scenarios.md b/src/site/markdown/slider_specs/canonical_scenarios.md
deleted file mode 100644
index 3f204d6..0000000
--- a/src/site/markdown/slider_specs/canonical_scenarios.md
+++ /dev/null
@@ -1,165 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Guidelines for Clients and Client Applications
-
-This document will define the canonical scenarios for the deployment and management of Slider hosted applications.  It will define the types of applications supported, the sequence of events for deploying the application types, and the management facilities exposed for the deployed applications.
-
-## Deployable Application Types
-
-The server-side components of an application (alternatively referred to as the application components) will be deployed and managed using a fairly uniform mechanism.  However, applications can be distinguished by their associated client implementation.  Specifically, each different client application type can yield different development, deployment, and management approaches.  There are two primary application client types:
-
-1. **New application client** - the application deployed to Slider will interact with a newly coded application client leveraging the Slider supplied client API and management facilities.  The client API will make use of distributed management facilities such as Zookeeper to provide the application client with the configuration information required for invoking remote application components.  For example, the application client (or an application hosted component, e.g. a web application) will leverage the API to lookup the appropriate host(s) and port(s) for RPC based communication.  Alternatively, if annotation libraries or an "app container" environment is provided, the appropriate values will be injected into the client process.
-
-2. **Existing, configurable client** - the application client predates deployment in the Slider environment, but is implemented in a manner that can be integrated with the Slider application client support facilities (APIs etc).  This case is probably very similar in nature to the new application client in terms of the mechanisms it will use for component discovery, but is distinguished by the fact that its development pre-dates Slider.  There are two possible variants of application client in this category:
-
- 1. A client that is static - the client is dependent on existing configuration properties to communicate with master components and the existing code cannot be altered (at least in the short term).  This type of client would require a support infrastructure that may be relatively complex (e.g. templating of configuration files, proxying of server components).
-
- 2. A client that can be enhanced - a client that can have its code altered can leverage a number of mechanisms (REST APIs, Zookeeper, a provided client discovery API) to obtain the information required to invoke the master components of the application.
-
-## Deployment Scenarios
-
-There are two primary deployment mechanisms to examine:  application component and client-side (application client) component deployment.
-
-## Application Component Deployment
-
-Applications generally are composed of one or more components.  In the deployment steps below, be advised that there may be a need to repeat some of the configuration/definition steps for each component.
-
-The process of deploying applications (specifically, the non-client components of an application) is:
-
-1. Compose an application package that contains:
-
-   1. An application definition that provides the following items:
-
-      1. name and version of application
-
-      2. component type(s) and role(s)
-
-      3. system requirements (RAM, CPUs, disk space etc)
-
-      4. ports required for RPC, UI
-
-      5. software dependencies (HDP deployed services required in the cluster, JDK versions, python versions, etc)
-
-      6. configurable properties including:
-
-         1. application-specific properties.  If the properties designate port numbers, the general recommendation would be to set them to zero to allow for the system to assign a dynamically selected port number that subsequently can be published to zookeeper.  Other properties designating remote components running on other hosts may need some additional support (floating IPs, discovery APIs, etc).  The processing of these properties by the infrastructure requires agreed upon mechanisms for identification (e.g. all properties beginning with the application name are passed through as application properties with the application name prefix stripped)
-
-         2. Slider-specific properties.  The following Slider properties are currently available:
-
-            1. yarn.memory
-
-            2. Environment variables specified with a "env." prefix (e.g. env.MALLOC_ARENA_MAX)
-
-            3. role.instances
-
-            4. role.name
-
-            5. yarn.vcores
-
-   2. Other application artifacts (binaries etc)
-
-2. Install the application
-
-   1. The application package will be made available in a designated HDFS location
-
-   2. If there is a managed application client component it will be deployed to selected nodes as defined in the cluster specification.
-
-   3. Slider interacts with yarn (resource manager and node manager(s)) to populate the local resources on the designated nodes.
-
-   4. Some properties important for facilitating remote interaction with the deployed components are advertised via zookeeper (though this may actually take place during component start up as the assignment of ports is a late-binding operation.  Alternatively, developers may be encouraged to store these values in a registry rather than as direct-application configuration properties).
-
-## Client Application Deployment
-
-Although application clients are components that are deployed using mechanisms similar to other application components (especially in the managed case), there are a number of features that distinguish them:
-
-1. **configuration** - client applications generally require some information (host names, ports, etc) to ascertain the correct networking values required to communicate with the application's server components.  In order to work in a yarn deployment, this configuration may need to be manipulated to allow proper operation (e.g. the configuration files updated with correct values, the configuration properties ascertained dynamically from services such as Zookeeper)
-
-2. **execution context** - it may be necessary to provide an execution environment for application clients that allows for discovery mechanisms (dependency injection, annotation libraries, etc)
-
-For each of these application client types there are two possible deployment modes:
-
-* **Managed** - the application client is deployed via Slider mechanisms.  Clients, in this context, differ from the other application components in that they are not long-running daemon processes.  However, in a managed environment there is the expectation that the appropriate binaries and application elements will be distributed to the designated client hosts, and the configuration on those hosts will be updated to allow for execution of requests to the application’s master/server components.  Therefore, client components should be defined in the application specification as elements that the management infrastructure supports (Figure 1).
-
-![Image](../images/managed_client.png)
-Figure 1 - Managed Application Client and associated Slider Application
-
-* **Unmanaged** - the application client is run as a process outside of Slider/yarn, although it may leverage Slider provided libraries that allow for server component discovery etc (Figure 2).  These libraries would primarily be client bindings providing access to the registry leveraged by Slider (e.g. Java and python bindings to Zookeeper)
-
-![Image](../images/unmanaged_client.png)
-Figure 2 - Unmanaged Application Client and associated Slider Application
-
-### Managed Application Client
-
-A managed application client is a component defined as part of the Slider/yarn application (i.e. it is part of the application definition presented to Slider).  As such, it is deployed and managed via standard Slider/yarn mechanisms.  This type of application client is more than likely already configured and written to work in a yarn environment.
-
-There are two primary needs to be met for a properly functioning client:
-
-1. **Discovery** - as a client, it is important that the client application retrieve the information it requires in order to communicate with the remote application components.  As application components are spawned they (or the associated container agent) will advertise the relevant information using zookeeper.  It will be up to the client (or the associated Slider client library) to contact zookeeper and retrieve the requested information.
-
-2. **Configuration** - there may be use cases in which a large number of configuration items are required by the client for its processing.  In such cases it is more appropriate for a client to perform a bulk download of the application component(s) configuration as a JSON or XML document (via zookeeper or Slider-app comm?)
-
-Whether ascertained via discovery or bulk configuration retrieval, the attributes that the client obtains will more than likely need to be populated into the client’s configuration files.  Therefore, a templating facility or the like should be provided to allow for such configuration file manipulation.
-
-### Unmanaged Application Client
-
-An unmanaged application client is a standalone application that leverages application components deployed into the Slider/yarn environment.  It is not possible to predict the deployment mechanisms or network topology employed for an unmanaged application.  However, it is feasible to provide some guidance and/or software (APIs etc) to allow for application component discovery and communication.
-
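-As a sketch of such discovery using a python ZooKeeper binding (the ensemble
-address and registry path are purely illustrative; the actual layout is
-defined by the application's advertisement scheme):
-
-    from kazoo.client import KazooClient
-
-    # Connect to the ZooKeeper ensemble used as the registry.
-    zk = KazooClient(hosts="zk1.example.com:2181")
-    zk.start()
-    # Read an advertised endpoint from a hypothetical registry path.
-    data, _stat = zk.get("/registry/hbase/master")
-    print("master endpoint:", data.decode("utf-8"))
-    zk.stop()
-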
-## Application Management
-
-Post deployment, the Slider infrastructure will provide the requisite set of administrative facilities required for application management, including the ability to start/stop the application, monitor the application, and reconfigure the application. 
-
-### General Management
-
-There is one general management command:
-
-* List Yarn Apps - returns a listing of deployed yarn apps and associated information:
-
- * name and version
-
- * dependencies (required HDP services and versions, etc)
-
- * configuration properties
-
- * components/roles and associated configuration
-
-### Application/Component Management
-
-The following administrative functions are supported for applications:
-
-* Install the application - the installation command will take care of the population of the application resources into the pre-determined application resource directory in HDFS.  The required configuration and binary directories will also be created.
-
-* start/thaw the application - Slider/Yarn runtime negotiates and instantiates the number of component containers designated by the cluster description and the components are started.
-
-* stop/freeze the application - the application (or a subset of its components) can be stopped.
-
-* get application status - the retrieval of application status may take a number of forms, including:
-
- * liveness of service components
-
- * operational metrics (application-level or component-level)
-
- * viewing of logs
-
-* get application configuration - the configuration of application components is retrieved (JSON or XML form)
-
-* get cluster configuration - the cluster configuration is retrieved (number of various application components, associated hosts etc)
-
-* get cluster history
-
-* re-configure cluster
-
diff --git a/src/site/markdown/slider_specs/creating_app_definitions.md b/src/site/markdown/slider_specs/creating_app_definitions.md
deleted file mode 100644
index b89df62..0000000
--- a/src/site/markdown/slider_specs/creating_app_definitions.md
+++ /dev/null
@@ -1,123 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Slider AppPackage
-
-A Slider AppPackage is a declarative definition of an application for application management. An AppPackage is not a packaging scheme for application binaries and artifacts alone -tarballs, zip files, rpms, etc. are available for that purpose. Rather, an AppPackage includes the application binaries along with the other artifacts necessary for application management.
-
-An application instance consists of several active components such as one or more masters and several slaves. There may be a number of accompanying processes in addition to the basic master/slave processes - let's refer to all processes as app component instances. When run in the context of YARN, the application-specific processes are activated within individual YARN containers. If you pry into a YARN container (created through Slider), the roles of the Slider-Agent and the actual application components become apparent. The following image provides a high-level view. Within a container there are at least two processes - an instance of a slider agent and an instance of an application component. The application can itself spawn more processes if needed.
-
-![Image](../../resources/images/slider-container.png?raw=true)
-
-Figure 1 - High-level view of a container
-
-For example:
-	
-    yarn      8849  -- python ./infra/agent/slider-agent/agent/main.py --label container_1397675825552_0011_01_000003___HBASE_REGIONSERVER --host AM_HOST --port 47830
-    yarn      9085  -- bash /hadoop/yarn/local/usercache/yarn/appcache/application_1397675825552_0011/ ... internal_start regionserver
-    yarn      9114 -- /usr/jdk64/jdk1.7.0_45/bin/java -Dproc_regionserver -XX:OnOutOfMemoryError=...
-
-The above list shows three processes: the Slider-Agent process, the bash script that starts the HBase Region Server, and the HBase Region Server itself. *These three processes together constitute the container*.
-
-## Using an AppPackage
-The following command creates an HBase application using the AppPackage for HBase.
-
-	  ./slider create cl1 --image hdfs://NN:8020/slider/agent/slider-agent.tar.gz --template /work/appConf.json --resources /work/resources.json
-	
-Let's analyze the various parameters from the perspective of app creation:
-  
-* `--image`: the Slider agent tarball
-* `--template`: app configuration
-* `--resources`: yarn resource requests
-* … other parameters are described in accompanying docs. 
-
-### AppPackage
-The structure of an AppPackage is described at [AppPackage](application_package.md).
-
-In the enlistment, there are three example AppPackages:
-
-* `app-packages/hbase-v0_96`
-* `app-packages/accumulo-v1_5`
-* `app-packages/storm-v0_91`
-
-The above folders, with minor edits, can be packaged as *zip* files to get the corresponding AppPackages. The application tarball file, containing the binaries/artifacts of the application itself, is a component within the AppPackage. They are:
-
-* For hbase - `app-packages/hbase-v0_96/package/files/hbase-0.96.1-hadoop2-bin.tar.gz.REPLACE`
-* For accumulo - `app-packages/accumulo-v1_5/package/files/accumulo-1.5.1-bin.tar.gz.REPLACE`
-* For storm - `app-packages/storm-v0_91/package/files/apache-storm-0.9.1.2.1.1.0-237.tar.gz.placeholder`
-
-**They are placeholder files**, mostly because the files themselves are too large to check in, and because users are free to use their own version of the package. To create a Slider AppPackage, replace the file with an actual application tarball and then ensure that the metainfo.xml has the correct file name. After that, create a zip file using standard zip commands and ensure that the package has the metainfo.xml file at the root folder.
-
-For example:
-
-    cd slider/app-packages/hbase-v0_96
-    zip -r hbase_v096.zip .
-
-Looking at the content through `unzip -l hbase_v096.zip`:
-
-```
-Archive:  hbase_v096.zip
-  Length     Date   Time    Name
- --------    ----   ----    ----
-     3163  05-16-14 16:32   appConfig.json
-        0  05-02-14 07:51   configuration/
-     5077  05-02-14 07:51   configuration/global.xml
-     5248  05-02-14 07:51   configuration/hbase-log4j.xml
-     2250  05-02-14 07:51   configuration/hbase-policy.xml
-    14705  05-02-14 07:51   configuration/hbase-site.xml
-     3332  05-16-14 16:33   metainfo.xml
-        0  05-02-14 07:51   package/
-        0  05-19-14 20:36   package/files/
- 83154798  05-19-14 20:36   package/files/hbase-0.96.1-hadoop2-bin.tar.gz
-        0  05-02-14 07:51   package/scripts/
-      787  05-02-14 07:51   package/scripts/__init__.py
-     1378  05-02-14 07:51   package/scripts/functions.py
-     3599  05-02-14 07:51   package/scripts/hbase.py
-     1205  05-02-14 07:51   package/scripts/hbase_client.py
-     1640  05-02-14 07:51   package/scripts/hbase_master.py
-     1764  05-02-14 07:51   package/scripts/hbase_regionserver.py
-     1482  05-02-14 07:51   package/scripts/hbase_service.py
-     4924  05-02-14 07:51   package/scripts/params.py
-      973  05-02-14 07:51   package/scripts/status_params.py
-        0  05-02-14 07:51   package/templates/
-     2723  05-02-14 07:51   package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
-     2723  05-02-14 07:51   package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
-     3878  05-02-14 07:51   package/templates/hbase-env.sh.j2
-      909  05-02-14 07:51   package/templates/hbase_client_jaas.conf.j2
-      989  05-02-14 07:51   package/templates/hbase_master_jaas.conf.j2
-     1001  05-02-14 07:51   package/templates/hbase_regionserver_jaas.conf.j2
-      837  05-02-14 07:51   package/templates/regionservers.j2
-      357  05-12-14 12:04   resources.json
- --------                   -------
- 83219742                   29 files
-```
-
-Sample **resources.json** and **appConfig.json** files are also included in the enlistment. These are samples and are typically tested on one-node test installations.
-
-
-### --template appConfig.json
-An appConfig.json contains the application configuration. See [Specifications InstanceConfiguration](application_instance_configuration.md) for details on how to create a template config file. The enlistment includes sample config files for HBase, Accumulo, and Storm.
-
-
-### --resources resources.json
-Resource specification is an input to Slider that specifies the Yarn resource needs for each component type belonging to the application. [Specification of Resources](resource_specification.html) describes how to write a resource config json file. The enlistment includes sample config files for HBase, Accumulo, and Storm.
-
-
-## Scripting for AppPackage
-Refer to [App Command Scripts](writing_app_command_scripts.html) for details on how to write scripts for an AppPackage. These scripts live in the package/scripts folder within the AppPackage. *Use the checked-in samples for HBase/Storm/Accumulo as references for script development.*
-
-
-
diff --git a/src/site/markdown/slider_specs/index.md b/src/site/markdown/slider_specs/index.md
deleted file mode 100644
index 114950c..0000000
--- a/src/site/markdown/slider_specs/index.md
+++ /dev/null
@@ -1,53 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# PROJECT SLIDER
-
-## Introduction
-
-**SLIDER: A collection of tools and technologies to simplify the packaging, deployment and management of long-running applications on YARN.**
-
-- Availability (always-on) - YARN works with the application to ensure recovery or restart of running application components.
-- Flexibility (dynamic scaling) - YARN provides the application with the facilities to allow for scale-up or scale-down
-- Resource Mgmt (optimization) - YARN handles allocation of cluster resources.
-
-## Terminology
-
-### Apps on YARN
- - Application written to run directly on YARN
- - Packaging, deployment and lifecycle management are custom built for each application
-
-### Slider Apps
- - Applications deployed and managed on YARN using Slider
- - Use of Slider minimizes custom code for deployment + lifecycle management
- - Requires apps to follow Slider guidelines and packaging ("Sliderize")
-
-Specifications
----
-
-The entry points to leverage Slider are:
-
-- [Application Needs](application_needs.html) What it takes to be deployable by Slider. 
- - [Slider AppPackage](creating_app_definitions.html) Overview of how to create a Slider AppPackage.
-- [Specifications for AppPackage](application_package.html) Describes the structure of an AppPackage
-- [Specifications for Application Definition](application_definition.html) How to write metainfo.xml?
-- [Specifications for Configuration](application_configuration.html) How to create a template config file for an app?
-- [Specification of Resources](resource_specification.html) How to write a resource spec for an app?
-- [Specifications InstanceConfiguration](application_instance_configuration.html) How to write a template config for an app?
-- [Guidelines for Clients and Client Applications](canonical_scenarios.html)
-- [Documentation for "General Developer Guidelines"](app_developer_guideline.html)
-		
\ No newline at end of file
diff --git a/src/site/markdown/slider_specs/resource_specification.md b/src/site/markdown/slider_specs/resource_specification.md
deleted file mode 100644
index 6d41f9e..0000000
--- a/src/site/markdown/slider_specs/resource_specification.md
+++ /dev/null
@@ -1,53 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Resource Specification
-Resource specification is an input to Slider to specify the Yarn resource needs for each component type that belong to the application.
-
-An example resource requirement for an application that has two components, a master and a worker, is shown below. Slider automatically adds the requirements for the application's AppMaster; this component is named "slider-appmaster".
-
-Some parameters that can be specified for a component instance include:
-
-* `yarn.memory`: amount of memory required for the component instance
-* `yarn.vcores`: number of vcores requested
-* `yarn.role.priority`: each component must be assigned a unique priority. Components with higher priority come up earlier than components with lower priority
-* `yarn.component.instances`: number of instances for this component type
-
-Sample:
-
-    {
-      "schema" : "http://example.org/specification/v2.0.0",
-      "metadata" : {
-      },
-      "global" : {
-      },
-      "components" : {
-        "HBASE_MASTER" : {
-          "yarn.role.priority" : "1",
-          "yarn.component.instances" : "1",
-          "yarn.memory" : "768",
-          "yarn.vcores" : "1"
-        },
-        "slider-appmaster" : {
-        },
-        "HBASE_REGIONSERVER" : {
-          "yarn.role.priority" : "2",
-          "yarn.component.instances" : "1"
-        }
-      }
-    }
-
diff --git a/src/site/markdown/slider_specs/writing_app_command_scripts.md b/src/site/markdown/slider_specs/writing_app_command_scripts.md
deleted file mode 100644
index fd025d4..0000000
--- a/src/site/markdown/slider_specs/writing_app_command_scripts.md
+++ /dev/null
@@ -1,211 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Developing App Command Scripts
-
-App command implementations follow a standard structure so that they can be invoked in a uniform manner. For any command, the python scripts are invoked as:
-
-    python SCRIPT COMMAND JSON_FILE PACKAGE_ROOT STRUCTURED_OUT_FILE
-
-* SCRIPT is the top level script that implements the commands for the component. 
-
-* COMMAND is one of the default commands (START, STOP, INSTALL, CONFIG, RESTART, STATUS) or any custom command. 
-
-* JSON_FILE includes all configuration parameters and the values. 
-
-* PACKAGE_ROOT is the root folder of the package. From this folder, it is possible to access files, scripts, templates, packages (e.g. tarballs), etc. The Yarn-App author has complete control over the structure of the package, as long as the PACKAGE_ROOT and SCRIPT paths are known to the management tool. 
-
-* STRUCTURED_OUT_FILE is the file where the script can output structured data. 
-
-The management infrastructure is expected to automatically report back STDOUT and STDERR.
-
-Sample:
-
-    python /apps/HBASE_ON_YARN/package/scripts/hbase_regionserver.py START /apps/commands/cmd_332/command.json /apps/HBASE_ON_YARN/package /apps/commands/cmd_332/strout.txt
-
-**Note**: The above is how the Slider-Agent invokes the scripts. It is provided as a reference for developing the scripts themselves, as well as a way to test/debug them.
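-
-As a concrete illustration, a script's entry point can unpack this contract
-directly. A minimal, hypothetical sketch (the STATUS payload shown is
-illustrative, not part of any defined protocol):
-
-    import json
-    import sys
-
-    def main():
-        # argv layout: SCRIPT is argv[0], then
-        # COMMAND JSON_FILE PACKAGE_ROOT STRUCTURED_OUT_FILE
-        command, json_file, package_root, structured_out_file = sys.argv[1:5]
-        # all configuration parameters and values arrive in the JSON file
-        with open(json_file) as f:
-            params = json.load(f)
-        if command == "STATUS":
-            # structured data is reported back through the supplied file
-            with open(structured_out_file, "w") as out:
-                json.dump({"status": "RUNNING"}, out)
-
-    if __name__ == "__main__":
-        main()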
-
-## Structure of the JSON-formatted parameters
-
-The parameters are organized as multi-layer name-value pairs.
-
-    {
-        "commandId": "Command Id as assigned by Slider",
-        "command": "Command being executed",
-        "commandType": "Type of command",
-        "clusterName": "Name of the cluster",
-        "appName": "Name of the app",
-        "componentName": "Name of the component",
-        "hostname": "Name of the host",
-        "public_hostname": "FQDN of the host",
-        "hostParams": {
-            "host specific parameters common to all commands"
-        },
-        "componentParams": {
-            "component specific parameters, if any"
-        },
-        "commandParams": {
-            "command specific parameters, usually used in case of custom commands"
-        },
-        "configurations": {
-            "app-global-config": {
-            },
-            "config-type-2": {
-            },
-            "config-type-3": {
-            }
-        }
-    }
-
-
-## Sample configuration parameters
-
-    {
-      "commandId": "2-2",
-      "command": "START",
-      "commandType": "EXECUTION_COMMAND",
-      "clusterName": "c1",
-      "appName": "HBASE",
-      "componentName": "HBASE_MASTER",
-      "hostParams": {
-          "java_home": "/usr/jdk64/jdk1.7.0_45"
-      },
-      "componentParams": {},
-      "commandParams": {},
-      "hostname": "c6403.ambari.apache.org",
-      "public_hostname": "c6403.ambari.apache.org",
-      "configurations": {
-          "hbase-log4j": {
-           "log4j.threshold": "ALL",
-           "log4j.rootLogger": "${hbase.root.logger}",
-           "log4j.logger.org.apache.zookeeper": "INFO",
-           "log4j.logger.org.apache.hadoop.hbase": "DEBUG",
-           "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher": "INFO",
-           "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil": "INFO",
-           "log4j.category.SecurityLogger": "${hbase.security.logger}",
-           "log4j.appender.console": "org.apache.log4j.ConsoleAppender",
-           "log4j.appender.console.target": "System.err",
-           "log4j.appender.console.layout": "org.apache.log4j.PatternLayout",
-           "log4j.appender.console.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-           "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender",
-           "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout",
-           "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n",
-           "log4j.appender.RFAS.MaxFileSize": "${hbase.security.log.maxfilesize}",
-           "log4j.appender.RFAS.MaxBackupIndex": "${hbase.security.log.maxbackupindex}",
-           "log4j.appender.RFAS.File": "${hbase.log.dir}/${hbase.security.log.file}",
-           "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender",
-           "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout",
-           "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-           "log4j.appender.RFA.MaxFileSize": "${hbase.log.maxfilesize}",
-           "log4j.appender.RFA.MaxBackupIndex": "${hbase.log.maxbackupindex}",
-           "log4j.appender.RFA.File": "${hbase.log.dir}/${hbase.log.file}",
-           "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender",
-           "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender",
-           "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout",
-           "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-           "log4j.appender.DRFA.File": "${hbase.log.dir}/${hbase.log.file}",
-           "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd",
-           "log4j.additivity.SecurityLogger": "false",
-           "hbase.security.logger": "INFO,console",
-           "hbase.security.log.maxfilesize": "256MB",
-           "hbase.security.log.maxbackupindex": "20",
-           "hbase.security.log.file": "SecurityAuth.audit",
-           "hbase.root.logger": "INFO,console",
-           "hbase.log.maxfilesize": "256MB",
-           "hbase.log.maxbackupindex": "20",
-           "hbase.log.file": "hbase.log",
-           "hbase.log.dir": "."
-          },
-          "app-global-config": {
-           "security_enabled": "false",
-           "pid_dir": "/hadoop/yarn/log/application_1394053491953_0003/run",
-           "log_dir": "/hadoop/yarn/log/application_1394053491953_0003/log",
-           "tmp_dir": "/hadoop/yarn/log/application_1394053491953_0003/tmp",
-           "user_group": "hadoop",
-           "user": "hbase",
-           "hbase_regionserver_heapsize": "1024m",
-           "hbase_master_heapsize": "1024m",
-           "fs_default_name": "hdfs://c6403.ambari.apache.org:8020",
-           "hdfs_root": "/apps/hbase/instances/01",
-           "zookeeper_node": "/apps/hbase/instances/01",
-           "zookeeper_quorom_hosts": "c6403.ambari.apache.org",
-           "zookeeper_port": "2181"
-          },
-          "hbase-site": {
-           "hbase.hstore.flush.retries.number": "120",
-           "hbase.client.keyvalue.maxsize": "10485760",
-           "hbase.hstore.compactionThreshold": "3",
-           "hbase.rootdir": "hdfs://c6403.ambari.apache.org:8020/apps/hbase/instances/01/data",
-           "hbase.stagingdir": "hdfs://c6403.ambari.apache.org:8020/apps/hbase/instances/01/staging",
-           "hbase.regionserver.handler.count": "60",
-           "hbase.regionserver.global.memstore.lowerLimit": "0.38",
-           "hbase.hregion.memstore.block.multiplier": "2",
-           "hbase.hregion.memstore.flush.size": "134217728",
-           "hbase.superuser": "yarn",
-           "hbase.zookeeper.property.clientPort": "2181",
-           "hbase.regionserver.global.memstore.upperLimit": "0.4",
-           "zookeeper.session.timeout": "30000",
-           "hbase.tmp.dir": "/hadoop/yarn/log/application_1394053491953_0003/tmp",
-           "hbase.hregion.max.filesize": "10737418240",
-           "hfile.block.cache.size": "0.40",
-           "hbase.security.authentication": "simple",
-           "hbase.defaults.for.version.skip": "true",
-           "hbase.zookeeper.quorum": "c6403.ambari.apache.org",
-           "zookeeper.znode.parent": "/apps/hbase/instances/01",
-           "hbase.hstore.blockingStoreFiles": "10",
-           "hbase.hregion.majorcompaction": "86400000",
-           "hbase.security.authorization": "false",
-           "hbase.cluster.distributed": "true",
-           "hbase.hregion.memstore.mslab.enabled": "true",
-           "hbase.client.scanner.caching": "100",
-           "hbase.zookeeper.useMulti": "true",
-           "hbase.regionserver.info.port": "",
-           "hbase.master.info.port": "60010"
-          }
-      }
-    }
-
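-Packages typically centralize such lookups in their `params.py` module. A
-minimal, self-contained sketch of the idea, operating on the parsed command
-JSON above (the checked-in sample scripts obtain this dict through the
-agent's resource_management helpers rather than a plain argument):
-
-    def extract_params(config):
-        # pull frequently used values out of the parsed command JSON
-        configurations = config["configurations"]
-        hbase_site = configurations["hbase-site"]
-        app_global = configurations["app-global-config"]
-        return {
-            "hbase_root_dir": hbase_site["hbase.rootdir"],
-            "log_dir": app_global["log_dir"],
-            "pid_dir": app_global["pid_dir"],
-        }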
-
-## Sample command script
-
-    # A runnable rendering of the sample: Script and check_process_status are
-    # assumed to come from the agent's resource_management library; oozie()
-    # and oozie_service() are helpers defined in the package's own scripts.
-    from resource_management import *
-
-    class OozieServer(Script):
-      def install(self, env):
-        # install any OS packages the component needs
-        self.install_packages(env)
-
-      def configure(self, env):
-        import params
-        env.set_params(params)
-        oozie(is_server=True)
-
-      def start(self, env):
-        import params
-        env.set_params(params)
-        self.configure(env)
-        oozie_service(action='start')
-
-      def stop(self, env):
-        import params
-        env.set_params(params)
-        oozie_service(action='stop')
-
-      def status(self, env):
-        import status_params
-        env.set_params(status_params)
-        check_process_status(status_params.pid_file)
-
-    if __name__ == "__main__":
-      OozieServer().execute()
-
-
-
diff --git a/src/site/markdown/specification/cli-actions.md b/src/site/markdown/specification/cli-actions.md
deleted file mode 100644
index 75cfaf0..0000000
--- a/src/site/markdown/specification/cli-actions.md
+++ /dev/null
@@ -1,675 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# CLI Actions
-
- 
-## Important
-
-1. This document is still being updated from the original Hoya design.
-1. The new cluster model of separated specification files for internal, resource and application configuration
-has not been incorporated.
-1. What is up to date is the CLI command list and arguments.
- 
-## Client configuration
- 
-As well as the CLI options, the `conf/slider-client.xml` XML file can define arguments used to communicate with the application instance.
-
-
-#### `fs.defaultFS`
-
-Equivalent to setting the filesystem with `--filesystem`
-
-
-
-## Common
-
-### System Properties
-
-Arguments of the form `-S key=value` define JVM system properties.
-
-These are supported primarily to define options needed for some Kerberos configurations.
-
-### Definitions
- 
-Arguments of the form `-D key=value` define Hadoop configuration properties.
-
-These can define client options that are not set in `conf/slider-client.xml`, or override those that are.
- 
-### Cluster names
-
-All actions that require an instance name will fail with `EXIT_UNKNOWN_INSTANCE`
-if one is not provided.
-
-## Action: Build
-
-Builds a cluster: creates all the on-filesystem data structures and generates a cluster description
-that is both well-defined and deployable, *but does not actually start the cluster*.
-
-    build (instancename,
-      options: List[(String, String)],
-      components: List[(String, int)],
-      componentOptions: List[(String, String, String)],
-      resourceOptions: List[(String, String)],
-      resourceComponentOptions: List[(String, String, String)],
-      confdir: URI,
-      provider: String,
-      zkhosts,
-      zkport,
-      image,
-      apphome,
-      appconfdir)
-
-#### Preconditions
-
-(Note that the ordering of these preconditions is not guaranteed to remain constant)
-
-The instance name is valid
-
-    if not valid-instance-name(instancename) : raise SliderException(EXIT_COMMAND_ARGUMENT_ERROR)
-
-The instance must not be live. This is purely a safety check as the next test should have the same effect.
-
-    if slider-instance-live(YARN, instancename) : raise SliderException(EXIT_CLUSTER_IN_USE)
-
-The instance must not exist
-
-    if is-dir(HDFS, instance-path(FS, instancename)) : raise SliderException(EXIT_CLUSTER_EXISTS)
-
-The configuration directory must exist; it does not have to be on the instance's HDFS filesystem,
-as it will be copied there. It must contain only files:
-
-    let FS = FileSystem.get(appconfdir)
-    if not isDir(FS, appconfdir) : raise SliderException(EXIT_COMMAND_ARGUMENT_ERROR)
-    forall f in children(FS, appconfdir) :
-        if not isFile(f): raise IOException
-
-There's a race condition at build time: between the preconditions being met and the instance specification
-being saved, the instance may be created by another process. This is addressed by creating a lock file,
-`writelock.json`, in the destination directory. If the file exists, no other process may acquire the lock.
-
-There is a less exclusive readlock file, `readlock.json`, which may be created by any process that wishes to read the configuration.
-If it exists when another process wishes to access the files, the subsequent process may read the data, but MUST NOT delete it
-afterwards. A process attempting to acquire the writelock must check for the existence of this file before AND after creating the
-writelock file, failing if it is present. This retains a small race condition: a second or later reader may still be reading the data
-when a process successfully acquires the write lock. If this proves to be an issue, a stricter model could be implemented, with each reading process creating a uniquely named readlock file.
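-
-The protocol can be sketched in Python against a local filesystem (Slider
-itself performs these steps against HDFS; file names as described above):
-
-    import os
-
-    def acquire_write_lock(instance_dir):
-        readlock = os.path.join(instance_dir, "readlock.json")
-        writelock = os.path.join(instance_dir, "writelock.json")
-        # a reader may hold the directory: check before taking the lock
-        if os.path.exists(readlock):
-            return False
-        try:
-            # exclusive create: fails if another writer got there first
-            fd = os.open(writelock, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
-            os.close(fd)
-        except FileExistsError:
-            return False
-        # re-check for readers after taking the lock, as specified above
-        if os.path.exists(readlock):
-            os.remove(writelock)
-            return False
-        return True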
-
-
-
-
-#### Postconditions
-
-All the instance directories exist
-
-    is-dir(HDFS', instance-path(HDFS', instancename))
-    is-dir(HDFS', original-conf-path(HDFS', instancename))
-    is-dir(HDFS', generated-conf-path(HDFS', instancename))
-
-The application cluster specification saved is well-defined and deployable
-
-    let instance-description = parse(data(HDFS', instance-json-path(HDFS', instancename)))
-    well-defined-instance(instance-description)
-    deployable-application-instance(HDFS', instance-description)
-
-More precisely: the generated specification is checked to be well-defined and deployable before it is saved as JSON; no JSON file will be created
-if the validation fails.
-
-Fields in the cluster description have been filled in
-
-    internal.global["internal.provider.name"] == provider
-    app_conf.global["zookeeper.port"] == zkport
-    app_conf.global["zookeeper.hosts"] == zkhosts
-
-    package => app_conf.global["agent.package"] == package
-
-Any `apphome` and `image` properties have propagated
-
-    apphome == null or clusterspec.options["cluster.application.home"] == apphome
-    image == null or clusterspec.options["cluster.application.image.path"] == image
-
-(The `well-defined-application-instance()` requirement above defines the valid states
-of this pair of options)
-
-
-All role sizes have been mapped to `yarn.component.instances` fields
-
-    forall (name, size) in components :
-        resources.components[name]["yarn.component.instances"] == size
-
-
-
-
-All option parameters have been added to the `options` map in the specification
-
-    forall (opt, val) in options :
-        app_conf.global[opt] == val
-        
-    forall (opt, val) in resourceOptions :
-        resources.global[opt] == val
-
-All component option parameters have been added to the specific component's option map
-in the relevant configuration file
-
-    forall (name, opt, val) in componentOptions :
-        app_conf.components[name][opt] == val
-
-    forall (name, opt, val) in resourceComponentOptions :
-        resources.components[name][opt] == val
-
-To avoid some confusion as to where keys go, all options beginning with the
-prefixes `component.`, `role.` or `yarn.` are automatically copied into the resources file:
-
-    forall (opt, val) in options where startswith(opt, "component.")
-            or startswith(opt, "role.")
-            or startswith(opt, "yarn."):
-        resources.global[opt] == val
-
-    forall (name, opt, val) in componentOptions where startswith(opt, "component.") 
-            or startswith(opt, "role.") 
-            or startswith(opt, "yarn."):
-        resources.components[name][opt] == val
-          
-
-There's no explicit rejection of duplicate options; the outcome of that
-state is 'undefined'. 
-
-What is defined is that if Slider or its provider provided a default option value,
-the command-line supplied option will override it.
-
-All files that were in the configuration directory are now copied into the "original" configuration directory
-
-    let FS = FileSystem.get(appconfdir)
-    let dest = original-conf-path(HDFS', instancename)
-    forall c in children(FS, appconfdir) :
-        data(HDFS', dest + [filename(c)]) == data(FS, c)
-
-All files that were in the configuration directory now have equivalents in the generated configuration directory
-
-    let FS = FileSystem.get(appconfdir)
-    let dest = generated-conf-path(HDFS', instancename)
-    forall c in children(FS, appconfdir) :
-        isfile(HDFS', dest + [filename(c)])
-
-
-## Action: Thaw
-
-    thaw <instancename> [--wait <timeout>]
-
-Thaw takes an application instance with configuration and (possibly) data on disk, and
-attempts to create a live application with the specified number of nodes.
-
-#### Preconditions
-
-    if not valid-instance-name(instancename) : raise SliderException(EXIT_COMMAND_ARGUMENT_ERROR)
-
-The cluster must not be live. This is purely a safety check as the next test should have the same effect.
-
-    if slider-instance-live(YARN, instancename) : raise SliderException(EXIT_CLUSTER_IN_USE)
-
-The cluster must not exist
-
-    if is-dir(HDFS, application-instance-path(FS, instancename)) : raise SliderException(EXIT_CLUSTER_EXISTS)
-
-The cluster specification must exist, be valid and deployable
-
-    if not is-file(HDFS, cluster-json-path(HDFS, instancename)) : SliderException(EXIT_UNKNOWN_INSTANCE)
-    if not well-defined-application-instance(HDFS, application-instance-path(HDFS, instancename)) : raise SliderException(EXIT_BAD_CLUSTER_STATE)
-    if not deployable-application-instance(HDFS, application-instance-path(HDFS, instancename)) : raise SliderException(EXIT_BAD_CLUSTER_STATE)
-
-#### Postconditions
-
-
-After the thaw has been performed, there is now a queued request in YARN
-for the chosen (how?) queue
-
-    YARN'.Queues'[amqueue] = YARN.Queues[amqueue] + [launch("slider", instancename, requirements, context)]
-
-If a wait timeout was specified, the CLI waits until the application is considered
-running by YARN (the AM is running), the wait timeout has been reached, or
-the application has failed
-
-    waittime < 0 or (exists a in slider-running-application-instances(yarn-application-instances(YARN', instancename, user))
-        where a.YarnApplicationState == RUNNING)
-
-
-## Outcome: AM-launched state
-
-Some time after the AM was queued, if the relevant
-prerequisites of the launch request are met, the AM will be deployed
-
-#### Preconditions
-
-* The resources referenced in HDFS (still) are accessible by the user
-* The requested YARN memory and core requirements could be met on the YARN cluster and 
-specific YARN application queue.
-* There is sufficient capacity in the YARN cluster to create a container for the AM.
-
-#### Postconditions
-
-Define the YARN state at a specific time `t` as `YARN(t)`.
-
-The AM is deployed if there is some time `t1` after the submission time `t0`
-at which the application is listed as live
-
-    exists t1 where t1 > t0 and slider-instance-live(YARN(t1), user, instancename)
-
-At that time there is a container in the cluster hosting the AM; its
-context is the launch context
-
-    exists c in containers(YARN(t1)) where container.context = launch.context
-
-There's no way to determine when this time `t1` will be reached, or if it ever
-will: the launch may be postponed due to a lack of resources and/or higher-priority
-requests using resources as they become available.
-
-For tests on a dedicated YARN cluster, a few tens of seconds appear to be enough
-for the AM-launched state to be reached, a failure to occur, or to conclude
-that the resource requirements are unsatisfiable.
-
-## Outcome: AM-started state
-
-A (usually short) time after the AM is launched, it should start, provided that:
-
-* The node hosting the container is working reliably
-* The supplied command line could start the process
-* the localized resources in the context could be copied to the container (which implies
-that they are readable by the user account the AM is running under)
-* The combined classpath of YARN, extra JAR files included in the launch context,
-and the resources in the slider client 'conf' dir contain all necessary dependencies
-to run Slider.
-* There's no issue with the cluster specification that causes the AM to exit
-with an error code.
-
-Node failures and command-line failures are treated by YARN as an AM failure, which
-will trigger a restart attempt; this may be on the same or a different node.
-
-#### Preconditions
-
-The AM was launched at an earlier time, `t1`
-
-    exists t1 where t1 > t0 and am-launched(YARN(t1))
-
-
-#### Postconditions
-
-The application has actually started if it is listed in the YARN application list
-as being in the state `RUNNING`, an RPC port has been registered with YARN (visible as the `rpcPort`
-attribute in the YARN Application Report), and that port is servicing RPC requests
-from authenticated callers.
-
-    exists t2 where:
-        t2 > t1 
-        and slider-instance-live(YARN(t2), YARN, instancename, user)
-        and slider-live-instances(YARN(t2))[0].rpcPort != 0
-        and rpc-connection(slider-live-instances(YARN(t2))[0], SliderClusterProtocol)
-
-A test for accepting cluster requests is querying the cluster status
-with `SliderClusterProtocol.getJSONClusterStatus()`. If this returns
-a parseable cluster description, the AM considers itself live.
-
-## Outcome: Application Instance operational state
-
-Once started, Slider enters the operational state of trying to keep the numbers
-of live role instances matching the numbers specified in the cluster specification.
-
-The AM must request a container for each desired instance of each role of the
-application, wait for those requests to be granted, and then instantiate
-the specific application roles on the allocated containers.
-
-Such a request is made on startup, whenever a failure occurs, or when the
-cluster size is dynamically updated.
-
-The AM releases containers when the cluster size is shrunk during a flex operation,
-or during teardown.
-
-### Steady state condition
-
-The steady state of a Slider cluster is that the number of live instances of a role,
-plus the number of requested instances, minus the number of instances for
-which release requests have been made, matches the desired number.
-
-If the internal state of the Slider AM is defined as `AppState`
-
-    forall r in clusterspec.roles :
-        r["yarn.component.instances"] ==
-          AppState.Roles[r].live + AppState.Roles[r].requested - AppState.Roles[r].released
-
-The `AppState` represents Slider's view of the external YARN system state, based on its
-history of notifications received from YARN. 
-
-It is indirectly observable from the cluster status, which an AM can be queried for
-
-
-    forall r in AM.getJSONClusterStatus().roles :
-        r["yarn.component.instances"] ==
-          r["role.actual.instances"] + r["role.requested.instances"] - r["role.releasing.instances"]
-
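-A sketch of this check in Python, treating the status document as a plain
-dict (field names as in the formula above; the dict shape is an assumption):
-
-    def in_steady_state(status):
-        # steady state: desired == actual + requested - releasing, per role
-        return all(
-            int(r["yarn.component.instances"]) ==
-            int(r["role.actual.instances"])
-            + int(r["role.requested.instances"])
-            - int(r["role.releasing.instances"])
-            for r in status["roles"].values())
-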
-Slider does not consider it an error if the number of actual instances remains below
-the desired value (i.e. outstanding requests are not being satisfied); this is
-an operational state of the cluster that Slider cannot address.
-
-### Cluster startup
-
-On a healthy dedicated test cluster, the time for the requests to be satisfied is
-a few tens of seconds at most: a failure to achieve this state is a sign of a problem.
-
-### Node or process failure
-
-After a container or node failure, a new container for a new instance of that role
-is requested.
-
-The failure count is incremented; it can be accessed via the `"role.failed.instances"`
-attribute of a role in the status report.
-
-The number of failures of a role is tracked and used by Slider to decide when to
-conclude that the role is failing consistently, at which point it fails the
-entire application.
-
-This has initially been implemented as a simple counter, with the cluster
-option: `"slider.container.failure.threshold"` defining that threshold.
-
-    let status = AM.getJSONClusterStatus()
-    forall r in status.roles :
-        r["role.failed.instances"] < status.options["slider.container.failure.threshold"]
-
-
-### Instance startup failure
-
-
-Startup failures are measured alongside general node failures.
-
-A container is deemed to have failed to start if either of the following conditions
-is met:
-
-1. The AM received an `onNodeManagerContainerStartFailed` event.
-
-1. The AM received an `onCompletedNode` event for a container that started less than
-a specified number of seconds earlier, a number given in the cluster option
-`"slider.container.failure.shortlife"`. 
-
-More sophisticated failure handling logic than is currently implemented may treat
-startup failures differently from ongoing failures, as they can usually be
-treated as a sign that the container is failing to launch the program reliably:
-either the generated command line is invalid, or the application is failing
-to run, or is exiting immediately or nearly immediately.
-
-## Action: Create
-
-Create is simply `build` + `thaw` in sequence; the postconditions from the first
-action are intended to match the preconditions of the second.
-
-## Action: Freeze
-
-    freeze instancename [--wait time] [--message message]
-
-The *freeze* action "freezes" the cluster: all its nodes running in the YARN
-cluster are stopped, leaving all the persistent state.
-
-The operation is intended to be idempotent: it is not an error if
-freeze is invoked on an already frozen cluster.
-
-#### Preconditions
-
-The cluster name is valid and it matches a known cluster 
-
-    if not valid-instance-name(instancename) : raise SliderException(EXIT_COMMAND_ARGUMENT_ERROR)
-    
-    if not is-file(HDFS, application-instance-path(HDFS, instancename)) :
-        raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-#### Postconditions
-
-If the cluster was running, an RPC call `stopCluster(message)` has been sent to it.
-
-If the `--wait` argument specified a wait time, then the command will block
-until the cluster has finished or the wait time is exceeded. 
-
-If the `--message` argument specified a message, it must appear in the
-YARN logs as the reason the cluster was frozen.
-
-
-The outcome should be the same:
-
-    not slider-instance-live(YARN', instancename)
-
-## Action: Flex
-
-Flex the cluster size: add or remove role instances. 
-
-    flex instancename
-        components: List[(String, int)]
-
-1. The JSON cluster specification in the filesystem is updated.
-1. If the cluster is running, it is given the new cluster specification,
-which will change the desired steady state of the application.
-
-#### Preconditions
-
-    if not is-file(HDFS, cluster-json-path(HDFS, instancename)) :
-        raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-#### Postconditions
-
-    let originalSpec = data(HDFS, cluster-json-path(HDFS, instancename))
-
-    let updatedSpec = originalSpec where:
-        forall (name, size) in components :
-            updatedSpec.roles[name]["yarn.component.instances"] == size
-    data(HDFS', cluster-json-path(HDFS', instancename)) == updatedSpec
-    rpc-connection(slider-live-instances(YARN(t2))[0], SliderClusterProtocol)
-    let flexed = rpc-connection(slider-live-instances(YARN(t2))[0], SliderClusterProtocol).flexCluster(updatedSpec)
-
-
-#### AM actions on flex
-
-    boolean SliderAppMaster.flexCluster(ClusterDescription updatedSpec)
-  
-If the cluster is in a state where flexing is possible (i.e. it is not in teardown),
-then `AppState` is updated with the new desired role counts. The operation
-returns once all requests to add or remove role instances have been queued,
-and is `True` iff the desired steady state of the cluster has been changed.
-
-#### Preconditions
-
-      well-defined-application-instance(HDFS, updatedSpec)
-  
-
-#### Postconditions
-
-    forall role in AppState.Roles.keys:
-        AppState'.Roles'[role].desiredCount = updatedSpec.roles[role]["yarn.component.instances"]
-    result = AppState' != AppState
-
-
-The flexing may change the desired steady state of the cluster, in which
-case the relevant requests will have been queued by the completion of the
-action. It is not possible to state whether or when the requests will be
-satisfied.
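-
-A sketch of this update in Python, modelling `AppState` as a plain dict of
-role name to desired instance count (an assumption made for brevity):
-
-    def flex_cluster(app_state, updated_spec):
-        # apply new desired counts; True iff the steady state changed
-        changed = False
-        for role, conf in updated_spec["roles"].items():
-            desired = int(conf["yarn.component.instances"])
-            if app_state.get(role) != desired:
-                app_state[role] = desired
-                changed = True
-        return changed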
-
-## Action: Destroy
-
-Idempotent operation to destroy a frozen cluster: it succeeds if the
-cluster has already been destroyed or is unknown, but not if it is
-actually running.
-
-#### Preconditions
-
-    if not valid-instance-name(instancename) : raise SliderException(EXIT_COMMAND_ARGUMENT_ERROR)
-
-    if slider-instance-live(YARN, instancename) : raise SliderException(EXIT_CLUSTER_IN_USE)
-
-
-#### Postconditions
-
-The cluster directory and all its children do not exist
-
-    not is-dir(HDFS', application-instance-path(HDFS', instancename))
-  
-
-## Action: Status
-
-    status instancename [--out outfile]
-
-#### Preconditions
-
-    if not slider-instance-live(YARN, instancename) : raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-#### Postconditions
-
-The status of the application has been successfully queried and printed out:
-
-    let status = slider-live-instances(YARN).rpcPort.getJSONClusterStatus()
-    
-if the `outfile` value is not defined then the status appears as part of stdout
-    
-    status in STDOUT'
-
-otherwise, the outfile exists in the local filesystem and contains the status
-
-    (outfile != "") ==> data(LocalFS', outfile) == status
-
-## Action: Exists
-
-This probes for a named cluster being defined or actually being in the running
-state.
-
-When probing the running state, it is essentially the status
-operation with only the exit code returned.
-
-#### Preconditions
-
-
-    if not is-file(HDFS, application-instance-path(HDFS, instancename)) :
-        raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-#### Postconditions
-
-The operation succeeds if the cluster is running and the RPC call returns the cluster
-status.
-
-    if live and not slider-instance-live(YARN, instancename):
-      retcode = -1
-    else:  
-      retcode = 0
- 
-## Action: getConf
-
-This returns the live client configuration of the cluster: the
-site-xml file.
-
-    getconf --format (xml|properties) --out [outfile]
-
-*We may want to think hard about whether this is needed*
-
-#### Preconditions
-
-    if not slider-instance-live(YARN, instancename) : raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-
-#### Postconditions
-
-The operation succeeds if the cluster status can be retrieved and saved to
-the named file or printed to stdout, in the format chosen
-
-    let status = slider-live-instances(YARN).rpcPort.getJSONClusterStatus()
-    let conf = status.clientProperties
-    if format == "xml" : 
-        let body = status.clientProperties.asXmlDocument()
-    else:
-        let body = status.clientProperties.asProperties()
-        
-    if outfile != "" :
-        data(LocalFS', outfile) == body
-    else
-        body in STDOUT'
-
-## Action: list
-
-    list [instancename]
-
-Lists all clusters of a user, or only the one given
-
-#### Preconditions
-
-If an instancename is specified, it must be in YARN's list of active or completed applications
-of that user:
-
-    if instancename != "" and [] == yarn-application-instances(YARN, instancename, user) 
-        raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-
-#### Postconditions
-
-If no instancename was given, all Slider applications of that user are listed;
-otherwise, only the one running (or one of the finished ones).
-  
-    if instancename == "" :
-        forall a in yarn-application-instances(YARN, user) :
-            a.toString() in STDOUT'
-    else
-       let e = yarn-application-instances(YARN, instancename, user) 
-       e.toString() in STDOUT'
-
-## Action: killcontainer
-
-This is an operation added for testing. It will kill a container in the cluster
-*without flexing the cluster size*. As a result, the cluster will detect the
-failure and attempt to recover from it by instantiating a new instance
-of the affected component.
-
-    killcontainer cluster --id container-id
-    
-#### Preconditions
-
-    if not slider-instance-live(YARN, instancename) : raise SliderException(EXIT_UNKNOWN_INSTANCE)
-
-    exists c in slider-app-containers(YARN, instancename, user) where c.id == container-id 
-    
-    let status = AM.getJSONClusterStatus()
-    exists role in status.instances where container-id in status.instances[role].values
-
-
-#### Postconditions
-
-The container is not in the list of containers in the cluster
-
-    not exists c in containers(YARN') where c.id == container-id 
-
-And implicitly, not in the running containers of that application
-
-    not exists c in slider-app-containers(YARN', instancename, user) where c.id == container-id 
-
-At some time `t1 > t`, the status of the application (`AM'`) will be updated to reflect
-that YARN has notified the AM of the loss of the container
-
-    let status' = AM'.getJSONClusterStatus()
-    len(status'.instances[role]) < len(status.instances[role])
-    status'.roles[role]["role.failed.instances"] == status.roles[role]["role.failed.instances"] + 1
-
-
-At some time `t2 > t1` in the future, the size of the containers of the application
-in the YARN cluster `YARN''` will be as before 
-
-    let status'' = AM''.getJSONClusterStatus()
-    len(status''.instances[r]) == len(status.instances[r])
diff --git a/src/site/markdown/specification/index.md b/src/site/markdown/specification/index.md
deleted file mode 100644
index d732959..0000000
--- a/src/site/markdown/specification/index.md
+++ /dev/null
@@ -1,41 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Specification of Slider behaviour
-
-This is a "more rigorous" definition of the behavior of Slider in terms
-of its state and its command-line operations, defining a 'formal' model
-of HDFS, YARN and Slider's internal state, then describing the operations
-that can take place in terms of their preconditions and postconditions.
-
-This is to show what tests we can create to verify that an action
-with a valid set of preconditions results in an outcome whose postconditions
-can be verified. It also makes more apparent what conditions should be
-expected to result in failures, as well as what the failure codes should be.
-
-Specifying the behavior has also helped identify areas where there was ambiguity,
-where clarification and more tests were needed.
- 
-The specification depends on ongoing work in [HADOOP-9361](https://issues.apache.org/jira/browse/HADOOP-9361)
-to define the Hadoop Filesystem APIs. This specification uses [the same notation](https://github.com/steveloughran/hadoop-trunk/blob/stevel/HADOOP-9361-filesystem-contract/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/notation.md).
-
- 
-1. [Model: YARN And Slider](slider-model.html)
-1. [CLI actions](cli-actions.html)
-
-Exceptions and operations may specify exit codes; these are listed in
-[Client Exit Codes](../exitcodes.html).
diff --git a/src/site/markdown/specification/slider-model.md b/src/site/markdown/specification/slider-model.md
deleted file mode 100644
index 720626d..0000000
--- a/src/site/markdown/specification/slider-model.md
+++ /dev/null
@@ -1,286 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  
-# Formal Slider Model
-
-This is the model of Slider and YARN for the rest of the specification.
-
-## File System
-
-A File System `HDFS` represents a Hadoop FileSystem: either HDFS itself or another File
-System which spans the cluster. There are also other filesystems that
-can act as sources of data that is then copied into HDFS. These will be marked
-as `FS` or with the generic `FileSystem` type.
-
-
-There's ongoing work in [HADOOP-9361](https://issues.apache.org/jira/browse/HADOOP-9361)
-to define the Hadoop Filesystem APIs using the same notation as here,
-the latest version being available on [github](https://github.com/steveloughran/hadoop-trunk/tree/stevel/HADOOP-9361-filesystem-contract/hadoop-common-project/hadoop-common/src/site/markdown/filesystem).
-Two key references are:
-
- 1. [The notation reused in the Slider specifications](https://github.com/steveloughran/hadoop-trunk/blob/stevel/HADOOP-9361-filesystem-contract/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/notation.md)
- 1. [The model of the filesystem](https://github.com/steveloughran/hadoop-trunk/blob/stevel/HADOOP-9361-filesystem-contract/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/model.md)
- 
- The model and its predicates and invariants will be used in these specifications.
- 
-## YARN
-
-From the perspective of a YARN application, the YARN runtime is a state, `YARN`,
-comprised of `(Apps, Queues, Nodes)`
-
-    Apps: Map[AppId, ApplicationReport]
-    
-An application has a name, an application report and a list of outstanding requests
-    
-    App: (Name, report: ApplicationReport, Requests:List[AmRequest])
-
-An application report contains a mixture of static and dynamic state of the application
-and the AM.
-
-    ApplicationReport: (AppId, Type, User, YarnApplicationState, AmContainer, RpcPort, TrackingURL)
-
-YARN applications have a number of states. These are ordered such that if
-`state.ordinal() > RUNNING.ordinal()` then the application has entered an exit state.
- 
-    YarnApplicationState : [NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING, FINISHED, FAILED, KILLED ]
-  
-AMs can request containers to be added or released    
-
-    AmRequest = { add-container(priority, requirements), release(containerId)}
-
-Job queues are named queues of job requests; there is always a queue called `"default"`
-
-    Queues: Map[String:Queue]
-        Queue:  List[Requests]
-        Request = {
-          launch(app-name, app-type, requirements, context)
-        }
-        Context: (localized-resources: Map[String,URL], command)
-
-
-This doesn't completely model the cluster from the AM perspective: there's no
-notion of node operations (launching code in a container) or events coming from YARN.
-
-The `Nodes` structure models the nodes in a cluster
-
-    Nodes:  Map[nodeID,(name, containers:List[Container])] 
-
-A container contains some state
-
-    Container: (containerId, appId, context)
-
-The containers in a cluster are the aggregate set of all containers across
-all nodes
-
-    def containers(YARN) =
-        [c for n in keys(YARN.Nodes) for c in YARN.Nodes[n].Containers ]
-
-
-The containers of an application are all containers that are considered owned by it,
-
-    def app-containers(YARN, appId: AppId) =
-        [c in containers(YARN) where c.appId == appId ]
-
-### Operations & predicates used in the specifications
-
-
-    def applications(YARN, type) = 
-        [ app.report for app in YARN.Apps.values where app.report.Type == type]
-    
-    def user-applications(YARN, type, user) =
-        [a in applications(YARN, type) where: a.User == user]
-    
-
-## UserGroupInformation
-
-Applications are launched and executed on host computers, either client machines
-or nodes in the cluster; these have their own state which may need modeling
-
-    HostState: Map[String, String]
-
-A key part of the host state is actually the identity of the current user,
-which is used to define the location of the persistent state of the cluster -including
-its data, and the identity under which a deployed container executes.
-
-In a secure cluster, this identity is accompanied by Kerberos tokens that grant the caller
-access to the filesystem and to parts of YARN itself.
-
-This specification does not currently explicitly model the username and credentials.
-If it did they would be used throughout the specification to bind to a YARN or HDFS instance.
-
-`UserGroupInformation.getCurrentUser(): UserGroupInformation`
-
-Returns the current user information. This information is immutable and fixed for the duration of the process.
-
-
-
-## Slider Model
-
-### Cluster name
-
-A valid cluster name is a name of length > 1 which follows the internet hostname
-scheme: a letter, followed by letters, digits or hyphens
-
-    def valid-cluster-name(c) =
-        len(c) > 1
-        and c[0] in ['a'..'z']
-        and forall i in [1..len(c)-1] : c[i] in (['a'..'z'] + ['-'] + ['0'..'9'])
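-
-Rendered in Python for illustration (the regex is an assumption intended to
-match the predicate above):
-
-    import re
-
-    def valid_cluster_name(name):
-        # first char a lowercase letter; at least one more letter/digit/hyphen
-        return re.match(r"^[a-z][a-z0-9-]+$", name) is not None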
-
-### Persistent Cluster State
-
-A Slider cluster's persistent state is stored in a path
-
-    def cluster-path(FS, clustername) = user-home(FS) + ["clusters", clustername]
-    def cluster-json-path(FS, clustername) = cluster-path(FS, clustername) + ["cluster.json"]
-    def original-conf-path(FS, clustername) = cluster-path(FS, clustername) + ["original"] 
-    def generated-conf-path(FS, clustername) = cluster-path(FS, clustername) + ["generated"]
-    def data-path(FS, clustername) = cluster-path(FS, clustername) + ["data"]
-
-When a cluster is built/created the specified original configuration directory
-is copied to `original-conf-path(FS, clustername)`; this is patched for the
-specific instance bindings and saved into `generated-conf-path(FS, clustername)`.
-
-A cluster *exists* if all of these paths are found:
-
-    def cluster-exists(FS, clustername) =
-        is-dir(FS, cluster-path(FS, clustername))
-        and is-file(FS, cluster-json-path(FS, clustername))
-        and is-dir(FS, original-conf-path(FS, clustername))
-        and is-dir(FS, generated-conf-path(FS, clustername))
-
-A cluster is considered `running` if there is a YARN application of type "slider" belonging to the current user in one of the states
-`{NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING}`.
-
-    def final-yarn-states = {FINISHED, FAILED, KILLED }
-
-    def slider-app-instances(YARN, clustername, user) =
-        [a in user-applications(YARN, "slider", user) where:
-             a.Name == clustername]
-
-    def slider-app-running-instances(YARN, clustername, user) =
-        [a in slider-app-instances(YARN, clustername, user) where:
-             not a.YarnApplicationState in final-yarn-states]
-
-    def slider-app-running(YARN, clustername, user) =
-        [] != slider-app-running-instances(YARN, clustername, user)
-
-    def slider-app-live-instances(YARN, clustername, user) =
-        [a in slider-app-instances(YARN, clustername, user) where:
-             a.YarnApplicationState == RUNNING]
-
-    def slider-app-live(YARN, clustername, user) =
-        [] != slider-app-live-instances(YARN, clustername, user)
-
-### Invariant: there must never be more than one running instance of a named Slider cluster
-
-
-There must never be more than one instance of the same Slider cluster running:
-
-    forall a in user-applications(YARN, "slider", user):
-        len(slider-app-running-instances(YARN, a.Name, user)) <= 1
-
-There may be multiple instances in a finished state, and one running instance alongside
-multiple finished instances; the applications
-that work with Slider MUST select a running cluster ahead of any terminated clusters.
-
-### Containers of an application 
-
-The containers of a Slider application are the set of containers of that application
-
-    def slider-app-containers(YARN, clustername, user) =
-        let appid = slider-app-running-instances(YARN, clustername, user)[0]
-        app-containers(YARN, appid)
-
-
-
-
-### RPC Access to a slider cluster
-
-
-An application is accepting RPC requests for a given protocol if there is a port binding
-defined and it is possible to authenticate a connection using the specified protocol
-
-    def rpc-connection(appReport, protocol) =
-        appReport.host != null
-        and appReport.rpcPort != 0
-        and RPC.getProtocolProxy(appReport.host, appReport.rpcPort, protocol)
-
-Being able to open an RPC port is the strongest definition of liveness possible
-to make: if the AM responds to RPC operations, it is doing useful work.
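-
-A crude stand-in for this probe in Python, checking only that the port
-accepts connections (the real predicate requires an authenticated
-SliderClusterProtocol proxy):
-
-    import socket
-
-    def rpc_port_open(host, port, timeout=5.0):
-        if not host or port == 0:
-            return False
-        try:
-            with socket.create_connection((host, port), timeout):
-                return True
-        except OSError:
-            return False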
-
-### Valid Cluster Description
-
-The `cluster.json` file of a cluster configures Slider to deploy the application. 
-
-#### well-defined-cluster(cluster-description)
-
-A Cluster Description is well-defined if it is valid JSON and required properties are present
-
-**OBSOLETE**
-
-
-Irrespective of specific details for deploying the Slider AM or any provider-specific role instances,
-a Cluster Description defined in a `cluster.json` file at the path `cluster-json-path(FS, clustername)`
-is well-defined if
-
-1. It is parseable by the Jackson JSON parser.
-1. Root elements required of a Slider cluster specification must be defined, and, where appropriate, non-empty
-1. It contains the extensible elements required of a Slider cluster specification. For example, `options` and `roles`
-1. The types of the extensible elements match those expected by Slider.
-1. The `version` element matches a supported version
-1. Exactly one of `options/cluster.application.home` and `options/cluster.application.image.path` must exist.
-1. Any cluster options that are required to be integers must be integers
-
-This specification is very vague here to avoid duplication: the cluster description structure is currently implicitly defined in 
-`org.apache.slider.api.ClusterDescription` 
-
-Currently Slider ignores unknown elements during parsing. This may be changed.
-
-The test for this state does not refer to the cluster filesystem
-
-#### deployable-cluster(FS, cluster-description)
-
-A Cluster Description defines a deployable cluster if it is a well-defined cluster description and its contents contain valid information to deploy a cluster.
-
-This extends the well-defined requirements with the following:
-
-* The entry `name` must match a supported provider
-* Any elements that name the cluster match the cluster name as defined by the path to the cluster:
-
-        originConfigurationPath == original-conf-path(FS, clustername)
-        generatedConfigurationPath == generated-conf-path(FS, clustername)
-        dataPath == data-path(FS, clustername)
-
-* The paths defined in `originConfigurationPath` , `generatedConfigurationPath` and `dataPath` must all exist.
-* `options/zookeeper.path` must be defined and refer to a path in the ZK cluster
-defined by (`options/zookeeper.hosts`, `options/zookeeper.port`) to which the user has write access (required by HBase and Accumulo)
-* If `options/cluster.application.image.path` is defined, it must exist and be readable by the user.
-* It must declare a type that maps to a provider entry in the Slider client's XML configuration:
-
-        len(clusterspec["type"]) > 0 
-        clientconfig["slider.provider."+ clusterspec["type"]] != null
-
-* That entry must map to a class on the classpath which can be instantiated
-and cast to `SliderProviderFactory`.
-
-        let classname = clientconfig["slider.provider."+ clusterspec["type"]] 
-        (Class.forName(classname).newInstance()) instanceof SliderProviderFactory 
-
-#### valid-for-provider(cluster-description, provider)
-
-A provider considers a specification valid if its own validation logic is satisfied. This normally
-consists of rules about the number of instances of different roles; it may include other logic.
-
diff --git a/src/site/markdown/troubleshooting.md b/src/site/markdown/troubleshooting.md
deleted file mode 100644
index 17f3718..0000000
--- a/src/site/markdown/troubleshooting.md
+++ /dev/null
@@ -1,154 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Troubleshooting
-
-Slider can be tricky to start using, because it combines the need to set
-up a YARN application with the need to have an HBase configuration
-that works.
-
-
-## Common problems
-
-### Classpath for the Slider AM is wrong
-
-The Slider Application Master (the "Slider AM") builds up its classpath from
-the JARs it has locally and the JARs pre-installed on the cluster's classpath,
-as set by `yarn.application.classpath`.
-This often surfaces in an exception that can be summarized as
-"hadoop-common.jar is not on the classpath":
-
-    Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/util/ExitUtil$ExitException
-    Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.util.ExitUtil$ExitException
-      at java.net.URLClassLoader$1.run(URLClassLoader.java:202)
-      at java.security.AccessController.doPrivileged(Native Method)
-      at java.net.URLClassLoader.findClass(URLClassLoader.java:190)
-      at java.lang.ClassLoader.loadClass(ClassLoader.java:306)
-      at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
-      at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
-    Could not find the main class: org.apache.hadoop.yarn.service.launcher.ServiceLauncher.  Program will exit.
-
-
-For Ambari-managed deployments, we recommend the following setting in
-`yarn-site.xml`:
-
-      <property>
-        <name>yarn.application.classpath</name>
-        <value>
-          /etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
-        </value>
-      </property>
-
-The cluster's `yarn-site.xml` file will contain the value in effect.
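-
-One way to read the value actually in use is the resource manager's
-configuration servlet; a sketch, assuming the RM web UI is on its default
-port 8088 (replace `resourcemanager` with your RM host):
-
-    curl http://resourcemanager:8088/conf | grep -A1 yarn.application.classpath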
-
-### Application Instantiation fails: "TriggerClusterTeardownException: Unstable Cluster"
-
-Slider gives up if it cannot keep enough instances of a role running; more
-precisely, if they keep failing.
-
-If this happens on cluster startup, it means that the application is not working:
-
-     org.apache.slider.core.exceptions.TriggerClusterTeardownException: Unstable Cluster: 
-     - failed with role worker failing 4 times (4 in startup); threshold is 2
-     - last failure: Failure container_1386872971874_0001_01_000006 on host 192.168.1.86,
-       see http://hor12n22.gq1.ygridcore.net:19888/jobhistory/logs/192.168.1.86:45454/container_1386872971874_0001_01_000006/ctx/yarn
-
-This message warns that a role (here, worker) is failing to start and has
-failed more often than the configured failure threshold allows. What it doesn't
-say is why the role failed, because that is not something the AM knows: the
-cause is hidden in the logs on the containers that failed.
-
-The final bit of the exception message can help you track down the problem,
-as it points you to the logs.
-
-In the example above the failure was in `container_1386872971874_0001_01_000006`
-on the host `192.168.1.86`. If you go to the node manager on that machine (the YARN
-RM web page will let you do this) and look for that container,
-you may be able to grab the logs from it.
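-
-If YARN log aggregation is enabled, the `yarn logs` command can also fetch them
-once the application has finished; a sketch, using the application ID derived
-from the container ID in the example above:
-
-    yarn logs -applicationId application_1386872971874_0001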
-
-A quicker way is to browse to the URL on the last line of the message.
-Note: that URL depends on `yarn.log.server.url` being properly configured.
-
-It is from those logs that the cause of the problem can be determined: they are
-the actual output of the actual application which Slider is trying to deploy.
-
-
-
-### Not all the containers start, but whenever you kill one, another comes up
-
-This is often caused by YARN not having enough capacity in the cluster to start
-up the requested set of containers. The AM has submitted a list of container
-requests to YARN, but only when an existing container is released or killed
-is one of the outstanding requests granted.
-
-Fix #1: Ask for smaller containers
-
-Edit the `yarn.memory` option for the roles to be smaller: for example, set it to
-64 for a smaller YARN allocation. *This does not affect the actual heap size of
-the application component deployed.*
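-
-A sketch of how this looks, assuming the `roles` element of the cluster
-description described in the specification (the value is purely illustrative):
-
-    "roles": {
-      "worker": {
-        "yarn.memory": "64"
-      }
-    }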
-
-Fix #2: Tell YARN to be less strict about memory consumption
-
-Here are the properties in `yarn-site.xml` which we set to allow YARN 
-to schedule more role instances than it nominally has room for.
-
-    <property>
-      <name>yarn.scheduler.minimum-allocation-mb</name>
-      <value>1</value>
-    </property>
-    <property>
-      <description>Whether physical memory limits will be enforced for
-        containers.
-      </description>
-      <name>yarn.nodemanager.pmem-check-enabled</name>
-      <value>false</value>
-    </property>
-    <!-- we really don't want checking here-->
-    <property>
-      <name>yarn.nodemanager.vmem-check-enabled</name>
-      <value>false</value>
-    </property>
-  
-If you create too many instances, your hosts will start swapping and
-performance will collapse; we do not recommend using this in production.
-
-
-### Configuring YARN for better debugging
-
-One configuration to aid debugging is to tell the node managers to
-keep data for a short period after containers finish:
-
-    <!-- 10 minutes after a failure to see what is left in the directory-->
-    <property>
-      <name>yarn.nodemanager.delete.debug-delay-sec</name>
-      <value>600</value>
-    </property>
-
-You can then retrieve the logs either through the web UI, or by connecting to the
-server (usually over `ssh`) and copying them from the log directory.
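-
-For example (the host and path are purely illustrative; the actual location is
-set by `yarn.nodemanager.log-dirs`):
-
-    ssh node1 ls /var/log/hadoop/yarn/userlogs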
-
-
-We also recommend making sure that YARN eventually kills container processes:
-
-    <!--time before the process gets a -9 -->
-    <property>
-      <name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name>
-      <value>30000</value>
-    </property>
-
- 
diff --git a/src/site/resources/hoya_am_architecture.png b/src/site/resources/hoya_am_architecture.png
deleted file mode 100644
index 191a8db..0000000
--- a/src/site/resources/hoya_am_architecture.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/app_config_folders_01.png b/src/site/resources/images/app_config_folders_01.png
deleted file mode 100644
index 4e78b63..0000000
--- a/src/site/resources/images/app_config_folders_01.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/app_package_sample_04.png b/src/site/resources/images/app_package_sample_04.png
deleted file mode 100644
index 170256b..0000000
--- a/src/site/resources/images/app_package_sample_04.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/image_0.png b/src/site/resources/images/image_0.png
deleted file mode 100644
index e62a3e7..0000000
--- a/src/site/resources/images/image_0.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/image_1.png b/src/site/resources/images/image_1.png
deleted file mode 100644
index d0888ac..0000000
--- a/src/site/resources/images/image_1.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/managed_client.png b/src/site/resources/images/managed_client.png
deleted file mode 100644
index 9c094b1..0000000
--- a/src/site/resources/images/managed_client.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/slider-container.png b/src/site/resources/images/slider-container.png
deleted file mode 100644
index 2e02833..0000000
--- a/src/site/resources/images/slider-container.png
+++ /dev/null
Binary files differ
diff --git a/src/site/resources/images/unmanaged_client.png b/src/site/resources/images/unmanaged_client.png
deleted file mode 100644
index 739d56d..0000000
--- a/src/site/resources/images/unmanaged_client.png
+++ /dev/null
Binary files differ
diff --git a/src/site/site.xml b/src/site/site.xml
deleted file mode 100644
index d7f574d..0000000
--- a/src/site/site.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project name="Slider ${project.version}">
-<!--
-
-  <skin>
-    <groupId>org.apache.maven.skins</groupId>
-    <artifactId>maven-stylus-skin</artifactId>
-    <version>1.2</version>
-  </skin>
-
-
-
-  <skin>
-    <groupId>org.apache.maven.skins</groupId>
-    <artifactId>maven-fluido-skin</artifactId>
-    <version>1.3.0</version>
-  </skin>
-
--->
-  <skin>
-    <groupId>org.apache.maven.skins</groupId>
-    <artifactId>maven-application-skin</artifactId>
-    <version>1.0</version>
-  </skin>
-
-  <custom>
-    <fluidoSkin>
-      <topBarEnabled>true</topBarEnabled>
-      <sideBarEnabled>false</sideBarEnabled>
-    </fluidoSkin>
-  </custom>
-
-  <version position="right"/>
-
-  <body>
-    <menu ref="reports"/>
-
-    <menu name="Documents">
-      <item name="Getting Started" href="/getting_started.html"/>
-      <item name="manpage" href="/manpage.html"/>
-      <item name="Troubleshooting" href="/troubleshooting.html"/>
-      <item name="Architecture" href="/architecture/index.html"/>
-      <item name="Developing" href="/developing/index.html"/>
-      <item name="Exitcodes" href="/exitcodes.html"/>
-    </menu>
-  </body>
-</project>
diff --git a/src/test/clusters/c6401/slider/log4j.properties b/src/test/clusters/c6401/slider/log4j.properties
new file mode 100644
index 0000000..d814f14
--- /dev/null
+++ b/src/test/clusters/c6401/slider/log4j.properties
@@ -0,0 +1,53 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#   
+#    http://www.apache.org/licenses/LICENSE-2.0
+#   
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License. See accompanying LICENSE file.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+
+# this layout skips the expensive stack-trace operations needed for line numbers and method names
+#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+# the debug layout below is much more expensive, as it includes file, method and line number
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+
+log4j.logger.org.apache.slider=DEBUG
+
+# uncomment to debug service lifecycle issues
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+
+# uncomment for YARN operations
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+# uncomment this to debug security problems
+#log4j.logger.org.apache.hadoop.security=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+
+
diff --git a/src/test/clusters/c6401/slider/slider-client.xml b/src/test/clusters/c6401/slider/slider-client.xml
new file mode 100644
index 0000000..526881b
--- /dev/null
+++ b/src/test/clusters/c6401/slider/slider-client.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!--
+  Properties set here are picked up in the client.
+  They are not passed to the AM
+-->
+<configuration>
+  
+  <property>
+    <name>hostname</name>
+    <value>c6401</value>
+  </property>
+  
+  <property>
+    <name>slider.client.resource.origin</name>
+    <value>configs/${hostname}/slider</value>
+    <description>This is just for diagnostics</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>${hostname}:8050</value>
+  </property>
+  
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://${hostname}.ambari.apache.org:8020</value>
+  </property>
+
+  <property>
+    <name>slider.zookeeper.quorum</name>
+    <value>${hostname}:2181</value>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>
+      /etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
+    </value>
+  </property>
+
+  <property>
+    <name>slider.test.agent.enabled</name>
+    <description>Flag to enable/disable Agent tests</description>
+    <value>true</value>
+  </property>
+
+
+  <property>
+    <name>slider.test.am.restart.time</name>
+    <description>Time in millis to await an AM restart</description>
+    <value>60000</value>
+  </property>
+
+
+</configuration>
diff --git a/src/test/clusters/morzine/slider/log4j.properties b/src/test/clusters/morzine/slider/log4j.properties
new file mode 100644
index 0000000..d814f14
--- /dev/null
+++ b/src/test/clusters/morzine/slider/log4j.properties
@@ -0,0 +1,53 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#   
+#    http://www.apache.org/licenses/LICENSE-2.0
+#   
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License. See accompanying LICENSE file.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+
+# this layout skips the expensive stack-trace operations needed for line numbers and method names
+#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+# the debug layout below is much more expensive, as it includes file, method and line number
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+
+log4j.logger.org.apache.slider=DEBUG
+
+# uncomment to debug service lifecycle issues
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+
+# uncomment for YARN operations
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+# uncomment this to debug security problems
+#log4j.logger.org.apache.hadoop.security=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+
+
diff --git a/src/test/clusters/morzine/slider/slider-client.xml b/src/test/clusters/morzine/slider/slider-client.xml
new file mode 100644
index 0000000..4d7ab41
--- /dev/null
+++ b/src/test/clusters/morzine/slider/slider-client.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!--
+  Properties set here are picked up in the client.
+  They are not passed to the AM
+-->
+<configuration>
+
+  <property>
+    <name>hostname</name>
+    <value>morzine</value>
+  </property>
+
+  <property>
+    <name>slider.client.resource.origin</name>
+    <value>configs/${hostname}/slider</value>
+    <description>This is just for diagnostics</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>${hostname}:8032</value>
+  </property>
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://${hostname}:8020</value>
+  </property>
+
+  <property>
+    <name>slider.zookeeper.quorum</name>
+    <value>${hostname}:2181</value>
+  </property>
+
+  <!--
+  This is a Windows classpath, as picked up from
+  http://morzine:8088/conf
+  -->
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>
+      %HADOOP_CONF_DIR%,%HADOOP_COMMON_HOME%/share/hadoop/common/*,%HADOOP_COMMON_HOME%/share/hadoop/common/lib/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/lib/*
+    </value>
+  </property>
+
+  <property>
+    <name>slider.test.agent.enabled</name>
+    <description>Flag to enable/disable Agent tests</description>
+    <value>true</value>
+  </property>
+
+
+  <property>
+    <name>slider.test.am.restart.time</name>
+    <description>Time in millis to await an AM restart</description>
+    <value>60000</value>
+  </property>
+
+
+</configuration>
diff --git a/src/test/clusters/offline/slider/log4j.properties b/src/test/clusters/offline/slider/log4j.properties
index 6211771..d814f14 100644
--- a/src/test/clusters/offline/slider/log4j.properties
+++ b/src/test/clusters/offline/slider/log4j.properties
@@ -11,36 +11,6 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License. See accompanying LICENSE file.
 #
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=INFO,stdout
diff --git a/src/test/clusters/offline/slider/slider-client.xml b/src/test/clusters/offline/slider/slider-client.xml
index a49dd29..8385086 100644
--- a/src/test/clusters/offline/slider/slider-client.xml
+++ b/src/test/clusters/offline/slider/slider-client.xml
@@ -41,12 +41,7 @@
     <name>slider.funtest.enabled</name>
     <value>false</value>
   </property>
-  
-  <property>
-    <name>slider.security.enabled</name>
-    <value>false</value>
-  </property>
-  
+
   <property>
     <name>yarn.application.classpath</name>
     <value>
diff --git a/src/test/clusters/remote/slider/log4j.properties b/src/test/clusters/remote/slider/log4j.properties
index 0f408f3..f672472 100644
--- a/src/test/clusters/remote/slider/log4j.properties
+++ b/src/test/clusters/remote/slider/log4j.properties
@@ -11,36 +11,6 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License. See accompanying LICENSE file.
 #
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=INFO,stdout
diff --git a/src/test/clusters/remote/slider/slider-client.xml b/src/test/clusters/remote/slider/slider-client.xml
index 5bd2edb..5ed4d10 100644
--- a/src/test/clusters/remote/slider/slider-client.xml
+++ b/src/test/clusters/remote/slider/slider-client.xml
@@ -50,11 +50,6 @@
   </property>
 
   <property>
-    <name>slider.security.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
     <name>slider.test.agent.enabled</name>
     <value>true</value>
   </property>
diff --git a/src/test/clusters/sandbox/slider/log4j.properties b/src/test/clusters/sandbox/slider/log4j.properties
index 6211771..d814f14 100644
--- a/src/test/clusters/sandbox/slider/log4j.properties
+++ b/src/test/clusters/sandbox/slider/log4j.properties
@@ -11,36 +11,6 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License. See accompanying LICENSE file.
 #
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=INFO,stdout
diff --git a/src/test/clusters/sandbox/slider/slider-client.xml b/src/test/clusters/sandbox/slider/slider-client.xml
index f15f71d..30937ec 100644
--- a/src/test/clusters/sandbox/slider/slider-client.xml
+++ b/src/test/clusters/sandbox/slider/slider-client.xml
@@ -38,11 +38,6 @@
   </property>
 
   <property>
-    <name>slider.security.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
     <name>slider.zookeeper.quorum</name>
     <value>sandbox:2181</value>
   </property>
diff --git a/src/test/clusters/ubuntu-secure/operations.md b/src/test/clusters/ubuntu-secure/operations.md
index cb14dfe..d894038 100644
--- a/src/test/clusters/ubuntu-secure/operations.md
+++ b/src/test/clusters/ubuntu-secure/operations.md
@@ -56,7 +56,7 @@
       --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
          --role workers 4\
           --zkhosts ubuntu --zkport 2121 \
-          -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM \
+          -S java.security.krb5.realm=COTHAM \
           -S java.security.krb5.kdc=ubuntu \
           --image hdfs://ubuntu:9090/hbase.tar \
           --appconf file:////Users/slider/Hadoop/configs/master/hbase \
@@ -70,7 +70,7 @@
     bin/slider create cl1 \
           --provider hbase \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM \
+    -S java.security.krb5.realm=COTHAM \
     -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
             --role worker 1\
@@ -87,7 +87,7 @@
     bin/slider create cl1 \
           --provider hbase \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM \
+    -S java.security.krb5.realm=COTHAM \
     -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
@@ -102,14 +102,14 @@
         
     bin/slider status clu1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM \
+    -S java.security.krb5.realm=COTHAM \
     -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
            
     bin/slider list \
     --manager ubuntu:8032 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+    -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
       -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM
                
@@ -122,7 +122,7 @@
           --provider hbase \
     --zkhosts ubuntu --zkport 2121 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+    -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
     -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
     -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
     --image hdfs://ubuntu:9090/hbase.tar \
@@ -138,7 +138,7 @@
       --provider hbase \
       --zkhosts ubuntu  --zkport 2121 \
       --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-      -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+      -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
       -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
       -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
       --image hdfs://ubuntu:9090/hbase.tar \
@@ -151,7 +151,7 @@
           --provider hbase \
          --zkhosts ubuntu  --zkport 2121 \
          --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-         -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+         -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
          -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
          -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
          --image hdfs://ubuntu:9090/hbase.tar \
@@ -166,7 +166,7 @@
       --zkhosts ubuntu \
       --zkport 2121 \
       --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-      -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+      -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
       -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
       -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
       --image hdfs://ubuntu:9090/hbase.tar \
@@ -176,23 +176,23 @@
                
     bin/slider  status cl1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-     -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
+     -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
      
                
-    bin/slider  status cl1 -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu 
+    bin/slider  status cl1 -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu 
     
     
     bin/slider  status cl1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true \
+    \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
      
    bin/slider  status cluster3 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-     -D slider.security.enabled=true \
+     \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
      
@@ -200,28 +200,28 @@
                
     bin/slider  thaw cl1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true \
+    \
      -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
                    
     bin/slider  freeze cl1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true \
+    \
     -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM   
                       
     bin/slider  freeze cluster3 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true \
+    \
     -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
     
     bin/slider  destroy cl1 \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true \
+    \
     -S java.security.krb5.realm=COTHAM -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM \
@@ -231,7 +231,7 @@
          
     bin/slider  emergency-force-kill all \
     --manager ubuntu:8032 --filesystem hdfs://ubuntu:9090 \
-    -D slider.security.enabled=true -S java.security.krb5.realm=COTHAM \
+    -S java.security.krb5.realm=COTHAM \
      -S java.security.krb5.kdc=ubuntu \
      -D yarn.resourcemanager.principal=yarn/ubuntu@COTHAM \
      -D dfs.namenode.kerberos.principal=hdfs/ubuntu@COTHAM 
diff --git a/src/test/clusters/ubuntu-secure/slider/log4j.properties b/src/test/clusters/ubuntu-secure/slider/log4j.properties
index c99d4a3..73516f6 100644
--- a/src/test/clusters/ubuntu-secure/slider/log4j.properties
+++ b/src/test/clusters/ubuntu-secure/slider/log4j.properties
@@ -1,18 +1,4 @@
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#   
-#    http://www.apache.org/licenses/LICENSE-2.0
-#   
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License. See accompanying LICENSE file.
-#
-
-#
 # Licensed to the Apache Software Foundation (ASF) under one
 #  or more contributor license agreements.  See the NOTICE file
 #  distributed with this work for additional information
@@ -29,18 +15,6 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=INFO,stdout